/*
 * Initialize the PCI core. It's caller's responsibility to make sure that this is done
 * only once.
 *
 * sih  - silicon interconnect handle (must be attached over a PCI bus)
 * osh  - OS abstraction handle used for allocation and config access
 * regs - mapped register base of the PCI/PCIe core
 *
 * Returns an opaque pcicore_info_t handle (caller owns it), or NULL on
 * allocation failure or when the PCIe capability cannot be located.
 */
void *
pcicore_init(si_t *sih, osl_t *osh, void *regs)
{
	pcicore_info_t *pi;

	ASSERT(sih->bustype == PCI_BUS);

	/* alloc pcicore_info_t */
	if ((pi = MALLOC(osh, sizeof(pcicore_info_t))) == NULL) {
		PCI_ERROR(("pci_attach: malloc failed! malloced %d bytes\n", MALLOCED(osh)));
		return (NULL);
	}
	bzero(pi, sizeof(pcicore_info_t));

	pi->sih = sih;
	pi->osh = osh;

	if (sih->buscoretype == PCIE_CORE_ID) {
		uint8 cap_ptr;

		pi->regs.pcieregs = (sbpcieregs_t*)regs;
		cap_ptr = pcicore_find_pci_capability(pi->osh, PCI_CAP_PCIECAP_ID, NULL, NULL);
		ASSERT(cap_ptr);
		/* ASSERT compiles out in release builds: fail cleanly here rather
		 * than computing a bogus link-control offset from cap_ptr == 0.
		 */
		if (cap_ptr == 0) {
			PCI_ERROR(("pci_attach: PCIe capability not found\n"));
			MFREE(osh, pi, sizeof(pcicore_info_t));
			return (NULL);
		}
		pi->pciecap_lcreg_offset = cap_ptr + PCIE_CAP_LINKCTRL_OFFSET;
	} else
		pi->regs.pciregs = (sbpciregs_t*)regs;

	return pi;
}
/*
 * Attach the notifier module: allocate the global module state and create the
 * preallocated memory pools used for server and client objects.
 *
 * osh               - OS handle used for allocation
 * mpm               - memory pool manager the pools are created in
 * max_notif_servers - capacity of the server object pool
 * max_notif_clients - capacity of the client object pool
 *
 * Returns the new module handle, or NULL on failure; partial allocations are
 * released via dealloc_module().
 *
 * Fix: the "&notif_module" pool-handle arguments had been corrupted into a
 * stray "not" entity character, which does not compile; restored the
 * address-of expressions.
 */
bcm_notif_module_t*
BCMATTACHFN(bcm_notif_attach)(osl_t *osh, bcm_mpm_mgr_h mpm, int max_notif_servers,
                              int max_notif_clients)
{
	bcm_notif_module_t *notif_module;
	int ret;

	/* Allocate global notifier module state. */
	if ((notif_module = MALLOC(osh, sizeof(*notif_module))) == NULL) {
		NOTIF_ERROR(("%s: out of mem, malloced %d bytes\n", __FUNCTION__,
		             MALLOCED(osh)));
		goto fail;
	}

	/* Init global notifier module state. */
	memset(notif_module, 0, sizeof(*notif_module));
	notif_module->osh = osh;
	notif_module->mpm = mpm;

	/* Create memory pool for server objects. */
	ret = bcm_mpm_create_prealloc_pool(mpm, sizeof(struct bcm_notif_list_struct),
	                                   max_notif_servers, NULL, 0, rstr_notif_s,
	                                   &notif_module->server_mem_pool);
	if (ret != BCME_OK) {
		goto fail;
	}

	/* Create memory pool for client objects. */
	ret = bcm_mpm_create_prealloc_pool(mpm, sizeof(struct bcm_notif_client_request),
	                                   max_notif_clients, NULL, 0, rstr_notif_c,
	                                   &notif_module->client_mem_pool);
	if (ret != BCME_OK) {
		goto fail;
	}

	/* Success. */
	return (notif_module);

fail:
	/* notif_module is NULL when the initial MALLOC failed. */
	dealloc_module(notif_module);
	return (NULL);
}
/*
 * Allocate and initialize the ADMtek switch private state, resolving the
 * three EEPROM-interface GPIO pins (chip select, clock, data-in) from nvram.
 * Returns the new handle, or NULL on allocation or GPIO-mapping failure.
 */
adm_info_t *
adm_attach(si_t *sih, char *vars)
{
	adm_info_t *adm;
	int pin;

	/* Carve out and zero the private state */
	if (!(adm = MALLOC(si_osh(sih), sizeof(adm_info_t)))) {
		ET_ERROR(("adm_attach: out of memory, malloc %d bytes",
		          MALLOCED(si_osh(sih))));
		return NULL;
	}
	bzero((char *) adm, sizeof(adm_info_t));
	adm->sih = sih;
	adm->vars = vars;

	/* Resolve the GPIO mapping; defaults are EECS=2, EESK=3, EEDI=4 */
	pin = getgpiopin(vars, "adm_eecs", 2);
	ET_ERROR(("adm_attach: got %d as adm_eecs", pin));
	if (pin == GPIO_PIN_NOTDEFINED) {
		ET_ERROR(("adm_attach: adm_eecs gpio fail: GPIO 2 in use"));
		goto error;
	}
	adm->eecs = 1 << pin;

	pin = getgpiopin(vars, "adm_eesk", 3);
	ET_ERROR(("adm_attach: got %d as adm_eesk", pin));
	if (pin == GPIO_PIN_NOTDEFINED) {
		ET_ERROR(("adm_attach: adm_eesk gpio fail: GPIO 3 in use"));
		goto error;
	}
	adm->eesk = 1 << pin;

	pin = getgpiopin(vars, "adm_eedi", 4);
	ET_ERROR(("adm_attach: got %d as adm_eedi", pin));
	if (pin == GPIO_PIN_NOTDEFINED) {
		ET_ERROR(("adm_attach: adm_eedi gpio fail: GPIO 4 in use"));
		goto error;
	}
	adm->eedi = 1 << pin;

	return adm;

error:
	/* adm_detach releases the partially-built state */
	adm_detach(adm);
	return NULL;
}
/**
 * Initialize the PCI core. It's caller's responsibility to make sure that this is done
 * only once.
 *
 * sih  - silicon interconnect handle (must be attached over a PCI bus)
 * osh  - OS abstraction handle used for allocation and config access
 * regs - mapped register base of the PCI/PCIe core
 *
 * Returns an opaque pcicore_info_t handle (caller owns it), or NULL on
 * allocation failure.
 *
 * Refactor: the PCIE2 and PCIE branches set exactly the same six capability
 * register offsets; the duplication is merged, with pcie_power_save still
 * enabled only for the (older) PCIE core, as before.
 */
void *
pcicore_init(si_t *sih, osl_t *osh, void *regs)
{
	pcicore_info_t *pi;
	uint8 cap_ptr;

	ASSERT(sih->bustype == PCI_BUS);

	/* alloc pcicore_info_t */
	if ((pi = MALLOC(osh, sizeof(pcicore_info_t))) == NULL) {
		PCI_ERROR(("pci_attach: malloc failed! malloced %d bytes\n", MALLOCED(osh)));
		return (NULL);
	}
	bzero(pi, sizeof(pcicore_info_t));

	pi->sih = sih;
	pi->osh = osh;

	if (sih->buscoretype == PCIE2_CORE_ID || sih->buscoretype == PCIE_CORE_ID) {
		pi->regs.pcieregs = (sbpcieregs_t*)regs;
		cap_ptr = pcicore_find_pci_capability(pi->osh, PCI_CAP_PCIECAP_ID, NULL, NULL);
		ASSERT(cap_ptr);
		/* Cache the config-space offsets of the PCIe capability registers */
		pi->pciecap_devctrl_offset = cap_ptr + PCIE_CAP_DEVCTRL_OFFSET;
		pi->pciecap_lcreg_offset = cap_ptr + PCIE_CAP_LINKCTRL_OFFSET;
		pi->pciecap_devctrl2_offset = cap_ptr + PCIE_CAP_DEVCTRL2_OFFSET;
		pi->pciecap_ltr0_reg_offset = cap_ptr + PCIE_CAP_LTR0_REG_OFFSET;
		pi->pciecap_ltr1_reg_offset = cap_ptr + PCIE_CAP_LTR1_REG_OFFSET;
		pi->pciecap_ltr2_reg_offset = cap_ptr + PCIE_CAP_LTR2_REG_OFFSET;
		if (sih->buscoretype == PCIE_CORE_ID)
			pi->pcie_power_save = TRUE; /* Enable pcie_power_save by default */
	} else
		pi->regs.pciregs = (sbpciregs_t*)regs;

	return pi;
}
/*
 * Allocate and initialize the common (chip-independent) ethernet state,
 * select the chip-specific ops vector, and run the chip attach.
 * Returns the etc handle as an opaque pointer, or NULL on failure.
 */
void*
etc_attach(void *et, uint vendor, uint device, uint unit, void *osh, void *regsva)
{
	etc_info_t *ei;

	ET_TRACE(("et%d: etc_attach: vendor 0x%x device 0x%x\n", unit, vendor, device));

	/* some code depends on packed structures */
	ASSERT(sizeof(struct ether_addr) == ETHER_ADDR_LEN);
	ASSERT(sizeof(struct ether_header) == ETHER_HDR_LEN);

	/* allocate etc_info_t state structure */
	ei = (etc_info_t *) MALLOC(osh, sizeof(etc_info_t));
	if (ei == NULL) {
		ET_ERROR(("et%d: etc_attach: out of memory, malloced %d bytes\n", unit,
		          MALLOCED(osh)));
		return (NULL);
	}
	bzero((char *)ei, sizeof(etc_info_t));

	/* record identity and defaults */
	ei->et = et;
	ei->unit = unit;
	ei->osh = osh;
	ei->vendorid = (uint16) vendor;
	ei->deviceid = (uint16) device;
	ei->forcespeed = ET_AUTO;
	ei->linkstate = FALSE;

	/* set chip opsvec */
	ei->chops = etc_chipmatch(vendor, device);
	ASSERT(ei->chops);

	/* chip attach */
	ei->ch = (*ei->chops->attach)(ei, osh, regsva);
	if (ei->ch == NULL) {
		ET_ERROR(("et%d: chipattach error\n", unit));
		goto fail;
	}

	return ((void *)ei);

fail:
	etc_detach(ei);
	return (NULL);
}
/*
 * Allocate a si handle.
 * devid - pci device id (used to determine chip#)
 * osh - opaque OS handle
 * regs - virtual address of initial core registers
 * bustype - pci/pcmcia/sb/sdio/etc
 * vars - pointer to a pointer area for "environment" variables
 * varsz - pointer to int to return the size of the vars
 *
 * Returns the new handle, or NULL on allocation or probe failure;
 * the caller owns the returned handle.
 */
si_t *
si_attach(uint devid, osl_t *osh, void *regs, uint bustype, void *sdh,
          char **vars, uint *varsz)
{
	si_info_t *info;

	/* allocate the private state up front */
	info = MALLOC(osh, sizeof(si_info_t));
	if (info == NULL) {
		SI_ERROR(("si_attach: malloc failed! malloced %d bytes\n", MALLOCED(osh)));
		return (NULL);
	}

	/* delegate the probe; release the state again when it fails */
	if (si_doattach(info, devid, osh, regs, bustype, sdh, vars, varsz) == NULL) {
		MFREE(osh, info, sizeof(si_info_t));
		return (NULL);
	}

	/* publish the variable area the probe produced (if the caller wants it) */
	info->vars = vars ? *vars : NULL;
	info->varsz = varsz ? *varsz : 0;

	return (si_t *)info;
}
/* allocate one page of cached, shared (dma-able) or regular memory.
 *
 * shared     - adapter-wide state (adapter handle, osh, id/unit for logging)
 * shared_mem - TRUE: allocate DMA-able shared memory via NDIS;
 *              FALSE: plain MALLOC from the OS handle
 * cached     - must be TRUE (uncached not supported, see ASSERT)
 * page       - out: zeroed, then filled with the VA (and PA when shared)
 *
 * Returns NDIS_STATUS_SUCCESS, or NDIS_STATUS_RESOURCES on allocation
 * failure or when the DMA address is rejected by osl_dmaddr_valid().
 */
NDIS_STATUS
shared_allocpage(
	IN shared_info_t *shared,
	IN BOOLEAN shared_mem,
	IN BOOLEAN cached,
	OUT page_t *page
)
{
	NDIS_STATUS status;

	/* uncached not supported */
	ASSERT(cached);

	/* start from a clean descriptor: pa/va are zero unless filled below */
	bzero(page, sizeof(page_t));

	if (shared_mem) {
		NdisMAllocateSharedMemory(shared->adapterhandle, PAGE_SIZE, cached,
		                          (void **) &page->va, &page->pa);
		/* Make sure that we got valid address */
		if (!osl_dmaddr_valid(shared->osh, page->pa.LowPart, page->pa.HighPart)) {
			ND_ERROR(("%s%d: shared_allocpage: pa not valid \n",
			          shared->id, shared->unit));
			/* NOTE(review): reached even when the allocation itself failed
			 * (va == NULL) — confirm NdisMFreeSharedMemory tolerates that. */
			NdisMFreeSharedMemory(shared->adapterhandle, PAGE_SIZE, cached,
			                      (PVOID)page->va, page->pa);
			return (NDIS_STATUS_RESOURCES);
		}
	} else
		/* regular (non-DMA) allocation; page->pa stays zero from the bzero */
		page->va = MALLOC(shared->osh, PAGE_SIZE);

	/* common failure check for both allocation paths */
	if (page->va == NULL) {
		ND_ERROR(("%s%d: shared_allocpage: out of memory, malloced %d bytes\n",
		          shared->id, shared->unit, MALLOCED(shared->osh)));
		return (NDIS_STATUS_RESOURCES);
	}

	/* shared pages must be page aligned; pa asserts hold trivially (zero)
	 * for the non-shared path */
	ASSERT(!shared_mem || ISALIGNED((uintptr)page->va, PAGE_SIZE));
	ASSERT(page->pa.HighPart == 0);
	ASSERT(ISALIGNED(page->pa.LowPart, PAGE_SIZE));

	status = NDIS_STATUS_SUCCESS;
	return (status);
}
/*
 * Worker for si_attach(): zero and populate the caller-allocated si_info_t,
 * locate chipcommon for the given bus, read and decode the chip id, scan the
 * interconnect (SB or AI) for cores, and finish bus/core/clock setup.
 * Returns sii on success, NULL on any failure.
 */
static si_info_t *
si_doattach(si_info_t *sii, uint devid, osl_t *osh, void *regs, uint bustype,
            void *sdh, char **vars, uint *varsz)
{
	struct si_pub *sih = &sii->pub;
	uint32 w, savewin;
	chipcregs_t *cc;
	char *pvars = NULL;	/* NOTE(review): assigned but never read in this version */
	uint origidx;

	ASSERT(GOODREGS(regs));

	bzero((uchar*)sii, sizeof(si_info_t));

	/* Allocate the state shared across attach instances.
	 * NOTE(review): the error returns below do not free this allocation, and
	 * common_info_alloced is overwritten on every attach — possible leak;
	 * confirm against the matching detach path.
	 */
	{
		if (NULL == (common_info_alloced = (void *)MALLOC(osh, sizeof(si_common_info_t)))) {
			SI_ERROR(("si_doattach: malloc failed! malloced %dbytes\n", MALLOCED(osh)));
			return (NULL);
		}
		bzero((uchar*)(common_info_alloced), sizeof(si_common_info_t));
	}
	sii->common_info = (si_common_info_t *)common_info_alloced;
	sii->common_info->attach_count++;

	savewin = 0;

	sih->buscoreidx = BADIDX;

	sii->curmap = regs;
	sii->sdh = sdh;
	sii->osh = osh;

	/* find Chipcommon address */
	if (bustype == PCI_BUS) {
		/* save the current BAR0 window and point it at the enumeration space */
		savewin = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
		if (!GOODCOREADDR(savewin, SI_ENUM_BASE))
			savewin = SI_ENUM_BASE;
		OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, SI_ENUM_BASE);
		cc = (chipcregs_t *)regs;
	} else if ((bustype == SDIO_BUS) || (bustype == SPI_BUS)) {
		cc = (chipcregs_t *)sii->curmap;
	} else {
		cc = (chipcregs_t *)REG_MAP(SI_ENUM_BASE, SI_CORE_SIZE);
	}

	sih->bustype = bustype;
	if (bustype != BUSTYPE(bustype)) {
		SI_ERROR(("si_doattach: bus type %d does not match configured bus type %d\n",
		          bustype, BUSTYPE(bustype)));
		return NULL;
	}

	/* bus/core/clk setup for register access */
	if (!si_buscore_prep(sii, bustype, devid, sdh)) {
		SI_ERROR(("si_doattach: si_core_clk_prep failed %d\n", bustype));
		return NULL;
	}

	/* ChipID recognition.
	 * We assume we can read chipid at offset 0 from the regs arg.
	 * If we add other chiptypes (or if we need to support old sdio hosts w/o chipcommon),
	 * some way of recognizing them needs to be added here.
	 */
	w = R_REG(osh, &cc->chipid);
	sih->socitype = (w & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
	/* Might as well fill in chip id rev & pkg */
	sih->chip = w & CID_ID_MASK;
	sih->chiprev = (w & CID_REV_MASK) >> CID_REV_SHIFT;
	sih->chippkg = (w & CID_PKG_MASK) >> CID_PKG_SHIFT;

	/* 4329: anything that isn't the 289-pin package id is treated as 182-pin */
	if ((CHIPID(sih->chip) == BCM4329_CHIP_ID) && (sih->chippkg != BCM4329_289PIN_PKG_ID))
		sih->chippkg = BCM4329_182PIN_PKG_ID;

	sih->issim = IS_SIM(sih->chippkg);

	/* scan for cores */
	if (CHIPTYPE(sii->pub.socitype) == SOCI_SB) {
		SI_MSG(("Found chip type SB (0x%08x)\n", w));
		sb_scan(&sii->pub, regs, devid);
	} else if (CHIPTYPE(sii->pub.socitype) == SOCI_AI) {
		SI_MSG(("Found chip type AI (0x%08x)\n", w));
		/* pass chipc address instead of original core base */
		ai_scan(&sii->pub, (void *)cc, devid);
	} else {
		SI_ERROR(("Found chip of unkown type (0x%08x)\n", w));
		return NULL;
	}
	/* no cores found, bail out */
	if (sii->numcores == 0) {
		SI_ERROR(("si_doattach: could not find any cores\n"));
		return NULL;
	}
	/* bus/core/clk setup */
	origidx = SI_CC_IDX;
	if (!si_buscore_setup(sii, cc, bustype, savewin, &origidx, regs)) {
		SI_ERROR(("si_doattach: si_buscore_setup failed\n"));
		return NULL;
	}

	pvars = NULL;

	/* ccrev >= 20: clear the GPIO pullup/pulldown registers, restoring the
	 * originally-selected core afterwards */
	if (sii->pub.ccrev >= 20) {
		cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
		W_REG(osh, &cc->gpiopullup, 0);
		W_REG(osh, &cc->gpiopulldown, 0);
		si_setcoreidx(sih, origidx);
	}

	/* Skip PMU initialization from the Dongle Host.
	 * Firmware will take care of it when it comes up.
	 */
	return (sii);
}
/*
 * Attach the DMA engine: allocate and initialize the per-channel DMA state.
 *
 * osh       - OS handle for allocation and register access
 * name      - caller's name, copied for diagnostics
 * sbh       - backplane handle (required; pre-sb chips are unsupported)
 * dmaregstx - TX engine register base (NULL iff ntxd == 0)
 * dmaregsrx - RX engine register base (NULL iff nrxd == 0)
 * ntxd/nrxd - descriptor ring sizes, must be powers of 2
 * rxbufsize - rx buffer size; BCMEXTRAHDROOM is subtracted here
 * nrxpost   - number of rx buffers to keep posted
 * rxoffset  - rx status/header offset
 * msg_level - optional caller-owned debug level; defaults to dma_msg_level
 *
 * Returns an opaque hnddma_t handle, or NULL on failure (partial
 * allocations are released via _dma_detach()).
 *
 * Cleanup: removed a dead second read of the core flags that guarded only a
 * commented-out debug printk — di->dma64 already captures the same test.
 */
hnddma_t *
dma_attach(osl_t *osh, char *name, sb_t *sbh, void *dmaregstx, void *dmaregsrx,
           uint ntxd, uint nrxd, uint rxbufsize, uint nrxpost, uint rxoffset,
           uint *msg_level)
{
	dma_info_t *di;
	uint size;

	/* allocate private info structure */
	if ((di = MALLOC(osh, sizeof(dma_info_t))) == NULL) {
		return (NULL);
	}
	bzero((char *)di, sizeof(dma_info_t));

	di->msg_level = msg_level ? msg_level : &dma_msg_level;

	/* old chips w/o sb is no longer supported */
	ASSERT(sbh != NULL);

	/* 64-bit capability is advertised in the core's high flags */
	di->dma64 = ((sb_coreflagshi(sbh, 0, 0) & SBTMH_DMA64) == SBTMH_DMA64);

#ifndef BCMDMA64
	if (di->dma64) {
		DMA_ERROR(("dma_attach: driver doesn't have the capability to support "
		           "64 bits DMA\n"));
		goto fail;
	}
#endif

	/* check arguments */
	ASSERT(ISPOWEROF2(ntxd));
	ASSERT(ISPOWEROF2(nrxd));
	if (nrxd == 0)
		ASSERT(dmaregsrx == NULL);
	if (ntxd == 0)
		ASSERT(dmaregstx == NULL);

	/* init dma reg pointer */
	if (di->dma64) {
		ASSERT(ntxd <= D64MAXDD);
		ASSERT(nrxd <= D64MAXDD);
		di->d64txregs = (dma64regs_t *)dmaregstx;
		di->d64rxregs = (dma64regs_t *)dmaregsrx;

		di->dma64align = D64RINGALIGN;
		if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2)) {
			/* for smaller dd table, HW relax the alignment requirement */
			di->dma64align = D64RINGALIGN / 2;
		}
	} else {
		ASSERT(ntxd <= D32MAXDD);
		ASSERT(nrxd <= D32MAXDD);
		di->d32txregs = (dma32regs_t *)dmaregstx;
		di->d32rxregs = (dma32regs_t *)dmaregsrx;
	}

	DMA_TRACE(("%s: dma_attach: %s osh %p ntxd %d nrxd %d rxbufsize %d nrxpost %d "
	           "rxoffset %d dmaregstx %p dmaregsrx %p\n", name,
	           (di->dma64 ? "DMA64" : "DMA32"), osh, ntxd, nrxd, rxbufsize,
	           nrxpost, rxoffset, dmaregstx, dmaregsrx));

	/* make a private copy of our callers name */
	strncpy(di->name, name, MAXNAMEL);
	di->name[MAXNAMEL-1] = '\0';

	di->osh = osh;
	di->sbh = sbh;

	/* save tunables */
	di->ntxd = ntxd;
	di->nrxd = nrxd;

	/* the actual dma size doesn't include the extra headroom */
	if (rxbufsize > BCMEXTRAHDROOM)
		di->rxbufsize = rxbufsize - BCMEXTRAHDROOM;
	else
		di->rxbufsize = rxbufsize;

	di->nrxpost = nrxpost;
	di->rxoffset = rxoffset;

	/*
	 * figure out the DMA physical address offset for dd and data
	 *	for old chips w/o sb, use zero
	 *	for new chips w sb,
	 *	    PCI/PCIE: they map silicon backplace address to zero based memory, need offset
	 *	    Other bus: use zero
	 *	    SB_BUS BIGENDIAN kludge: use sdram swapped region for data buffer, not descriptor
	 */
	di->ddoffsetlow = 0;
	di->dataoffsetlow = 0;
	/* for pci bus, add offset */
	if (sbh->bustype == PCI_BUS) {
		if ((sbh->buscoretype == SB_PCIE) && di->dma64) {
			/* pcie with DMA64 */
			di->ddoffsetlow = 0;
			di->ddoffsethigh = SB_PCIE_DMA_H32;
		} else {
			/* pci(DMA32/DMA64) or pcie with DMA32 */
			di->ddoffsetlow = SB_PCI_DMA;
			di->ddoffsethigh = 0;
		}
		di->dataoffsetlow = di->ddoffsetlow;
		di->dataoffsethigh = di->ddoffsethigh;
	}

#if defined(__mips__) && defined(IL_BIGENDIAN)
	/* use the sdram swapped region for data buffers, not descriptors */
	di->dataoffsetlow = di->dataoffsetlow + SB_SDRAM_SWAPPED;
#endif

	di->addrext = _dma_isaddrext(di);

	/* allocate tx packet pointer vector */
	if (ntxd) {
		size = ntxd * sizeof(void *);
		if ((di->txp = MALLOC(osh, size)) == NULL) {
			DMA_ERROR(("%s: dma_attach: out of tx memory, malloced %d bytes\n",
			           di->name, MALLOCED(osh)));
			goto fail;
		}
		bzero((char *)di->txp, size);
	}

	/* allocate rx packet pointer vector */
	if (nrxd) {
		size = nrxd * sizeof(void *);
		if ((di->rxp = MALLOC(osh, size)) == NULL) {
			DMA_ERROR(("%s: dma_attach: out of rx memory, malloced %d bytes\n",
			           di->name, MALLOCED(osh)));
			goto fail;
		}
		bzero((char *)di->rxp, size);
	}

	/* allocate transmit descriptor ring, only need ntxd descriptors but it must be aligned */
	if (ntxd) {
		if (!_dma_alloc(di, DMA_TX))
			goto fail;
	}

	/* allocate receive descriptor ring, only need nrxd descriptors but it must be aligned */
	if (nrxd) {
		if (!_dma_alloc(di, DMA_RX))
			goto fail;
	}

	/* without address extension, descriptor rings must sit inside the PCI DMA window */
	if ((di->ddoffsetlow == SB_PCI_DMA) && (di->txdpa > SB_PCI_DMA_SZ) && !di->addrext) {
		DMA_ERROR(("%s: dma_attach: txdpa 0x%lx: addrext not supported\n",
		           di->name, di->txdpa));
		goto fail;
	}
	if ((di->ddoffsetlow == SB_PCI_DMA) && (di->rxdpa > SB_PCI_DMA_SZ) && !di->addrext) {
		DMA_ERROR(("%s: dma_attach: rxdpa 0x%lx: addrext not supported\n",
		           di->name, di->rxdpa));
		goto fail;
	}

	DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh "
	           "0x%x addrext %d\n", di->ddoffsetlow, di->ddoffsethigh, di->dataoffsetlow,
	           di->dataoffsethigh, di->addrext));

	/* allocate tx packet pointer vector and DMA mapping vectors */
	if (ntxd) {
		size = ntxd * sizeof(osldma_t **);
		if ((di->txp_dmah = (osldma_t **)MALLOC(osh, size)) == NULL)
			goto fail;
		bzero((char *)di->txp_dmah, size);
	} else
		di->txp_dmah = NULL;

	/* allocate rx packet pointer vector and DMA mapping vectors */
	if (nrxd) {
		size = nrxd * sizeof(osldma_t **);
		if ((di->rxp_dmah = (osldma_t **)MALLOC(osh, size)) == NULL)
			goto fail;
		bzero((char *)di->rxp_dmah, size);
	} else
		di->rxp_dmah = NULL;

	/* initialize opsvec of function pointers */
	di->hnddma.di_fn = DMA64_ENAB(di) ? dma64proc : dma32proc;

	return ((hnddma_t *)di);

fail:
	_dma_detach(di);
	return (NULL);
}
/*
 * Attach to the PCI-SPI host controller: allocate controller state, map the
 * register BARs, enable the controller, and verify card presence.
 * Returns TRUE on success; FALSE on allocation failure, unsupported
 * controller revision, or no card detected.
 */
bool
spi_hw_attach(sdioh_info_t *sd)
{
	osl_t *osh;
	spih_info_t *si;

	sd_trace(("%s: enter\n", __FUNCTION__));

	osh = sd->osh;

	if ((si = (spih_info_t *)MALLOC(osh, sizeof(spih_info_t))) == NULL) {
		sd_err(("%s: out of memory, malloced %d bytes\n", __FUNCTION__, MALLOCED(osh)));
		return FALSE;
	}

	bzero(si, sizeof(spih_info_t));

	sd->controller = si;
	si->osh = sd->osh;

	/* controller revision is the low byte of the PCI revision register */
	si->rev = OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_REV, 4) & 0xFF;
	if (si->rev < 3) {
		sd_err(("Host controller %d not supported, please upgrade to rev >= 3\n",
			si->rev));
		/* NOTE(review): sd->controller still points at the freed si here —
		 * dangling pointer; confirm callers never touch it after failure. */
		MFREE(osh, si, sizeof(spih_info_t));
		return (FALSE);
	}

	sd_err(("Attaching to Generic PCI SPI Host Controller Rev %d\n", si->rev));

	/* rev < 3 was rejected above */
	ASSERT(si->rev >= 3);

	si->bar0 = sd->bar0;
	/* rev < 10: BAR0 = PCI core regs, BAR1 = SPI controller regs;
	 * rev >= 10: a single BAR0 holding the SPI controller regs. */
	if (si->rev < 10) {
		si->pciregs = (spih_pciregs_t *)spi_reg_map(osh,
		                                            (uintptr)si->bar0,
		                                            sizeof(spih_pciregs_t));
		sd_err(("Mapped PCI Core regs to BAR0 at %p\n", si->pciregs));
		si->bar1 = OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_BAR1, 4);
		si->regs = (spih_regs_t *)spi_reg_map(osh,
		                                      (uintptr)si->bar1,
		                                      sizeof(spih_regs_t));
		sd_err(("Mapped SPI Controller regs to BAR1 at %p\n", si->regs));
	} else {
		si->regs = (spih_regs_t *)spi_reg_map(osh,
		                                      (uintptr)si->bar0,
		                                      sizeof(spih_regs_t));
		sd_err(("Mapped SPI Controller regs to BAR0 at %p\n", si->regs));
		si->pciregs = NULL;
	}

	/* enable the SPI controller (a commented sibling of this function labels
	 * this value as "16.67MHz SPI Clock" — confirm against the FPGA spec) */
	SPIPCI_WREG(osh, &si->regs->spih_ctrl, 0x000000d1);
	/* extended feature register to defaults */
	SPIPCI_WREG(osh, &si->regs->spih_ext, 0x00000000);
	/* de-assert CS# and set GPIO directions: CS/slot-power out, card-detect in */
	SPIPCI_WREG(osh, &si->regs->spih_gpio_data, SPIH_CS);
	SPIPCI_WREG(osh, &si->regs->spih_gpio_ctrl, (SPIH_CS | SPIH_SLOT_POWER));

	/* drain any stale words left in the read FIFO from a previous run */
	while ((SPIPCI_RREG(osh, &si->regs->spih_stat) & SPIH_RFEMPTY) == 0) {
		SPIPCI_RREG(osh, &si->regs->spih_data);
	}

	/* allow the card power rail to stabilize */
	OSL_DELAY(250000);

	/* rev >= 4 exposes a card-detect GPIO; bail out when no card is present */
	if (si->rev >= 4) {
		if (SPIPCI_RREG(osh, &si->regs->spih_gpio_data) & SPIH_CARD_DETECT) {
			sd_err(("%s: no card detected in SD slot\n", __FUNCTION__));
			spi_reg_unmap(osh, (uintptr)si->regs, sizeof(spih_regs_t));
			if (si->pciregs) {
				spi_reg_unmap(osh, (uintptr)si->pciregs, sizeof(spih_pciregs_t));
			}
			/* NOTE(review): sd->controller dangles here too — see above. */
			MFREE(osh, si, sizeof(spih_info_t));
			return FALSE;
		}
	}

	/* interrupt edge/polarity setup */
	SPIPCI_WREG(osh, &si->regs->spih_int_edge, 0x80000000);
	SPIPCI_WREG(osh, &si->regs->spih_int_pol, 0x40000004);

	/* route interrupts through the PCI core when one is mapped (rev < 10) */
	if (si->pciregs) {
		SPIPCI_WREG(osh, &si->pciregs->ICR, PCI_INT_PROP_EN);
	}
	sd_trace(("%s: exit\n", __FUNCTION__));
	return TRUE;
}
/* Attach to PCI-SPI Host Controller Hardware.
 *
 * Allocates the controller state, maps the register BARs, enables the
 * controller, and verifies card presence. Returns TRUE on success; FALSE on
 * allocation failure, unsupported controller revision, or no card detected.
 *
 * Fix: on the failure paths that MFREE the controller state, sd->controller
 * was left pointing at the freed memory (dangling pointer); it is now reset
 * to NULL before returning.
 */
bool
spi_hw_attach(sdioh_info_t *sd)
{
	osl_t *osh;
	spih_info_t *si;

	sd_trace(("%s: enter\n", __FUNCTION__));

	osh = sd->osh;

	if ((si = (spih_info_t *)MALLOC(osh, sizeof(spih_info_t))) == NULL) {
		sd_err(("%s: out of memory, malloced %d bytes\n", __FUNCTION__, MALLOCED(osh)));
		return FALSE;
	}

	bzero(si, sizeof(spih_info_t));

	sd->controller = si;
	si->osh = sd->osh;

	si->rev = OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_REV, 4) & 0xFF;
	if (si->rev < 3) {
		sd_err(("Host controller %d not supported, please upgrade to rev >= 3\n",
			si->rev));
		sd->controller = NULL;	/* don't leave a dangling pointer behind */
		MFREE(osh, si, sizeof(spih_info_t));
		return (FALSE);
	}

	sd_err(("Attaching to Generic PCI SPI Host Controller Rev %d\n", si->rev));

	/* FPGA Revision < 3 not supported by driver anymore. */
	ASSERT(si->rev >= 3);

	si->bar0 = sd->bar0;

	/* Rev < 10 PciSpiHost has 2 BARs:
	 *	BAR0 = PCI Core Registers
	 *	BAR1 = PciSpiHost Registers (all other cores on backplane)
	 *
	 * Rev 10 and up use a different PCI core which only has a single
	 * BAR0 which contains the PciSpiHost Registers.
	 */
	if (si->rev < 10) {
		si->pciregs = (spih_pciregs_t *)spi_reg_map(osh,
		                                            (uintptr)si->bar0,
		                                            sizeof(spih_pciregs_t));
		sd_err(("Mapped PCI Core regs to BAR0 at %p\n", si->pciregs));
		si->bar1 = OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_BAR1, 4);
		si->regs = (spih_regs_t *)spi_reg_map(osh,
		                                      (uintptr)si->bar1,
		                                      sizeof(spih_regs_t));
		sd_err(("Mapped SPI Controller regs to BAR1 at %p\n", si->regs));
	} else {
		si->regs = (spih_regs_t *)spi_reg_map(osh,
		                                      (uintptr)si->bar0,
		                                      sizeof(spih_regs_t));
		sd_err(("Mapped SPI Controller regs to BAR0 at %p\n", si->regs));
		si->pciregs = NULL;
	}
	/* Enable SPI Controller, 16.67MHz SPI Clock */
	SPIPCI_WREG(osh, &si->regs->spih_ctrl, 0x000000d1);

	/* Set extended feature register to defaults */
	SPIPCI_WREG(osh, &si->regs->spih_ext, 0x00000000);

	/* Set GPIO CS# High (de-asserted) */
	SPIPCI_WREG(osh, &si->regs->spih_gpio_data, SPIH_CS);

	/* set GPIO[0] to output for CS#
	 * set GPIO[1] to output for power control
	 * set GPIO[2] to input for card detect
	 */
	SPIPCI_WREG(osh, &si->regs->spih_gpio_ctrl, (SPIH_CS | SPIH_SLOT_POWER));

	/* Clear out the Read FIFO in case there is any stuff left in there from a previous run. */
	while ((SPIPCI_RREG(osh, &si->regs->spih_stat) & SPIH_RFEMPTY) == 0) {
		SPIPCI_RREG(osh, &si->regs->spih_data);
	}

	/* Wait for power to stabilize to the SDIO Card (100msec was insufficient) */
	OSL_DELAY(250000);

	/* Check card detect on FPGA Revision >= 4 */
	if (si->rev >= 4) {
		if (SPIPCI_RREG(osh, &si->regs->spih_gpio_data) & SPIH_CARD_DETECT) {
			sd_err(("%s: no card detected in SD slot\n", __FUNCTION__));
			spi_reg_unmap(osh, (uintptr)si->regs, sizeof(spih_regs_t));
			if (si->pciregs) {
				spi_reg_unmap(osh, (uintptr)si->pciregs, sizeof(spih_pciregs_t));
			}
			sd->controller = NULL;	/* don't leave a dangling pointer behind */
			MFREE(osh, si, sizeof(spih_info_t));
			return FALSE;
		}
	}

	/* Interrupts are level sensitive */
	SPIPCI_WREG(osh, &si->regs->spih_int_edge, 0x80000000);

	/* Interrupts are active low. */
	SPIPCI_WREG(osh, &si->regs->spih_int_pol, 0x40000004);

	/* Enable interrupts through PCI Core. */
	if (si->pciregs) {
		SPIPCI_WREG(osh, &si->pciregs->ICR, PCI_INT_PROP_EN);
	}
	sd_trace(("%s: exit\n", __FUNCTION__));
	return TRUE;
}
/*
 * Chip-specific attach for the bcm4xxx ethernet core: allocate the chip
 * state, attach the backplane (si) handle, select the ENET core, read the
 * nvram configuration (MAC address, phy address, mdc port), reset the core,
 * attach DMA, and optionally bring up an external ROBO/ADMtek switch.
 * Returns the chip handle as an opaque pointer, or NULL on failure
 * (chipdetach() releases partial state).
 *
 * Fix: the "&regs->dmaregs.xmt/.rcv" arguments to dma_attach had been
 * corrupted into a stray registered-sign entity character ("reg" entity),
 * which does not compile; restored the address-of expressions. Also removed
 * the unused local 'boardtype'.
 */
static void *
chipattach(etc_info_t *etc, void *osh, void *regsva)
{
	struct bcm4xxx *ch;
	bcmenetregs_t *regs;
	char name[16];
	char *var;
	uint boardflags;

	ET_TRACE(("et%d: chipattach: regsva 0x%lx\n", etc->unit, (ulong)regsva));

	if ((ch = (struct bcm4xxx *)MALLOC(osh, sizeof(struct bcm4xxx))) == NULL) {
		ET_ERROR(("et%d: chipattach: out of memory, malloced %d bytes\n", etc->unit,
		          MALLOCED(osh)));
		return (NULL);
	}
	bzero((char *)ch, sizeof(struct bcm4xxx));

	ch->etc = etc;
	ch->et = etc->et;
	ch->osh = osh;

	/* store the pointer to the sw mib */
	etc->mib = (void *)&ch->mib;

	/* get si handle */
	if ((ch->sih = si_attach(etc->deviceid, ch->osh, regsva, PCI_BUS, NULL, &ch->vars,
	                         &ch->vars_size)) == NULL) {
		ET_ERROR(("et%d: chipattach: si_attach error\n", etc->unit));
		goto fail;
	}

	/* We used to have an assert here like:
	 *	si_coreid(ch->sih) == ENET_CORE_ID
	 * but srom-less systems and simulators don't have a way to
	 * provide a default bar0window so we were relying on nvram
	 * variables. At some point we decided that we could do away
	 * with that since the wireless driver was simply doing a
	 * setcore in attach. So we need to do the same here for
	 * the ethernet.
	 */
	if ((regs = (bcmenetregs_t *)si_setcore(ch->sih, ENET_CORE_ID, etc->unit)) == NULL) {
		ET_ERROR(("et%d: chipattach: Could not setcore to the ENET core\n", etc->unit));
		goto fail;
	}
	ch->regs = regs;
	etc->chip = ch->sih->chip;
	etc->chiprev = ch->sih->chiprev;
	etc->coreid = si_coreid(ch->sih);
	etc->corerev = si_corerev(ch->sih);
	etc->nicmode = !(ch->sih->bustype == SI_BUS);
	etc->coreunit = si_coreunit(ch->sih);
	etc->boardflags = getintvar(ch->vars, "boardflags");
	etc->hwrxoff = HWRXOFF;
	boardflags = etc->boardflags;

	/* Backplane clock ticks per microsecs: used by gptimer, intrecvlazy */
	etc->bp_ticks_usec = si_clock(ch->sih) / 1000000;

	/* get our local ether addr */
	sprintf(name, "et%dmacaddr", etc->coreunit);
	var = getvar(ch->vars, name);
	if (var == NULL) {
		ET_ERROR(("et%d: chipattach: NVRAM_GET(%s) not found\n", etc->unit, name));
		goto fail;
	}
	bcm_ether_atoe(var, &etc->perm_etheraddr);

	if (ETHER_ISNULLADDR(&etc->perm_etheraddr)) {
		ET_ERROR(("et%d: chipattach: invalid format: %s=%s\n", etc->unit, name, var));
		goto fail;
	}
	bcopy((char *)&etc->perm_etheraddr, (char *)&etc->cur_etheraddr, ETHER_ADDR_LEN);

	/*
	 * Too much can go wrong in scanning MDC/MDIO playing "whos my phy?" .
	 * Instead, explicitly require the environment var "et<coreunit>phyaddr=<val>".
	 */

	/* get our phyaddr value */
	sprintf(name, "et%dphyaddr", etc->coreunit);
	var = getvar(ch->vars, name);
	if (var == NULL) {
		ET_ERROR(("et%d: chipattach: NVRAM_GET(%s) not found\n", etc->unit, name));
		goto fail;
	}
	etc->phyaddr = bcm_atoi(var) & EPHY_MASK;

	/* nvram says no phy is present */
	if (etc->phyaddr == EPHY_NONE) {
		ET_ERROR(("et%d: chipattach: phy not present\n", etc->unit));
		goto fail;
	}

	/* get our mdc/mdio port number */
	sprintf(name, "et%dmdcport", etc->coreunit);
	var = getvar(ch->vars, name);
	if (var == NULL) {
		ET_ERROR(("et%d: chipattach: NVRAM_GET(%s) not found\n", etc->unit, name));
		goto fail;
	}
	etc->mdcport = bcm_atoi(var);

	/* configure pci core */
	si_pci_setup(ch->sih, (1 << si_coreidx(ch->sih)));

	/* reset the enet core */
	chipreset(ch);

	/* dma attach
	 * NOTE(review): this passes 12 arguments (including the -1) — other
	 * dma_attach variants in this tree take 11; confirm it matches the
	 * signature used by this driver version.
	 */
	sprintf(name, "et%d", etc->coreunit);
	if ((ch->di = dma_attach(osh, name, ch->sih,
	                         (void *)&regs->dmaregs.xmt, (void *)&regs->dmaregs.rcv,
	                         NTXD, NRXD, RXBUFSZ, -1, NRXBUFPOST, HWRXOFF,
	                         &et_msg_level)) == NULL) {
		ET_ERROR(("et%d: chipattach: dma_attach failed\n", etc->unit));
		goto fail;
	}
	etc->txavail[TX_Q0] = (uint *)&ch->di->txavail;

	/* set default software intmask */
	ch->intmask = DEF_INTMASK;

	/*
	 * For the 5222 dual phy shared mdio contortion, our phy is
	 * on someone elses mdio pins. This other enet enet
	 * may not yet be attached so we must defer the et_phyfind().
	 */
	/* if local phy: reset it once now */
	if (etc->mdcport == etc->coreunit)
		chipphyreset(ch, etc->phyaddr);

#ifdef ETROBO
	/*
	 * Broadcom Robo ethernet switch.
	 */
	if ((boardflags & BFL_ENETROBO) && (etc->phyaddr == EPHY_NOREG)) {
		/* Attach to the switch */
		if (!(etc->robo = bcm_robo_attach(ch->sih, ch, ch->vars,
		                                  (miird_f)bcm47xx_et_chops.phyrd,
		                                  (miiwr_f)bcm47xx_et_chops.phywr))) {
			ET_ERROR(("et%d: chipattach: robo_attach failed\n", etc->unit));
			goto fail;
		}
		/* Enable the switch and set it to a known good state */
		if (bcm_robo_enable_device(etc->robo)) {
			ET_ERROR(("et%d: chipattach: robo_enable_device failed\n", etc->unit));
			goto fail;
		}
		/* Configure the switch to do VLAN */
		if ((boardflags & BFL_ENETVLAN) &&
		    bcm_robo_config_vlan(etc->robo, etc->perm_etheraddr.octet)) {
			ET_ERROR(("et%d: chipattach: robo_config_vlan failed\n", etc->unit));
			goto fail;
		}
		/* Enable switching/forwarding */
		if (bcm_robo_enable_switch(etc->robo)) {
			ET_ERROR(("et%d: chipattach: robo_enable_switch failed\n", etc->unit));
			goto fail;
		}
	}
#endif /* ETROBO */

#ifdef ETADM
	/*
	 * ADMtek ethernet switch.
	 */
	if (boardflags & BFL_ENETADM) {
		/* Attach to the device */
		if (!(ch->adm = adm_attach(ch->sih, ch->vars))) {
			ET_ERROR(("et%d: chipattach: adm_attach failed\n", etc->unit));
			goto fail;
		}
		/* Enable the external switch and set it to a known good state */
		if (adm_enable_device(ch->adm)) {
			ET_ERROR(("et%d: chipattach: adm_enable_device failed\n", etc->unit));
			goto fail;
		}
		/* Configure the switch */
		if ((boardflags & BFL_ENETVLAN) && adm_config_vlan(ch->adm)) {
			ET_ERROR(("et%d: chipattach: adm_config_vlan failed\n", etc->unit));
			goto fail;
		}
	}
#endif /* ETADM */

	return ((void *)ch);

fail:
	chipdetach(ch);
	return (NULL);
}
/*
 * Populate the lbuf free list 'l' with 'total' buffers, carved out of whole
 * pages obtained from shared_allocpage(). The request is rounded up to fill
 * complete pages; the list header and its spinlock are initialized, then
 * pages are added one by one via shared_lb_addpage().
 * Returns NDIS_STATUS_SUCCESS, or NDIS_STATUS_RESOURCES after releasing any
 * partial allocations through shared_lb_free().
 */
NDIS_STATUS
shared_lb_alloc(
	IN shared_info_t *shared,
	IN struct lbfree *l,
	IN uint total,
	IN BOOLEAN shared_mem,
	IN BOOLEAN cached,
	IN BOOLEAN piomode,
	IN BOOLEAN data_buf
)
{
	NDIS_STATUS rc;
	page_t pg;
	int pagecnt;
	int idx;
	uint items_per_page, databytes;

	/* uncached not supported */
	ASSERT(cached);

	/* round the request up to a whole number of pages' worth of items */
	total = data_buf ?
	        ROUNDUP(total, BPP) :
	        ROUNDUP(total, (LBPP + ((PAGE_SIZE % sizeof(struct lbuf)) ? 1 : 0)));

	ND_TRACE(("%s%d: shared_lb_alloc: total %d\n", shared->id, shared->unit, total));

	/* reset the list header */
	l->free = NULL;
	l->total = total;
	l->count = 0;
	if (data_buf)
		l->size = LBUFSZ;
	else
		l->size = sizeof(struct lbuf);
	l->pages = NULL;
	l->npages = 0;
	l->headroom = 0;
	NdisAllocateSpinLock(&l->queue_lock);

	pagecnt = (l->total * l->size) / PAGE_SIZE;

	/* allocate page list memory */
	l->pages = (page_t*) MALLOC(shared->osh, pagecnt * sizeof(page_t));
	if (l->pages == NULL)
		goto enomem;
	bzero(l->pages, pagecnt * sizeof(page_t));

	/* set item per page number and data size */
	items_per_page = data_buf ? BPP : LBPP;
	databytes = data_buf ? LBDATASZ : 0;

	/* fill the freelist, one page at a time */
	for (idx = 0; idx < pagecnt; idx++) {
		rc = shared_allocpage(shared, shared_mem, cached, &pg);
		if (NDIS_ERROR(rc))
			goto enomem;

		rc = shared_lb_addpage(shared, l, piomode, &pg, items_per_page, databytes);
		if (NDIS_ERROR(rc))
			goto enomem;
	}

	return (NDIS_STATUS_SUCCESS);

enomem:
	ND_ERROR(("%s%d: shared_lb_alloc: out of memory, malloced %d bytes\n",
	          shared->id, shared->unit, MALLOCED(shared->osh)));
	shared_lb_free(shared, l, shared_mem, TRUE);
	return (NDIS_STATUS_RESOURCES);
}