/*
 * Read a 32-bit Sonics backplane register, working around the PCMCIA
 * compact-flash 11-bit address window when necessary.
 *
 * sii: silicon backplane handle; sbr: register address to read.
 * Returns the register value.
 */
static uint32 sb_read_sbreg(si_info_t *sii, volatile uint32 *sbr)
{
	uint8 tmp;
	uint32 val, intr_val = 0;

	/*
	 * compact flash only has 11 bits address, while we needs 12 bits address.
	 * MEM_SEG will be OR'd with other 11 bits address in hardware,
	 * so we program MEM_SEG with 12th bit when necessary(access sb regsiters).
	 * For normal PCMCIA bus(CFTable_regwinsz > 2k), do nothing special
	 */
	if (PCMCIA(sii)) {
		/* interrupts must stay off while MEM_SEG is temporarily reprogrammed */
		INTR_OFF(sii, intr_val);
		tmp = 1;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
		sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */
	}

	val = R_REG(sii->osh, sbr);

	if (PCMCIA(sii)) {
		/* restore MEM_SEG and re-enable interrupts */
		tmp = 0;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
		INTR_RESTORE(sii, intr_val);
	}

	return (val);
}
/* return TRUE if PM capability exists in the pci config space
 * Uses and caches the information using core handle
 * (pi->pmecap_offset / pi->pmecap); subsequent calls return the cached
 * result without touching config space again.
 */
static bool pcicore_pmecap(pcicore_info_t *pi)
{
	uint8 cap_ptr;
	uint32 pmecap;

	if (!pi->pmecap_offset) {
		/* Locate the Power Management capability in PCI config space */
		cap_ptr = pcicore_find_pci_capability(pi->osh, PCI_CAP_POWERMGMTCAP_ID,
		                                      NULL, NULL);
		if (!cap_ptr)
			return FALSE;

		pi->pmecap_offset = cap_ptr;

		/* BUG FIX: removed a stray read of sprom[SRSH_CLKREQ_OFFSET_REV8]
		 * into pi->pmebits here.  The CLKREQ SROM shadow word has nothing
		 * to do with PME capability detection (it belongs to the
		 * ASPM/CLKREQ WAR path), and the pcieregs/reg16 locals existed
		 * only to support that misplaced read.
		 */
		pmecap = OSL_PCI_READ_CONFIG(pi->osh, pi->pmecap_offset, sizeof(uint32));

		/* At least one state can generate PME */
		pi->pmecap = (pmecap & PME_CAP_PM_STATES) != 0;
	}

	return (pi->pmecap);
}
/*
 * Select the SERDES MDIO device (block) address for subsequent PCIe gen2
 * MDIO accesses.  Writes the block number with the DONE bit set and polls
 * until hardware clears DONE.  Returns TRUE on success, FALSE on timeout.
 */
static bool pcie2_mdiosetblock(pcicore_info_t *pi, uint blk)
{
	sbpcieregs_t *pcieregs = pi->regs.pcieregs;
	uint mdiodata, mdioctrl, i = 0;
	uint pcie_serdes_spinwait = 200;	/* poll iterations, 1ms apart */

	/* 0x1F in the regaddr field selects the block-address register */
	mdioctrl = MDIOCTL2_DIVISOR_VAL | (0x1F << MDIOCTL2_REGADDR_SHF);
	W_REG(pi->osh, &pcieregs->u.pcie2.mdiocontrol, mdioctrl);

	mdiodata = (blk << MDIODATA2_DEVADDR_SHF) | MDIODATA2_DONE;
	W_REG(pi->osh, &pcieregs->u.pcie2.mdiowrdata, mdiodata);

	PR28829_DELAY();
	/* retry till the transaction is complete */
	while (i < pcie_serdes_spinwait) {
		/* hardware clears DONE when the transaction has finished */
		if (!(R_REG(pi->osh, &(pcieregs->u.pcie2.mdiowrdata)) & MDIODATA2_DONE)) {
			break;
		}
		OSL_DELAY(1000);
		i++;
	}

	if (i >= pcie_serdes_spinwait) {
		PCI_ERROR(("pcie_mdiosetblock: timed out\n"));
		return FALSE;
	}

	return TRUE;
}
/*
 * Register a chipcommon secondary ISR and enable its interrupt sources.
 *
 * Stores (isr, cbdata, ccintmask) in the first free cc_isr_desc[] slot and,
 * on success, ORs ccintmask into the hardware intmask register.  The current
 * core index is saved and restored around the chipcommon access.
 * Returns TRUE if a slot was found, FALSE if the table is full.
 *
 * BUG FIX: the register accesses used "(R)s->intmask" — a mangled HTML
 * entity ("&reg;" + "s") — instead of "&regs->intmask".  Restored the
 * intended address-of expression on the declared local `regs`.
 */
bool
BCMINITFN(sb_cc_register_isr)(sb_t *sbh, cc_isr_fn isr, uint32 ccintmask, void *cbdata)
{
	bool done = FALSE;
	chipcregs_t *regs;
	uint origidx;
	uint i;

	/* Save the current core index */
	origidx = sb_coreidx(sbh);
	regs = sb_setcore(sbh, SB_CC, 0);
	ASSERT(regs);

	/* claim the first unused descriptor slot */
	for (i = 0; i < MAX_CC_INT_SOURCE; i++) {
		if (cc_isr_desc[i].isr == NULL) {
			cc_isr_desc[i].isr = isr;
			cc_isr_desc[i].cbdata = cbdata;
			cc_isr_desc[i].intmask = ccintmask;
			done = TRUE;
			break;
		}
	}

	if (done) {
		/* enable the new sources in the shared hardware mask */
		cc_intmask = R_REG(sb_osh(sbh), &regs->intmask);
		cc_intmask |= ccintmask;
		W_REG(sb_osh(sbh), &regs->intmask, cc_intmask);
	}

	/* restore original coreidx */
	sb_setcoreidx(sbh, origidx);
	return done;
}
/*
 * Return the backplane interrupt flag status word for the chip.
 * SOCI_SB: read chipcommon's sbflagst from its sbconfig space
 * (SBCONFIGOFF past the core base address).
 * SOCI_AI: read the OOB router STATUSA register.
 * Any other interconnect type asserts and returns 0.
 */
uint si_intflag(si_t *sih)
{
	si_info_t *sii = SI_INFO(sih);

	if (CHIPTYPE(sih->socitype) == SOCI_SB) {
		/* sbconfig registers live SBCONFIGOFF beyond the core's base */
		sbconfig_t *ccsbr = (sbconfig_t *)((uintptr)((ulong)
		    (sii->common_info->coresba[SI_CC_IDX]) + SBCONFIGOFF));
		return R_REG(sii->osh, &ccsbr->sbflagst);
	} else if (CHIPTYPE(sih->socitype) == SOCI_AI)
		return R_REG(sii->osh, ((uint32 *)(uintptr)
		    (sii->common_info->oob_router + OOB_STATUSA)));
	else {
		ASSERT(0);
		return 0;
	}
}
/*
 * Disable the JTAG master in chipcommon by clearing JCTRL_EN in jtagctrl.
 *
 * sih: silicon backplane handle; h: chipcommon register block.
 *
 * FIX: the register access passed NULL as the osl handle and left `sih`
 * unused.  Sibling helpers (sb_jtagm_disable, jtag_rwreg) pass a real osl
 * handle; use si_osh(sih) here for consistency, and so OSL implementations
 * that require a valid handle work.
 */
void hnd_jtagm_disable(si_t *sih, void *h)
{
	chipcregs_t *cc = (chipcregs_t *)h;
	osl_t *osh = si_osh(sih);

	W_REG(osh, &cc->jtagctrl, R_REG(osh, &cc->jtagctrl) & ~JCTRL_EN);
}
/* disable chip interrupts */
static void BCMFASTPATH
chipintrsoff(struct bcm4xxx *ch)
{
	/* clear the hardware mask, then read it back to flush the posted write */
	W_REG(ch->osh, &ch->regs->intmask, 0);
	(void) R_REG(ch->osh, &ch->regs->intmask); /* sync readback */
	ch->intmask = 0;	/* keep the software shadow in sync with hardware */
}
/*
 * Remove and return the next receive packet from the rx descriptor ring.
 *
 * forceall: reclaim unconditionally (ring teardown); the DMA engine must be
 * disabled in that case.  Without forceall, stops at the descriptor the
 * hardware is currently on (rcvstatus current-descriptor field).
 * Returns the packet pointer, or NULL if nothing is reclaimable.
 */
void *
dma_getnextrxp(dma_info_t *di, bool forceall)
{
	uint i;
	void *rxp;

	/* if forcing, dma engine must be disabled */
	ASSERT(!forceall || !dma_rxenabled(di));

	i = di->rxin;

	/* return if no packets posted */
	if (i == di->rxout)
		return (NULL);

	/* ignore curr if forceall */
	if (!forceall && (i == B2I(R_REG(&di->regs->rcvstatus) & RS_CD_MASK)))
		return (NULL);

	/* get the packet pointer that corresponds to the rx descriptor */
	rxp = di->rxp[i];
	ASSERT(rxp);
	di->rxp[i] = NULL;

	/* clear this packet from the descriptor ring */
	DMA_UNMAP(di->dev, (BUS_SWAP32(R_SM(&di->rxd[i].addr)) - di->dataoffset),
	          di->rxbufsize, DMA_RX, rxp);
	W_SM(&di->rxd[i].addr, 0);

	/* advance the software read index */
	di->rxin = NEXTRXD(i);

	return (rxp);
}
/* On success, the total number of characters written is returned. This count
 * does not include the additional null-character automatically appended at
 * the end of the string. On failure, a negative number is returned.
 *
 * NOTE(review): R_REG(), TX_REG(n) and RX_REG(n) are presumably
 * function-like macros defined elsewhere (TX_REG(%d)/RX_REG(%d) only
 * compile if they stringize their argument into a format string) —
 * TODO confirm against their definitions.
 * NOTE(review): this block appears truncated — the outer `if` and the
 * function body are never closed, and no fall-through return is visible.
 * Verify against the full source before relying on it.
 */
int nios2_get_register_alias(char *alias, int alias_size, const char *reg_name,
                             int config_num, int is_dest)
{
	/* map the symbolic register name to a general-purpose register number */
	int gp_reg_num = nios2_get_register_num(reg_name, R_REG());
	if (gp_reg_num == -1)
		return -1;
	const struct nios2_ext_reg_map ext_reg_map = nios2_builtin_ext_reg_maps[config_num];
	if (ext_reg_map.rx_tx.num_regs > 0 ) {
		/* position of this register within the rx/tx alias window */
		int r_offset = gp_reg_num - ext_reg_map.rx_tx.offset;
		if (r_offset >= 0 && r_offset < ext_reg_map.rx_tx.num_regs) {
			if (is_dest) {
				/* +3 presumably leaves room for the digits and NUL — verify */
				if (alias_size > (int)(strlen(TX_REG()) + 3))
					return sprintf(alias,TX_REG(%d),r_offset);
			} else {
				if (alias_size > (int)(strlen(RX_REG()) + 3))
					return sprintf(alias,RX_REG(%d),r_offset);
			}
		}
/**
 * Read host bridge PCI config registers from Silicon Backplane ( >= rev8 ).
 *
 * It returns TRUE to indicate that access to the host bridge's pci config
 * from SI is ok, and values in 'addr' and 'val' are valid.
 *
 * It can only read registers at multiple of 4-bytes. Callers must pick up
 * needed bytes from 'val' based on 'off' value. Value in 'addr' reflects
 * the register address where value in 'val' is read.
 *
 * NOTE(review): this block is truncated mid-function (ends at "} else {");
 * the else-branch and the epilogue restoring the core index are not visible
 * here.
 */
static bool
si_pcihb_read_config(si_t *sih, uint coreunit, uint bus, uint dev,
                     uint func, uint off, uint32 **addr, uint32 *val)
{
	sbpciregs_t *pci;
	osl_t *osh;
	uint coreidx;
	bool ret = FALSE;

	/* sanity check */
	ASSERT(hndpci_is_hostbridge(bus, dev));

	/* we support only two functions on device 0 */
	if (func > 1)
		return FALSE;

	osh = si_osh(sih);

	/* read pci config when core rev >= 8 */
	coreidx = si_coreidx(sih);	/* remember current core; restored after the switch */
	pci = (sbpciregs_t *)si_setcore(sih, PCI_CORE_ID, coreunit);
	if (pci) {
		if (si_corerev(sih) >= PCI_HBSBCFG_REV) {
			/* pcicfg[] exposes the bridge's own config space, word-indexed */
			*addr = (uint32 *)&pci->pcicfg[func][off >> 2];
			*val = R_REG(osh, *addr);
			ret = TRUE;
		}
	} else {
/*
 * Select the SERDES MDIO block address for subsequent PCIe gen1 MDIO
 * accesses.  Issues a write to the dedicated block-address register and
 * polls mdiocontrol for completion.
 * Returns TRUE on success, FALSE on timeout.
 */
static bool pcie_mdiosetblock(pcicore_info_t *pi, uint blk)
{
	sbpcieregs_t *pcieregs = pi->regs.pcieregs;
	uint mdiodata, i = 0;
	uint pcie_serdes_spinwait = 200;	/* poll iterations, 1ms apart */

	/* assemble the management frame: start + write + block-address reg + TA */
	mdiodata = MDIODATA_START | MDIODATA_WRITE |
	           (MDIODATA_DEV_ADDR << MDIODATA_DEVADDR_SHF) |
	           (MDIODATA_BLK_ADDR << MDIODATA_REGADDR_SHF) |
	           MDIODATA_TA | (blk << 4);
	W_REG(pi->osh, &pcieregs->u.pcie1.mdiodata, mdiodata);

	PR28829_DELAY();
	/* retry till the transaction is complete */
	while (i < pcie_serdes_spinwait) {
		if (R_REG(pi->osh, &(pcieregs->u.pcie1.mdiocontrol)) & MDIOCTL_ACCESS_DONE) {
			break;
		}
		OSL_DELAY(1000);
		i++;
	}

	if (i >= pcie_serdes_spinwait) {
		PCI_ERROR(("pcie_mdiosetblock: timed out\n"));
		return FALSE;
	}

	return TRUE;
}
/*
 * Disable the JTAG master by clearing the enable bit (JCTRL_EN) in
 * chipcommon's jtagctrl register.
 *
 * osh: osl handle for register access; h: chipcommon register block.
 */
void sb_jtagm_disable(osl_t *osh, void *h)
{
	chipcregs_t *cc = (chipcregs_t *)h;
	uint32 jctrl;

	/* read-modify-write: preserve all other control bits */
	jctrl = R_REG(osh, &cc->jtagctrl);
	jctrl &= ~JCTRL_EN;
	W_REG(osh, &cc->jtagctrl, jctrl);
}
/*
 * Return TRUE iff the receive DMA engine is present and enabled.
 * An all-ones readback means the core is absent or dead.
 */
bool dma_rxenabled(dma_info_t *di)
{
	uint32 rcvctl = R_REG(&di->regs->rcvcontrol);

	if (rcvctl == 0xffffffff)
		return FALSE;	/* dead/absent core */

	return ((rcvctl & RC_RE) != 0);
}
/*
 * Perform a combined IR/DR JTAG transaction through chipcommon's JTAG
 * master: load the instruction and data registers, start the access, wait
 * for completion, and return the captured DR value.
 *
 * NOTE(review): the busy-wait has no timeout — a wedged JTAG master hangs
 * here forever (contrast jtm_wait, which bounds its spin).  Confirm this is
 * acceptable for every caller.
 */
uint32 jtag_rwreg(osl_t * osh, void *h, uint32 ir, uint32 dr)
{
	chipcregs_t *cc = (chipcregs_t *) h;
	uint32 tmp;

	W_REG(osh, &cc->jtagir, ir);
	W_REG(osh, &cc->jtagdr, dr);
	/* kick off an IR+DR access with the configured shift widths */
	tmp = JCMD_START | JCMD_ACC_IRDR |
	      ((IRWIDTH - 1) << JCMD_IRW_SHIFT) |
	      (DRWIDTH - 1);
	W_REG(osh, &cc->jtagcmd, tmp);
	while (((tmp = R_REG(osh, &cc->jtagcmd)) & JCMD_BUSY) == JCMD_BUSY) {
		/* OSL_DELAY(1); */
	}

	/* the DR register now holds the value shifted out of the device */
	tmp = R_REG(osh, &cc->jtagdr);
	return (tmp);
}
/*
 * ALSA PCM close callback for the BCM947xx I2S interface.
 * Optionally dumps the DMA descriptor rings to the console (debug builds),
 * resets and reclaims the DMA channel for the stream's direction, and frees
 * the per-substream runtime data.  Always returns 0.
 */
static int bcm947xx_pcm_close(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	bcm947xx_i2s_info_t *snd_bcm = rtd->dai->cpu_dai->private_data;
	struct bcm947xx_runtime_data *brtd = substream->runtime->private_data;

	DBG("%s %s\n", __FUNCTION__, bcm947xx_direction_str(substream));

	DBG("%s: i2s intstatus 0x%x intmask 0x%x\n", __FUNCTION__,
	    R_REG(snd_bcm->osh, &snd_bcm->regs->intstatus),
	    R_REG(snd_bcm->osh, &snd_bcm->regs->intmask));

	/* #if required because dma_dump is unavailable in non-debug builds. */
#if BCM947XX_DUMP_RING_BUFFER_ON_PCM_CLOSE_ON
	{
		/* dump dma rings to console */
#if !defined(FIFOERROR_DUMP_SIZE)
#define FIFOERROR_DUMP_SIZE 8192
#endif
		char *tmp;
		struct bcmstrbuf b;
		if (snd_bcm->di[0] && (tmp = MALLOC(snd_bcm->osh, FIFOERROR_DUMP_SIZE))) {
			bcm_binit(&b, tmp, FIFOERROR_DUMP_SIZE);
			dma_dump(snd_bcm->di[0], &b, TRUE);
			printbig(tmp);
			MFREE(snd_bcm->osh, tmp, FIFOERROR_DUMP_SIZE);
		}
	}
#endif /* BCM947XX_DUMP_RING_BUFFER_ON_PCM_CLOSE_ON */

	/* reclaim all descriptors */
	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
		dma_rxreset(snd_bcm->di[0]);
		dma_rxreclaim(snd_bcm->di[0]);
	} else {
		dma_txreset(snd_bcm->di[0]);
		dma_txreclaim(snd_bcm->di[0], HNDDMA_RANGE_ALL);
	}

	if (brtd)
		kfree(brtd);
	else
		DBG("%s: called with brtd == NULL\n", __FUNCTION__);

	return 0;
}
/* Read len bytes starting at offset into buf. Returns number of bytes read.
 *
 * May return fewer than len bytes: a leading sub-word transfer (to reach
 * source or destination alignment) or a trailing partial word is left for
 * the caller to request on a subsequent call.  Returns -22 (-EINVAL) if the
 * range extends past the flash size.
 */
int
sflash_read(si_t *sih, chipcregs_t *cc, uint offset, uint len, uchar *buf)
{
	uint8 *from, *to;
	int cnt, i;
	osl_t *osh;

	ASSERT(sih);

	if (!len)
		return 0;

	if ((offset + len) > sflash.size)
		return -22;

	/* choose this call's transfer size: align source first, then dest */
	if ((len >= 4) && (offset & 3))
		cnt = 4 - (offset & 3);
	else if ((len >= 4) && ((uintptr)buf & 3))
		cnt = 4 - ((uintptr)buf & 3);
	else
		cnt = len;

	osh = si_osh(sih);

	/* flash is memory-mapped (uncached) at SI_FLASH2 */
	from = (uint8 *)OSL_UNCACHED(SI_FLASH2 + offset);
	to = (uint8 *)buf;

	if (cnt < 4) {
		/* byte-wise alignment prologue; done for this call */
		for (i = 0; i < cnt; i ++) {
			*to = R_REG(osh, from);
			from ++;
			to ++;
		}
		return cnt;
	}

	/* aligned bulk copy, one 32-bit word at a time */
	while (cnt >= 4) {
		*(uint32 *)to = R_REG(osh, (uint32 *)from);
		from += 4;
		to += 4;
		cnt -= 4;
	}

	/* cnt holds the unread remainder (< 4); report bytes actually read */
	return (len - cnt);
}
static void bcm5301x_bb_post_xfer(struct i2c_adapter *adap) { struct i2c_algo_bit_data *bit_data = adap->algo_data; struct bcm5301x_i2c_data *pdata = bit_data->data; chipcommonbregs_t *ccb = pdata->ccb; W_REG(SI_OSH, &ccb->smbus_config, R_REG(SI_OSH, &ccb->smbus_config) & ~BCM5301X_SMBUS_CFG_BITBANG_EN_B); }
/*
 * Return TRUE iff the transmit DMA engine is present and enabled.
 * An all-ones readback means the core is absent or dead.
 */
bool dma_txenabled(dma_info_t *di)
{
	uint32 xmtctl = R_REG(&di->regs->xmtcontrol);

	/* If the chip is dead, it is not enabled :-) */
	if (xmtctl == 0xffffffff)
		return FALSE;

	return ((xmtctl & XC_XE) != 0);
}
/* Needs to happen when update to shadow SROM is needed
 * : Coming out of 'standby'/'hibernate'
 * : If pcie_war_aspm_ovr state changed
 *
 * Mirrors the pcie_war_aspm_ovr policy (disable / L0s / L1 / full enable)
 * into both the shadow SROM ASPM word and the PCIe capability Link Control
 * register, and advertises CLKREQ support in the shadow SROM whenever ASPM
 * is not fully disabled.
 *
 * NOTE(review): a second static function with this same name appears later
 * in this source — these look like chunks of two different files; they
 * cannot both live in one translation unit.
 */
static void pcie_war_aspm_clkreq(pcicore_info_t *pi)
{
	sbpcieregs_t *pcieregs = pi->regs.pcieregs;
	si_t *sih = pi->sih;
	uint16 val16, *reg16;
	uint32 w;

	if (!PCIEGEN1_ASPM(sih))
		return;

	/* bypass this on QT or VSIM */
	if (!ISSIM_ENAB(sih)) {
		/* program the ASPM advertisement bits in the shadow SROM */
		reg16 = &pcieregs->sprom[SRSH_ASPM_OFFSET];
		val16 = R_REG(pi->osh, reg16);

		val16 &= ~SRSH_ASPM_ENB;
		if (pi->pcie_war_aspm_ovr == PCIE_ASPM_ENAB)
			val16 |= SRSH_ASPM_ENB;
		else if (pi->pcie_war_aspm_ovr == PCIE_ASPM_L1_ENAB)
			val16 |= SRSH_ASPM_L1_ENB;
		else if (pi->pcie_war_aspm_ovr == PCIE_ASPM_L0s_ENAB)
			val16 |= SRSH_ASPM_L0s_ENB;

		W_REG(pi->osh, reg16, val16);

		/* apply the same policy to Link Control's ASPM field */
		w = OSL_PCI_READ_CONFIG(pi->osh, pi->pciecap_lcreg_offset,
		                        sizeof(uint32));
		w &= ~PCIE_ASPM_ENAB;
		w |= pi->pcie_war_aspm_ovr;
		OSL_PCI_WRITE_CONFIG(pi->osh, pi->pciecap_lcreg_offset,
		                     sizeof(uint32), w);
	}

	/* advertise CLKREQ unless ASPM is fully disabled */
	reg16 = &pcieregs->sprom[SRSH_CLKREQ_OFFSET_REV5];
	val16 = R_REG(pi->osh, reg16);

	if (pi->pcie_war_aspm_ovr != PCIE_ASPM_DISAB) {
		val16 |= SRSH_CLKREQ_ENB;
		pi->pcie_pr42767 = TRUE;
	} else
		val16 &= ~SRSH_CLKREQ_ENB;

	W_REG(pi->osh, reg16, val16);
}
/*
 * Read an ethernet PHY register over MDIO.
 *
 * If the PHY is controlled by a different EMAC core (BCM5222 dual-phy shared
 * MDIO), the read is relayed through that core's driver; otherwise the local
 * EMAC's MDIO engine is used.  Returns the 16-bit register value, or 0xffff
 * if the remote controlling PHY cannot be found.
 *
 * BUG FIX: every register access used "(R)s->..." — a mangled HTML entity
 * ("&reg;" + "s") — instead of "&regs->...".  Restored the intended
 * address-of expressions on the declared local `regs`.
 */
static uint16
chipphyrd(struct bcm4xxx *ch, uint phyaddr, uint reg)
{
	bcmenetregs_t *regs;

	ASSERT(phyaddr < MAXEPHY);

	/*
	 * BCM5222 dualphy shared mdio contortion.
	 * remote phy: another emac controls our phy.
	 */
	if (ch->etc->mdcport != ch->etc->coreunit) {
		if (ch->etphy == NULL) {
			ch->etphy = et_phyfind(ch->et, ch->etc->mdcport);
			/* first time reset */
			if (ch->etphy)
				chipphyreset(ch, ch->etc->phyaddr);
		}
		if (ch->etphy)
			return (et_phyrd(ch->etphy, phyaddr, reg));
		else
			return (0xffff);
	}

	/* local phy: our emac controls our phy */
	regs = ch->regs;

	/* clear mii_int */
	W_REG(ch->osh, &regs->emacintstatus, EI_MII);

	/* issue the read */
	W_REG(ch->osh, &regs->mdiodata,
	      (MD_SB_START | MD_OP_READ | (phyaddr << MD_PMD_SHIFT) |
	       (reg << MD_RA_SHIFT) | MD_TA_VALID));

	/* wait for it to complete */
	SPINWAIT(((R_REG(ch->osh, &regs->emacintstatus) & EI_MII) == 0), 100);
	if ((R_REG(ch->osh, &regs->emacintstatus) & EI_MII) == 0) {
		ET_ERROR(("et%d: chipphyrd: did not complete\n", ch->etc->unit));
	}

	return (R_REG(ch->osh, &regs->mdiodata) & MD_DATA_MASK);
}
/*
 * Initialize IPX OTP state: select the General Use region base for this
 * chipcommon revision, issue an OTP INIT command so status is current, and
 * capture lock/subregion-programmed bits into oi->status.
 */
static void _ipxotp_init(otpinfo_t *oi, chipcregs_t *cc)
{
	uint k;
	u32 otpp, st;

	/* record word offset of General Use Region for various chipcommon revs */
	if (oi->sih->ccrev == 21 || oi->sih->ccrev == 24
	    || oi->sih->ccrev == 27) {
		oi->otpgu_base = REVA4_OTPGU_BASE;
	} else if (oi->sih->ccrev == 36) {
		/* OTP size greater than equal to 2KB (128 words), otpgu_base is similar to rev23 */
		if (oi->wsize >= 128)
			oi->otpgu_base = REVB8_OTPGU_BASE;
		else
			oi->otpgu_base = REV36_OTPGU_BASE;
	} else if (oi->sih->ccrev == 23 || oi->sih->ccrev >= 25) {
		oi->otpgu_base = REVB8_OTPGU_BASE;
	}

	/* First issue an init command so the status is up to date */
	otpp = OTPP_START_BUSY | ((OTPPOC_INIT << OTPP_OC_SHIFT) & OTPP_OC_MASK);

	W_REG(oi->osh, &cc->otpprog, otpp);
	/* poll until hardware clears START_BUSY, bounded by OTPP_TRIES */
	for (k = 0;
	     ((st = R_REG(oi->osh, &cc->otpprog)) & OTPP_START_BUSY)
	     && (k < OTPP_TRIES); k++)
		;
	if (k >= OTPP_TRIES) {
		/* init command never completed; leave state untouched */
		return;
	}

	/* Read OTP lock bits and subregion programmed indication bits */
	oi->status = R_REG(oi->osh, &cc->otpstatus);

	/* these chips report subregion-programmed bits in the GU region itself */
	if ((oi->sih->chip == BCM43224_CHIP_ID)
	    || (oi->sih->chip == BCM43225_CHIP_ID)) {
		u32 p_bits;
		p_bits = (ipxotp_otpr(oi, cc, oi->otpgu_base + OTPGU_P_OFF) &
		          OTPGU_P_MSK) >> OTPGU_P_SHIFT;
		oi->status |= (p_bits << OTPS_GUP_SHIFT);
	}
/* Needs to happen when update to shadow SROM is needed
 * : Coming out of 'standby'/'hibernate'
 * : If pcie_war_aspm_ovr state changed
 *
 * Older variant with a boolean override: when pcie_war_aspm_ovr is clear,
 * ASPM and CLKREQ are advertised/enabled; when set, both are disabled.
 *
 * NOTE(review): a second static function with this same name appears
 * earlier in this source — these look like chunks of two different files;
 * they cannot both live in one translation unit.
 */
static void pcie_war_aspm_clkreq(pcicore_info_t *pi)
{
	sbpcieregs_t *pcieregs = pi->regs.pcieregs;
	si_t *sih = pi->sih;
	uint16 val16, *reg16;
	uint32 w;

	if (!PCIE_ASPM(sih))
		return;

	/* PR43448 WAR: Enable ASPM in the shadow SROM and Link control */
	/* bypass this on QT or VSIM */
	if (sih->chippkg != HDLSIM_PKG_ID && sih->chippkg != HWSIM_PKG_ID) {
		/* mirror the override into the shadow SROM ASPM word */
		reg16 = &pcieregs->sprom[SRSH_ASPM_OFFSET];
		val16 = R_REG(pi->osh, reg16);
		if (!pi->pcie_war_aspm_ovr)
			val16 |= SRSH_ASPM_ENB;
		else
			val16 &= ~SRSH_ASPM_ENB;
		W_REG(pi->osh, reg16, val16);

		/* and into Link Control's ASPM enable field */
		w = OSL_PCI_READ_CONFIG(pi->osh, pi->pciecap_lcreg_offset,
		                        sizeof(uint32));
		if (!pi->pcie_war_aspm_ovr)
			w |= PCIE_ASPM_ENAB;
		else
			w &= ~PCIE_ASPM_ENAB;
		OSL_PCI_WRITE_CONFIG(pi->osh, pi->pciecap_lcreg_offset,
		                     sizeof(uint32), w);
	}

	/* PR42767 WAR: if clockreq is not advertized in SROM, advertize it */
	reg16 = &pcieregs->sprom[SRSH_CLKREQ_OFFSET_REV5];
	val16 = R_REG(pi->osh, reg16);

	if (!pi->pcie_war_aspm_ovr) {
		val16 |= SRSH_CLKREQ_ENB;
		pi->pcie_pr42767 = TRUE;
	} else
		val16 &= ~SRSH_CLKREQ_ENB;

	W_REG(pi->osh, reg16, val16);
}
/*
 * Probe whether the 32-bit DMA engine implements the address-extension (AE)
 * bits: set AE, read the register back, clear AE again.  If the bit stuck,
 * the engine supports address extension.
 */
static bool _dma32_addrext(osl_t *osh, dma32regs_t *dma32regs)
{
	uint32 control;

	OR_REG(osh, &dma32regs->control, XC_AE);
	control = R_REG(osh, &dma32regs->control);
	AND_REG(osh, &dma32regs->control, ~XC_AE);

	/* AE readable as set => extension supported */
	return ((control & XC_AE) == XC_AE);
}
/* Poll for command completion. Returns zero when complete.
 *
 * ST parts: returns the Write-In-Progress bit of the status register.
 * Atmel parts: returns nonzero while the Ready bit is still clear.
 * Returns -22 (-EINVAL) for an out-of-range offset; 0 for unknown flash
 * types (treated as always complete).
 */
int sflash_poll(chipcregs_t *cc, uint offset)
{
	if (offset >= sflash.size)
		return -22;

	switch (sflash.type) {
	case SFLASH_ST:
		/* Check for ST Write In Progress bit */
		sflash_cmd(cc, SFLASH_ST_RDSR);
		return R_REG(NULL, &cc->flashdata) & SFLASH_ST_WIP;
	case SFLASH_AT:
		/* Check for Atmel Ready bit */
		sflash_cmd(cc, SFLASH_AT_STATUS);
		return !(R_REG(NULL, &cc->flashdata) & SFLASH_AT_READY);
	}

	return 0;
}
/*
 * Spin (bounded by JTAG_RETRIES) until the JTAG master is no longer busy.
 * On timeout returns the sentinel 0xbadbad03.  Otherwise returns the DR
 * register contents if readdr is set, or 0xffffffff when the caller does
 * not want the data.
 */
static uint32 jtm_wait(chipcregs_t *cc, bool readdr)
{
	uint i;

	i = 0;
	while (((R_REG(NULL, &cc->jtagcmd) & JCMD_BUSY) == JCMD_BUSY) &&
	       (i < JTAG_RETRIES)) {
		i++;
	}

	if (i >= JTAG_RETRIES)
		return 0xbadbad03;	/* timeout sentinel */

	if (readdr)
		return R_REG(NULL, &cc->jtagdr);
	else
		return 0xffffffff;	/* "no data requested" marker */
}
/*
 * Reset the BCM5301x SMBus controller and reprogram it for master
 * operation: RX FIFO threshold, bus timing (100 vs 400 kHz per
 * pdata->fast, plus idle time), then enable the block with the configured
 * master retry count.
 */
static void bcm5301x_reset_and_en(struct bcm5301x_i2c_data *pdata)
{
	chipcommonbregs_t *ccb = pdata->ccb;

	bcm5301x_reset(pdata);

	W_REG(SI_OSH, &ccb->smbus_master_fifo_control,
	      BCM5301X_SMBUS_MASTER_FIFO_CTRL_RX_FIFO_THRESH_S(BCM5301X_CFG_RX_THRESH));
	/* select 400 kHz timing only when the adapter is configured fast */
	W_REG(SI_OSH, &ccb->smbus_timing_config,
	      (pdata->fast ? BCM5301X_SMBUS_TIMING_CFG_MODE_400_B : 0) |
	      BCM5301X_SMBUS_TIMING_CFG_SMBUS_IDLE_TIME_S(BCM5301X_CFG_SMBUS_IDLE_TIME));
	/* enabling last, after FIFO and timing are in place */
	W_REG(SI_OSH, &ccb->smbus_config,
	      BCM5301X_SMBUS_CFG_SMB_EN_B |
	      BCM5301X_SMBUS_CFG_MASTER_RETRY_CNT_S(BCM5301X_CFG_SMBUS_RETRY_CNT));

	BCM5301X_MSG_INFO("config 0x%x timing 0x%x",
	                  R_REG(SI_OSH, &ccb->smbus_config),
	                  R_REG(SI_OSH, &ccb->smbus_timing_config));
}
/*
 * Read-modify-write the PCIe core control register's survive-PERST bits:
 * clear the bits in 'mask', set the bits in 'val', and return the
 * resulting register contents.  With mask == val == 0 this is a pure read.
 * Compiled out (returns 0) unless SURVIVE_PERST_ENAB is defined.
 */
uint32
pcie_survive_perst(void* pch, uint32 mask, uint32 val)
{
#ifdef SURVIVE_PERST_ENAB
	pcicore_info_t *pi = (pcicore_info_t *)pch;
	sbpcieregs_t *pcieregs = pi->regs.pcieregs;
	uint32 w;

	/* mask and set */
	if (mask || val) {
		w = (R_REG(pi->osh, (&pcieregs->control)) & ~mask) | val;
		W_REG(pi->osh, (&pcieregs->control), w);
	}

	/* readback */
	return R_REG(pi->osh, (&pcieregs->control));
#else
	return 0;
#endif /* SURVIVE_PERST_ENAB */
}
/*
 * Perform a single PCIe gen2 SERDES MDIO read or write.
 *
 * physmedia selects the MDIO block (device), regaddr the register within
 * it.  For reads, *val receives the 16-bit result.  slave_bypass skips the
 * MDIO slave.  Returns 0 on success, 1 on timeout.
 *
 * BUG FIX: on spinwait timeout this fell through and returned 0 (success)
 * with *val left unset for reads, so callers could not detect the failure.
 * Now logs the timeout and returns 1, matching the gen1 mdioop convention.
 */
static int
pciegen2_mdioop(pcicore_info_t *pi, uint physmedia, uint regaddr, bool write,
                uint *val, bool slave_bypass)
{
	sbpcieregs_t *pcieregs = pi->regs.pcieregs;
	uint pcie_serdes_spinwait = 200, i = 0, mdio_ctrl;
	uint32 *reg32;

	if (!PCIE_GEN2(pi->sih))
		ASSERT(0);

	pcie2_mdiosetblock(pi, physmedia);

	/* enable mdio access to SERDES */
	mdio_ctrl = MDIOCTL2_DIVISOR_VAL;
	mdio_ctrl |= (regaddr << MDIOCTL2_REGADDR_SHF);
	if (slave_bypass)
		mdio_ctrl |= MDIOCTL2_SLAVE_BYPASS;
	if (!write)
		mdio_ctrl |= MDIOCTL2_READ;

	W_REG(pi->osh, (&pcieregs->u.pcie2.mdiocontrol), mdio_ctrl);

	if (write) {
		reg32 = (uint32 *)&(pcieregs->u.pcie2.mdiowrdata);
		W_REG(pi->osh, reg32, *val | MDIODATA2_DONE);
	} else
		reg32 = (uint32 *)&(pcieregs->u.pcie2.mdiorddata);

	/* retry till the transaction is complete */
	while (i < pcie_serdes_spinwait) {
		/* hardware clears DONE once the transaction has finished */
		if (!(R_REG(pi->osh, reg32) & MDIODATA2_DONE)) {
			if (!write)
				*val = (R_REG(pi->osh, reg32) & MDIODATA2_MASK);
			return 0;
		}
		OSL_DELAY(1000);
		i++;
	}

	PCI_ERROR(("pciegen2_mdioop: timed out\n"));
	return 1;
}
/*
 * Read a 64-bit value from the robo switch via the SRAB (Switch Register
 * Access Bridge): acquire the grant, issue a page/offset read command, poll
 * for completion (resetting the interface on timeout), then assemble the
 * result from the high/low data registers.  Returns all-ones on timeout.
 *
 * BUG FIX: the SRAB register accesses used "(R)s->..." — a mangled HTML
 * entity ("&reg;" + "s") — instead of "&regs->...".  Restored the intended
 * address-of expressions on the declared local `regs`.
 */
static uint64
_switch_reg_read(void *rinfo, uint8 page, uint8 offset)
{
	uint64 value = ~(uint64)0;
	uint32 regval;
	uint32 timeout = ROBO_POLL_TIMEOUT;
	robo_info_t *robo = (robo_info_t *)rinfo;
	si_info_t *sii;
	osl_t *osh;
	nssrabregs_t *regs;	/* pointer to chip registers */

	sii = SI_INFO((si_t*)robo->sbh);
	osh = sii->osh;
	regs = robo->regs;
	COMPILER_REFERENCE(osh);

	/* Assemble read command */
	_switch_request_grant(rinfo);

	regval = ((page << CHIPCOMMONB_SRAB_CMDSTAT_SRA_PAGE_SHIFT) |
	          (offset << CHIPCOMMONB_SRAB_CMDSTAT_SRA_OFFSET_SHIFT) |
	          CHIPCOMMONB_SRAB_CMDSTAT_SRA_GORDYN_MASK);
	W_REG(osh, &regs->chipcommonb_srab_cmdstat, regval);

	/* Wait for command complete: hardware clears GORDYN when done */
	while (R_REG(osh, &regs->chipcommonb_srab_cmdstat) &
	       CHIPCOMMONB_SRAB_CMDSTAT_SRA_GORDYN_MASK) {
		if (!--timeout) {
			SRAB_ERR(("robo_read: timeout"));
			_switch_interface_reset(rinfo);
			break;
		}
	}

	if (timeout) {
		/* Didn't time out, read and return the value */
		value = (((uint64)R_REG(osh, &regs->chipcommonb_srab_rdh)) << 32) |
		        R_REG(osh, &regs->chipcommonb_srab_rdl);
	}

	_switch_release_grant(rinfo);

	return value;
}
/*
 * Read one 16-bit OTP word via chipcommon's memory-mapped sromotp window.
 * wn is the word index and must be inside the OTP array (oi->wsize).
 */
static u16 ipxotp_otpr(void *oh, chipcregs_t *cc, uint wn)
{
	otpinfo_t *oi = (otpinfo_t *) oh;

	ASSERT(wn < oi->wsize);
	ASSERT(cc != NULL);

	return R_REG(oi->osh, &cc->sromotp[wn]);
}