bool
BCMINITFN(si_cc_register_isr)(si_t *sih, cc_isr_fn isr, uint32 ccintmask, void *cbdata)
{
	chipcregs_t *regs;
	uint saved_idx;
	uint slot;
	bool registered = FALSE;

	/* Remember which core is selected so it can be restored on exit. */
	saved_idx = si_coreidx(sih);
	regs = si_setcoreidx(sih, SI_CC_IDX);
	ASSERT(regs);

	/* Claim the first free descriptor slot for this ISR. */
	for (slot = 0; slot < MAX_CC_INT_SOURCE; slot++) {
		if (cc_isr_desc[slot].isr != NULL)
			continue;
		cc_isr_desc[slot].isr = isr;
		cc_isr_desc[slot].cbdata = cbdata;
		cc_isr_desc[slot].intmask = ccintmask;
		registered = TRUE;
		break;
	}

	/* Merge the new sources into the chipcommon interrupt mask register
	 * and the software shadow copy. */
	if (registered) {
		cc_intmask = R_REG(si_osh(sih), &regs->intmask) | ccintmask;
		W_REG(si_osh(sih), &regs->intmask, cc_intmask);
	}

	/* restore original coreidx */
	si_setcoreidx(sih, saved_idx);

	return registered;
}
/* Write a 32-bit value to a silicon-backplane register, working around
 * PCMCIA bus limitations (11-bit addressing and 16-bit-only writes). */
static void
sb_write_sbreg(si_info_t *sii, volatile uint32 *sbr, uint32 v)
{
	uint8 tmp;
	volatile uint32 dummy;
	uint32 intr_val = 0;

	/*
	 * compact flash only has 11 bits address, while we needs 12 bits address.
	 * MEM_SEG will be OR'd with other 11 bits address in hardware,
	 * so we program MEM_SEG with 12th bit when necessary(access sb registers).
	 * For normal PCMCIA bus(CFTable_regwinsz > 2k), do nothing special
	 */
	if (PCMCIA(sii)) {
		INTR_OFF(sii, intr_val);
		tmp = 1;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
		sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11));	/* mask out bit 11 */
	}

	/* On PCMCIA the 32-bit register is written as two 16-bit halves, low
	 * half first, with a read before each half-write — presumably a
	 * hardware access-ordering workaround; confirm against the sibling
	 * copy of this function which cites the relevant PRs. */
	if (BUSTYPE(sii->pub.bustype) == PCMCIA_BUS) {
		dummy = R_REG(sii->osh, sbr);
		BCM_REFERENCE(dummy);
		W_REG(sii->osh, (volatile uint16 *)sbr, (uint16)(v & 0xffff));
		dummy = R_REG(sii->osh, sbr);
		BCM_REFERENCE(dummy);
		W_REG(sii->osh, ((volatile uint16 *)sbr + 1), (uint16)((v >> 16) & 0xffff));
	} else
	/* NOTE(review): the non-PCMCIA else-branch is truncated in this view. */
/*
 * Write 'len' bytes from 'buf' into PCI configuration space for the device
 * addressed by (bus, dev, func, off).  Supported widths are 1, 2 and 4
 * bytes; sub-word writes read-modify-write the 32-bit register value that
 * BUSPROBE() placed in 'val'.  Returns 0 on success, -1 on bad 'len'.
 */
static int
extpci_write_config(sb_t *sbh, uint bus, uint dev, uint func, uint off, void *buf, int len)
{
	uint32 addr, *reg = NULL, val;
	int ret = 0;

	/* Map the config-space window and probe it; any failure means the
	 * target is absent/inaccessible and the write is silently dropped. */
	if (pci_disabled ||
	    !(addr = config_cmd(sbh, bus, dev, func, off)) ||
	    !(reg = (uint32 *) REG_MAP(addr, len)) ||
	    BUSPROBE(val, reg))
		goto done;

	/* Merge the new bytes into the probed 32-bit value.  Unsigned mask
	 * literals avoid left-shifting into the sign bit (undefined behavior
	 * when off&3 puts the mask in the top byte/half-word). */
	if (len == 4)
		val = *((uint32 *) buf);
	else if (len == 2) {
		val &= ~(0xffffU << (8 * (off & 3)));
		val |= *((uint16 *) buf) << (8 * (off & 3));
	} else if (len == 1) {
		val &= ~(0xffU << (8 * (off & 3)));
		val |= *((uint8 *) buf) << (8 * (off & 3));
	} else {
		/* Unsupported width: fail WITHOUT touching the register.
		 * (Previously the probed value was written back anyway, which
		 * can have side effects on write-1-to-clear status bits.) */
		ret = -1;
		goto done;
	}

	W_REG(reg, val);

done:
	if (reg)
		REG_UNMAP(reg);
	return ret;
}
/* Turn off the JTAG master in chipcommon. */
void
hnd_jtagm_disable(osl_t *osh, void *h)
{
	chipcregs_t *cc = (chipcregs_t *)h;
	uint32 ctrl;

	/* Clear only the enable bit, preserving all other control bits. */
	ctrl = R_REG(osh, &cc->jtagctrl);
	W_REG(osh, &cc->jtagctrl, ctrl & ~JCTRL_EN);
}
/* Select an MDIO register block on the PCIe gen1 SERDES.  Returns TRUE on
 * completion, FALSE if the access never signalled done. */
static bool
pcie_mdiosetblock(pcicore_info_t *pi, uint blk)
{
	sbpcieregs_t *pcieregs = pi->regs.pcieregs;
	uint cmd;
	uint attempt;
	uint max_attempts = 200;

	/* Build an MDIO write to the block-address register selecting 'blk'. */
	cmd = MDIODATA_START | MDIODATA_WRITE |
	      (MDIODATA_DEV_ADDR << MDIODATA_DEVADDR_SHF) |
	      (MDIODATA_BLK_ADDR << MDIODATA_REGADDR_SHF) |
	      MDIODATA_TA | (blk << 4);
	W_REG(pi->osh, &pcieregs->u.pcie1.mdiodata, cmd);

	PR28829_DELAY();

	/* Poll for completion, ~1ms per attempt. */
	for (attempt = 0; attempt < max_attempts; attempt++) {
		if (R_REG(pi->osh, &(pcieregs->u.pcie1.mdiocontrol)) & MDIOCTL_ACCESS_DONE)
			return TRUE;
		OSL_DELAY(1000);
	}

	PCI_ERROR(("pcie_mdiosetblock: timed out\n"));
	return FALSE;
}
/* Turn off the JTAG master in chipcommon (si_t variant).
 * Note: 'sih' is unused here; register access goes through a NULL osh. */
void
hnd_jtagm_disable(si_t *sih, void *h)
{
	chipcregs_t *cc = (chipcregs_t *)h;
	uint32 ctrl = R_REG(NULL, &cc->jtagctrl);

	/* Drop the enable bit; all other control bits are preserved. */
	W_REG(NULL, &cc->jtagctrl, ctrl & ~JCTRL_EN);
}
/* disable chip interrupts */
static void BCMFASTPATH
chipintrsoff(struct bcm4xxx *ch)
{
	/* Mask every interrupt source at the core. */
	W_REG(ch->osh, &ch->regs->intmask, 0);
	/* Read the register back so the write is posted before returning. */
	(void) R_REG(ch->osh, &ch->regs->intmask);	/* sync readback */
	/* Keep the software shadow mask in step with the hardware. */
	ch->intmask = 0;
}
/* Program interrupt-lazy receive: coalesce rx interrupts by timeout and
 * frame count, both taken from the etc configuration. */
static void BCMFASTPATH
chiprxlazy(ch_t *ch)
{
	uint timeout_bits;
	uint framecnt_bits;

	/* Pack the timeout and frame-count fields into one register value. */
	timeout_bits = ch->etc->rxlazy_timeout & IRL_TO_MASK;
	framecnt_bits = ch->etc->rxlazy_framecnt << IRL_FC_SHIFT;
	W_REG(ch->osh, &ch->regs->intrecvlazy, timeout_bits | framecnt_bits);
}
/* Arm the general-purpose timer to fire every 'microsecs' microseconds. */
static void
chipconfigtimer(struct bcm4xxx *ch, uint microsecs)
{
	uint ticks;

	/* A zero tick rate would arm a zero-period timer. */
	ASSERT(ch->etc->bp_ticks_usec != 0);

	/* Convert microseconds to backplane ticks and enable the GP timer
	 * in periodic mode. */
	ticks = microsecs * ch->etc->bp_ticks_usec;
	W_REG(ch->osh, &ch->regs->gptimer, ticks);
}
/* Perform one combined IR+DR JTAG access through the chipcommon JTAG
 * master and return the data shifted back out. */
uint32
jtag_rwreg(osl_t * osh, void *h, uint32 ir, uint32 dr)
{
	chipcregs_t *cc = (chipcregs_t *) h;
	uint32 cmd;

	/* Load the instruction and data registers, then start the access. */
	W_REG(osh, &cc->jtagir, ir);
	W_REG(osh, &cc->jtagdr, dr);
	cmd = JCMD_START | JCMD_ACC_IRDR |
	      ((IRWIDTH - 1) << JCMD_IRW_SHIFT) | (DRWIDTH - 1);
	W_REG(osh, &cc->jtagcmd, cmd);

	/* Busy-wait until the JTAG master reports completion. */
	while ((R_REG(osh, &cc->jtagcmd) & JCMD_BUSY) == JCMD_BUSY) {
		/* OSL_DELAY(1); */
	}

	/* The result is left in jtagdr. */
	return R_REG(osh, &cc->jtagdr);
}
/* Compute the backplane address for a PCI configuration access to
 * (bus, dev, func, off), programming the sbtopci1 window as a side effect.
 * Returns 0 for unreachable targets. */
static uint32
config_cmd(sb_t *sbh, uint bus, uint dev, uint func, uint off)
{
	uint saved_core;
	sbpciregs_t *regs;
	uint32 addr = 0;
	osl_t *osh;

	/* CardBusMode supports only one device */
	if (cardbus && dev > 1)
		return 0;

	osh = sb_osh(sbh);
	saved_core = sb_coreidx(sbh);
	regs = (sbpciregs_t *) sb_setcore(sbh, SB_PCI, 0);

	if (bus == 1) {
		/* Type 0 transaction: directly attached bus.
		 * Skip unwired slots. */
		if (dev < PCI_SLOT_MAX) {
			uint32 win = SBTOPCI_CFG0 |
			    ((1 << (dev + PCI_SLOTAD_MAP)) & SBTOPCI1_MASK);

			/* Slide the PCI window to the appropriate slot. */
			W_REG(osh, &regs->sbtopci1, win);
			addr = SB_PCI_CFG |
			    ((1 << (dev + PCI_SLOTAD_MAP)) & ~SBTOPCI1_MASK) |
			    (func << PCICFG_FUN_SHIFT) | (off & ~3);
		}
	} else {
		/* Type 1 transaction: device behind a bridge. */
		W_REG(osh, &regs->sbtopci1, SBTOPCI_CFG1);
		addr = SB_PCI_CFG |
		    (bus << PCICFG_BUS_SHIFT) |
		    (dev << PCICFG_SLOT_SHIFT) |
		    (func << PCICFG_FUN_SHIFT) | (off & ~3);
	}

	/* Restore whichever core was selected on entry. */
	sb_setcoreidx(sbh, saved_core);

	return addr;
}
/*
 * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set
 * operation, switch back to the original core, and return the new value.
 *
 * When using the silicon backplane, no fiddling with interrupts
 * or core switches are needed.
 *
 * Also, when using pci/pcie, we can optimize away the core switching
 * for pci registers and (on newer pci cores) chipcommon registers.
 */
uint sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
{
	uint origidx = 0;
	u32 *r = NULL;
	uint w;
	uint intr_val = 0;
	bool fast = false;	/* NOTE(review): never set true in this copy, so
				 * the core-switch path below is always taken;
				 * the fast (no-switch) optimization appears
				 * vestigial — confirm against si_corereg. */
	si_info_t *sii;

	sii = SI_INFO(sih);

	ASSERT(GOODIDX(coreidx));
	ASSERT(regoff < SI_CORE_SIZE);
	/* 'val' must not set bits outside 'mask'. */
	ASSERT((val & ~mask) == 0);

	if (coreidx >= SI_MAXCORES)
		return 0;

	if (!fast) {
		/* Keep interrupts off across the core switch. */
		INTR_OFF(sii, intr_val);

		/* save current core index */
		origidx = si_coreidx(&sii->pub);

		/* switch core */
		r = (u32 *) ((unsigned char *) sb_setcoreidx(&sii->pub, coreidx) + regoff);
	}
	ASSERT(r != NULL);

	/* mask and set */
	if (mask || val) {
		if (regoff >= SBCONFIGOFF) {
			/* sbconfig-space offsets go through the SB accessors. */
			w = (R_SBREG(sii, r) & ~mask) | val;
			W_SBREG(sii, r, w);
		} else {
			w = (R_REG(sii->osh, r) & ~mask) | val;
			W_REG(sii->osh, r, w);
		}
	}

	/* readback */
	if (regoff >= SBCONFIGOFF)
		w = R_SBREG(sii, r);
	else
		w = R_REG(sii->osh, r);

	if (!fast) {
		/* restore core index */
		if (origidx != coreidx)
			sb_setcoreidx(&sii->pub, origidx);

		INTR_RESTORE(sii, intr_val);
	}

	return w;
}
static void bcm5301x_bb_post_xfer(struct i2c_adapter *adap) { struct i2c_algo_bit_data *bit_data = adap->algo_data; struct bcm5301x_i2c_data *pdata = bit_data->data; chipcommonbregs_t *ccb = pdata->ccb; W_REG(SI_OSH, &ccb->smbus_config, R_REG(SI_OSH, &ccb->smbus_config) & ~BCM5301X_SMBUS_CFG_BITBANG_EN_B); }
/* Needs to happen when update to shadow SROM is needed
 * : Coming out of 'standby'/'hibernate'
 * : If pcie_war_aspm_ovr state changed
 */
static void
pcie_war_aspm_clkreq(pcicore_info_t *pi)
{
	sbpcieregs_t *pcieregs = pi->regs.pcieregs;
	si_t *sih = pi->sih;
	uint16 val16, *reg16;
	uint32 w;

	/* Only applicable to gen1 cores with ASPM support. */
	if (!PCIEGEN1_ASPM(sih))
		return;

	/* bypass this on QT or VSIM */
	if (!ISSIM_ENAB(sih)) {
		/* Rewrite the ASPM advertisement bits in the shadow SROM to
		 * reflect the requested override level (off / L1 / L0s / full). */
		reg16 = &pcieregs->sprom[SRSH_ASPM_OFFSET];
		val16 = R_REG(pi->osh, reg16);

		val16 &= ~SRSH_ASPM_ENB;
		if (pi->pcie_war_aspm_ovr == PCIE_ASPM_ENAB)
			val16 |= SRSH_ASPM_ENB;
		else if (pi->pcie_war_aspm_ovr == PCIE_ASPM_L1_ENAB)
			val16 |= SRSH_ASPM_L1_ENB;
		else if (pi->pcie_war_aspm_ovr == PCIE_ASPM_L0s_ENAB)
			val16 |= SRSH_ASPM_L0s_ENB;

		W_REG(pi->osh, reg16, val16);

		/* Mirror the same ASPM level into the Link Control register
		 * in PCI config space. */
		w = OSL_PCI_READ_CONFIG(pi->osh, pi->pciecap_lcreg_offset, sizeof(uint32));
		w &= ~PCIE_ASPM_ENAB;
		w |= pi->pcie_war_aspm_ovr;
		OSL_PCI_WRITE_CONFIG(pi->osh, pi->pciecap_lcreg_offset, sizeof(uint32), w);
	}

	/* Advertise CLKREQ support in the shadow SROM unless ASPM is fully
	 * disabled; also record that the PR42767 workaround was applied. */
	reg16 = &pcieregs->sprom[SRSH_CLKREQ_OFFSET_REV5];
	val16 = R_REG(pi->osh, reg16);

	if (pi->pcie_war_aspm_ovr != PCIE_ASPM_DISAB) {
		val16 |= SRSH_CLKREQ_ENB;
		pi->pcie_pr42767 = TRUE;
	} else
		val16 &= ~SRSH_CLKREQ_ENB;

	W_REG(pi->osh, reg16, val16);
}
/* Read PHY register 'reg' from the PHY at 'phyaddr' over MDIO.
 * Returns the 16-bit register value, or 0xffff if the remote PHY owner
 * cannot be found. */
static uint16
chipphyrd(struct bcm4xxx *ch, uint phyaddr, uint reg)
{
	bcmenetregs_t *regs;

	ASSERT(phyaddr < MAXEPHY);

	/*
	 * BCM5222 dualphy shared mdio contortion.
	 * remote phy: another emac controls our phy.
	 */
	if (ch->etc->mdcport != ch->etc->coreunit) {
		if (ch->etphy == NULL) {
			/* Locate the emac that owns our MDC lines. */
			ch->etphy = et_phyfind(ch->et, ch->etc->mdcport);

			/* first time reset */
			if (ch->etphy)
				chipphyreset(ch, ch->etc->phyaddr);
		}
		/* Forward the read to the owning emac; 0xffff (all ones) if
		 * the remote owner was not found. */
		if (ch->etphy)
			return (et_phyrd(ch->etphy, phyaddr, reg));
		else
			return (0xffff);
	}

	/* local phy: our emac controls our phy */
	regs = ch->regs;

	/* clear mii_int */
	W_REG(ch->osh, &regs->emacintstatus, EI_MII);

	/* issue the read */
	W_REG(ch->osh, &regs->mdiodata,
	      (MD_SB_START | MD_OP_READ | (phyaddr << MD_PMD_SHIFT) |
	       (reg << MD_RA_SHIFT) | MD_TA_VALID));

	/* wait for it to complete */
	SPINWAIT(((R_REG(ch->osh, &regs->emacintstatus) & EI_MII) == 0), 100);
	if ((R_REG(ch->osh, &regs->emacintstatus) & EI_MII) == 0) {
		/* Timed out; the data register is read anyway below. */
		ET_ERROR(("et%d: chipphyrd: did not complete\n", ch->etc->unit));
	}

	return (R_REG(ch->osh, &regs->mdiodata) & MD_DATA_MASK);
}
/*
 * Initialize jtag master and return handle for
 * jtag_rwreg. Returns NULL on failure.
 */
void *
hnd_jtagm_init(si_t *sih, uint clkd, bool exttap)
{
	void *regs;
	chipcregs_t *cc;
	osl_t *osh;
	uint32 tmp;

	osh = si_osh(sih);

	regs = si_setcoreidx(sih, SI_CC_IDX);
	if (regs == NULL)
		return NULL;
	cc = (chipcregs_t *) regs;

	/*
	 * JTAG-master availability is determined from the core revision and
	 * capability bits.  Corerev 10 has a jtagm too, but the only chip
	 * carrying it has no mips and its jtagcmd register layout differs,
	 * so only corerev >= 11 is accepted.
	 */
	if (sih->ccrev < 11)
		return NULL;
	if ((sih->cccaps & CC_CAP_JTAGP) == 0)
		return NULL;

	/* Program the JTAG clock divider if the caller requested one. */
	if (clkd != 0) {
		tmp = R_REG(osh, &cc->clkdiv);
		tmp = (tmp & ~CLKD_JTAG) | ((clkd << CLKD_JTAG_SHIFT) & CLKD_JTAG);
		W_REG(osh, &cc->clkdiv, tmp);
	}

	/* Enable the JTAG master, optionally routed to an external TAP. */
	W_REG(osh, &cc->jtagctrl, JCTRL_EN | (exttap ? JCTRL_EXT_EN : 0));

	return regs;
}
/* Perform a single MDIO read or write on the PCIe gen2 SERDES.
 * For reads the result is stored through 'val'; for writes '*val' is the
 * data to send.  Always returns 0 (see NOTE below). */
static int
pciegen2_mdioop(pcicore_info_t *pi, uint physmedia, uint regaddr, bool write, uint *val, bool slave_bypass)
{
	sbpcieregs_t *pcieregs = pi->regs.pcieregs;
	uint pcie_serdes_spinwait = 200, i = 0, mdio_ctrl;
	uint32 *reg32;

	/* This path is only valid on gen2 cores. */
	if (!PCIE_GEN2(pi->sih))
		ASSERT(0);

	/* Select the SERDES block to address. */
	pcie2_mdiosetblock(pi, physmedia);

	/* enable mdio access to SERDES */
	mdio_ctrl = MDIOCTL2_DIVISOR_VAL;
	mdio_ctrl |= (regaddr << MDIOCTL2_REGADDR_SHF);
	if (slave_bypass)
		mdio_ctrl |= MDIOCTL2_SLAVE_BYPASS;
	if (!write)
		mdio_ctrl |= MDIOCTL2_READ;

	W_REG(pi->osh, (&pcieregs->u.pcie2.mdiocontrol), mdio_ctrl);

	/* Writes go through mdiowrdata with the DONE bit set; reads poll
	 * mdiorddata for the DONE bit to clear. */
	if (write) {
		reg32 = (uint32 *)&(pcieregs->u.pcie2.mdiowrdata);
		W_REG(pi->osh, reg32, *val | MDIODATA2_DONE);
	} else
		reg32 = (uint32 *)&(pcieregs->u.pcie2.mdiorddata);

	/* retry till the transaction is complete (~1ms per attempt) */
	while (i < pcie_serdes_spinwait) {
		if (!(R_REG(pi->osh, reg32) & MDIODATA2_DONE)) {
			if (!write)
				*val = (R_REG(pi->osh, reg32) & MDIODATA2_MASK);
			return 0;
		}
		OSL_DELAY(1000);
		i++;
	}
	/* NOTE(review): a timeout also returns 0, so callers cannot tell
	 * success from a stuck transaction — confirm this is intended. */
	return 0;
}
/* Needs to happen when update to shadow SROM is needed
 * : Coming out of 'standby'/'hibernate'
 * : If pcie_war_aspm_ovr state changed
 */
static void
pcie_war_aspm_clkreq(pcicore_info_t *pi)
{
	sbpcieregs_t *pcieregs = pi->regs.pcieregs;
	si_t *sih = pi->sih;
	uint16 val16, *reg16;
	uint32 w;

	if (!PCIE_ASPM(sih))
		return;

	/* PR43448 WAR: Enable ASPM in the shadow SROM and Link control */
	/* bypass this on QT or VSIM */
	if (sih->chippkg != HDLSIM_PKG_ID && sih->chippkg != HWSIM_PKG_ID) {
		/* In this variant the override is a plain on/off switch:
		 * a zero override means "enable ASPM". */
		reg16 = &pcieregs->sprom[SRSH_ASPM_OFFSET];
		val16 = R_REG(pi->osh, reg16);
		if (!pi->pcie_war_aspm_ovr)
			val16 |= SRSH_ASPM_ENB;
		else
			val16 &= ~SRSH_ASPM_ENB;
		W_REG(pi->osh, reg16, val16);

		/* Keep the Link Control register's ASPM bits in step. */
		w = OSL_PCI_READ_CONFIG(pi->osh, pi->pciecap_lcreg_offset, sizeof(uint32));
		if (!pi->pcie_war_aspm_ovr)
			w |= PCIE_ASPM_ENAB;
		else
			w &= ~PCIE_ASPM_ENAB;
		OSL_PCI_WRITE_CONFIG(pi->osh, pi->pciecap_lcreg_offset, sizeof(uint32), w);
	}

	/* PR42767 WAR: if clockreq is not advertized in SROM, advertize it */
	reg16 = &pcieregs->sprom[SRSH_CLKREQ_OFFSET_REV5];
	val16 = R_REG(pi->osh, reg16);
	if (!pi->pcie_war_aspm_ovr) {
		val16 |= SRSH_CLKREQ_ENB;
		pi->pcie_pr42767 = TRUE;
	} else
		val16 &= ~SRSH_CLKREQ_ENB;
	W_REG(pi->osh, reg16, val16);
}
/* Kick off one SMBus master transaction: arm the requested completion
 * interrupt(s), then write the command register with the start bit set. */
static void
bcm5301x_xfer_trigger(struct bcm5301x_i2c_data *pdata, uint32 irq, uint32 cmd)
{
	chipcommonbregs_t *ccb = pdata->ccb;

	irq |= BCM5301X_SMBUS_EVENT_ENABLE_MASTER_START_BUSY_B;
	cmd |= BCM5301X_SMBUS_MASTER_CMD_START_BUSY_COMM_B;

	BCM5301X_MSG_DBG("trigger cmd 0x%x irq 0x%x js %lu", cmd, irq, jiffies);

	/* Drain any stale completions left over from a previous transfer. */
	while (try_wait_for_completion(&pdata->done))
		BCM5301X_MSG_ERR("spurious irq");

	/* Enable interrupt */
	W_REG(SI_OSH, &ccb->smbus_event_enable, irq);

	/* Start transaction */
	W_REG(SI_OSH, &ccb->smbus_master_command, cmd);
}
static void bcm5301x_reset_and_en(struct bcm5301x_i2c_data *pdata) { chipcommonbregs_t *ccb = pdata->ccb; bcm5301x_reset(pdata); W_REG(SI_OSH, &ccb->smbus_master_fifo_control, BCM5301X_SMBUS_MASTER_FIFO_CTRL_RX_FIFO_THRESH_S(BCM5301X_CFG_RX_THRESH)); W_REG(SI_OSH, &ccb->smbus_timing_config, (pdata->fast ? BCM5301X_SMBUS_TIMING_CFG_MODE_400_B : 0) | BCM5301X_SMBUS_TIMING_CFG_SMBUS_IDLE_TIME_S(BCM5301X_CFG_SMBUS_IDLE_TIME)); W_REG(SI_OSH, &ccb->smbus_config, BCM5301X_SMBUS_CFG_SMB_EN_B | BCM5301X_SMBUS_CFG_MASTER_RETRY_CNT_S(BCM5301X_CFG_SMBUS_RETRY_CNT)); BCM5301X_MSG_INFO("config 0x%x timing 0x%x", R_REG(SI_OSH, &ccb->smbus_config), R_REG(SI_OSH, &ccb->smbus_timing_config)); }
/* Indirect PCIe register write: latch 'offset' into the address register
 * for the requested space, then write 'val' to the data register.
 * Always returns 0. */
uint
pcie_writereg(osl_t *osh, sbpcieregs_t *pcieregs, uint addrtype, uint offset, uint val)
{
	ASSERT(pcieregs != NULL);

	if (addrtype == PCIE_CONFIGREGS) {
		W_REG(osh, (&pcieregs->configaddr), offset);
		W_REG(osh, (&pcieregs->configdata), val);
	} else if (addrtype == PCIE_PCIEREGS) {
		W_REG(osh, (&pcieregs->pcieindaddr), offset);
		W_REG(osh, (&pcieregs->pcieinddata), val);
	} else {
		/* Unknown address space. */
		ASSERT(0);
	}

	return 0;
}
/*
 * We utilize chipcommon configuration register SBFlagSt to implement a
 * smart shared IRQ handling mechanism through which only ISRs registered
 * for the SB cores that raised the interrupt are invoked. This mechanism
 * relies on the SBFlagSt register's reliable recording of the SB cores
 * that raised the interrupt.
 */
void __init
arch_init_irq(void)
{
	int i;
	uint32 coreidx, mips_core_id;
	void *regs;

	/* Identify the MIPS core flavor; bail out on anything unknown. */
	if (BCM330X(current_cpu_data.processor_id))
		mips_core_id = MIPS33_CORE_ID;
	else if (MIPS74K(current_cpu_data.processor_id))
		mips_core_id = MIPS74K_CORE_ID;
	else {
		printk(KERN_ERR "MIPS CPU type %x unknown",
		       current_cpu_data.processor_id);
		return;
	}

	/* Cache chipc and mips33 config registers */
	ASSERT(bcm947xx_sih);
	coreidx = si_coreidx(bcm947xx_sih);
	regs = si_setcore(bcm947xx_sih, mips_core_id, 0);
	mipsirq = si_irq(bcm947xx_sih);
	if (bcm947xx_sih->socitype == SOCI_SB) {
		if (regs)
			mipssbr = (sbconfig_t *)((ulong)regs + SBCONFIGOFF);
		if ((regs = si_setcore(bcm947xx_sih, CC_CORE_ID, 0)))
			ccsbr = (sbconfig_t *)((ulong)regs + SBCONFIGOFF);
	}
	si_setcoreidx(bcm947xx_sih, coreidx);

	if (BCM330X(current_cpu_data.processor_id)) {
		/* Cache mips33 sbintvec register */
		if (mipssbr)
			shints = R_REG(NULL, &mipssbr->sbintvec);
	} else {
		uint32 *intmask;

		/* Use intmask5 register to route the timer interrupt.
		 * Fix: use an unsigned literal — (1 << 31) left-shifts a
		 * signed int into the sign bit, which is undefined behavior
		 * (C99 6.5.7); (1U << 31) yields the intended bit pattern. */
		intmask = (uint32 *) &((mips74kregs_t *)regs)->intmask[5];
		W_REG(NULL, intmask, 1U << 31);
		intmask = (uint32 *) &((mips74kregs_t *)regs)->intmask[0];
		shints = R_REG(NULL, intmask);
		/* Save the pointer to mips core registers.
		 * NOTE(review): 'regs' may have been reassigned (or be NULL)
		 * by the chipcommon lookup above on SOCI_SB — confirm the
		 * 74K path never runs with socitype == SOCI_SB. */
		mips_corereg = regs;
	}

	/* Install interrupt controllers */
	for (i = 0; i < NR_IRQS; i++) {
		set_irq_chip(i, (i < SBMIPS_NUMIRQS ?
		                 &brcm_irq_type : &brcm_irq2_type));
	}
}
/* Write a 32-bit value to a silicon-backplane register, working around
 * PCMCIA bus limitations (11-bit addressing and 16-bit-only writes),
 * with endian-dependent half-word write ordering. */
static void
sb_write_sbreg(si_info_t *sii, volatile uint32 *sbr, uint32 v)
{
	uint8 tmp;
	volatile uint32 dummy;
	uint32 intr_val = 0;

	/*
	 * compact flash only has 11 bits address, while we needs 12 bits address.
	 * MEM_SEG will be OR'd with other 11 bits address in hardware,
	 * so we program MEM_SEG with 12th bit when necessary(access sb registers).
	 * For normal PCMCIA bus(CFTable_regwinsz > 2k), do nothing special
	 */
	if (PCMCIA(sii)) {
		INTR_OFF(sii, intr_val);
		tmp = 1;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
		sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11));	/* mask out bit 11 */
	}

	/*
	 * WAR for PR18509, PR3864 and PR17322
	 * The config registers are always written as 32-bits. If write 16 bits,
	 * the other 16 bits are random, which needs to be controlled to avoid side-effect.
	 * This is required only for PCMCIA bus.
	 */
	if (BUSTYPE(sii->pub.bustype) == PCMCIA_BUS) {
#ifdef IL_BIGENDIAN
		/* Half-word write order is reversed on big-endian builds. */
		dummy = R_REG(sii->osh, sbr);
		W_REG(sii->osh, ((volatile uint16 *)sbr + 1), (uint16)((v >> 16) & 0xffff));
		dummy = R_REG(sii->osh, sbr);
		W_REG(sii->osh, (volatile uint16 *)sbr, (uint16)(v & 0xffff));
#else
		dummy = R_REG(sii->osh, sbr);
		W_REG(sii->osh, (volatile uint16 *)sbr, (uint16)(v & 0xffff));
		dummy = R_REG(sii->osh, sbr);
		W_REG(sii->osh, ((volatile uint16 *)sbr + 1), (uint16)((v >> 16) & 0xffff));
#endif /* IL_BIGENDIAN */
	} else
	/* NOTE(review): the non-PCMCIA else-branch is truncated in this view. */
/* Indirect PCIe register write (si_t variant).  Gen1/backplane uses the
 * address/data pair for the requested space; gen2 always goes through the
 * config address/data pair.  Always returns 0. */
uint
pcie_writereg(si_t *sih, sbpcieregs_t *pcieregs, uint addrtype, uint offset, uint val)
{
	osl_t *osh = si_osh(sih);

	ASSERT(pcieregs != NULL);
	BCM_REFERENCE(osh);

	if ((BUSTYPE(sih->bustype) == SI_BUS) || PCIE_GEN1(sih)) {
		if (addrtype == PCIE_CONFIGREGS) {
			W_REG(osh, (&pcieregs->configaddr), offset);
			W_REG(osh, (&pcieregs->configdata), val);
		} else if (addrtype == PCIE_PCIEREGS) {
			W_REG(osh, (&pcieregs->u.pcie1.pcieindaddr), offset);
			W_REG(osh, (&pcieregs->u.pcie1.pcieinddata), val);
		} else {
			/* Unknown address space. */
			ASSERT(0);
		}
	} else if (PCIE_GEN2(sih)) {
		W_REG(osh, (&pcieregs->configaddr), offset);
		W_REG(osh, (&pcieregs->configdata), val);
	}

	return 0;
}
/* Initialize the receive DMA engine: reset ring bookkeeping, clear the
 * descriptor ring, enable rx and program the ring base address. */
void
dma_rxinit(dma_info_t *di)
{
	uint32 ringpa;

	DMA_TRACE(("%s: dma_rxinit\n", di->name));

	di->rxin = di->rxout = 0;

	/* clear rx descriptor ring */
	BZERO_SM((void *)di->rxd, di->nrxd * sizeof(dmadd_t));

	dma_rxenable(di);

	/* Point the engine at the (offset-adjusted) descriptor ring. */
	ringpa = (uint32)di->rxdpa + di->ddoffset;
	W_REG(&di->regs->rcvaddr, ringpa);
}
/* Probe the EXTIF core for external UARTs (and the optional internal
 * UART) and register each one found via the 'add' callback.
 * Fix: removed the local 'n', which was assigned 2 and never read. */
static void __init
sb_extif_serial_init(sb_t *sbh, void *regs, sb_serial_init_fn add)
{
	osl_t *osh = sb_osh(sbh);
	extifregs_t *eir = (extifregs_t *) regs;
	sbconfig_t *sb;
	ulong base;
	uint irq;
	int i;

	/* Determine external UART register base */
	sb = (sbconfig_t *) ((ulong) eir + SBCONFIGOFF);
	base = EXTIF_CFGIF_BASE(sb_base(R_REG(osh, &sb->sbadmatch1)));

	/* Determine IRQ */
	irq = sb_irq(sbh);

	/* Disable GPIO interrupt initially */
	W_REG(osh, &eir->gpiointpolarity, 0);
	W_REG(osh, &eir->gpiointmask, 0);

	/* Search for external UARTs (up to two, 8 bytes of registers each) */
	for (i = 0; i < 2; i++) {
		regs = (void *) REG_MAP(base + (i * 8), 8);
		if (serial_exists(osh, regs)) {
			/* Set GPIO 1 to be the external UART IRQ */
			W_REG(osh, &eir->gpiointmask, 2);
			/* XXXDetermine external UART clock */
			if (add)
				add(regs, irq, 13500000, 0);
		}
	}

	/* Add internal UART if enabled */
	if (R_REG(osh, &eir->corecontrol) & CC_UE)
		if (add)
			add((void *) &eir->uartdata, irq, sb_clock(sbh), 2);
}
/* Called after hw_params and before trigger(start). */
static int
bcm947xx_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	bcm947xx_i2s_info_t *snd_bcm = rtd->dai->cpu_dai->private_data;
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct bcm947xx_runtime_data *brtd = runtime->private_data;
	/* Start from the currently enabled mask and OR in this stream's bits. */
	uint32 intmask = R_REG(snd_bcm->osh, &snd_bcm->regs->intmask);
	uint32 intstatus = 0;
	unsigned long flags;
	int ret = 0;

	DBG("%s %s\n", __FUNCTION__, bcm947xx_direction_str(substream));

	spin_lock_irqsave(&brtd->lock, flags);

	/* Reset s/w DMA accounting. */
	brtd->dma_pos = brtd->dma_start;
	brtd->dma_loaded = brtd->bytes_pending = 0;

	/* Pick the interrupt bits for this stream's direction. */
	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
		intmask |= intmask_capture;
		intstatus |= intmask_capture;
	} else {
		intmask |= intmask_playback;
		intstatus |= intmask_playback;
	}

	/* Clear any pending interrupts. */
	W_REG(snd_bcm->osh, &snd_bcm->regs->intstatus, intstatus);

	/* Enable interrupts. */
	W_REG(snd_bcm->osh, &snd_bcm->regs->intmask, intmask);

	spin_unlock_irqrestore(&brtd->lock, flags);

	DBG("%s: i2s intstatus 0x%x intmask 0x%x\n", __FUNCTION__,
	    R_REG(snd_bcm->osh, &snd_bcm->regs->intstatus),
	    R_REG(snd_bcm->osh, &snd_bcm->regs->intmask));

	return ret;
}
/* Write a 64-bit value to a robo switch register at (page, offset) through
 * the SRAB (switch register access bridge). */
static void
_switch_reg_write(void *rinfo, uint8 page, uint8 offset, uint64 value)
{
	uint32 regval;
	uint32 timeout = ROBO_POLL_TIMEOUT;
	robo_info_t *robo = (robo_info_t *)rinfo;
	si_info_t *sii;
	osl_t *osh;
	nssrabregs_t *regs;		/* pointer to chip registers */

	sii = SI_INFO((si_t*)robo->sbh);
	osh = sii->osh;
	regs = robo->regs;
	COMPILER_REFERENCE(osh);

	/* Take the SRAB arbitration grant before touching the interface. */
	_switch_request_grant(rinfo);

	/* Load the value to write (high word, then low word) */
	W_REG(osh, &regs->chipcommonb_srab_wdh, (uint32)(value >> 32));
	W_REG(osh, &regs->chipcommonb_srab_wdl, (uint32)(value));

	/* Issue the write command */
	regval = ((page << CHIPCOMMONB_SRAB_CMDSTAT_SRA_PAGE_SHIFT) |
	          (offset << CHIPCOMMONB_SRAB_CMDSTAT_SRA_OFFSET_SHIFT) |
	          CHIPCOMMONB_SRAB_CMDSTAT_SRA_GORDYN_MASK |
	          CHIPCOMMONB_SRAB_CMDSTAT_SRA_WRITE_MASK);
	W_REG(osh, &regs->chipcommonb_srab_cmdstat, regval);

	/* Wait for command complete: GORDYN clears when the access is done.
	 * On timeout, log, reset the interface and continue (void return —
	 * the caller gets no error indication). */
	while (R_REG(osh, &regs->chipcommonb_srab_cmdstat) &
	       CHIPCOMMONB_SRAB_CMDSTAT_SRA_GORDYN_MASK) {
		if (!--timeout) {
			SRAB_ERR(("robo_write: timeout"));
			_switch_interface_reset(rinfo);
			break;
		}
	}

	_switch_release_grant(rinfo);
}
/* Needs to happen when coming out of 'standby'/'hibernate' */
static void
BCMINITFN(pcie_misc_config_fixup)(pcicore_info_t *pi)
{
	sbpcieregs_t *pcieregs = pi->regs.pcieregs;
	uint16 *reg16 = &pcieregs->sprom[SRSH_PCIE_MISC_CONFIG];
	uint16 val16 = R_REG(pi->osh, reg16);

	/* Set the L23READY-exit-without-PERST bit in the shadow SROM, writing
	 * back only if it was not already set. */
	if (!(val16 & SRSH_L23READY_EXIT_NOPERST))
		W_REG(pi->osh, reg16, (uint16)(val16 | SRSH_L23READY_EXIT_NOPERST));
}
/* ***** Register Access API */
/* Indirect PCIe register read: latch 'offset' into the address register
 * for the requested space, then fetch the data register.  Returns
 * 0xFFFFFFFF for an unknown address type. */
uint
pcie_readreg(osl_t *osh, sbpcieregs_t *pcieregs, uint addrtype, uint offset)
{
	uint retval = 0xFFFFFFFF;

	ASSERT(pcieregs != NULL);

	if (addrtype == PCIE_CONFIGREGS) {
		W_REG(osh, (&pcieregs->configaddr), offset);
		retval = R_REG(osh, &(pcieregs->configdata));
	} else if (addrtype == PCIE_PCIEREGS) {
		W_REG(osh, &(pcieregs->pcieindaddr), offset);
		retval = R_REG(osh, &(pcieregs->pcieinddata));
	} else {
		/* Unknown address space. */
		ASSERT(0);
	}

	return retval;
}