static int phy_setup_op(struct zynq_gem_priv *priv, u32 phy_addr, u32 regnum,
			u32 op, u16 *data)
{
	u32 mgtcr;
	struct zynq_gem_regs *regs = priv->iobase;
	int err;

	err = wait_for_bit_le32(&regs->nwsr, ZYNQ_GEM_NWSR_MDIOIDLE_MASK,
				true, 20000, false);
	if (err)
		return err;

	/* Construct mgtcr mask for the operation */
	mgtcr = ZYNQ_GEM_PHYMNTNC_OP_MASK | op |
		(phy_addr << ZYNQ_GEM_PHYMNTNC_PHYAD_SHIFT_MASK) |
		(regnum << ZYNQ_GEM_PHYMNTNC_PHREG_SHIFT_MASK) | *data;

	/* Write mgtcr and wait for completion */
	writel(mgtcr, &regs->phymntnc);

	err = wait_for_bit_le32(&regs->nwsr, ZYNQ_GEM_NWSR_MDIOIDLE_MASK,
				true, 20000, false);
	if (err)
		return err;

	if (op == ZYNQ_GEM_PHYMNTNC_OP_R_MASK)
		*data = readl(&regs->phymntnc);

	return 0;
}
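/*
 * A minimal sketch of how MDIO read/write helpers might wrap
 * phy_setup_op(). The phyread()/phywrite() names and the
 * ZYNQ_GEM_PHYMNTNC_OP_W_MASK constant are assumptions made for
 * illustration; only the read mask appears in the function above.
 */
static int phyread(struct zynq_gem_priv *priv, u32 phy_addr,
		   u32 regnum, u16 *val)
{
	return phy_setup_op(priv, phy_addr, regnum,
			    ZYNQ_GEM_PHYMNTNC_OP_R_MASK, val);
}

static int phywrite(struct zynq_gem_priv *priv, u32 phy_addr,
		    u32 regnum, u16 data)
{
	return phy_setup_op(priv, phy_addr, regnum,
			    ZYNQ_GEM_PHYMNTNC_OP_W_MASK, &data);
}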
int socfpga_bridges_reset(void)
{
	int ret;

	/*
	 * Disable all the bridges (hps2fpga, lwhps2fpga, fpga2hps,
	 * fpga2sdram): set the idle request to all bridges.
	 */
	writel(ALT_SYSMGR_NOC_H2F_SET_MSK |
	       ALT_SYSMGR_NOC_LWH2F_SET_MSK |
	       ALT_SYSMGR_NOC_F2H_SET_MSK |
	       ALT_SYSMGR_NOC_F2SDR0_SET_MSK |
	       ALT_SYSMGR_NOC_F2SDR1_SET_MSK |
	       ALT_SYSMGR_NOC_F2SDR2_SET_MSK,
	       &sysmgr_regs->noc_idlereq_set);

	/* Enable the NOC timeout */
	writel(ALT_SYSMGR_NOC_TMO_EN_SET_MSK, &sysmgr_regs->noc_timeout);

	/* Poll until all idleack bits are set */
	ret = wait_for_bit_le32(&sysmgr_regs->noc_idleack,
				ALT_SYSMGR_NOC_H2F_SET_MSK |
				ALT_SYSMGR_NOC_LWH2F_SET_MSK |
				ALT_SYSMGR_NOC_F2H_SET_MSK |
				ALT_SYSMGR_NOC_F2SDR0_SET_MSK |
				ALT_SYSMGR_NOC_F2SDR1_SET_MSK |
				ALT_SYSMGR_NOC_F2SDR2_SET_MSK,
				true, 10000, false);
	if (ret)
		return ret;

	/* Poll until all idlestatus bits are set */
	ret = wait_for_bit_le32(&sysmgr_regs->noc_idlestatus,
				ALT_SYSMGR_NOC_H2F_SET_MSK |
				ALT_SYSMGR_NOC_LWH2F_SET_MSK |
				ALT_SYSMGR_NOC_F2H_SET_MSK |
				ALT_SYSMGR_NOC_F2SDR0_SET_MSK |
				ALT_SYSMGR_NOC_F2SDR1_SET_MSK |
				ALT_SYSMGR_NOC_F2SDR2_SET_MSK,
				true, 10000, false);
	if (ret)
		return ret;

	/* Put all bridges (except the NOC DDR scheduler) into reset state */
	setbits_le32(&reset_manager_base->brgmodrst,
		     (ALT_RSTMGR_BRGMODRST_H2F_SET_MSK |
		      ALT_RSTMGR_BRGMODRST_LWH2F_SET_MSK |
		      ALT_RSTMGR_BRGMODRST_F2H_SET_MSK |
		      ALT_RSTMGR_BRGMODRST_F2SSDRAM0_SET_MSK |
		      ALT_RSTMGR_BRGMODRST_F2SSDRAM1_SET_MSK |
		      ALT_RSTMGR_BRGMODRST_F2SSDRAM2_SET_MSK));

	/* Disable the NOC timeout */
	writel(0, &sysmgr_regs->noc_timeout);

	return 0;
}
static int ehci_usb_remove(struct udevice *dev)
{
	struct msm_ehci_priv *p = dev_get_priv(dev);
	struct usb_ehci *ehci = p->ehci;
	int ret;

	ret = ehci_deregister(dev);
	if (ret)
		return ret;

	/* Stop controller. */
	clrbits_le32(&ehci->usbcmd, CMD_RUN);

	reset_usb_phy(p);

	ret = board_prepare_usb(USB_INIT_DEVICE); /* Board specific hook */
	if (ret < 0)
		return ret;

	/* Reset controller */
	setbits_le32(&ehci->usbcmd, CMD_RESET);

	/* Wait for reset */
	if (wait_for_bit_le32(&ehci->usbcmd, CMD_RESET, false, 30, false)) {
		printf("Stuck on USB reset.\n");
		return -ETIMEDOUT;
	}

	return 0;
}
/*
 * Initialize the MII (MDIO) interface, discover which PHY is
 * attached to the device, and configure it properly.
 */
static int pic32_mii_init(struct pic32eth_dev *priv)
{
	struct pic32_ectl_regs *ectl_p = priv->ectl_regs;
	struct pic32_emac_regs *emac_p = priv->emac_regs;

	/* board phy reset */
	board_netphy_reset(priv);

	/* disable RX, TX & all transactions */
	writel(ETHCON_ON | ETHCON_TXRTS | ETHCON_RXEN, &ectl_p->con1.clr);

	/* wait till busy */
	wait_for_bit_le32(&ectl_p->stat.raw, ETHSTAT_BUSY, false,
			  CONFIG_SYS_HZ, false);

	/* turn controller ON to access PHY over MII */
	writel(ETHCON_ON, &ectl_p->con1.set);

	mdelay(10);

	/* reset MAC */
	writel(EMAC_SOFTRESET, &emac_p->cfg1.set); /* reset assert */
	mdelay(10);
	writel(EMAC_SOFTRESET, &emac_p->cfg1.clr); /* reset deassert */

	/* initialize MDIO/MII */
	if (priv->phyif == PHY_INTERFACE_MODE_RMII) {
		writel(EMAC_RMII_RESET, &emac_p->supp.set);
		mdelay(10);
		writel(EMAC_RMII_RESET, &emac_p->supp.clr);
	}

	return pic32_mdio_init(PIC32_MDIO_NAME, (ulong)&emac_p->mii);
}
static void pic32_eth_stop(struct udevice *dev)
{
	struct pic32eth_dev *priv = dev_get_priv(dev);
	struct pic32_ectl_regs *ectl_p = priv->ectl_regs;
	struct pic32_emac_regs *emac_p = priv->emac_regs;

	/* Reset the PHY if the controller is enabled */
	if (readl(&ectl_p->con1.raw) & ETHCON_ON)
		phy_reset(priv->phydev);

	/* Shut down the PHY */
	phy_shutdown(priv->phydev);

	/* Stop rx/tx */
	writel(ETHCON_TXRTS | ETHCON_RXEN, &ectl_p->con1.clr);
	mdelay(10);

	/* reset MAC */
	writel(EMAC_SOFTRESET, &emac_p->cfg1.raw);

	/* clear reset */
	writel(0, &emac_p->cfg1.raw);
	mdelay(10);

	/* disable controller */
	writel(ETHCON_ON, &ectl_p->con1.clr);
	mdelay(10);

	/* wait until everything is down */
	wait_for_bit_le32(&ectl_p->stat.raw, ETHSTAT_BUSY, false,
			  2 * CONFIG_SYS_HZ, false);

	/* clear any existing interrupt event */
	writel(0xffffffff, &ectl_p->irq.clr);
}
static int rpc_spi_wait_tend(struct udevice *dev)
{
	struct rpc_spi_priv *priv = dev_get_priv(dev->parent);

	return wait_for_bit_le32((void *)priv->regs + RPC_CMNSR,
				 RPC_CMNSR_TEND, true, 1000, false);
}
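/*
 * Hedged sketch of a caller: kick a manual-mode transfer, then poll
 * TEND before touching the read data. RPC_SMCR and RPC_SMCR_SPIE are
 * assumed register/bit names, not taken from this section.
 */
static int rpc_spi_trigger_and_wait(struct udevice *dev)
{
	struct rpc_spi_priv *priv = dev_get_priv(dev->parent);

	/* Start the transfer in manual mode */
	writel(RPC_SMCR_SPIE, (void *)priv->regs + RPC_SMCR);

	/* Transfer-end (TEND) must be set before reading back data */
	return rpc_spi_wait_tend(dev);
}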
static void zynqmp_qspi_fill_gen_fifo(struct zynqmp_qspi_priv *priv,
				      u32 gqspi_fifo_reg)
{
	struct zynqmp_qspi_regs *regs = priv->regs;
	u32 config_reg, ier;
	int ret = 0;

	config_reg = readl(&regs->confr);
	/* Manual start if needed */
	if (config_reg & GQSPI_GEN_FIFO_STRT_MOD) {
		config_reg |= GQSPI_STRT_GEN_FIFO;
		writel(config_reg, &regs->confr);

		/* Enable interrupts */
		ier = readl(&regs->ier);
		ier |= GQSPI_IXR_ALL_MASK;
		writel(ier, &regs->ier);
	}

	/* Wait until the fifo is not full to write the new command */
	ret = wait_for_bit_le32(&regs->isr, GQSPI_IXR_GFNFULL_MASK, true,
				GQSPI_TIMEOUT, true);
	if (ret)
		printf("%s Timeout\n", __func__);

	writel(gqspi_fifo_reg, &regs->genfifo);
}
/* start PHY self-calibration logic */
static int ddr2_phy_calib_start(void)
{
	struct ddr2_phy_regs *ddr2_phy;

	ddr2_phy = ioremap(PIC32_DDR2P_BASE, sizeof(*ddr2_phy));

	/* DDR PHY SCL Start */
	writel(SCL_START | SCL_EN, &ddr2_phy->scl_start);

	/* Wait for SCL for data byte to pass */
	return wait_for_bit_le32(&ddr2_phy->scl_start, SCL_LUBPASS,
				 true, CONFIG_SYS_HZ, false);
}
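/*
 * Hypothetical caller, shown only to illustrate the return
 * convention: wait_for_bit_le32() returns 0 on success and
 * -ETIMEDOUT when SCL_LUBPASS never sets, so ddr2_phy_calib_start()
 * can simply be propagated. The ddr2_calibrate() name is an
 * assumption for illustration.
 */
static int ddr2_calibrate(void)
{
	int ret;

	ret = ddr2_phy_calib_start();
	if (ret)
		printf("DDR2 PHY calibration timed out\n");

	return ret;
}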
static int zynqmp_qspi_start_dma(struct zynqmp_qspi_priv *priv,
				 u32 gen_fifo_cmd, u32 *buf)
{
	u32 addr;
	u32 size, len;
	u32 actuallen = priv->len;
	int ret = 0;
	struct zynqmp_qspi_dma_regs *dma_regs = priv->dma_regs;

	writel((unsigned long)buf, &dma_regs->dmadst);
	writel(roundup(priv->len, ARCH_DMA_MINALIGN), &dma_regs->dmasize);
	writel(GQSPI_DMA_DST_I_STS_MASK, &dma_regs->dmaier);
	addr = (unsigned long)buf;
	size = roundup(priv->len, ARCH_DMA_MINALIGN);
	flush_dcache_range(addr, addr + size);

	while (priv->len) {
		len = zynqmp_qspi_calc_exp(priv, &gen_fifo_cmd);
		if (!(gen_fifo_cmd & GQSPI_GFIFO_EXP_MASK) &&
		    (len % ARCH_DMA_MINALIGN)) {
			gen_fifo_cmd &= ~GENMASK(7, 0);
			gen_fifo_cmd |= roundup(len, ARCH_DMA_MINALIGN);
		}
		zynqmp_qspi_fill_gen_fifo(priv, gen_fifo_cmd);

		debug("GFIFO_CMD_RX:0x%x\n", gen_fifo_cmd);
	}

	ret = wait_for_bit_le32(&dma_regs->dmaisr, GQSPI_DMA_DST_I_STS_DONE,
				true, GQSPI_TIMEOUT, true);
	if (ret) {
		printf("DMA Timeout:0x%x\n", readl(&dma_regs->dmaisr));
		return -ETIMEDOUT;
	}

	writel(GQSPI_DMA_DST_I_STS_DONE, &dma_regs->dmaisr);

	debug("buf:0x%lx, rxbuf:0x%lx, *buf:0x%x len: 0x%x\n",
	      (unsigned long)buf, (unsigned long)priv->rx_buf, *buf,
	      actuallen);

	if (buf != priv->rx_buf)
		memcpy(priv->rx_buf, buf, actuallen);

	return 0;
}
static int zynq_gem_send(struct udevice *dev, void *ptr, int len)
{
	u32 addr, size;
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct zynq_gem_regs *regs = priv->iobase;
	struct emac_bd *current_bd = &priv->tx_bd[1];

	/* Setup Tx BD */
	memset(priv->tx_bd, 0, sizeof(struct emac_bd));

	priv->tx_bd->addr = (ulong)ptr;
	priv->tx_bd->status = (len & ZYNQ_GEM_TXBUF_FRMLEN_MASK) |
			       ZYNQ_GEM_TXBUF_LAST_MASK;

	/* Dummy descriptor to mark it as the last in descriptor chain */
	current_bd->addr = 0x0;
	current_bd->status = ZYNQ_GEM_TXBUF_WRAP_MASK |
			     ZYNQ_GEM_TXBUF_LAST_MASK |
			     ZYNQ_GEM_TXBUF_USED_MASK;

	/* setup BD */
	writel((ulong)priv->tx_bd, &regs->txqbase);

	addr = (ulong)ptr;
	addr &= ~(ARCH_DMA_MINALIGN - 1);
	size = roundup(len, ARCH_DMA_MINALIGN);
	flush_dcache_range(addr, addr + size);

	addr = (ulong)priv->rxbuffers;
	addr &= ~(ARCH_DMA_MINALIGN - 1);
	size = roundup((RX_BUF * PKTSIZE_ALIGN), ARCH_DMA_MINALIGN);
	flush_dcache_range(addr, addr + size);
	barrier();

	/* Start transmit */
	setbits_le32(&regs->nwctrl, ZYNQ_GEM_NWCTRL_STARTTX_MASK);

	/* Read TX BD status */
	if (priv->tx_bd->status & ZYNQ_GEM_TXBUF_EXHAUSTED)
		printf("TX buffers exhausted in mid frame\n");

	return wait_for_bit_le32(&regs->txsr, ZYNQ_GEM_TSR_DONE,
				 true, 20000, true);
}
/*
 * Reset the controller: stop RX/TX, drain pending receive buffers,
 * clear stale events and addresses, and program the receive filters
 * before turning the controller back on for MII access.
 */
static void pic32_ctrl_reset(struct pic32eth_dev *priv)
{
	struct pic32_ectl_regs *ectl_p = priv->ectl_regs;
	u32 v;

	/* disable RX, TX & any other transactions */
	writel(ETHCON_ON | ETHCON_TXRTS | ETHCON_RXEN, &ectl_p->con1.clr);

	/* wait till busy */
	wait_for_bit_le32(&ectl_p->stat.raw, ETHSTAT_BUSY, false,
			  CONFIG_SYS_HZ, false);

	/* decrement received buffcnt to zero */
	while (readl(&ectl_p->stat.raw) & ETHSTAT_BUFCNT)
		writel(ETHCON_BUFCDEC, &ectl_p->con1.set);

	/* clear any existing interrupt event */
	writel(0xffffffff, &ectl_p->irq.clr);

	/* clear RX/TX start address */
	writel(0xffffffff, &ectl_p->txst.clr);
	writel(0xffffffff, &ectl_p->rxst.clr);

	/* clear the receive filters */
	writel(0x00ff, &ectl_p->rxfc.clr);

	/* set the receive filters
	 * ETH_FILT_CRC_ERR_REJECT
	 * ETH_FILT_RUNT_REJECT
	 * ETH_FILT_UCAST_ACCEPT
	 * ETH_FILT_MCAST_ACCEPT
	 * ETH_FILT_BCAST_ACCEPT
	 */
	v = ETHRXFC_BCEN | ETHRXFC_MCEN | ETHRXFC_UCEN |
	    ETHRXFC_RUNTEN | ETHRXFC_CRCOKEN;
	writel(v, &ectl_p->rxfc.set);

	/* turn controller ON to access PHY over MII */
	writel(ETHCON_ON, &ectl_p->con1.set);
}
void mscc_switch_reset(bool enter)
{
	/* Nasty workaround to avoid GPIO19 (DDR!) being reset */
	mscc_gpio_set_alternate(19, 2);

	debug("applying SwC reset\n");

	writel(ICPU_RESET_CORE_RST_PROTECT, BASE_CFG + ICPU_RESET);
	writel(PERF_SOFT_RST_SOFT_CHIP_RST, BASE_DEVCPU_GCB + PERF_SOFT_RST);

	if (wait_for_bit_le32(BASE_DEVCPU_GCB + PERF_SOFT_RST,
			      PERF_SOFT_RST_SOFT_CHIP_RST, false, 5000, false))
		pr_err("Timeout while waiting for switch reset\n");

	/*
	 * Reset GPIO19 mode back as regular GPIO, output, high (DDR
	 * not reset) (Order is important)
	 */
	setbits_le32(BASE_DEVCPU_GCB + PERF_GPIO_OE, BIT(19));
	writel(BIT(19), BASE_DEVCPU_GCB + PERF_GPIO_OUT_SET);
	mscc_gpio_set_alternate(19, 0);
}
/* Enable bridges (hps2fpga, lwhps2fpga, fpga2hps, fpga2sdram) per handoff */
int socfpga_reset_deassert_bridges_handoff(void)
{
	u32 mask_noc = 0, mask_rstmgr = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(bridge_cfg_tbl); i++) {
		if (get_bridge_init_val(gd->fdt_blob,
					bridge_cfg_tbl[i].compat_id)) {
			mask_noc |= bridge_cfg_tbl[i].mask_noc;
			mask_rstmgr |= bridge_cfg_tbl[i].mask_rstmgr;
		}
	}

	/* clear idle request to all bridges */
	setbits_le32(&sysmgr_regs->noc_idlereq_clr, mask_noc);

	/* Release bridges from reset state per handoff value */
	clrbits_le32(&reset_manager_base->brgmodrst, mask_rstmgr);

	/* Poll until all idleack bits clear to 0; time out after 1000ms */
	return wait_for_bit_le32(&sysmgr_regs->noc_idleack, mask_noc,
				 false, 1000, false);
}
/* wait until the hardware is ready for another user access */
static int cpsw_mdio_wait_for_user_access(struct cpsw_mdio *mdio)
{
	return wait_for_bit_le32(&mdio->regs->user[0].access, USERACCESS_GO,
				 false, CPSW_MDIO_TIMEOUT, false);
}
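/*
 * Hedged sketch of an MDIO read built on the wait helper above: wait
 * for idle, post a read request, wait for GO to clear, then pick up
 * the result. USERACCESS_READ, USERACCESS_ACK, USERACCESS_DATA and
 * the register/PHY-address shifts are assumptions for illustration.
 */
static int cpsw_mdio_read(struct mii_dev *bus, int phy_id,
			  int dev_addr, int phy_reg)
{
	struct cpsw_mdio *mdio = bus->priv;
	u32 reg;
	int ret;

	/* wait for any previous transaction to finish */
	ret = cpsw_mdio_wait_for_user_access(mdio);
	if (ret)
		return ret;

	/* post the read request: GO + READ + register + PHY address */
	reg = USERACCESS_GO | USERACCESS_READ |
	      (phy_reg << 21) | (phy_id << 16);
	writel(reg, &mdio->regs->user[0].access);

	/* GO clears when the transaction completes */
	ret = cpsw_mdio_wait_for_user_access(mdio);
	if (ret)
		return ret;

	reg = readl(&mdio->regs->user[0].access);
	return (reg & USERACCESS_ACK) ? (int)(reg & USERACCESS_DATA) : -EIO;
}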
static int sun4i_spi_xfer(struct udevice *dev, unsigned int bitlen,
			  const void *dout, void *din, unsigned long flags)
{
	struct udevice *bus = dev->parent;
	struct sun4i_spi_priv *priv = dev_get_priv(bus);
	struct dm_spi_slave_platdata *slave_plat = dev_get_parent_platdata(dev);

	u32 len = bitlen / 8;
	u32 reg;
	u8 nbytes;
	int ret;

	priv->tx_buf = dout;
	priv->rx_buf = din;

	if (bitlen % 8) {
		debug("%s: non byte-aligned SPI transfer.\n", __func__);
		return -ENAVAIL;
	}

	if (flags & SPI_XFER_BEGIN)
		sun4i_spi_set_cs(bus, slave_plat->cs, true);

	reg = readl(&priv->regs->ctl);

	/* Reset FIFOs */
	writel(reg | SUN4I_CTL_RF_RST | SUN4I_CTL_TF_RST, &priv->regs->ctl);

	while (len) {
		/* Setup the transfer now... */
		nbytes = min(len, (u32)(SUN4I_FIFO_DEPTH - 1));

		/* Setup the counters */
		writel(SUN4I_BURST_CNT(nbytes), &priv->regs->bc);
		writel(SUN4I_XMIT_CNT(nbytes), &priv->regs->tc);

		/* Fill the TX FIFO */
		sun4i_spi_fill_fifo(priv, nbytes);

		/* Start the transfer */
		reg = readl(&priv->regs->ctl);
		writel(reg | SUN4I_CTL_XCH, &priv->regs->ctl);

		/* Wait for the transfer to complete */
		ret = wait_for_bit_le32(&priv->regs->ctl, SUN4I_CTL_XCH_MASK,
					false, SUN4I_SPI_TIMEOUT_US, false);
		if (ret) {
			printf("ERROR: sun4i_spi: Timeout transferring data\n");
			sun4i_spi_set_cs(bus, slave_plat->cs, false);
			return ret;
		}

		/* Drain the RX FIFO */
		sun4i_spi_drain_fifo(priv, nbytes);

		len -= nbytes;
	}

	if (flags & SPI_XFER_END)
		sun4i_spi_set_cs(bus, slave_plat->cs, false);

	return 0;
}
static int zynqmp_qspi_fill_tx_fifo(struct zynqmp_qspi_priv *priv, u32 size)
{
	u32 data, config_reg, ier;
	int ret = 0;
	struct zynqmp_qspi_regs *regs = priv->regs;
	u32 *buf = (u32 *)priv->tx_buf;
	u32 len = size;

	debug("TxFIFO: 0x%x, size: 0x%x\n", readl(&regs->isr), size);

	config_reg = readl(&regs->confr);
	/* Manual start if needed */
	if (config_reg & GQSPI_GEN_FIFO_STRT_MOD) {
		config_reg |= GQSPI_STRT_GEN_FIFO;
		writel(config_reg, &regs->confr);

		/* Enable interrupts */
		ier = readl(&regs->ier);
		ier |= GQSPI_IXR_ALL_MASK;
		writel(ier, &regs->ier);
	}

	while (size) {
		ret = wait_for_bit_le32(&regs->isr, GQSPI_IXR_TXNFULL_MASK,
					true, GQSPI_TIMEOUT, true);
		if (ret) {
			printf("%s: Timeout\n", __func__);
			return ret;
		}

		if (size >= 4) {
			writel(*buf, &regs->txd0r);
			buf++;
			size -= 4;
		} else {
			/*
			 * Assemble the trailing 1-3 bytes; buf is a
			 * u32 *, so byte accesses must go through a
			 * u8 * view rather than u32 pointer arithmetic.
			 */
			switch (size) {
			case 1:
				data = *((u8 *)buf);
				data |= GENMASK(31, 8);
				break;
			case 2:
				data = *((u16 *)buf);
				data |= GENMASK(31, 16);
				break;
			case 3:
				data = *((u16 *)buf);
				data |= *((u8 *)buf + 2) << 16;
				data |= GENMASK(31, 24);
				break;
			}
			writel(data, &regs->txd0r);
			size = 0;
		}
	}

	priv->tx_buf += len;

	return 0;
}
struct mii_dev *cpsw_mdio_init(const char *name, u32 mdio_base,
			       u32 bus_freq, int fck_freq)
{
	struct cpsw_mdio *cpsw_mdio;
	int ret;

	cpsw_mdio = calloc(1, sizeof(*cpsw_mdio));
	if (!cpsw_mdio) {
		debug("failed to alloc cpsw_mdio\n");
		return NULL;
	}

	cpsw_mdio->bus = mdio_alloc();
	if (!cpsw_mdio->bus) {
		debug("failed to alloc mii bus\n");
		free(cpsw_mdio);
		return NULL;
	}

	cpsw_mdio->regs = (struct cpsw_mdio_regs *)mdio_base;

	if (!bus_freq || !fck_freq)
		cpsw_mdio->div = CPSW_MDIO_DIV_DEF;
	else
		cpsw_mdio->div = (fck_freq / bus_freq) - 1;
	cpsw_mdio->div &= CONTROL_DIV_MASK;

	/* set enable and clock divider */
	writel(cpsw_mdio->div | CONTROL_ENABLE | CONTROL_FAULT |
	       CONTROL_FAULT_ENABLE, &cpsw_mdio->regs->control);
	wait_for_bit_le32(&cpsw_mdio->regs->control,
			  CONTROL_IDLE, false, CPSW_MDIO_TIMEOUT, true);

	/*
	 * wait for scan logic to settle:
	 * the scan time consists of (a) a large fixed component, and (b) a
	 * small component that varies with the mii bus frequency. These
	 * were estimated using measurements at 1.1 and 2.2 MHz on tnetv107x
	 * silicon. Since the effect of (b) was found to be largely
	 * negligible, we keep things simple here.
	 */
	mdelay(1);

	cpsw_mdio->bus->read = cpsw_mdio_read;
	cpsw_mdio->bus->write = cpsw_mdio_write;
	cpsw_mdio->bus->priv = cpsw_mdio;
	/* use "%s" so a '%' in the bus name cannot act as a format string */
	snprintf(cpsw_mdio->bus->name, sizeof(cpsw_mdio->bus->name),
		 "%s", name);

	ret = mdio_register(cpsw_mdio->bus);
	if (ret < 0) {
		debug("failed to register mii bus\n");
		goto free_bus;
	}

	return cpsw_mdio->bus;

free_bus:
	mdio_free(cpsw_mdio->bus);
	free(cpsw_mdio);
	return NULL;
}
static int atmel_spi_xfer(struct udevice *dev, unsigned int bitlen,
			  const void *dout, void *din, unsigned long flags)
{
	struct udevice *bus = dev_get_parent(dev);
	struct atmel_spi_platdata *bus_plat = dev_get_platdata(bus);
	struct at91_spi *reg_base = bus_plat->regs;
	u32 len_tx, len_rx, len;
	u32 status;
	const u8 *txp = dout;
	u8 *rxp = din;
	u8 value;

	if (bitlen == 0)
		goto out;

	/*
	 * The controller can do non-multiple-of-8 bit
	 * transfers, but this driver currently doesn't support it.
	 *
	 * It's also not clear how such transfers are supposed to be
	 * represented as a stream of bytes...this is a limitation of
	 * the current SPI interface.
	 */
	if (bitlen % 8) {
		/* Errors always terminate an ongoing transfer */
		flags |= SPI_XFER_END;
		goto out;
	}

	len = bitlen / 8;

	/*
	 * The controller can do automatic CS control, but it is
	 * somewhat quirky, and it doesn't really buy us much anyway
	 * in the context of U-Boot.
	 */
	if (flags & SPI_XFER_BEGIN) {
		atmel_spi_cs_activate(dev);

		/*
		 * sometimes the RDR is not empty when we get here,
		 * in theory that should not happen, but it DOES happen.
		 * Read it here to be on the safe side.
		 * That also clears the OVRES flag. Required if the
		 * following loop exits due to OVRES!
		 */
		readl(&reg_base->rdr);
	}

	for (len_tx = 0, len_rx = 0; len_rx < len; ) {
		status = readl(&reg_base->sr);

		if (status & ATMEL_SPI_SR_OVRES)
			return -1;

		if ((len_tx < len) && (status & ATMEL_SPI_SR_TDRE)) {
			if (txp)
				value = *txp++;
			else
				value = 0;
			writel(value, &reg_base->tdr);
			len_tx++;
		}

		if (status & ATMEL_SPI_SR_RDRF) {
			value = readl(&reg_base->rdr);
			if (rxp)
				*rxp++ = value;
			len_rx++;
		}
	}

out:
	if (flags & SPI_XFER_END) {
		/*
		 * Wait until the transfer is completely done before
		 * we deactivate CS.
		 */
		wait_for_bit_le32(&reg_base->sr, ATMEL_SPI_SR_TXEMPTY,
				  true, 1000, false);

		atmel_spi_cs_deactivate(dev);
	}

	return 0;
}