/* Cache the current GPIO data register so later writes can be read-modify-write. */
static void mpc8xxx_gpio_save_regs(struct of_mm_gpio_chip *mm)
{
	struct mpc8xxx_gpio_chip *chip = to_mpc8xxx_gpio_chip(mm);

	chip->data = ioread32be(mm->regs + GPIO_DAT);
}
void fman_dtsec_stop_rx(struct dtsec_regs *regs) { /* Assert the graceful stop bit */ iowrite32be(ioread32be(®s->rctrl) | RCTRL_GRS, ®s->rctrl); }
void fman_dtsec_start_tx(struct dtsec_regs *regs) { /* clear the graceful stop bit */ iowrite32be(ioread32be(®s->tctrl) & ~DTSEC_TCTRL_GTS, ®s->tctrl); }
/* One-time hardware initialization of a dTSEC MAC.
 *
 * regs:           mapped dTSEC register block
 * cfg:            driver configuration (pause times, IPG, half-duplex, PTP, ...)
 * iface:          PHY interface mode; reduced-pin modes are rejected when the
 *                 silicon reports them unavailable (DTSEC_ID2_INT_REDUCED_OFF)
 * iface_speed:    link speed (SPEED_100 / SPEED_1000 ...)
 * macaddr:        6-byte station MAC address
 * exception_mask: initial IMASK value
 * tbi_addr:       PHY address programmed into TBIPA
 *
 * Returns 0 on success, -EINVAL if the requested interface mode is not
 * supported by this controller revision.
 *
 * Fix: every "&regs" in this function had been corrupted to the mojibake
 * "®s" (the &reg; HTML entity), so the block did not compile; the
 * address-of expressions are restored and the function reformatted.
 */
static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
		phy_interface_t iface, u16 iface_speed, u8 *macaddr,
		u32 exception_mask, u8 tbi_addr)
{
	bool is_rgmii, is_sgmii, is_qsgmii;
	int i;
	u32 tmp;

	/* Soft reset */
	iowrite32be(MACCFG1_SOFT_RESET, &regs->maccfg1);
	iowrite32be(0, &regs->maccfg1);

	/* dtsec_id2 */
	tmp = ioread32be(&regs->tsec_id2);

	/* check RGMII support */
	if (iface == PHY_INTERFACE_MODE_RGMII ||
	    iface == PHY_INTERFACE_MODE_RGMII_ID ||
	    iface == PHY_INTERFACE_MODE_RGMII_RXID ||
	    iface == PHY_INTERFACE_MODE_RGMII_TXID ||
	    iface == PHY_INTERFACE_MODE_RMII)
		if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
			return -EINVAL;

	if (iface == PHY_INTERFACE_MODE_SGMII ||
	    iface == PHY_INTERFACE_MODE_MII)
		if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
			return -EINVAL;

	is_rgmii = iface == PHY_INTERFACE_MODE_RGMII ||
		   iface == PHY_INTERFACE_MODE_RGMII_ID ||
		   iface == PHY_INTERFACE_MODE_RGMII_RXID ||
		   iface == PHY_INTERFACE_MODE_RGMII_TXID;
	is_sgmii = iface == PHY_INTERFACE_MODE_SGMII;
	is_qsgmii = iface == PHY_INTERFACE_MODE_QSGMII;

	/* ECNTRL: select MII management / serdes mode and speed bits */
	tmp = 0;
	if (is_rgmii || iface == PHY_INTERFACE_MODE_GMII)
		tmp |= DTSEC_ECNTRL_GMIIM;
	if (is_sgmii)
		tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM);
	if (is_qsgmii)
		tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM |
			DTSEC_ECNTRL_QSGMIIM);
	if (is_rgmii)
		tmp |= DTSEC_ECNTRL_RPM;
	if (iface_speed == SPEED_100)
		tmp |= DTSEC_ECNTRL_R100M;
	iowrite32be(tmp, &regs->ecntrl);

	/* PTV: pause time value */
	tmp = 0;
	if (cfg->tx_pause_time)
		tmp |= cfg->tx_pause_time;
	if (cfg->tx_pause_time_extd)
		tmp |= cfg->tx_pause_time_extd << PTV_PTE_SHIFT;
	iowrite32be(tmp, &regs->ptv);

	/* RCTRL: rx frame padding/alignment */
	tmp = 0;
	tmp |= (cfg->rx_prepend << RCTRL_PAL_SHIFT) & RCTRL_PAL_MASK;
	/* Accept short frames */
	tmp |= RCTRL_RSF;
	iowrite32be(tmp, &regs->rctrl);

	/* Assign a Phy Address to the TBI (TBIPA).
	 * Done also in cases where TBI is not selected to avoid conflict with
	 * the external PHY's Physical address
	 */
	iowrite32be(tbi_addr, &regs->tbipa);

	iowrite32be(0, &regs->tmr_ctrl);

	/* Optional IEEE 1588 timestamping unit */
	if (cfg->ptp_tsu_en) {
		tmp = 0;
		tmp |= TMR_PEVENT_TSRE;
		iowrite32be(tmp, &regs->tmr_pevent);

		if (cfg->ptp_exception_en) {
			tmp = 0;
			tmp |= TMR_PEMASK_TSREEN;
			iowrite32be(tmp, &regs->tmr_pemask);
		}
	}

	/* MACCFG1: enable rx/tx flow control */
	tmp = 0;
	tmp |= MACCFG1_RX_FLOW;
	tmp |= MACCFG1_TX_FLOW;
	iowrite32be(tmp, &regs->maccfg1);

	/* MACCFG2: interface width, preamble, padding, duplex */
	tmp = 0;
	if (iface_speed < SPEED_1000)
		tmp |= MACCFG2_NIBBLE_MODE;
	else if (iface_speed == SPEED_1000)
		tmp |= MACCFG2_BYTE_MODE;
	tmp |= (cfg->preamble_len << MACCFG2_PREAMBLE_LENGTH_SHIFT) &
		MACCFG2_PREAMBLE_LENGTH_MASK;
	if (cfg->tx_pad_crc)
		tmp |= MACCFG2_PAD_CRC_EN;
	/* Full Duplex */
	tmp |= MACCFG2_FULL_DUPLEX;
	iowrite32be(tmp, &regs->maccfg2);

	/* IPGIFG: inter-packet / inter-frame gaps */
	tmp = (((cfg->non_back_to_back_ipg1 <<
		 IPGIFG_NON_BACK_TO_BACK_IPG_1_SHIFT)
		& IPGIFG_NON_BACK_TO_BACK_IPG_1) |
	       ((cfg->non_back_to_back_ipg2 <<
		 IPGIFG_NON_BACK_TO_BACK_IPG_2_SHIFT)
		& IPGIFG_NON_BACK_TO_BACK_IPG_2) |
	       ((cfg->min_ifg_enforcement << IPGIFG_MIN_IFG_ENFORCEMENT_SHIFT)
		& IPGIFG_MIN_IFG_ENFORCEMENT) |
	       (cfg->back_to_back_ipg & IPGIFG_BACK_TO_BACK_IPG));
	iowrite32be(tmp, &regs->ipgifg);

	/* HAFDUP: half-duplex collision handling */
	tmp = 0;
	tmp |= HAFDUP_EXCESS_DEFER;
	tmp |= ((cfg->halfdup_retransmit << HAFDUP_RETRANSMISSION_MAX_SHIFT)
		& HAFDUP_RETRANSMISSION_MAX);
	tmp |= (cfg->halfdup_coll_window & HAFDUP_COLLISION_WINDOW);
	iowrite32be(tmp, &regs->hafdup);

	/* Initialize Maximum frame length */
	iowrite32be(cfg->maximum_frame, &regs->maxfrm);

	iowrite32be(0xffffffff, &regs->cam1);
	iowrite32be(0xffffffff, &regs->cam2);

	iowrite32be(exception_mask, &regs->imask);

	/* clear all pending events */
	iowrite32be(0xffffffff, &regs->ievent);

	/* Station address, split across MACSTNADDR1/2 in reversed byte order */
	tmp = (u32)((macaddr[5] << 24) | (macaddr[4] << 16) |
		    (macaddr[3] << 8) | macaddr[2]);
	iowrite32be(tmp, &regs->macstnaddr1);
	tmp = (u32)((macaddr[1] << 24) | (macaddr[0] << 16));
	iowrite32be(tmp, &regs->macstnaddr2);

	/* HASH */
	for (i = 0; i < NUM_OF_HASH_REGS; i++) {
		/* Initialize IADDRx */
		iowrite32be(0, &regs->igaddr[i]);
		/* Initialize GADDRx */
		iowrite32be(0, &regs->gaddr[i]);
	}

	return 0;
}
/* Legacy (FSL SDK-style) dTSEC MAC initialization.
 *
 * Programs reset, ECNTRL, TCTRL, PTV, RCTRL, TBIPA, the PTP timestamping
 * unit, MACCFG1/2, IPGIFG, HAFDUP, MAXFRM, CAM, IMASK/IEVENT, the station
 * address and the hash registers, then resets the statistics counters.
 * Returns 0 on success, -EINVAL if the interface mode is unsupported by
 * this controller revision.
 *
 * Fix: every "&regs" had been corrupted to the mojibake "®s" (&reg;
 * entity), so the block did not compile; restored and reformatted.
 */
int fman_dtsec_init(struct dtsec_regs *regs, struct dtsec_cfg *cfg,
		    enum enet_interface iface_mode,
		    enum enet_speed iface_speed,
		    uint8_t *macaddr, uint8_t fm_rev_maj,
		    uint8_t fm_rev_min, uint32_t exception_mask)
{
	bool is_rgmii = FALSE;
	bool is_sgmii = FALSE;
	bool is_qsgmii = FALSE;
	int i;
	uint32_t tmp;

	/* revision args only consumed by the errata #ifdef below */
	UNUSED(fm_rev_maj);
	UNUSED(fm_rev_min);

	/* let's start with a soft reset */
	iowrite32be(MACCFG1_SOFT_RESET, &regs->maccfg1);
	iowrite32be(0, &regs->maccfg1);

	/*************dtsec_id2******************/
	tmp = ioread32be(&regs->tsec_id2);

	/* check RGMII support */
	if (iface_mode == E_ENET_IF_RGMII ||
	    iface_mode == E_ENET_IF_RMII)
		if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
			return -EINVAL;

	if (iface_mode == E_ENET_IF_SGMII ||
	    iface_mode == E_ENET_IF_MII)
		if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
			return -EINVAL;

	/***************ECNTRL************************/
	is_rgmii = (bool)((iface_mode == E_ENET_IF_RGMII) ? TRUE : FALSE);
	is_sgmii = (bool)((iface_mode == E_ENET_IF_SGMII) ? TRUE : FALSE);
	is_qsgmii = (bool)((iface_mode == E_ENET_IF_QSGMII) ? TRUE : FALSE);

	tmp = 0;
	if (is_rgmii || iface_mode == E_ENET_IF_GMII)
		tmp |= DTSEC_ECNTRL_GMIIM;
	if (is_sgmii)
		tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM);
	if (is_qsgmii)
		tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM |
			DTSEC_ECNTRL_QSGMIIM);
	if (is_rgmii)
		tmp |= DTSEC_ECNTRL_RPM;
	if (iface_speed == E_ENET_SPEED_100)
		tmp |= DTSEC_ECNTRL_R100M;
	iowrite32be(tmp, &regs->ecntrl);

	/***************TCTRL************************/
	tmp = 0;
	if (cfg->halfdup_on)
		tmp |= DTSEC_TCTRL_THDF;
	if (cfg->tx_time_stamp_en)
		tmp |= DTSEC_TCTRL_TTSE;
	iowrite32be(tmp, &regs->tctrl);

	/***************PTV************************/
	tmp = 0;
#ifdef FM_SHORT_PAUSE_TIME_ERRATA_DTSEC1
	if ((fm_rev_maj == 1) && (fm_rev_min == 0))
		cfg->tx_pause_time += 2;
#endif /* FM_SHORT_PAUSE_TIME_ERRATA_DTSEC1 */
	if (cfg->tx_pause_time)
		tmp |= cfg->tx_pause_time;
	if (cfg->tx_pause_time_extd)
		tmp |= cfg->tx_pause_time_extd << PTV_PTE_OFST;
	iowrite32be(tmp, &regs->ptv);

	/***************RCTRL************************/
	tmp = 0;
	tmp |= ((uint32_t)(cfg->rx_prepend & 0x0000001f)) << 16;
	if (cfg->rx_ctrl_acc)
		tmp |= RCTRL_CFA;
	if (cfg->rx_group_hash_exd)
		tmp |= RCTRL_GHTX;
	if (cfg->rx_time_stamp_en)
		tmp |= RCTRL_RTSE;
	if (cfg->rx_drop_bcast)
		tmp |= RCTRL_BC_REJ;
	if (cfg->rx_short_frm)
		tmp |= RCTRL_RSF;
	if (cfg->rx_promisc)
		tmp |= RCTRL_PROM;
	iowrite32be(tmp, &regs->rctrl);

	/*
	 * Assign a Phy Address to the TBI (TBIPA).
	 * Done also in cases where TBI is not selected to avoid conflict with
	 * the external PHY's Physical address
	 */
	iowrite32be(cfg->tbipa, &regs->tbipa);

	/***************TMR_CTL************************/
	iowrite32be(0, &regs->tmr_ctrl);

	if (cfg->ptp_tsu_en) {
		tmp = 0;
		tmp |= TMR_PEVENT_TSRE;
		iowrite32be(tmp, &regs->tmr_pevent);

		if (cfg->ptp_exception_en) {
			tmp = 0;
			tmp |= TMR_PEMASK_TSREEN;
			iowrite32be(tmp, &regs->tmr_pemask);
		}
	}

	/***************MACCFG1***********************/
	tmp = 0;
	if (cfg->loopback)
		tmp |= MACCFG1_LOOPBACK;
	if (cfg->rx_flow)
		tmp |= MACCFG1_RX_FLOW;
	if (cfg->tx_flow)
		tmp |= MACCFG1_TX_FLOW;
	iowrite32be(tmp, &regs->maccfg1);

	/***************MACCFG2***********************/
	tmp = 0;
	if (iface_speed < E_ENET_SPEED_1000)
		tmp |= MACCFG2_NIBBLE_MODE;
	else if (iface_speed == E_ENET_SPEED_1000)
		tmp |= MACCFG2_BYTE_MODE;
	tmp |= ((uint32_t)cfg->preamble_len & 0x0000000f) <<
		PREAMBLE_LENGTH_SHIFT;
	if (cfg->rx_preamble)
		tmp |= MACCFG2_PRE_AM_Rx_EN;
	if (cfg->tx_preamble)
		tmp |= MACCFG2_PRE_AM_Tx_EN;
	if (cfg->rx_len_check)
		tmp |= MACCFG2_LENGTH_CHECK;
	if (cfg->tx_pad_crc)
		tmp |= MACCFG2_PAD_CRC_EN;
	if (cfg->tx_crc)
		tmp |= MACCFG2_CRC_EN;
	if (!cfg->halfdup_on)
		tmp |= MACCFG2_FULL_DUPLEX;
	iowrite32be(tmp, &regs->maccfg2);

	/***************IPGIFG************************/
	tmp = (((cfg->non_back_to_back_ipg1 <<
		 IPGIFG_NON_BACK_TO_BACK_IPG_1_SHIFT)
		& IPGIFG_NON_BACK_TO_BACK_IPG_1) |
	       ((cfg->non_back_to_back_ipg2 <<
		 IPGIFG_NON_BACK_TO_BACK_IPG_2_SHIFT)
		& IPGIFG_NON_BACK_TO_BACK_IPG_2) |
	       ((cfg->min_ifg_enforcement << IPGIFG_MIN_IFG_ENFORCEMENT_SHIFT)
		& IPGIFG_MIN_IFG_ENFORCEMENT) |
	       (cfg->back_to_back_ipg & IPGIFG_BACK_TO_BACK_IPG));
	iowrite32be(tmp, &regs->ipgifg);

	/***************HAFDUP************************/
	tmp = 0;
	if (cfg->halfdup_alt_backoff_en)
		tmp = (uint32_t)(HAFDUP_ALT_BEB |
				 ((cfg->halfdup_alt_backoff_val & 0x0000000f)
				  << HAFDUP_ALTERNATE_BEB_TRUNCATION_SHIFT));
	if (cfg->halfdup_bp_no_backoff)
		tmp |= HAFDUP_BP_NO_BACKOFF;
	if (cfg->halfdup_no_backoff)
		tmp |= HAFDUP_NO_BACKOFF;
	if (cfg->halfdup_excess_defer)
		tmp |= HAFDUP_EXCESS_DEFER;
	tmp |= ((cfg->halfdup_retransmit << HAFDUP_RETRANSMISSION_MAX_SHIFT)
		& HAFDUP_RETRANSMISSION_MAX);
	tmp |= (cfg->halfdup_coll_window & HAFDUP_COLLISION_WINDOW);
	iowrite32be(tmp, &regs->hafdup);

	/***************MAXFRM************************/
	/* Initialize MAXFRM */
	iowrite32be(cfg->maximum_frame, &regs->maxfrm);

	/***************CAM************************/
	iowrite32be(0xffffffff, &regs->cam1);
	iowrite32be(0xffffffff, &regs->cam2);

	/***************IMASK************************/
	iowrite32be(exception_mask, &regs->imask);

	/***************IEVENT************************/
	iowrite32be(0xffffffff, &regs->ievent);

	/***************MACSTNADDR1/2*****************/
	tmp = (uint32_t)((macaddr[5] << 24) | (macaddr[4] << 16) |
			 (macaddr[3] << 8) | macaddr[2]);
	iowrite32be(tmp, &regs->macstnaddr1);
	tmp = (uint32_t)((macaddr[1] << 24) | (macaddr[0] << 16));
	iowrite32be(tmp, &regs->macstnaddr2);

	/*****************HASH************************/
	for (i = 0; i < NUM_OF_HASH_REGS; i++) {
		/* Initialize IADDRx */
		iowrite32be(0, &regs->igaddr[i]);
		/* Initialize GADDRx */
		iowrite32be(0, &regs->gaddr[i]);
	}

	fman_dtsec_reset_stat(regs);

	return 0;
}
static u32 _temac_ior_be(struct temac_local *lp, int offset) { return ioread32be(lp->regs + offset); }
static void ulite_shutdown(struct uart_port *port) { iowrite32be(0, port->membase + ULITE_CONTROL); ioread32be(port->membase + ULITE_CONTROL); free_irq(port->irq, port); }
/* Big-endian 32-bit register read accessor. */
static u32 uartlite_inbe32(void __iomem *addr)
{
	u32 val = ioread32be(addr);

	return val;
}
static unsigned int dw8250_serial_in32be(struct uart_port *p, int offset) { unsigned int value = ioread32be(p->membase + (offset << p->regshift)); return dw8250_modify_msr(p, offset, value); }
static int gef_gpio_get(struct gpio_chip *chip, unsigned offset) { struct of_mm_gpio_chip *mmchip = to_of_mm_gpio_chip(chip); return !!(ioread32be(mmchip->regs + GEF_GPIO_IN) & BIT(offset)); }
/* Big-endian 32-bit read helper for the generic GPIO accessor table. */
static unsigned long bgpio_read32be(void __iomem *reg)
{
	unsigned long val = ioread32be(reg);

	return val;
}
static unsigned int regmap_mmio_read32be(struct regmap_mmio_context *ctx, unsigned int reg) { return ioread32be(ctx->regs + reg); }
static u64 nps_clksrc_read(struct clocksource *clksrc) { int cluster = raw_smp_processor_id() >> NPS_CLUSTER_OFFSET; return (u64)ioread32be(nps_msu_reg_low_addr[cluster]); }
static int mpc8xxx_gpio_get(struct gpio_chip *gc, unsigned int gpio) { struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc); return ioread32be(mm->regs + GPIO_DAT) & mpc8xxx_gpio2mask(gpio); }
/* Probe and install one vmeio device: map its VME window(s), verify the
 * module is present by reading its serial register, and hook up the
 * interrupt path. Returns 0 on success, -ENODEV on any failure (all
 * acquired mappings are released via the goto chain below).
 */
int install_device(struct vmeio_device *dev, unsigned i)
{
	unsigned int serial;

	memset(dev, 0, sizeof(*dev));
	dev->lun = lun[i];
	dev->debug = DEBUG;

	/* configure mmapped I/O */
	if (base_address1_num &&
	    map(&dev->maps[0], base_address1[i], data_width1, am1, size1)) {
		printk(KERN_ERR PFX "could not map lun:%d, first space\n",
		       dev->lun);
		goto out_map1;
	}
	/* check device absence */
	/* the read itself is probed via the bus-error check that follows */
	serial = ioread32be(dev->maps[0].kernel_va);
	if (vme_bus_error_check(1)) {
		printk(KERN_ERR PFX "module %d not found at address 0x%08lx\n",
		       dev->lun, base_address1[i]);
		dev->lun = -1;
		goto out_map2;
	}
	if (base_address2_num &&
	    map(&dev->maps[1], base_address2[i], data_width2, am2, size2)) {
		printk(KERN_ERR PFX "could not map lun:%d, second space\n",
		       dev->lun);
		goto out_map2;
	}
	/* configure interrupt handling */
	dev->vector = vector[i];
	dev->level = level;
	dev->isrc = isrc;
	dev->timeout = msecs_to_jiffies(TIMEOUT);
	dev->icnt = 0;
	init_waitqueue_head(&dev->queue);
	/* ISR is only registered when both a level and a vector are given */
	if (dev->level && dev->vector &&
	    register_isr(dev, dev->vector, dev->level) < 0) {
		printk(KERN_ERR PFX "could not register isr "
		       "for vector %d, level %d\n", dev->vector, dev->level);
		goto out_isr;
	}
	/* This will be eventually removed */
	register_int_source(dev, dev->maps[0].kernel_va, dev->isrc);
	printk(KERN_INFO PFX
	       "lun %2ld installed, base address 0x%08lx, serial 0x%x\n",
	       lun[i], base_address1[i], serial);
	return 0;

	/* NOTE(review): out_isr releases maps[1] even when base_address2_num
	 * was 0 and maps[1] was never mapped (it is zeroed by the memset) —
	 * confirm vme_release_mapping tolerates an unmapped descriptor.
	 */
out_isr:
	vme_release_mapping(&dev->maps[1], 1);
out_map2:
	vme_release_mapping(&dev->maps[0], 1);
out_map1:
	printk(KERN_ERR PFX "lun %2ld not installed\n", lun[i]);
	return -ENODEV;
}
/* Read an OpenCores i2c register via a big-endian 32-bit access; the
 * register index is scaled by the controller's register stride.
 */
static inline u8 oc_getreg_32be(struct ocores_i2c *i2c, int reg)
{
	int byte_off = reg << i2c->reg_shift;

	return ioread32be(i2c->base + byte_off);
}
/**
 * temac_dma_in32_be - memory-mapped big-endian DMA register read. Expects a
 * register input based on DCR word addresses, which are converted (<< 2) to
 * memory-mapped byte addresses. To be assigned to lp->dma_in32.
 */
static u32 temac_dma_in32_be(struct temac_local *lp, int reg)
{
	int byte_off = reg << 2;

	return ioread32be(lp->sdma_regs + byte_off);
}
uint32_t fman_rtc_get_timer_ctrl(struct rtc_regs *regs) { return ioread32be(®s->tmr_ctrl); }
static void ulite_start_tx(struct uart_port *port) { ulite_transmit(port, ioread32be(port->membase + ULITE_STATUS)); }
uint32_t fman_rtc_get_events(struct rtc_regs *regs) { return ioread32be(®s->tmr_tevent); }
/* Big-endian 32-bit timer register read helper. */
static unsigned int timer_read32_be(void __iomem *addr)
{
	unsigned int val = ioread32be(addr);

	return val;
}
/* Return the pending RTC timer events selected by ev_mask.
 * Fix: "&regs" was corrupted to the mojibake "®s"; restored.
 */
uint32_t fman_rtc_get_event(struct rtc_regs *regs, uint32_t ev_mask)
{
	return ioread32be(&regs->tmr_tevent) & ev_mask;
}
static void dtsec_isr(void *handle) { struct fman_mac *dtsec = (struct fman_mac *)handle; struct dtsec_regs __iomem *regs = dtsec->regs; u32 event; /* do not handle MDIO events */ event = ioread32be(®s->ievent) & (u32)(~(DTSEC_IMASK_MMRDEN | DTSEC_IMASK_MMWREN)); event &= ioread32be(®s->imask); iowrite32be(event, ®s->ievent); if (event & DTSEC_IMASK_BREN) dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_BAB_RX); if (event & DTSEC_IMASK_RXCEN) dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_RX_CTL); if (event & DTSEC_IMASK_GTSCEN) dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET); if (event & DTSEC_IMASK_BTEN) dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_BAB_TX); if (event & DTSEC_IMASK_TXCEN) dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_CTL); if (event & DTSEC_IMASK_TXEEN) dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_ERR); if (event & DTSEC_IMASK_LCEN) dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_LATE_COL); if (event & DTSEC_IMASK_CRLEN) dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_COL_RET_LMT); if (event & DTSEC_IMASK_XFUNEN) { /* FM_TX_LOCKUP_ERRATA_DTSEC6 Errata workaround */ if (dtsec->fm_rev_info.major == 2) { u32 tpkt1, tmp_reg1, tpkt2, tmp_reg2, i; /* a. Write 0x00E0_0C00 to DTSEC_ID * This is a read only register * b. Read and save the value of TPKT */ tpkt1 = ioread32be(®s->tpkt); /* c. Read the register at dTSEC address offset 0x32C */ tmp_reg1 = ioread32be(®s->reserved02c0[27]); /* d. Compare bits [9:15] to bits [25:31] of the * register at address offset 0x32C. */ if ((tmp_reg1 & 0x007F0000) != (tmp_reg1 & 0x0000007F)) { /* If they are not equal, save the value of * this register and wait for at least * MAXFRM*16 ns */ usleep_range((u32)(min (dtsec_get_max_frame_length(dtsec) * 16 / 1000, 1)), (u32) (min(dtsec_get_max_frame_length (dtsec) * 16 / 1000, 1) + 1)); } /* e. 
Read and save TPKT again and read the register * at dTSEC address offset 0x32C again */ tpkt2 = ioread32be(®s->tpkt); tmp_reg2 = ioread32be(®s->reserved02c0[27]); /* f. Compare the value of TPKT saved in step b to * value read in step e. Also compare bits [9:15] of * the register at offset 0x32C saved in step d to the * value of bits [9:15] saved in step e. If the two * registers values are unchanged, then the transmit * portion of the dTSEC controller is locked up and * the user should proceed to the recover sequence. */ if ((tpkt1 == tpkt2) && ((tmp_reg1 & 0x007F0000) == (tmp_reg2 & 0x007F0000))) { /* recover sequence */ /* a.Write a 1 to RCTRL[GRS] */ iowrite32be(ioread32be(®s->rctrl) | RCTRL_GRS, ®s->rctrl); /* b.Wait until IEVENT[GRSC]=1, or at least * 100 us has elapsed. */ for (i = 0; i < 100; i++) { if (ioread32be(®s->ievent) & DTSEC_IMASK_GRSCEN) break; udelay(1); } if (ioread32be(®s->ievent) & DTSEC_IMASK_GRSCEN) iowrite32be(DTSEC_IMASK_GRSCEN, ®s->ievent); else pr_debug("Rx lockup due to Tx lockup\n"); /* c.Write a 1 to bit n of FM_RSTC * (offset 0x0CC of FPM) */ fman_reset_mac(dtsec->fm, dtsec->mac_id); /* d.Wait 4 Tx clocks (32 ns) */ udelay(1); /* e.Write a 0 to bit n of FM_RSTC. */ /* cleared by FMAN */ } } dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_FIFO_UNDRN); } if (event & DTSEC_IMASK_MAGEN) dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_MAG_PCKT); if (event & DTSEC_IMASK_GRSCEN) dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET); if (event & DTSEC_IMASK_TDPEEN) dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_DATA_ERR); if (event & DTSEC_IMASK_RDPEEN) dtsec->exception_cb(dtsec->dev_id, FM_MAC_1G_RX_DATA_ERR); /* masked interrupts */ WARN_ON(event & DTSEC_IMASK_ABRTEN); WARN_ON(event & DTSEC_IMASK_IFERREN); }
uint32_t fman_rtc_get_interrupt_mask(struct rtc_regs *regs) { return ioread32be(®s->tmr_temask); }
/* Return the configured maximum frame length (low 16 bits of MAXFRM).
 * Fix: "&regs" was corrupted to the mojibake "®s"; restored.
 */
uint16_t fman_dtsec_get_max_frame_len(struct dtsec_regs *regs)
{
	return (uint16_t)ioread32be(&regs->maxfrm);
}
uint32_t fman_rtc_get_frequency_compensation(struct rtc_regs *regs) { return ioread32be(®s->tmr_add); }
void fman_dtsec_stop_tx(struct dtsec_regs *regs) { /* Assert the graceful stop bit */ iowrite32be(ioread32be(®s->tctrl) | DTSEC_TCTRL_GTS, ®s->tctrl); }
/* * fsl_ifc_ctrl_probe * * called by device layer when it finds a device matching * one our driver can handled. This code allocates all of * the resources needed for the controller only. The * resources for the NAND banks themselves are allocated * in the chip probe function. */ static int fsl_ifc_ctrl_probe(struct platform_device *dev) { int ret = 0; int version, banks; dev_info(&dev->dev, "Freescale Integrated Flash Controller\n"); fsl_ifc_ctrl_dev = kzalloc(sizeof(*fsl_ifc_ctrl_dev), GFP_KERNEL); if (!fsl_ifc_ctrl_dev) return -ENOMEM; dev_set_drvdata(&dev->dev, fsl_ifc_ctrl_dev); /* IOMAP the entire IFC region */ fsl_ifc_ctrl_dev->regs = of_iomap(dev->dev.of_node, 0); if (!fsl_ifc_ctrl_dev->regs) { dev_err(&dev->dev, "failed to get memory region\n"); ret = -ENODEV; goto err; } version = ifc_in32(&fsl_ifc_ctrl_dev->regs->ifc_rev) & FSL_IFC_VERSION_MASK; banks = (version == FSL_IFC_VERSION_1_0_0) ? 4 : 8; dev_info(&dev->dev, "IFC version %d.%d, %d banks\n", version >> 24, (version >> 16) & 0xf, banks); fsl_ifc_ctrl_dev->version = version; fsl_ifc_ctrl_dev->banks = banks; if (of_property_read_bool(dev->dev.of_node, "little-endian")) { fsl_ifc_ctrl_dev->little_endian = true; dev_dbg(&dev->dev, "IFC REGISTERS are LITTLE endian\n"); } else { fsl_ifc_ctrl_dev->little_endian = false; dev_dbg(&dev->dev, "IFC REGISTERS are BIG endian\n"); } version = ioread32be(&fsl_ifc_ctrl_dev->regs->ifc_rev) & FSL_IFC_VERSION_MASK; banks = (version == FSL_IFC_VERSION_1_0_0) ? 
4 : 8; dev_info(&dev->dev, "IFC version %d.%d, %d banks\n", version >> 24, (version >> 16) & 0xf, banks); fsl_ifc_ctrl_dev->version = version; fsl_ifc_ctrl_dev->banks = banks; /* get the Controller level irq */ fsl_ifc_ctrl_dev->irq = irq_of_parse_and_map(dev->dev.of_node, 0); if (fsl_ifc_ctrl_dev->irq == NO_IRQ) { dev_err(&dev->dev, "failed to get irq resource " "for IFC\n"); ret = -ENODEV; goto err; } /* get the nand machine irq */ fsl_ifc_ctrl_dev->nand_irq = irq_of_parse_and_map(dev->dev.of_node, 1); fsl_ifc_ctrl_dev->dev = &dev->dev; ret = fsl_ifc_ctrl_init(fsl_ifc_ctrl_dev); if (ret < 0) goto err; init_waitqueue_head(&fsl_ifc_ctrl_dev->nand_wait); ret = request_irq(fsl_ifc_ctrl_dev->irq, fsl_ifc_ctrl_irq, IRQF_SHARED, "fsl-ifc", fsl_ifc_ctrl_dev); if (ret != 0) { dev_err(&dev->dev, "failed to install irq (%d)\n", fsl_ifc_ctrl_dev->irq); goto err_irq; } if (fsl_ifc_ctrl_dev->nand_irq) { ret = request_irq(fsl_ifc_ctrl_dev->nand_irq, fsl_ifc_nand_irq, 0, "fsl-ifc-nand", fsl_ifc_ctrl_dev); if (ret != 0) { dev_err(&dev->dev, "failed to install irq (%d)\n", fsl_ifc_ctrl_dev->nand_irq); goto err_nandirq; } } return 0; err_nandirq: free_irq(fsl_ifc_ctrl_dev->nand_irq, fsl_ifc_ctrl_dev); irq_dispose_mapping(fsl_ifc_ctrl_dev->nand_irq); err_irq: free_irq(fsl_ifc_ctrl_dev->irq, fsl_ifc_ctrl_dev); irq_dispose_mapping(fsl_ifc_ctrl_dev->irq); err: return ret; }
void fman_dtsec_start_rx(struct dtsec_regs *regs) { /* clear the graceful stop bit */ iowrite32be(ioread32be(®s->rctrl) & ~RCTRL_GRS, ®s->rctrl); }
static inline u8 fpgai2c_reg_get_32be(struct fpgalogic_i2c *i2c, int reg) { return ioread32be(i2c->base + (reg << i2c->reg_shift)); }