static irqreturn_t ixpdev_interrupt(int irq, void *dev_id) { u32 status; status = ixp2000_reg_read(IXP2000_IRQ_THD_STATUS_A_0); if (status == 0) return IRQ_NONE; /* * Any of the eight receive units signaled RX? */ if (status & 0x00ff) { struct net_device *dev = nds[0]; struct ixpdev_priv *ip = netdev_priv(dev); ixp2000_reg_wrb(IXP2000_IRQ_THD_ENABLE_CLEAR_A_0, 0x00ff); if (likely(napi_schedule_prep(&ip->napi))) { __netif_rx_schedule(&ip->napi); } else { printk(KERN_CRIT "ixp2000: irq while polling!!\n"); } } /* * Any of the eight transmit units signaled TXdone? */ if (status & 0xff00) { ixp2000_reg_wrb(IXP2000_IRQ_THD_RAW_STATUS_A_0, 0xff00); ixpdev_tx_complete(); } return IRQ_HANDLED; }
void ixp2400_msf_init(struct ixp2400_msf_parameters *mp) { u32 value; int i; ixp2400_pll_init(mp); value = ixp2000_reg_read(IXP2000_RESET0); ixp2000_reg_write(IXP2000_RESET0, value | 0x80); ixp2000_reg_write(IXP2000_RESET0, value & ~0x80); ixp2000_reg_write(IXP2000_MSF_RX_MPHY_POLL_LIMIT, mp->rx_poll_ports - 1); ixp2000_reg_write(IXP2000_MSF_RX_CONTROL, mp->rx_mode); for (i = 0; i < 4; i++) { ixp2000_reg_write(IXP2000_MSF_RX_UP_CONTROL_0 + i, mp->rx_channel_mode[i]); } ixp2400_msf_free_rbuf_entries(mp); ixp2400_msf_enable_rx(mp); ixp2000_reg_write(IXP2000_MSF_TX_MPHY_POLL_LIMIT, mp->tx_poll_ports - 1); ixp2000_reg_write(IXP2000_MSF_TX_CONTROL, mp->tx_mode); for (i = 0; i < 4; i++) { ixp2000_reg_write(IXP2000_MSF_TX_UP_CONTROL_0 + i, mp->tx_channel_mode[i]); } ixp2400_msf_enable_tx(mp); }
/* * LOCAL_CSR_STATUS=1 after a read or write to a microengine's CSR * space means that the microengine we tried to access was also trying * to access its own CSR space on the same clock cycle as we did. When * this happens, we lose the arbitration process by default, and the * read or write we tried to do was not actually performed, so we try * again until it succeeds. */ u32 ixp2000_uengine_csr_read(int uengine, int offset) { void *uebase; u32 *local_csr_status; u32 *reg; u32 value; uebase = ixp2000_uengine_csr_area(uengine); local_csr_status = (u32 *)(uebase + LOCAL_CSR_STATUS); reg = (u32 *)(uebase + offset); do { value = ixp2000_reg_read(reg); } while (ixp2000_reg_read(local_csr_status) & 1); return value; }
/*
 * Enable the valid TX channels: bits 31:28 of TX_CONTROL hold the
 * per-channel enables; everything below them is preserved.
 */
static void ixp2400_msf_enable_tx(struct ixp2400_msf_parameters *mp)
{
	u32 ctrl;

	ctrl = ixp2000_reg_read(IXP2000_MSF_TX_CONTROL) & 0x0fffffff;
	ctrl |= ixp2400_msf_valid_channels(mp->tx_mode) << 28;
	ixp2000_reg_write(IXP2000_MSF_TX_CONTROL, ctrl);
}
/*
 * Pulse the reset lines of the requested microengines: assert the
 * (clamped) mask bits in RESET1, then deassert all microengine bits.
 */
void ixp2000_uengine_reset(u32 uengine_mask)
{
	u32 base;

	/* RESET1 with every microengine reset bit cleared. */
	base = ixp2000_reg_read(IXP_RESET1) & ~ixp2000_uengine_mask;

	/* Only allow bits corresponding to microengines that exist. */
	ixp2000_reg_wrb(IXP_RESET1,
			base | (uengine_mask & ixp2000_uengine_mask));
	ixp2000_reg_wrb(IXP_RESET1, base);
}
static int ixpdev_rx(struct net_device *dev, int processed, int budget) { while (processed < budget) { struct ixpdev_rx_desc *desc; struct sk_buff *skb; void *buf; u32 _desc; _desc = ixp2000_reg_read(RING_RX_DONE); if (_desc == 0) return 0; desc = rx_desc + ((_desc - RX_BUF_DESC_BASE) / sizeof(struct ixpdev_rx_desc)); buf = phys_to_virt(desc->buf_addr); if (desc->pkt_length < 4 || desc->pkt_length > PAGE_SIZE) { printk(KERN_ERR "ixp2000: rx err, length %d\n", desc->pkt_length); goto err; } if (desc->channel < 0 || desc->channel >= nds_count) { printk(KERN_ERR "ixp2000: rx err, channel %d\n", desc->channel); goto err; } /* @@@ Make FCS stripping configurable. */ desc->pkt_length -= 4; if (unlikely(!netif_running(nds[desc->channel]))) goto err; skb = netdev_alloc_skb(dev, desc->pkt_length + 2); if (likely(skb != NULL)) { skb_reserve(skb, 2); skb_copy_to_linear_data(skb, buf, desc->pkt_length); skb_put(skb, desc->pkt_length); skb->protocol = eth_type_trans(skb, nds[desc->channel]); dev->last_rx = jiffies; netif_receive_skb(skb); } err: ixp2000_reg_write(RING_RX_PENDING, _desc); processed++; } return processed; }
/*
 * Write a microengine CSR, retrying until the access actually lands.
 * LOCAL_CSR_STATUS=1 after the write means we lost CSR-bus arbitration
 * to the microengine itself and the write was dropped, so redo it.
 */
void ixp2000_uengine_csr_write(int uengine, int offset, u32 value)
{
	void *base = ixp2000_uengine_csr_area(uengine);
	u32 *status = (u32 *)(base + LOCAL_CSR_STATUS);
	u32 *reg = (u32 *)(base + offset);

	do {
		ixp2000_reg_write(reg, value);
	} while (ixp2000_reg_read(status) & 1);
}
/*
 * Program the MSF clock control register from the parameter block.
 *
 * The writes form a strictly ordered sequence: first everything is
 * forced on/bypassed, then the multipliers and clock-mode bits are
 * set, then two groups of bits are cleared in separate steps, with a
 * settling delay at the end.  NOTE(review): the meaning of the magic
 * masks below is inferred from the bit positions only — confirm
 * against the IXP2400 MSF clock control register documentation.
 */
static void ixp2400_pll_init(struct ixp2400_msf_parameters *mp)
{
	int rx_dual_clock;
	int tx_dual_clock;
	u32 value;

	/* Dual-clock operation presumably follows from the bus width mode. */
	rx_dual_clock = !!(mp->rx_mode & IXP2400_RX_MODE_WIDTH_MASK);
	tx_dual_clock = !!(mp->tx_mode & IXP2400_TX_MODE_WIDTH_MASK);

	/* Step 1: set bits 15:12 and 7:4 before reprogramming anything. */
	value = ixp2000_reg_read(IXP2000_MSF_CLK_CNTRL);
	value |= 0x0000f0f0;
	ixp2000_reg_write(IXP2000_MSF_CLK_CNTRL, value);

	/* Step 2: clock mode (bits 25:24) and the four 2-bit multiplier
	 * fields (bits 23:16), written in one go. */
	value &= ~0x03000000;
	value |= (rx_dual_clock << 24) | (tx_dual_clock << 25);
	value &= ~0x00ff0000;
	value |= mp->rxclk01_multiplier << 16;
	value |= mp->rxclk23_multiplier << 18;
	value |= mp->txclk01_multiplier << 20;
	value |= mp->txclk23_multiplier << 22;
	ixp2000_reg_write(IXP2000_MSF_CLK_CNTRL, value);

	/* Step 3: release bits 14/12 (plus 13/15 when dual-clocked)... */
	value &= ~(0x00005000 | rx_dual_clock << 13 | tx_dual_clock << 15);
	ixp2000_reg_write(IXP2000_MSF_CLK_CNTRL, value);

	/* Step 4: ...then the corresponding low group, bits 6/4 (plus
	 * 5/7 when dual-clocked), as a separate write. */
	value &= ~(0x00000050 | rx_dual_clock << 5 | tx_dual_clock << 7);
	ixp2000_reg_write(IXP2000_MSF_CLK_CNTRL, value);

	/* Let the clocks settle before the caller touches the MSF. */
	udelay(100);
}
/* dev always points to nds[0]. */ static int ixpdev_poll(struct napi_struct *napi, int budget) { struct ixpdev_priv *ip = container_of(napi, struct ixpdev_priv, napi); struct net_device *dev = ip->dev; int rx; rx = 0; do { ixp2000_reg_write(IXP2000_IRQ_THD_RAW_STATUS_A_0, 0x00ff); rx = ixpdev_rx(dev, rx, budget); if (rx >= budget) break; } while (ixp2000_reg_read(IXP2000_IRQ_THD_RAW_STATUS_A_0) & 0x00ff); netif_rx_complete(napi); ixp2000_reg_write(IXP2000_IRQ_THD_ENABLE_SET_A_0, 0x00ff); return rx; }
static void ixpdev_tx_complete(void) { int channel; u32 wake; wake = 0; while (1) { struct ixpdev_priv *ip; u32 desc; int entry; desc = ixp2000_reg_read(RING_TX_DONE); if (desc == 0) break; /* @@@ Check whether entries come back in order. */ entry = (desc - TX_BUF_DESC_BASE) / sizeof(struct ixpdev_tx_desc); channel = tx_desc[entry].channel; if (channel < 0 || channel >= nds_count) { printk(KERN_ERR "ixp2000: txcomp channel index " "out of bounds (%d, %.8i, %d)\n", channel, (unsigned int)desc, entry); continue; } ip = netdev_priv(nds[channel]); if (ip->tx_queue_entries == TX_BUF_COUNT_PER_CHAN) wake |= 1 << channel; ip->tx_queue_entries--; } for (channel = 0; wake != 0; channel++) { if (wake & (1 << channel)) { netif_wake_queue(nds[channel]); wake &= ~(1 << channel); } } }