static void
tx_error (struct net_device *dev, int tx_status)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	int frame_id;
	int i;

	frame_id = (tx_status & 0xffff0000);
	printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n",
		dev->name, tx_status, frame_id);
	np->stats.tx_errors++;
	/* Transmit Underrun */
	if (tx_status & 0x10) {
		np->stats.tx_fifo_errors++;
		dw16(TxStartThresh, dr16(TxStartThresh) + 0x10);
		/* Transmit Underrun needs TxReset, DMAReset and FIFOReset */
		dw16(ASICCtrl + 2,
		     TxReset | DMAReset | FIFOReset | NetworkReset);
		/* Wait for the ResetBusy bit to clear */
		for (i = 50; i > 0; i--) {
			if (!(dr16(ASICCtrl + 2) & ResetBusy))
				break;
			mdelay (1);
		}
		rio_set_led_mode(dev);
		rio_free_tx (dev, 1);
		/* Reset TFDListPtr */
		dw32(TFDListPtr0, np->tx_ring_dma +
		     np->old_tx * sizeof (struct netdev_desc));
		dw32(TFDListPtr1, 0);

		/* Let TxStartThresh stay default value */
	}
	/* Late Collision */
	if (tx_status & 0x04) {
		np->stats.tx_fifo_errors++;
		/* TxReset and clear FIFO */
		dw16(ASICCtrl + 2, TxReset | FIFOReset);
		/* Wait for the reset to complete */
		for (i = 50; i > 0; i--) {
			if (!(dr16(ASICCtrl + 2) & ResetBusy))
				break;
			mdelay (1);
		}
		rio_set_led_mode(dev);
		/* Let TxStartThresh stay default value */
	}
	/* Maximum Collisions */
#ifdef ETHER_STATS
	if (tx_status & 0x08)
		np->stats.collisions16++;
#else
	if (tx_status & 0x08)
		np->stats.collisions++;
#endif
	/* Restart the Tx */
	dw32(MACCtrl, dr16(MACCtrl) | TxEnable);
}
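/*
 * The "poll ASICCtrl until ResetBusy clears" loop above is open-coded for
 * every reset source in this driver.  A minimal helper along these lines
 * could factor it out; this is only a sketch (rio_wait_reset() is not part
 * of the original driver, and the 50 x 1 ms bound simply mirrors the loops
 * above), so it is kept compiled out:
 */
#if 0
static int rio_wait_reset(struct netdev_private *np)
{
	void __iomem *ioaddr = np->ioaddr;
	int i;

	/* Poll ResetBusy for up to ~50 ms, as the open-coded loops do */
	for (i = 50; i > 0; i--) {
		if (!(dr16(ASICCtrl + 2) & ResetBusy))
			return 0;
		mdelay(1);
	}
	return -ETIMEDOUT;
}
#endif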
/* allocate and initialize Tx and Rx descriptors */
static void alloc_list (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	int i;

	np->cur_rx = np->cur_tx = 0;
	np->old_rx = np->old_tx = 0;
	np->rx_buf_sz = (dev->mtu <= 1500 ? PACKET_SIZE : dev->mtu + 32);

	/* Initialize Tx descriptors; TFDListPtr is written in start_xmit(). */
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		np->tx_ring[i].status = cpu_to_le64 (TFDDone);
		np->tx_ring[i].next_desc = cpu_to_le64 (np->tx_ring_dma +
					      ((i + 1) % TX_RING_SIZE) *
					      sizeof (struct netdev_desc));
	}

	/* Initialize Rx descriptors */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].next_desc = cpu_to_le64 (np->rx_ring_dma +
					      ((i + 1) % RX_RING_SIZE) *
					      sizeof (struct netdev_desc));
		np->rx_ring[i].status = 0;
		np->rx_ring[i].fraginfo = 0;
		np->rx_skbuff[i] = NULL;
	}

	/* Allocate the rx buffers */
	for (i = 0; i < RX_RING_SIZE; i++) {
		/* Allocate fixed-size skbuffs */
		struct sk_buff *skb;

		skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
		np->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;

		/* Rubicon now supports 40 bits of addressing space. */
		np->rx_ring[i].fraginfo =
		    cpu_to_le64 (pci_map_single (np->pdev, skb->data,
						 np->rx_buf_sz,
						 PCI_DMA_FROMDEVICE));
		np->rx_ring[i].fraginfo |=
		    cpu_to_le64((u64)np->rx_buf_sz << 48);
	}

	/* Set RFDListPtr */
	dw32(RFDListPtr0, np->rx_ring_dma);
	dw32(RFDListPtr1, 0);
}
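/*
 * Note on the fraginfo layout used in alloc_list() above and in
 * start_xmit() below: the low bits carry the DMA address of the buffer
 * (the hardware supports 40-bit addressing, per the "Rubicon" comment),
 * and the upper 16 bits (48-63) carry the fragment length, which is why
 * "(u64)len << 48" is OR-ed in after the mapped address.
 */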
static void rio_hw_stop(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;

	/* Disable interrupts */
	dw16(IntEnable, 0);

	/* Stop Tx and Rx logic */
	dw32(MACCtrl, TxDisable | RxDisable | StatsDisable);
}
static void rio_set_led_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	u32 mode;

	if (np->chip_id != CHIP_IP1000A)
		return;

	mode = dr32(ASICCtrl);
	mode &= ~(IPG_AC_LED_MODE_BIT_1 | IPG_AC_LED_MODE | IPG_AC_LED_SPEED);

	if (np->led_mode & 0x01)
		mode |= IPG_AC_LED_MODE;
	if (np->led_mode & 0x02)
		mode |= IPG_AC_LED_MODE_BIT_1;
	if (np->led_mode & 0x08)
		mode |= IPG_AC_LED_SPEED;

	dw32(ASICCtrl, mode);
}
static irqreturn_t
rio_interrupt (int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	unsigned int_status;
	int cnt = max_intrloop;
	int handled = 0;

	while (1) {
		int_status = dr16(IntStatus);
		dw16(IntStatus, int_status);
		int_status &= DEFAULT_INTR;
		if (int_status == 0 || --cnt < 0)
			break;
		handled = 1;
		/* Processing received packets */
		if (int_status & RxDMAComplete)
			receive_packet (dev);
		/* TxDMAComplete interrupt */
		if ((int_status & (TxDMAComplete | IntRequested))) {
			int tx_status;
			tx_status = dr32(TxStatus);
			if (tx_status & 0x01)
				tx_error (dev, tx_status);
			/* Free used tx skbuffs */
			rio_free_tx (dev, 1);
		}

		/* Handle uncommon events */
		if (int_status & (HostError | LinkEvent | UpdateStats))
			rio_error (dev, int_status);
	}
	if (np->cur_tx != np->old_tx)
		dw32(CountDown, 100);
	return IRQ_RETVAL(handled);
}
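/*
 * Note on the CountDown write at the end of rio_interrupt() (and the
 * dw32(CountDown, 10000) "Schedule ISR" in start_xmit() below): this arms
 * the chip's count-down timer, whose expiry is handled above together with
 * TxDMAComplete via IntRequested.  Because TxDMAIndicate is only set on
 * every tx_coalesce-th descriptor, this timer is what guarantees that
 * outstanding Tx buffers are still reclaimed by rio_free_tx() even when no
 * further descriptor completion interrupt arrives.
 */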
static netdev_tx_t
start_xmit (struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	struct netdev_desc *txdesc;
	unsigned entry;
	u64 tfc_vlan_tag = 0;

	if (np->link_status == 0) {	/* Link Down */
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	entry = np->cur_tx % TX_RING_SIZE;
	np->tx_skbuff[entry] = skb;
	txdesc = &np->tx_ring[entry];

#if 0
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txdesc->status |=
		    cpu_to_le64 (TCPChecksumEnable | UDPChecksumEnable |
				 IPChecksumEnable);
	}
#endif
	if (np->vlan) {
		tfc_vlan_tag = VLANTagInsert |
		    ((u64)np->vlan << 32) |
		    ((u64)skb->priority << 45);
	}
	txdesc->fraginfo = cpu_to_le64 (pci_map_single (np->pdev, skb->data,
							skb->len,
							PCI_DMA_TODEVICE));
	txdesc->fraginfo |= cpu_to_le64((u64)skb->len << 48);

	/* DL2K bug: DMA fails to get next descriptor ptr in 10Mbps mode
	 * Work around: Always use 1 descriptor in 10Mbps mode */
	if (entry % np->tx_coalesce == 0 || np->speed == 10)
		txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag |
					      WordAlignDisable |
					      TxDMAIndicate |
					      (1 << FragCountShift));
	else
		txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag |
					      WordAlignDisable |
					      (1 << FragCountShift));

	/* TxDMAPollNow */
	dw32(DMACtrl, dr32(DMACtrl) | 0x00001000);
	/* Schedule ISR */
	dw32(CountDown, 10000);
	np->cur_tx = (np->cur_tx + 1) % TX_RING_SIZE;
	if ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
			< TX_QUEUE_LEN - 1 && np->speed != 10) {
		/* do nothing */
	} else if (!netif_queue_stopped(dev)) {
		netif_stop_queue (dev);
	}

	/* The first TFDListPtr */
	if (!dr32(TFDListPtr0)) {
		dw32(TFDListPtr0, np->tx_ring_dma +
		     entry * sizeof (struct netdev_desc));
		dw32(TFDListPtr1, 0);
	}

	return NETDEV_TX_OK;
}
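/*
 * Note on tfc_vlan_tag in start_xmit(): bits 32-47 of the TFC status word
 * hold the 802.1Q tag to insert, with the VID placed at bit 32 and the
 * user priority at bit 45 (matching the shifts above), and VLANTagInsert
 * tells the MAC to tag the outgoing frame.  This is the "VLAN information
 * tagged by TFC's VID, CFI fields" referred to in rio_open() below.
 */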
static int
rio_open (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	const int irq = np->pdev->irq;
	int i;
	u16 macctrl;

	i = request_irq(irq, rio_interrupt, IRQF_SHARED, dev->name, dev);
	if (i)
		return i;

	/* Reset all logic functions */
	dw16(ASICCtrl + 2,
	     GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset);
	mdelay(10);

	/* DebugCtrl bits 4, 5 and 9 must be set */
	dw32(DebugCtrl, dr32(DebugCtrl) | 0x0230);

	/* Jumbo frame */
	if (np->jumbo != 0)
		dw16(MaxFrameSize, MAX_JUMBO + 14);

	alloc_list (dev);

	/* Set station address */
	for (i = 0; i < 6; i++)
		dw8(StationAddr0 + i, dev->dev_addr[i]);

	set_multicast (dev);
	if (np->coalesce) {
		dw32(RxDMAIntCtrl, np->rx_coalesce | np->rx_timeout << 16);
	}
	/* Set RIO to poll every N*320nsec. */
	dw8(RxDMAPollPeriod, 0x20);
	dw8(TxDMAPollPeriod, 0xff);
	dw8(RxDMABurstThresh, 0x30);
	dw8(RxDMAUrgentThresh, 0x30);
	dw32(RmonStatMask, 0x0007ffff);
	/* clear statistics */
	clear_stats (dev);

	/* VLAN supported */
	if (np->vlan) {
		/* priority field in RxDMAIntCtrl */
		dw32(RxDMAIntCtrl, dr32(RxDMAIntCtrl) | 0x7 << 10);
		/* VLANId */
		dw16(VLANId, np->vlan);
		/* Length/Type should be 0x8100 */
		dw32(VLANTag, 0x8100 << 16 | np->vlan);
		/* Enable AutoVLANuntagging, but disable AutoVLANtagging.
		   VLAN information tagged by TFC's VID, CFI fields. */
		dw32(MACCtrl, dr32(MACCtrl) | AutoVLANuntagging);
	}

	setup_timer(&np->timer, rio_timer, (unsigned long)dev);
	np->timer.expires = jiffies + 1 * HZ;
	add_timer (&np->timer);

	/* Start Tx/Rx */
	dw32(MACCtrl, dr32(MACCtrl) | StatsEnable | RxEnable | TxEnable);

	macctrl = 0;
	macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
	macctrl |= (np->full_duplex) ? DuplexSelect : 0;
	macctrl |= (np->tx_flow) ? TxFlowControlEnable : 0;
	macctrl |= (np->rx_flow) ? RxFlowControlEnable : 0;
	dw16(MACCtrl, macctrl);

	netif_start_queue (dev);

	dl2k_enable_int(np);
	return 0;
}
static void rio_hw_init(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	int i;
	u16 macctrl;

	/* Reset all logic functions */
	dw16(ASICCtrl + 2,
	     GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset);
	mdelay(10);

	rio_set_led_mode(dev);

	/* DebugCtrl bits 4, 5 and 9 must be set */
	dw32(DebugCtrl, dr32(DebugCtrl) | 0x0230);

	if (np->chip_id == CHIP_IP1000A &&
	    (np->pdev->revision == 0x40 || np->pdev->revision == 0x41)) {
		/* PHY magic taken from ipg driver, undocumented registers */
		mii_write(dev, np->phy_addr, 31, 0x0001);
		mii_write(dev, np->phy_addr, 27, 0x01e0);
		mii_write(dev, np->phy_addr, 31, 0x0002);
		mii_write(dev, np->phy_addr, 27, 0xeb8e);
		mii_write(dev, np->phy_addr, 31, 0x0000);
		mii_write(dev, np->phy_addr, 30, 0x005e);
		/* advertise 1000BASE-T half & full duplex, prefer MASTER */
		mii_write(dev, np->phy_addr, MII_CTRL1000, 0x0700);
	}

	if (np->phy_media)
		mii_set_media_pcs(dev);
	else
		mii_set_media(dev);

	/* Jumbo frame */
	if (np->jumbo != 0)
		dw16(MaxFrameSize, MAX_JUMBO + 14);

	/* Set RFDListPtr */
	dw32(RFDListPtr0, np->rx_ring_dma);
	dw32(RFDListPtr1, 0);

	/* Set station address */
	/* 16 or 32-bit access is required by TC9020 datasheet but 8-bit works
	 * too.  However, it doesn't work on IP1000A so we use 16-bit access.
	 */
	for (i = 0; i < 3; i++)
		dw16(StationAddr0 + 2 * i,
		     cpu_to_le16(((u16 *)dev->dev_addr)[i]));

	set_multicast (dev);
	if (np->coalesce) {
		dw32(RxDMAIntCtrl, np->rx_coalesce | np->rx_timeout << 16);
	}
	/* Set RIO to poll every N*320nsec. */
	dw8(RxDMAPollPeriod, 0x20);
	dw8(TxDMAPollPeriod, 0xff);
	dw8(RxDMABurstThresh, 0x30);
	dw8(RxDMAUrgentThresh, 0x30);
	dw32(RmonStatMask, 0x0007ffff);
	/* clear statistics */
	clear_stats (dev);

	/* VLAN supported */
	if (np->vlan) {
		/* priority field in RxDMAIntCtrl */
		dw32(RxDMAIntCtrl, dr32(RxDMAIntCtrl) | 0x7 << 10);
		/* VLANId */
		dw16(VLANId, np->vlan);
		/* Length/Type should be 0x8100 */
		dw32(VLANTag, 0x8100 << 16 | np->vlan);
		/* Enable AutoVLANuntagging, but disable AutoVLANtagging.
		   VLAN information tagged by TFC's VID, CFI fields. */
		dw32(MACCtrl, dr32(MACCtrl) | AutoVLANuntagging);
	}

	/* Start Tx/Rx */
	dw32(MACCtrl, dr32(MACCtrl) | StatsEnable | RxEnable | TxEnable);

	macctrl = 0;
	macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
	macctrl |= (np->full_duplex) ? DuplexSelect : 0;
	macctrl |= (np->tx_flow) ? TxFlowControlEnable : 0;
	macctrl |= (np->rx_flow) ? RxFlowControlEnable : 0;
	dw16(MACCtrl, macctrl);
}