/* sysfs store handler: set the FIFO TX starve-shutoff threshold.
 *
 * Parses @buf as an unsigned integer, stores it in priv->fifo_starve_off
 * and programs it into the low bits of the FIFO_TX_STARVE_SHUTOFF
 * register. Out-of-range values are silently ignored (the write is
 * still consumed, so @count is returned either way).
 *
 * Fix: the register accesses read "gfar_read(®s->...)" — a mojibake
 * of "&regs" into the (R) sign — restored to &regs-> below.
 */
static ssize_t gfar_set_fifo_starve_off(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct gfar_private *priv = netdev_priv(to_net_dev(dev));
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned int num = simple_strtoul(buf, NULL, 0);
	u32 temp;
	unsigned long flags;

	if (num > GFAR_MAX_FIFO_STARVE_OFF)
		return count;

	/* TX queues must be quiesced while the register is rewritten */
	local_irq_save(flags);
	lock_tx_qs(priv);

	priv->fifo_starve_off = num;

	/* Read-modify-write: only touch the starve-off field */
	temp = gfar_read(&regs->fifo_tx_starve_shutoff);
	temp &= ~FIFO_TX_STARVE_OFF_MASK;
	temp |= num;
	gfar_write(&regs->fifo_tx_starve_shutoff, temp);

	unlock_tx_qs(priv);
	local_irq_restore(flags);

	return count;
}
/* sysfs store handler: set the FIFO TX threshold.
 *
 * Parses @buf as an unsigned integer, stores it in priv->fifo_threshold
 * and programs it into the threshold field of the FIFO_TX_THR register.
 * Values above GFAR_MAX_FIFO_THRESHOLD are silently ignored; @count is
 * returned in all cases so the sysfs write completes.
 *
 * Fix: the register accesses read "gfar_read(®s->...)" — a mojibake
 * of "&regs" into the (R) sign — restored to &regs-> below.
 */
static ssize_t gfar_set_fifo_threshold(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct gfar_private *priv = netdev_priv(to_net_dev(dev));
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned int length = simple_strtoul(buf, NULL, 0);
	u32 temp;
	unsigned long flags;

	if (length > GFAR_MAX_FIFO_THRESHOLD)
		return count;

	/* TX queues must be quiesced while the register is rewritten */
	local_irq_save(flags);
	lock_tx_qs(priv);

	priv->fifo_threshold = length;

	/* Read-modify-write: only touch the threshold field */
	temp = gfar_read(&regs->fifo_tx_thr);
	temp &= ~FIFO_TX_THR_MASK;
	temp |= length;
	gfar_write(&regs->fifo_tx_thr, temp);

	unlock_tx_qs(priv);
	local_irq_restore(flags);

	return count;
}
/* Change the current ring parameters, stopping the controller if * necessary so that we don't mess things up while we're in * motion. We wait for the ring to be clean before reallocating * the rings. */ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals) { struct gfar_private *priv = netdev_priv(dev); int err = 0, i = 0; if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE) return -EINVAL; if (!is_power_of_2(rvals->rx_pending)) { printk("%s: Ring sizes must be a power of 2\n", dev->name); return -EINVAL; } if (rvals->tx_pending > GFAR_TX_MAX_RING_SIZE) return -EINVAL; if (!is_power_of_2(rvals->tx_pending)) { printk("%s: Ring sizes must be a power of 2\n", dev->name); return -EINVAL; } if (dev->flags & IFF_UP) { unsigned long flags; /* Halt TX and RX, and process the frames which * have already been received */ local_irq_save(flags); lock_tx_qs(priv); lock_rx_qs(priv); gfar_halt(dev); unlock_rx_qs(priv); unlock_tx_qs(priv); local_irq_restore(flags); for (i = 0; i < priv->num_rx_queues; i++) gfar_clean_rx_ring(priv->rx_queue[i], priv->rx_queue[i]->rx_ring_size); /* Now we take down the rings to rebuild them */ stop_gfar(dev); } /* Change the size */ for (i = 0; i < priv->num_rx_queues; i++) { priv->rx_queue[i]->rx_ring_size = rvals->rx_pending; priv->tx_queue[i]->tx_ring_size = rvals->tx_pending; priv->tx_queue[i]->num_txbdfree = priv->tx_queue[i]->tx_ring_size; } /* Rebuild the rings with the new size */ if (dev->flags & IFF_UP) { err = startup_gfar(dev); netif_tx_wake_all_queues(dev); } return err; }
/* Enable/disable RX checksum offload.
 *
 * If the interface is up, the controller is halted, pending RX frames
 * are drained and the rings are torn down before the flag is flipped,
 * then everything is brought back up.
 *
 * Returns 0 on success, -EOPNOTSUPP when the hardware lacks checksum
 * support, or the error from startup_gfar() when restarting.
 *
 * Fixes:
 *  - the second local_irq_save(flags) was a typo for
 *    local_irq_restore(flags); it left local interrupts disabled and
 *    clobbered the saved flags.
 *  - queues are now unlocked in reverse acquisition order (rx, then
 *    tx), consistent with gfar_sringparam().
 */
static int gfar_set_rx_csum(struct net_device *dev, uint32_t data)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;
	int err = 0, i = 0;

	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM))
		return -EOPNOTSUPP;

	if (dev->flags & IFF_UP) {
		/* Halt TX and RX, and process the frames which
		 * have already been received
		 */
		local_irq_save(flags);
		lock_tx_qs(priv);
		lock_rx_qs(priv);

		gfar_halt(dev);

		unlock_rx_qs(priv);
		unlock_tx_qs(priv);
		local_irq_restore(flags);

		for (i = 0; i < priv->num_rx_queues; i++)
			gfar_clean_rx_ring(priv->rx_queue[i],
					   priv->rx_queue[i]->rx_ring_size);

		/* Now we take down the rings to rebuild them */
		stop_gfar(dev);
	}

	spin_lock_irqsave(&priv->bflock, flags);
	priv->rx_csum_enable = data;
	spin_unlock_irqrestore(&priv->bflock, flags);

	if (dev->flags & IFF_UP) {
		err = startup_gfar(dev);
		netif_tx_wake_all_queues(dev);
	}

	return err;
}
/* ndo_set_features handler: apply offload feature changes.
 *
 * VLAN acceleration changes are pushed to the hardware immediately.
 * An RXCSUM change on a running interface requires a full stop/start
 * cycle: quiesce the controller, drain the RX rings, tear the rings
 * down, flip dev->features, and bring everything back up.
 *
 * Returns 0 when nothing relevant changed (or the device is down),
 * otherwise the result of startup_gfar().
 */
int gfar_set_features(struct net_device *dev, netdev_features_t features)
{
	struct gfar_private *priv = netdev_priv(dev);
	netdev_features_t changed = dev->features ^ features;
	unsigned long irqflags;
	int rc = 0;
	int q;

	if (changed & (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX))
		gfar_vlan_mode(dev, features);

	/* Only RXCSUM changes require the restart below */
	if (!(changed & NETIF_F_RXCSUM))
		return 0;

	if (!(dev->flags & IFF_UP))
		return 0;

	/* Halt TX and RX, then let already-received frames drain */
	local_irq_save(irqflags);
	lock_tx_qs(priv);
	lock_rx_qs(priv);

	gfar_halt(dev);

	unlock_tx_qs(priv);
	unlock_rx_qs(priv);
	local_irq_restore(irqflags);

	for (q = 0; q < priv->num_rx_queues; q++)
		gfar_clean_rx_ring(priv->rx_queue[q],
				   priv->rx_queue[q]->rx_ring_size);

	/* Take the rings down, flip the feature, rebuild */
	stop_gfar(dev);

	dev->features = features;

	rc = startup_gfar(dev);
	netif_tx_wake_all_queues(dev);

	return rc;
}