static int ixgbevf_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL; int i, err = 0; u32 new_rx_count, new_tx_count; if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) return -EINVAL; new_rx_count = max(ring->rx_pending, (u32)IXGBEVF_MIN_RXD); new_rx_count = min(new_rx_count, (u32)IXGBEVF_MAX_RXD); new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE); new_tx_count = max(ring->tx_pending, (u32)IXGBEVF_MIN_TXD); new_tx_count = min(new_tx_count, (u32)IXGBEVF_MAX_TXD); new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE); if ((new_tx_count == adapter->tx_ring->count) && (new_rx_count == adapter->rx_ring->count)) { /* nothing to do */ return 0; } while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state)) msleep(1); /* * If the adapter isn't up and running then just set the * new parameters and scurry for the exits. */ if (!netif_running(adapter->netdev)) { for (i = 0; i < adapter->num_tx_queues; i++) adapter->tx_ring[i].count = new_tx_count; for (i = 0; i < adapter->num_rx_queues; i++) adapter->rx_ring[i].count = new_rx_count; adapter->tx_ring_count = new_tx_count; adapter->rx_ring_count = new_rx_count; goto clear_reset; } tx_ring = kcalloc(adapter->num_tx_queues, sizeof(struct ixgbevf_ring), GFP_KERNEL); if (!tx_ring) { err = -ENOMEM; goto clear_reset; } rx_ring = kcalloc(adapter->num_rx_queues, sizeof(struct ixgbevf_ring), GFP_KERNEL); if (!rx_ring) { err = -ENOMEM; goto err_rx_setup; } ixgbevf_down(adapter); memcpy(tx_ring, adapter->tx_ring, adapter->num_tx_queues * sizeof(struct ixgbevf_ring)); for (i = 0; i < adapter->num_tx_queues; i++) { tx_ring[i].count = new_tx_count; err = ixgbevf_setup_tx_resources(adapter, &tx_ring[i]); if (err) { while (i) { i--; ixgbevf_free_tx_resources(adapter, &tx_ring[i]); } goto err_tx_ring_setup; } tx_ring[i].v_idx = adapter->tx_ring[i].v_idx; } memcpy(rx_ring, 
adapter->rx_ring, adapter->num_rx_queues * sizeof(struct ixgbevf_ring)); for (i = 0; i < adapter->num_rx_queues; i++) { rx_ring[i].count = new_rx_count; err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]); if (err) { while (i) { i--; ixgbevf_free_rx_resources(adapter, &rx_ring[i]); } goto err_rx_ring_setup; } rx_ring[i].v_idx = adapter->rx_ring[i].v_idx; } /* * Only switch to new rings if all the prior allocations * and ring setups have succeeded. */ kfree(adapter->tx_ring); adapter->tx_ring = tx_ring; adapter->tx_ring_count = new_tx_count; kfree(adapter->rx_ring); adapter->rx_ring = rx_ring; adapter->rx_ring_count = new_rx_count; /* success! */ ixgbevf_up(adapter); goto clear_reset; err_rx_ring_setup: for(i = 0; i < adapter->num_tx_queues; i++) ixgbevf_free_tx_resources(adapter, &tx_ring[i]); err_tx_ring_setup: kfree(rx_ring); err_rx_setup: kfree(tx_ring); clear_reset: clear_bit(__IXGBEVF_RESETTING, &adapter->state); return err; }
/**
 * ixgbevf_set_ringparam - change the Tx/Rx descriptor ring sizes (ethtool -G)
 * @netdev: network interface device structure
 * @ring: requested ring parameters from ethtool
 *
 * Clamps the requested counts to the supported range, then swaps in
 * resized descriptor rings.  Replacement rings are cloned and fully
 * allocated before the interface is taken down, so an allocation
 * failure leaves the device running on its original rings.
 *
 * Returns 0 on success or when nothing changed, -EINVAL for unsupported
 * ring types, -ENOMEM on allocation failure, or the error from ring setup.
 */
static int ixgbevf_set_ringparam(struct net_device *netdev,
				 struct ethtool_ringparam *ring)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL;
	u32 new_rx_count, new_tx_count;
	int i, err = 0;

	/* mini and jumbo rings are not supported by this hardware */
	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	/* clamp requested counts to the hw limits and round up to the
	 * required descriptor multiple
	 */
	new_tx_count = max_t(u32, ring->tx_pending, IXGBEVF_MIN_TXD);
	new_tx_count = min_t(u32, new_tx_count, IXGBEVF_MAX_TXD);
	new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);

	new_rx_count = max_t(u32, ring->rx_pending, IXGBEVF_MIN_RXD);
	new_rx_count = min_t(u32, new_rx_count, IXGBEVF_MAX_RXD);
	new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);

	/* if nothing to do return success */
	if ((new_tx_count == adapter->tx_ring_count) &&
	    (new_rx_count == adapter->rx_ring_count))
		return 0;

	/* serialize against any other reset in progress */
	while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	/* interface is down: just record the new counts and leave;
	 * nothing is allocated here
	 */
	if (!netif_running(adapter->netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->count = new_rx_count;
		adapter->tx_ring_count = new_tx_count;
		adapter->rx_ring_count = new_rx_count;
		goto clear_reset;
	}

	if (new_tx_count != adapter->tx_ring_count) {
		tx_ring = vmalloc(adapter->num_tx_queues * sizeof(*tx_ring));
		if (!tx_ring) {
			err = -ENOMEM;
			goto clear_reset;
		}

		for (i = 0; i < adapter->num_tx_queues; i++) {
			/* clone ring and setup updated count */
			tx_ring[i] = *adapter->tx_ring[i];
			tx_ring[i].count = new_tx_count;
			err = ixgbevf_setup_tx_resources(&tx_ring[i]);
			if (err) {
				/* unwind the clones set up so far; NULL
				 * tx_ring so clear_reset does not free twice
				 */
				while (i) {
					i--;
					ixgbevf_free_tx_resources(&tx_ring[i]);
				}

				vfree(tx_ring);
				tx_ring = NULL;

				goto clear_reset;
			}
		}
	}

	if (new_rx_count != adapter->rx_ring_count) {
		rx_ring = vmalloc(adapter->num_rx_queues * sizeof(*rx_ring));
		if (!rx_ring) {
			err = -ENOMEM;
			/* Tx clones (if any) are released at clear_reset */
			goto clear_reset;
		}

		for (i = 0; i < adapter->num_rx_queues; i++) {
			/* clone ring and setup updated count */
			rx_ring[i] = *adapter->rx_ring[i];
			rx_ring[i].count = new_rx_count;
			err = ixgbevf_setup_rx_resources(&rx_ring[i]);
			if (err) {
				/* unwind the clones set up so far */
				while (i) {
					i--;
					ixgbevf_free_rx_resources(&rx_ring[i]);
				}

				vfree(rx_ring);
				rx_ring = NULL;

				goto clear_reset;
			}
		}
	}

	/* bring interface down to prepare for update */
	ixgbevf_down(adapter);

	/* Tx: free the old resources, copy the clone contents into the
	 * live ring structures, then drop the clone array
	 */
	if (tx_ring) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			ixgbevf_free_tx_resources(adapter->tx_ring[i]);
			*adapter->tx_ring[i] = tx_ring[i];
		}
		adapter->tx_ring_count = new_tx_count;

		vfree(tx_ring);
		tx_ring = NULL;
	}

	/* Rx: same swap as Tx above */
	if (rx_ring) {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			ixgbevf_free_rx_resources(adapter->rx_ring[i]);
			*adapter->rx_ring[i] = rx_ring[i];
		}
		adapter->rx_ring_count = new_rx_count;

		vfree(rx_ring);
		rx_ring = NULL;
	}

	/* restore interface using new values */
	ixgbevf_up(adapter);

clear_reset:
	/* free Tx resources if Rx error is encountered: tx_ring is only
	 * non-NULL here when Tx clones were built but never swapped in
	 */
	if (tx_ring) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			ixgbevf_free_tx_resources(&tx_ring[i]);
		vfree(tx_ring);
	}

	clear_bit(__IXGBEVF_RESETTING, &adapter->state);
	return err;
}