/* mlx5e_select_queue - ndo_select_queue: pick the Tx queue for an skb
 * @dev: transmitting net device
 * @skb: packet being transmitted
 * @accel_priv: unused here
 * @fallback: core hash-based queue selection fallback
 *
 * Precedence: a rate-limited "assigned" queue when configured, then the
 * plain fallback result when no traffic classes exist, otherwise the
 * per-TC queue map indexed by channel and VLAN user priority.
 */
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
		       void *accel_priv, select_queue_fallback_t fallback)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int channel_ix = fallback(dev, skb);
	int up = 0;	/* VLAN user priority (PCP); default 0 when untagged */

	if (priv->params.num_rl_txqs) {
		u16 ix = mlx5e_select_queue_assigned(priv, skb);

		if (ix) {
			/* NOTE(review): skb->sk is dereferenced without a
			 * NULL check here — presumably assigned queues are
			 * only returned for socket-backed skbs; confirm
			 * against mlx5e_select_queue_assigned().
			 */
			sk_tx_queue_set(skb->sk, ix);
			return ix;
		}
	}

	/* no TCs configured: the fallback index is already final */
	if (!netdev_get_num_tc(dev))
		return channel_ix;

	if (skb_vlan_tag_present(skb))
		up = skb->vlan_tci >> VLAN_PRIO_SHIFT;

	/* channel_ix can be larger than num_channels since
	 * dev->num_real_tx_queues = num_channels * num_tc
	 */
	if (channel_ix >= priv->params.num_channels)
		channel_ix = reciprocal_scale(channel_ix,
					      priv->params.num_channels);

	return priv->tc_to_txq_map[channel_ix][up];
}
/** * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB * @adapter: board private structure to initialize * * Cache the descriptor ring offsets for DCB to the assigned rings. * **/ static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) { struct net_device *dev = adapter->netdev; unsigned int tx_idx, rx_idx; int tc, offset, rss_i, i; u8 num_tcs = netdev_get_num_tc(dev); /* verify we have DCB queueing enabled before proceeding */ if (num_tcs <= 1) return false; rss_i = adapter->ring_feature[RING_F_RSS].indices; for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) { ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx); for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) { adapter->tx_ring[offset + i]->reg_idx = tx_idx; adapter->rx_ring[offset + i]->reg_idx = rx_idx; adapter->tx_ring[offset + i]->dcb_tc = tc; adapter->rx_ring[offset + i]->dcb_tc = tc; } } return true; }
/* mqprio_queue_get - map a class handle to its underlying netdev queue.
 * Class ids above the TC classes identify individual Tx queues; returns
 * NULL when the derived queue index is out of range.
 */
static struct netdev_queue *mqprio_queue_get(struct Qdisc *sch,
					     unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long qidx = cl - 1 - netdev_get_num_tc(dev);

	return (qidx < dev->num_tx_queues) ?
	       netdev_get_tx_queue(dev, qidx) : NULL;
}
/* mlx4_en_select_queue - ndo_select_queue for mlx4_en.
 * Uses the core pick; when no traffic classes are configured the result
 * is folded into the per-UP ring range with a modulo.
 */
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
			 struct net_device *sb_dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	u16 picked = netdev_pick_tx(dev, skb, NULL);

	if (!netdev_get_num_tc(dev))
		picked %= priv->num_tx_rings_p_up;

	return picked;
}
/* mlx4_en_select_queue - ndo_select_queue for mlx4_en (fallback variant).
 * Uses the provided fallback; when no traffic classes are configured the
 * result is folded into the per-UP ring range with a modulo.
 */
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
			 void *accel_priv, select_queue_fallback_t fallback)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	u16 queue = fallback(dev, skb);

	if (!netdev_get_num_tc(dev))
		queue %= priv->num_tx_rings_p_up;

	return queue;
}
/* ixgbe_dcbnl_ieee_setets - apply an IEEE 802.1Qaz ETS configuration
 * @dev: netdev being configured
 * @ets: ETS parameters (TC bandwidth/TSA tables and prio->TC map)
 *
 * Caches the requested configuration, reallocates queues if the number of
 * traffic classes changed (or resets the device when only the prio->TC
 * mapping changed), then programs the hardware ETS registers.
 * Returns 0 on success or a negative errno.
 */
static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
				   struct ieee_ets *ets)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
	int i, err = 0;
	__u8 max_tc = 0;
	__u8 map_chg = 0;

	/* only valid while operating in IEEE DCBX mode */
	if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
		return -EINVAL;

	if (!adapter->ixgbe_ieee_ets) {
		adapter->ixgbe_ieee_ets = kmalloc(sizeof(struct ieee_ets),
						  GFP_KERNEL);
		if (!adapter->ixgbe_ieee_ets)
			return -ENOMEM;

		/* initialize UP2TC mappings to invalid value */
		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
			adapter->ixgbe_ieee_ets->prio_tc[i] =
				IEEE_8021QAZ_MAX_TCS;
		/* if possible update UP2TC mappings from HW */
		ixgbe_dcb_read_rtrup2tc(&adapter->hw,
					adapter->ixgbe_ieee_ets->prio_tc);
	}

	/* find the highest TC index in use and detect any map change */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		if (ets->prio_tc[i] > max_tc)
			max_tc = ets->prio_tc[i];
		if (ets->prio_tc[i] != adapter->ixgbe_ieee_ets->prio_tc[i])
			map_chg = 1;
	}

	memcpy(adapter->ixgbe_ieee_ets, ets, sizeof(*adapter->ixgbe_ieee_ets));

	/* convert highest TC index into a TC count */
	if (max_tc)
		max_tc++;

	if (max_tc > adapter->dcb_cfg.num_tcs.pg_tcs)
		return -EINVAL;

	/* reallocate queues when the TC count changed; otherwise a device
	 * reset suffices when only the prio->TC mapping changed
	 */
	if (max_tc != netdev_get_num_tc(dev))
		err = ixgbe_setup_tc(dev, max_tc);
	else if (map_chg)
		ixgbe_dcbnl_devreset(dev);

	if (err)
		goto err_out;

	err = ixgbe_dcb_hw_ets(&adapter->hw, ets, max_frame);
err_out:
	return err;
}
static int bnxt_set_channels(struct net_device *dev, struct ethtool_channels *channel) { struct bnxt *bp = netdev_priv(dev); int max_rx_rings, max_tx_rings, tcs; u32 rc = 0; if (channel->other_count || channel->combined_count || !channel->rx_count || !channel->tx_count) return -EINVAL; bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings); tcs = netdev_get_num_tc(dev); if (tcs > 1) max_tx_rings /= tcs; if (channel->rx_count > max_rx_rings || channel->tx_count > max_tx_rings) return -EINVAL; if (netif_running(dev)) { if (BNXT_PF(bp)) { /* TODO CHIMP_FW: Send message to all VF's * before PF unload */ } rc = bnxt_close_nic(bp, true, false); if (rc) { netdev_err(bp->dev, "Set channel failure rc :%x\n", rc); return rc; } } bp->rx_nr_rings = channel->rx_count; bp->tx_nr_rings_per_tc = channel->tx_count; bp->tx_nr_rings = bp->tx_nr_rings_per_tc; if (tcs > 1) bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs; bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings); bp->num_stat_ctxs = bp->cp_nr_rings; if (netif_running(dev)) { rc = bnxt_open_nic(bp, true, false); if ((!rc) && BNXT_PF(bp)) { /* TODO CHIMP_FW: Send message to all VF's * to renable */ } } return rc; }
/* fm10k_max_channels - maximum combined channel count for this device.
 * With multiple traffic classes, report the largest power-of-two number
 * of queues available per TC; otherwise the raw hardware queue limit.
 */
static unsigned int fm10k_max_channels(struct net_device *dev)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	u8 num_tcs = netdev_get_num_tc(dev);
	unsigned int limit = interface->hw.mac.max_queues;

	if (num_tcs > 1)
		limit = 1 << (fls(limit / num_tcs) - 1);

	return limit;
}
/* ixgbe_get_first_reg_idx - Return first register index associated with ring
 * @adapter: board private structure
 * @tc: traffic class to look up
 * @tx: out - first Tx queue register index for this TC
 * @rx: out - first Rx queue register index for this TC
 *
 * The Tx/Rx queue layout per TC differs by MAC generation and by whether
 * more than four TCs are configured; unknown MACs leave both indices 0.
 */
static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
				    unsigned int *tx, unsigned int *rx)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u8 num_tcs = netdev_get_num_tc(dev);

	*tx = 0;
	*rx = 0;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/* 4 TxQs and 8 RxQs per TC */
		*tx = tc << 2;
		*rx = tc << 3;
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		if (num_tcs > 4) {
			/* 8-TC mode: 32/16/8 TxQs per TC, 16 RxQs per TC */
			if (tc < 3) {
				*tx = tc << 5;
				*rx = tc << 4;
			} else if (tc < 5) {
				*tx = ((tc + 2) << 4);
				*rx = tc << 4;
			} else if (tc < num_tcs) {
				*tx = ((tc + 8) << 3);
				*rx = tc << 4;
			}
		} else {
			/* 4-TC mode: 32 RxQs per TC, Tx offsets fixed */
			*rx = tc << 5;
			switch (tc) {
			case 0:
				*tx = 0;
				break;
			case 1:
				*tx = 64;
				break;
			case 2:
				*tx = 96;
				break;
			case 3:
				*tx = 112;
				break;
			default:
				break;
			}
		}
		break;
	default:
		break;
	}
}
/**
 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.  If MSI-X acquisition
 * fails, DCB, SR-IOV and RSS are disabled (they all require MSI-X),
 * the queue layout is recomputed for a single vector, and plain MSI
 * is attempted with legacy interrupts as the final fallback.
 **/
static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
{
	int err;

	/* We will try to get MSI-X interrupts first */
	if (!ixgbe_acquire_msix_vectors(adapter))
		return;

	/* At this point, we do not have MSI-X capabilities. We need to
	 * reconfigure or disable various features which require MSI-X
	 * capability.
	 */

	/* Disable DCB unless we only have a single traffic class */
	if (netdev_get_num_tc(adapter->netdev) > 1) {
		e_dev_warn("Number of DCB TCs exceeds number of available queues. Disabling DCB support.\n");
		netdev_reset_tc(adapter->netdev);

		/* 82598 flow control was repurposed for DCB; restore it */
		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			adapter->hw.fc.requested_mode = adapter->last_lfc_mode;

		adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
		adapter->temp_dcb_cfg.pfc_mode_enable = false;
		adapter->dcb_cfg.pfc_mode_enable = false;
	}

	adapter->dcb_cfg.num_tcs.pg_tcs = 1;
	adapter->dcb_cfg.num_tcs.pfc_tcs = 1;

	/* Disable SR-IOV support */
	e_dev_warn("Disabling SR-IOV support\n");
	ixgbe_disable_sriov(adapter);

	/* Disable RSS */
	e_dev_warn("Disabling RSS support\n");
	adapter->ring_feature[RING_F_RSS].limit = 1;

	/* recalculate number of queues now that many features have been
	 * changed or disabled.
	 */
	ixgbe_set_num_queues(adapter);
	adapter->num_q_vectors = 1;

	err = pci_enable_msi(adapter->pdev);
	if (err)
		e_dev_warn("Failed to allocate MSI interrupt, falling back to legacy. Error: %d\n",
			   err);
	else
		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
}
/* ixgbe_get_first_reg_idx - Return first register index associated with ring
 * @adapter: board private structure
 * @tc: traffic class to look up
 * @tx: out - first Tx queue register index for this TC
 * @rx: out - first Rx queue register index for this TC
 *
 * Queue layout per TC depends on the MAC generation and on whether more
 * than four TCs are configured; unknown MACs leave both indices 0.
 *
 * Fix: the 82599EB/X540 case was missing its terminating break, silently
 * falling through into the default label.  Harmless today (default only
 * breaks) but fragile and against kernel switch conventions.
 */
static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
				    unsigned int *tx, unsigned int *rx)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u8 num_tcs = netdev_get_num_tc(dev);

	*tx = 0;
	*rx = 0;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/* TxQs/TC: 4	RxQs/TC: 8 */
		*tx = tc << 2; /* 0, 4, 8, 12, 16, 20, 24, 28 */
		*rx = tc << 3; /* 0, 8, 16, 24, 32, 40, 48, 56 */
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		if (num_tcs > 4) {
			/*
			 * TCs    : TC0/1 TC2/3 TC4-7
			 * TxQs/TC:    32    16     8
			 * RxQs/TC:    16    16    16
			 */
			*rx = tc << 4;
			if (tc < 3)
				*tx = tc << 5;		/* 0, 32, 64 */
			else if (tc < 5)
				*tx = (tc + 2) << 4;	/* 80, 96 */
			else
				*tx = (tc + 8) << 3;	/* 104, 112, 120 */
		} else {
			/*
			 * TCs    : TC0 TC1 TC2/3
			 * TxQs/TC:  64  32    16
			 * RxQs/TC:  32  32    32
			 */
			*rx = tc << 5;
			if (tc < 2)
				*tx = tc << 6;		/* 0, 64 */
			else
				*tx = (tc + 4) << 4;	/* 96, 112 */
		}
		break;
	default:
		break;
	}
}
static void bnxt_get_channels(struct net_device *dev, struct ethtool_channels *channel) { struct bnxt *bp = netdev_priv(dev); int max_rx_rings, max_tx_rings, tcs; bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings); tcs = netdev_get_num_tc(dev); if (tcs > 1) max_tx_rings /= tcs; channel->max_rx = max_rx_rings; channel->max_tx = max_tx_rings; channel->max_other = 0; channel->max_combined = 0; channel->rx_count = bp->rx_nr_rings; channel->tx_count = bp->tx_nr_rings_per_tc; }
/* ixgbe_dcbnl_ieee_setets - apply an IEEE 802.1Qaz ETS configuration
 * @dev: netdev being configured
 * @ets: ETS parameters (TC bandwidth/TSA tables and prio->TC map)
 *
 * Caches the requested configuration, reallocates queues if the number of
 * traffic classes changed, updates the netdev prio->TC map, and programs
 * the hardware ETS registers.  Returns 0 or a negative errno.
 */
static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
				   struct ieee_ets *ets)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
	int i, err = 0;
	__u8 max_tc = 0;

	/* only valid while operating in IEEE DCBX mode */
	if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
		return -EINVAL;

	if (!adapter->ixgbe_ieee_ets) {
		adapter->ixgbe_ieee_ets = kmalloc(sizeof(struct ieee_ets),
						  GFP_KERNEL);
		if (!adapter->ixgbe_ieee_ets)
			return -ENOMEM;
	}

	memcpy(adapter->ixgbe_ieee_ets, ets, sizeof(*adapter->ixgbe_ieee_ets));

	/* find the highest TC index referenced by the prio->TC map */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		if (ets->prio_tc[i] > max_tc)
			max_tc = ets->prio_tc[i];
	}

	/* convert highest TC index into a TC count */
	if (max_tc)
		max_tc++;

	if (max_tc > adapter->dcb_cfg.num_tcs.pg_tcs)
		return -EINVAL;

	/* reallocate queues only when the TC count changed */
	if (max_tc != netdev_get_num_tc(dev))
		err = ixgbe_setup_tc(dev, max_tc);

	if (err)
		goto err_out;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		netdev_set_prio_tc_map(dev, i, ets->prio_tc[i]);

	err = ixgbe_dcb_hw_ets(&adapter->hw, ets, max_frame);
err_out:
	return err;
}
/* ixgbe_set_dcb_queues - size and map queues for DCB operation
 * @adapter: board private structure
 *
 * Distributes the available Tx queues evenly across the configured
 * traffic classes (bounded by DCB_QUEUE_CAP and the online CPU count),
 * records the resulting queue counts, and maps FCoE onto its TC's queue
 * range when enabled.  Returns false when no TCs are configured.
 */
static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
{
	int per_tc_q, q, i, offset = 0;
	struct net_device *dev = adapter->netdev;
	int tcs = netdev_get_num_tc(dev);

	if (!tcs)
		return false;

	/* Map queue offset and counts onto allocated tx queues */
	per_tc_q = min_t(unsigned int, dev->num_tx_queues / tcs,
			 DCB_QUEUE_CAP);
	q = min_t(int, num_online_cpus(), per_tc_q);

	for (i = 0; i < tcs; i++) {
		netdev_set_tc_queue(dev, i, q, offset);
		offset += q;
	}

	adapter->num_tx_queues = q * tcs;
	adapter->num_rx_queues = q * tcs;

#ifdef IXGBE_FCOE
	/* FCoE enabled queues require special configuration indexed
	 * by feature specific indices and mask. Here we map FCoE
	 * indices onto the DCB queue pairs allowing FCoE to own
	 * configuration later.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		u8 prio_tc[MAX_USER_PRIORITY] = {0};
		int tc;
		struct ixgbe_ring_feature *f =
					&adapter->ring_feature[RING_F_FCOE];

		ixgbe_dcb_unpack_map(&adapter->dcb_cfg, DCB_TX_CONFIG,
				     prio_tc);
		tc = prio_tc[adapter->fcoe.up];
		f->indices = dev->tc_to_txq[tc].count;
		f->mask = dev->tc_to_txq[tc].offset;
	}
#endif

	return true;
}
static int fm10k_set_channels(struct net_device *dev, struct ethtool_channels *ch) { struct fm10k_intfc *interface = netdev_priv(dev); unsigned int count = ch->combined_count; struct fm10k_hw *hw = &interface->hw; /* verify they are not requesting separate vectors */ if (!count || ch->rx_count || ch->tx_count) return -EINVAL; /* verify other_count has not changed */ if (ch->other_count != NON_Q_VECTORS(hw)) return -EINVAL; /* verify the number of channels does not exceed hardware limits */ if (count > fm10k_max_channels(dev)) return -EINVAL; interface->ring_feature[RING_F_RSS].limit = count; /* use setup TC to update any traffic class queue mapping */ return fm10k_setup_tc(dev, netdev_get_num_tc(dev)); }
static void bnxt_get_channels(struct net_device *dev, struct ethtool_channels *channel) { struct bnxt *bp = netdev_priv(dev); int max_rx_rings, max_tx_rings, tcs; bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true); channel->max_combined = max_rx_rings; bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false); tcs = netdev_get_num_tc(dev); if (tcs > 1) max_tx_rings /= tcs; channel->max_rx = max_rx_rings; channel->max_tx = max_tx_rings; channel->max_other = 0; if (bp->flags & BNXT_FLAG_SHARED_RINGS) { channel->combined_count = bp->rx_nr_rings; } else { channel->rx_count = bp->rx_nr_rings; channel->tx_count = bp->tx_nr_rings_per_tc; } }
/** * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB * @adapter: board private structure to initialize * * Cache the descriptor ring offsets for DCB to the assigned rings. * **/ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) { struct net_device *dev = adapter->netdev; int i, j, k; u8 num_tcs = netdev_get_num_tc(dev); if (!num_tcs) return false; for (i = 0, k = 0; i < num_tcs; i++) { unsigned int tx_s, rx_s; u16 count = dev->tc_to_txq[i].count; ixgbe_get_first_reg_idx(adapter, i, &tx_s, &rx_s); for (j = 0; j < count; j++, k++) { adapter->tx_ring[k]->reg_idx = tx_s + j; adapter->rx_ring[k]->reg_idx = rx_s + j; adapter->tx_ring[k]->dcb_tc = i; adapter->rx_ring[k]->dcb_tc = i; } } return true; }
/* ixgbe_set_dcb_queues - size and map queues for DCB operation
 * @adapter: board private structure
 *
 * Picks the RSS queue count per traffic class based on MAC generation and
 * TC count, records the RSS feature mask/indices, disables ATR (not
 * supported with multiple TCs), maps FCoE onto its TC's queue range when
 * enabled, and programs the per-TC netdev queue mapping.
 * Returns false when fewer than two TCs are configured.
 */
static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_ring_feature *f;
	int rss_i, rss_m, i;
	int tcs;

	/* Map queue offset and counts onto allocated tx queues */
	tcs = netdev_get_num_tc(dev);

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* determine the upper limit for our current DCB mode */
	rss_i = dev->num_tx_queues / tcs;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 8 TC w/ 4 queues per TC */
		rss_i = min_t(u16, rss_i, 4);
		rss_m = IXGBE_RSS_4Q_MASK;
	} else if (tcs > 4) {
		/* 8 TC w/ 8 queues per TC */
		rss_i = min_t(u16, rss_i, 8);
		rss_m = IXGBE_RSS_8Q_MASK;
	} else {
		/* 4 TC w/ 16 queues per TC */
		rss_i = min_t(u16, rss_i, 16);
		rss_m = IXGBE_RSS_16Q_MASK;
	}

	/* set RSS mask and indices */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = min_t(int, rss_i, f->limit);
	f->indices = rss_i;
	f->mask = rss_m;

	/* disable ATR as it is not supported when multiple TCs are enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/* FCoE enabled queues require special configuration indexed
	 * by feature specific indices and offset. Here we map FCoE
	 * indices onto the DCB queue pairs allowing FCoE to own
	 * configuration later.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		u8 tc = ixgbe_fcoe_get_tc(adapter);

		f = &adapter->ring_feature[RING_F_FCOE];
		f->indices = min_t(u16, rss_i, f->limit);
		f->offset = rss_i * tc;
	}

#endif /* IXGBE_FCOE */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(dev, i, rss_i, rss_i * i);

	adapter->num_tx_queues = rss_i * tcs;
	adapter->num_rx_queues = rss_i * tcs;

	return true;
}
/**
 * ixgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for SR-IOV to the assigned rings.  It
 * will also try to cache the proper offsets if RSS/FCoE are enabled along
 * with VMDq.  Ring register indices advance pool by pool (vmdq->mask
 * delimits queues within a pool), skipping to the next pool once a pool's
 * per-TC queue budget is exhausted.  Separate FCoE rings, if present, are
 * relocated into the pools beyond the VMDq range, offset by the FCoE TC.
 *
 **/
static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	int i;
	u16 reg_idx;
	u8 tcs = netdev_get_num_tc(adapter->netdev);

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* start at VMDq register offset for SR-IOV enabled setups */
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->rx_ring[i]->reg_idx = reg_idx;
	}

	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* nothing to do if FCoE is disabled */
	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		return true;

	/* The work is already done if the FCoE ring is shared */
	if (fcoe->offset < tcs)
		return true;

	/* The FCoE rings exist separately, we need to move their reg_idx */
	if (fcoe->indices) {
		u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
		u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter);

		/* FCoE rings start in the first pool past the VMDq range */
		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_rx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->rx_ring[i]->reg_idx = reg_idx;
			reg_idx++;
		}

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_tx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->tx_ring[i]->reg_idx = reg_idx;
			reg_idx++;
		}
	}

#endif /* IXGBE_FCOE */
	return true;
}
/**
 * ixgbe_set_dcb_sriov_queues: Allocate queues for SR-IOV devices w/ DCB
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root IO Virtualiztion) is enabled, allocate queues
 * and VM pools where appropriate.  Also assign queues based on DCB
 * priorities and map accordingly: pool count and queue mask depend on
 * whether more than four TCs are configured (16 pools of 8 TCs vs 32
 * pools of 4 TCs).  RSS and ATR are disabled (incompatible with
 * DCB+VMDq) and FCoE queues are placed after the VMDq range when there
 * is room, otherwise they share the FCoE TC's queue.
 *
 **/
static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
{
	int i;
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif
	u8 tcs = netdev_get_num_tc(adapter->netdev);

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* 16 pools w/ 8 TC per pool */
	if (tcs > 4) {
		vmdq_i = min_t(u16, vmdq_i, 16);
		vmdq_m = IXGBE_82599_VMDQ_8Q_MASK;
	/* 32 pools w/ 4 TC per pool */
	} else {
		vmdq_i = min_t(u16, vmdq_i, 32);
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
	}

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i;

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/*
	 * We do not support DCB, VMDq, and RSS all simultaneously
	 * so we will disable RSS since it is the lowest priority
	 */
	adapter->ring_feature[RING_F_RSS].indices = 1;
	adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = tcs;

	adapter->num_tx_queues = vmdq_i * tcs;
	adapter->num_rx_queues = vmdq_i * tcs;

#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * tcs;

			/* add queues to adapter */
			adapter->num_tx_queues += fcoe_i;
			adapter->num_rx_queues += fcoe_i;
		} else if (tcs > 1) {
			/* use queue belonging to FcoE TC */
			fcoe->indices = 1;
			fcoe->offset = ixgbe_fcoe_get_tc(adapter);
		} else {
			adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;

			fcoe->indices = 0;
			fcoe->offset = 0;
		}
	}

#endif /* IXGBE_FCOE */
	/* configure TC to queue mapping */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(adapter->netdev, i, 1, i);

	return true;
}
/**
 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.  The MSI-X vector budget
 * is capped by CPU count and the MAC's MSI-X limit; on failure DCB,
 * SR-IOV and RSS are disabled, the queue layout is recomputed for a
 * single vector, and plain MSI is attempted with legacy interrupts as
 * the final fallback.
 **/
static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int vector, v_budget, err;

	if (!(adapter->flags & IXGBE_FLAG_MSIX_CAPABLE))
		goto try_msi;

	/*
	 * It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPU's. So let's be conservative and only ask for
	 * (roughly) the same number of vectors as there are CPU's.
	 * The default is to use pairs of vectors.
	 */
	v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
	v_budget = min_t(int, v_budget, num_online_cpus());
	v_budget += NON_Q_VECTORS;

	/*
	 * At the same time, hardware can only support a maximum of
	 * hw.mac->max_msix_vectors vectors. With features
	 * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
	 * descriptor queues supported by our device. Thus, we cap it off in
	 * those rare cases where the cpu count also exceeds our vector limit.
	 */
	v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors);

	/* A failure in MSI-X entry allocation isn't fatal, but it does
	 * mean we disable MSI-X capabilities of the adapter.
	 */
	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry),
					GFP_KERNEL);
	if (adapter->msix_entries) {
		for (vector = 0; vector < v_budget; vector++)
			adapter->msix_entries[vector].entry = vector;

		ixgbe_acquire_msix_vectors(adapter, v_budget);

		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
			return;
	}

try_msi:
	/* disable DCB if number of TCs exceeds 1 */
	if (netdev_get_num_tc(adapter->netdev) > 1) {
		e_err(probe, "num TCs exceeds number of queues - disabling DCB\n");
		netdev_reset_tc(adapter->netdev);

		/* 82598 flow control was repurposed for DCB; restore it */
		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			adapter->hw.fc.requested_mode = adapter->last_lfc_mode;

		adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
		adapter->temp_dcb_cfg.pfc_mode_enable = false;
		adapter->dcb_cfg.pfc_mode_enable = false;
	}
	adapter->dcb_cfg.num_tcs.pg_tcs = 1;
	adapter->dcb_cfg.num_tcs.pfc_tcs = 1;

	/* disable SR-IOV */
	ixgbe_disable_sriov(adapter);

	/* disable RSS */
	adapter->ring_feature[RING_F_RSS].limit = 1;

	/* recompute queue layout now that features have been disabled */
	ixgbe_set_num_queues(adapter);
	adapter->num_q_vectors = 1;

	if (!(adapter->flags & IXGBE_FLAG_MSI_CAPABLE))
		return;

	err = pci_enable_msi(adapter->pdev);
	if (err) {
		netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
			     "Unable to allocate MSI interrupt, "
			     "falling back to legacy. Error: %d\n", err);
		return;
	}
	adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
}
static int bnxt_set_channels(struct net_device *dev, struct ethtool_channels *channel) { struct bnxt *bp = netdev_priv(dev); int max_rx_rings, max_tx_rings, tcs; u32 rc = 0; bool sh = false; if (channel->other_count) return -EINVAL; if (!channel->combined_count && (!channel->rx_count || !channel->tx_count)) return -EINVAL; if (channel->combined_count && (channel->rx_count || channel->tx_count)) return -EINVAL; if (channel->combined_count) sh = true; bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh); tcs = netdev_get_num_tc(dev); if (tcs > 1) max_tx_rings /= tcs; if (sh && (channel->combined_count > max_rx_rings || channel->combined_count > max_tx_rings)) return -ENOMEM; if (!sh && (channel->rx_count > max_rx_rings || channel->tx_count > max_tx_rings)) return -ENOMEM; if (netif_running(dev)) { if (BNXT_PF(bp)) { /* TODO CHIMP_FW: Send message to all VF's * before PF unload */ } rc = bnxt_close_nic(bp, true, false); if (rc) { netdev_err(bp->dev, "Set channel failure rc :%x\n", rc); return rc; } } if (sh) { bp->flags |= BNXT_FLAG_SHARED_RINGS; bp->rx_nr_rings = channel->combined_count; bp->tx_nr_rings_per_tc = channel->combined_count; } else { bp->flags &= ~BNXT_FLAG_SHARED_RINGS; bp->rx_nr_rings = channel->rx_count; bp->tx_nr_rings_per_tc = channel->tx_count; } bp->tx_nr_rings = bp->tx_nr_rings_per_tc; if (tcs > 1) bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs; bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : bp->tx_nr_rings + bp->rx_nr_rings; bp->num_stat_ctxs = bp->cp_nr_rings; /* After changing number of rx channels, update NTUPLE feature. */ netdev_update_features(dev); if (netif_running(dev)) { rc = bnxt_open_nic(bp, true, false); if ((!rc) && BNXT_PF(bp)) { /* TODO CHIMP_FW: Send message to all VF's * to renable */ } } return rc; }
/* ixgbe_dcbnl_set_state - dcbnl callback to enable/disable DCB
 * @netdev: device being configured
 * @state: nonzero to enable DCB, zero to disable
 *
 * Tears down and rebuilds the interrupt scheme around the TC change,
 * adjusting flow-control and flow-director flags per MAC generation.
 * Returns 0 on success, 1 when enabling fails for lack of MSI-X
 * (dcbnl convention: nonzero u8 indicates failure).
 */
static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
{
	u8 err = 0;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (state > 0) {
		/* Turn on DCB */
		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
			goto out;

		/* DCB requires MSI-X vectors */
		if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
			e_err(drv, "Enable failed, needs MSI-X\n");
			err = 1;
			goto out;
		}

		if (netif_running(netdev))
			netdev->netdev_ops->ndo_stop(netdev);
		ixgbe_clear_interrupt_scheme(adapter);

		switch (adapter->hw.mac.type) {
		case ixgbe_mac_82598EB:
			/* 82598: remember LFC mode, DCB uses PFC instead */
			adapter->last_lfc_mode = adapter->hw.fc.current_mode;
			adapter->hw.fc.requested_mode = ixgbe_fc_none;
			break;
		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
			/* flow director is incompatible with DCB */
			adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
			adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
			break;
		default:
			break;
		}

		adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
		if (!netdev_get_num_tc(netdev))
			ixgbe_setup_tc(netdev, MAX_TRAFFIC_CLASS);

		ixgbe_init_interrupt_scheme(adapter);
		if (netif_running(netdev))
			netdev->netdev_ops->ndo_open(netdev);
	} else {
		/* Turn off DCB */
		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
			if (netif_running(netdev))
				netdev->netdev_ops->ndo_stop(netdev);
			ixgbe_clear_interrupt_scheme(adapter);

			/* restore pre-DCB flow control settings */
			adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
			adapter->temp_dcb_cfg.pfc_mode_enable = false;
			adapter->dcb_cfg.pfc_mode_enable = false;
			adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
			switch (adapter->hw.mac.type) {
			case ixgbe_mac_82599EB:
			case ixgbe_mac_X540:
				adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
				break;
			default:
				break;
			}

			ixgbe_setup_tc(netdev, 0);
			ixgbe_init_interrupt_scheme(adapter);
			if (netif_running(netdev))
				netdev->netdev_ops->ndo_open(netdev);
		}
	}

out:
	return err;
}
/**
 * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_count: q_vectors allocated on adapter, used for ring interleaving
 * @v_idx: index of vector in adapter struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 * The vector and its rings are carved out of a single allocation,
 * preferably on the NUMA node matching the vector's CPU when flow
 * director mapping applies.  Rings are interleaved across vectors by
 * stepping ring indices by @v_count.
 **/
static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
				int v_count, int v_idx,
				int txr_count, int txr_idx,
				int rxr_count, int rxr_idx)
{
	struct ixgbe_q_vector *q_vector;
	struct ixgbe_ring *ring;
	int node = NUMA_NO_NODE;
	int cpu = -1;
	int ring_count, size;
	u8 tcs = netdev_get_num_tc(adapter->netdev);

	/* rings are stored in the flexible tail of the q_vector */
	ring_count = txr_count + rxr_count;
	size = sizeof(struct ixgbe_q_vector) +
	       (sizeof(struct ixgbe_ring) * ring_count);

	/* customize cpu for Flow Director mapping */
	if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
		u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;

		if (rss_i > 1 && adapter->atr_sample_rate) {
			if (cpu_online(v_idx)) {
				cpu = v_idx;
				node = cpu_to_node(cpu);
			}
		}
	}

	/* allocate q_vector and rings */
	q_vector = kzalloc_node(size, GFP_KERNEL, node);
	if (!q_vector)
		q_vector = kzalloc(size, GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	/* setup affinity mask and node */
	if (cpu != -1)
		cpumask_set_cpu(cpu, &q_vector->affinity_mask);
	q_vector->numa_node = node;

#ifdef CONFIG_IXGBE_DCA
	/* initialize CPU for DCA */
	q_vector->cpu = -1;

#endif
	/* initialize NAPI */
	netif_napi_add(adapter->netdev, &q_vector->napi,
		       ixgbe_poll, 64);
	napi_hash_add(&q_vector->napi);

	/* tie q_vector and adapter together */
	adapter->q_vector[v_idx] = q_vector;
	q_vector->adapter = adapter;
	q_vector->v_idx = v_idx;

	/* initialize work limits */
	q_vector->tx.work_limit = adapter->tx_work_limit;

	/* initialize pointer to rings */
	ring = q_vector->ring;

	/* initialize ITR: a setting of 1 means "dynamic", seeded with a
	 * default rate; otherwise use the configured fixed rate
	 */
	if (txr_count && !rxr_count) {
		/* tx only vector */
		if (adapter->tx_itr_setting == 1)
			q_vector->itr = IXGBE_10K_ITR;
		else
			q_vector->itr = adapter->tx_itr_setting;
	} else {
		/* rx or rx/tx vector */
		if (adapter->rx_itr_setting == 1)
			q_vector->itr = IXGBE_20K_ITR;
		else
			q_vector->itr = adapter->rx_itr_setting;
	}

	while (txr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		ixgbe_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		if (adapter->num_rx_pools > 1)
			ring->queue_index =
				txr_idx % adapter->num_rx_queues_per_pool;
		else
			ring->queue_index = txr_idx;

		/* assign ring to adapter */
		adapter->tx_ring[txr_idx] = ring;

		/* update count and index */
		txr_count--;
		txr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	while (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Rx values */
		ixgbe_add_ring(ring, &q_vector->rx);

		/*
		 * 82599 errata, UDP frames with a 0 checksum
		 * can be marked as checksum errors.
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82599EB)
			set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);

#ifdef IXGBE_FCOE
		/* mark rings inside the FCoE queue range */
		if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
			struct ixgbe_ring_feature *f;

			f = &adapter->ring_feature[RING_F_FCOE];
			if ((rxr_idx >= f->offset) &&
			    (rxr_idx < f->offset + f->indices))
				set_bit(__IXGBE_RX_FCOE, &ring->state);
		}

#endif /* IXGBE_FCOE */
		/* apply Rx specific ring traits */
		ring->count = adapter->rx_ring_count;
		if (adapter->num_rx_pools > 1)
			ring->queue_index =
				rxr_idx % adapter->num_rx_queues_per_pool;
		else
			ring->queue_index = rxr_idx;

		/* assign ring to adapter */
		adapter->rx_ring[rxr_idx] = ring;

		/* update count and index */
		rxr_count--;
		rxr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	return 0;
}