Example #1
0
static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
{
	int err = 0;
	u8 prio_tc[MAX_USER_PRIORITY] = {0};
	int i;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* Fail command if not in CEE mode */
	if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
		return 1;

	/* verify there is something to do, if not then exit */
	if (!!state != !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
		goto out;

	if (state > 0) {
		err = ixgbe_setup_tc(netdev, adapter->dcb_cfg.num_tcs.pg_tcs);
		ixgbe_dcb_unpack_map(&adapter->dcb_cfg, DCB_TX_CONFIG, prio_tc);
	} else {
		err = ixgbe_setup_tc(netdev, 0);
	}

	if (err)
		goto out;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		netdev_set_prio_tc_map(netdev, i, prio_tc[i]);

out:
	return err ? 1 : 0;
}
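The "nothing to do" check above uses a double-negation idiom: the call is skipped only when the requested state already matches IXGBE_FLAG_DCB_ENABLED. The following is a minimal userspace sketch of that comparison; the flag bit and helper name are made up for illustration and are not part of the driver.

/*
 * Standalone sketch of the state-change check in ixgbe_dcbnl_set_state().
 * FLAG_DCB_ENABLED is a hypothetical bit chosen only for this example.
 */
#include <stdio.h>

#define FLAG_DCB_ENABLED	(1u << 12)	/* placeholder flag bit */

/* Returns nonzero when the requested state equals the current state,
 * i.e. when the driver would take the "goto out" path. */
static int nothing_to_do(unsigned int state, unsigned int flags)
{
	return !!state != !(flags & FLAG_DCB_ENABLED);
}

int main(void)
{
	unsigned int flag_vals[] = { 0, FLAG_DCB_ENABLED };
	unsigned int states[] = { 0, 1 };

	for (int f = 0; f < 2; f++)
		for (int s = 0; s < 2; s++)
			printf("state=%u dcb_enabled=%d -> %s\n",
			       states[s],
			       !!(flag_vals[f] & FLAG_DCB_ENABLED),
			       nothing_to_do(states[s], flag_vals[f]) ?
			       "skip, already set" : "apply change");
	return 0;
}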
/**
 * ixgbe_dcb_hw_config - Config and enable DCB
 * @hw: pointer to hardware structure
 * @dcb_config: pointer to ixgbe_dcb_config structure
 *
 * Configure dcb settings and enable dcb mode.
 */
s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
                        struct ixgbe_dcb_config *dcb_config)
{
	s32 ret = 0;
	u8 pfc_en;
	u8 ptype[MAX_TRAFFIC_CLASS];
	u8 bwgid[MAX_TRAFFIC_CLASS];
	u8 prio_tc[MAX_TRAFFIC_CLASS];
	u16 refill[MAX_TRAFFIC_CLASS];
	u16 max[MAX_TRAFFIC_CLASS];

	/* Unpack CEE standard containers */
	ixgbe_dcb_unpack_pfc(dcb_config, &pfc_en);
	ixgbe_dcb_unpack_refill(dcb_config, DCB_TX_CONFIG, refill);
	ixgbe_dcb_unpack_max(dcb_config, max);
	ixgbe_dcb_unpack_bwgid(dcb_config, DCB_TX_CONFIG, bwgid);
	ixgbe_dcb_unpack_prio(dcb_config, DCB_TX_CONFIG, ptype);
	ixgbe_dcb_unpack_map(dcb_config, DCB_TX_CONFIG, prio_tc);

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		ret = ixgbe_dcb_hw_config_82598(hw, pfc_en, refill, max,
						bwgid, ptype);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		ret = ixgbe_dcb_hw_config_82599(hw, pfc_en, refill, max,
						bwgid, ptype, prio_tc);
		break;
	default:
		break;
	}
	return ret;
}
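ixgbe_dcb_hw_config() follows a two-step pattern: flatten the CEE configuration into per-TC arrays, then dispatch on the MAC generation (the 82598 path takes no priority-to-TC map, the 82599/X540 path does). The sketch below mirrors only that shape with stand-in enum and helper names; it is not the driver's API.

/* Illustrative stand-ins; the real driver uses struct ixgbe_hw and
 * per-MAC helpers such as ixgbe_dcb_hw_config_82599(). */
#include <stdint.h>
#include <stdio.h>

#define MAX_TC 8	/* mirrors MAX_TRAFFIC_CLASS */

enum mac_type { MAC_82598EB, MAC_82599EB, MAC_X540, MAC_UNKNOWN };

static int hw_config_82598(const uint16_t *refill, const uint16_t *max)
{
	printf("82598 path: no prio_tc map parameter\n");
	return 0;
}

static int hw_config_82599(const uint16_t *refill, const uint16_t *max,
			   const uint8_t *prio_tc)
{
	printf("82599/X540 path: prio_tc[0]=%u\n", (unsigned int)prio_tc[0]);
	return 0;
}

static int dcb_hw_config(enum mac_type mac)
{
	uint16_t refill[MAX_TC] = { 0 }, max[MAX_TC] = { 0 };
	uint8_t prio_tc[MAX_TC] = { 0 };
	int ret = 0;

	/* ...unpacking the CEE containers would fill the arrays here... */

	switch (mac) {
	case MAC_82598EB:
		ret = hw_config_82598(refill, max);
		break;
	case MAC_82599EB:
	case MAC_X540:
		ret = hw_config_82599(refill, max, prio_tc);
		break;
	default:
		break;
	}
	return ret;
}

int main(void)
{
	return dcb_hw_config(MAC_82599EB);
}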
static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
{
	int per_tc_q, q, i, offset = 0;
	struct net_device *dev = adapter->netdev;
	int tcs = netdev_get_num_tc(dev);

	if (!tcs)
		return false;

	/* Map queue offset and counts onto allocated tx queues */
	per_tc_q = min_t(unsigned int, dev->num_tx_queues / tcs, DCB_QUEUE_CAP);
	q = min_t(int, num_online_cpus(), per_tc_q);

	for (i = 0; i < tcs; i++) {
		netdev_set_tc_queue(dev, i, q, offset);
		offset += q;
	}

	adapter->num_tx_queues = q * tcs;
	adapter->num_rx_queues = q * tcs;

#ifdef IXGBE_FCOE
	/* FCoE enabled queues require special configuration indexed
	 * by feature specific indices and mask. Here we map FCoE
	 * indices onto the DCB queue pairs allowing FCoE to own
	 * configuration later.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		u8 prio_tc[MAX_USER_PRIORITY] = {0};
		int tc;
		struct ixgbe_ring_feature *f =
					&adapter->ring_feature[RING_F_FCOE];

		ixgbe_dcb_unpack_map(&adapter->dcb_cfg, DCB_TX_CONFIG, prio_tc);
		tc = prio_tc[adapter->fcoe.up];
		f->indices = dev->tc_to_txq[tc].count;
		f->mask = dev->tc_to_txq[tc].offset;
	}
#endif

	return true;
}
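The queue layout computed above gives every traffic class a contiguous block of q TX queues, where q = min(online CPUs, num_tx_queues / tcs, DCB_QUEUE_CAP). Below is a self-contained sketch of that arithmetic, with placeholder values standing in for the kernel constants and runtime inputs.

#include <stdio.h>

#define DCB_QUEUE_CAP	8	/* placeholder for the driver's per-TC cap */

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int num_tx_queues = 64, tcs = 4, online_cpus = 6;
	unsigned int per_tc_q = min_u(num_tx_queues / tcs, DCB_QUEUE_CAP);
	unsigned int q = min_u(online_cpus, per_tc_q);
	unsigned int offset = 0;

	/* Same loop shape as ixgbe_set_dcb_queues(): per-TC (offset, count) */
	for (unsigned int i = 0; i < tcs; i++) {
		printf("tc %u: offset=%u count=%u\n", i, offset, q);
		offset += q;
	}
	printf("total tx/rx queues used: %u\n", q * tcs);
	return 0;
}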
static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg;
	struct ixgbe_hw *hw = &adapter->hw;
	int ret = DCB_NO_HW_CHG;
	int i;

	/* Fail command if not in CEE mode */
	if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
		return ret;

	adapter->dcb_set_bitmap |= ixgbe_copy_dcb_cfg(adapter,
						      MAX_TRAFFIC_CLASS);
	if (!adapter->dcb_set_bitmap)
		return ret;

	if (adapter->dcb_set_bitmap & (BIT_PG_TX|BIT_PG_RX)) {
		u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS];
		u8 bwg_id[MAX_TRAFFIC_CLASS], prio_type[MAX_TRAFFIC_CLASS];
		/* Priority to TC mapping in the CEE case defaults to 1:1 */
		u8 prio_tc[MAX_USER_PRIORITY];
		int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;

#ifdef IXGBE_FCOE
		if (adapter->netdev->features & NETIF_F_FCOE_MTU)
			max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
#endif

		ixgbe_dcb_calculate_tc_credits(hw, dcb_cfg, max_frame,
					       DCB_TX_CONFIG);
		ixgbe_dcb_calculate_tc_credits(hw, dcb_cfg, max_frame,
					       DCB_RX_CONFIG);

		ixgbe_dcb_unpack_refill(dcb_cfg, DCB_TX_CONFIG, refill);
		ixgbe_dcb_unpack_max(dcb_cfg, max);
		ixgbe_dcb_unpack_bwgid(dcb_cfg, DCB_TX_CONFIG, bwg_id);
		ixgbe_dcb_unpack_prio(dcb_cfg, DCB_TX_CONFIG, prio_type);
		ixgbe_dcb_unpack_map(dcb_cfg, DCB_TX_CONFIG, prio_tc);

		ixgbe_dcb_hw_ets_config(hw, refill, max, bwg_id,
					prio_type, prio_tc);

		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
			netdev_set_prio_tc_map(netdev, i, prio_tc[i]);

		ret = DCB_HW_CHG_RST;
	}

	if (adapter->dcb_set_bitmap & BIT_PFC) {
		if (dcb_cfg->pfc_mode_enable) {
			u8 pfc_en;
			u8 prio_tc[MAX_USER_PRIORITY];

			ixgbe_dcb_unpack_map(dcb_cfg, DCB_TX_CONFIG, prio_tc);
			ixgbe_dcb_unpack_pfc(dcb_cfg, &pfc_en);
			ixgbe_dcb_hw_pfc_config(hw, pfc_en, prio_tc);
		} else {
			hw->mac.ops.fc_enable(hw);
		}

		ixgbe_set_rx_drop_en(adapter);

		ret = DCB_HW_CHG;
	}

#ifdef IXGBE_FCOE
	/* Reprogram FCoE hardware offloads when the traffic class
	 * that FCoE is using changes. This happens if the APP info
	 * changes or the up2tc mapping is updated.
	 */
	if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
		struct dcb_app app = {
				      .selector = DCB_APP_IDTYPE_ETHTYPE,
				      .protocol = ETH_P_FCOE,
				     };
		u8 up = dcb_getapp(netdev, &app);

		adapter->fcoe.up = ffs(up) - 1;
		ixgbe_dcbnl_devreset(netdev);
		ret = DCB_HW_CHG_RST;
	}
#endif

	adapter->dcb_set_bitmap = 0x00;
	return ret;
}
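The credit calculation in the PG branch above is driven by the largest frame the hardware may carry: the netdev MTU plus Ethernet header and FCS, bumped up to the FCoE jumbo size when NETIF_F_FCOE_MTU is set. A small sketch of that sizing follows; the FCoE jumbo value below is a placeholder standing in for IXGBE_FCOE_JUMBO_FRAME_SIZE, not a quoted driver constant.

#include <stdio.h>

#define ETH_HLEN		14	/* Ethernet header */
#define ETH_FCS_LEN		4	/* frame check sequence */
#define FCOE_JUMBO_FRAME_SIZE	3072	/* placeholder for the driver macro */

static int credit_frame_size(int mtu, int fcoe_mtu_enabled)
{
	int max_frame = mtu + ETH_HLEN + ETH_FCS_LEN;

	/* FCoE frames may exceed the netdev MTU, so take the larger value */
	if (fcoe_mtu_enabled && FCOE_JUMBO_FRAME_SIZE > max_frame)
		max_frame = FCOE_JUMBO_FRAME_SIZE;
	return max_frame;
}

int main(void)
{
	printf("mtu 1500, FCoE off: %d bytes\n", credit_frame_size(1500, 0));
	printf("mtu 1500, FCoE on:  %d bytes\n", credit_frame_size(1500, 1));
	return 0;
}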
Example #5
0
static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int ret, i;
#ifdef IXGBE_FCOE
	struct dcb_app app = {
			      .selector = DCB_APP_IDTYPE_ETHTYPE,
			      .protocol = ETH_P_FCOE,
			     };
	u8 up = dcb_getapp(netdev, &app);
#endif

	/* Fail command if not in CEE mode */
	if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
		return 1;

	ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
				 MAX_TRAFFIC_CLASS);
	if (ret)
		return DCB_NO_HW_CHG;

#ifdef IXGBE_FCOE
	if (up && (up != (1 << adapter->fcoe.up)))
		adapter->dcb_set_bitmap |= BIT_APP_UPCHG;

	/*
	 * Only take down the adapter if an app change occurred. FCoE
	 * may shuffle tx rings in this case and this cannot be done
	 * without a reset currently.
	 */
	if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
		while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
			usleep_range(1000, 2000);

		adapter->fcoe.up = ffs(up) - 1;

		if (netif_running(netdev))
			netdev->netdev_ops->ndo_stop(netdev);
		ixgbe_clear_interrupt_scheme(adapter);
	}
#endif

	if (adapter->dcb_cfg.pfc_mode_enable) {
		switch (adapter->hw.mac.type) {
		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
			if (adapter->hw.fc.current_mode != ixgbe_fc_pfc)
				adapter->last_lfc_mode =
				                  adapter->hw.fc.current_mode;
			break;
		default:
			break;
		}
		adapter->hw.fc.requested_mode = ixgbe_fc_pfc;
	} else {
		switch (adapter->hw.mac.type) {
		case ixgbe_mac_82598EB:
			adapter->hw.fc.requested_mode = ixgbe_fc_none;
			break;
		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
			adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
			break;
		default:
			break;
		}
	}

#ifdef IXGBE_FCOE
	if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
		ixgbe_init_interrupt_scheme(adapter);
		if (netif_running(netdev))
			netdev->netdev_ops->ndo_open(netdev);
		ret = DCB_HW_CHG_RST;
	}
#endif

	if (adapter->dcb_set_bitmap & (BIT_PG_TX|BIT_PG_RX)) {
		u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS];
		u8 bwg_id[MAX_TRAFFIC_CLASS], prio_type[MAX_TRAFFIC_CLASS];
		/* Priority to TC mapping in the CEE case defaults to 1:1 */
		u8 prio_tc[MAX_USER_PRIORITY];
		int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;

#ifdef IXGBE_FCOE
		if (adapter->netdev->features & NETIF_F_FCOE_MTU)
			max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
#endif

		ixgbe_dcb_calculate_tc_credits(&adapter->hw, &adapter->dcb_cfg,
					       max_frame, DCB_TX_CONFIG);
		ixgbe_dcb_calculate_tc_credits(&adapter->hw, &adapter->dcb_cfg,
					       max_frame, DCB_RX_CONFIG);

		ixgbe_dcb_unpack_refill(&adapter->dcb_cfg,
					DCB_TX_CONFIG, refill);
		ixgbe_dcb_unpack_max(&adapter->dcb_cfg, max);
		ixgbe_dcb_unpack_bwgid(&adapter->dcb_cfg,
				       DCB_TX_CONFIG, bwg_id);
		ixgbe_dcb_unpack_prio(&adapter->dcb_cfg,
				      DCB_TX_CONFIG, prio_type);
		ixgbe_dcb_unpack_map(&adapter->dcb_cfg,
				     DCB_TX_CONFIG, prio_tc);

		ixgbe_dcb_hw_ets_config(&adapter->hw, refill, max,
					bwg_id, prio_type, prio_tc);

		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
			netdev_set_prio_tc_map(netdev, i, prio_tc[i]);
	}

	if (adapter->dcb_set_bitmap & BIT_PFC) {
		u8 pfc_en;
		u8 prio_tc[MAX_USER_PRIORITY];

		ixgbe_dcb_unpack_map(&adapter->dcb_cfg,
				     DCB_TX_CONFIG, prio_tc);
		ixgbe_dcb_unpack_pfc(&adapter->dcb_cfg, &pfc_en);
		ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc_en, prio_tc);
		ret = DCB_HW_CHG;
	}

	if (adapter->dcb_cfg.pfc_mode_enable)
		adapter->hw.fc.current_mode = ixgbe_fc_pfc;

	if (adapter->dcb_set_bitmap & BIT_APP_UPCHG)
		clear_bit(__IXGBE_RESETTING, &adapter->state);
	adapter->dcb_set_bitmap = 0x00;
	return ret;
}
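The PFC branches in this older version also track link-level flow control: when PFC is enabled on 82599/X540 the current mode is saved in last_lfc_mode so it can be restored once PFC is turned off, while 82598 simply falls back to no flow control. The sketch below replays only that bookkeeping with stand-in enum names; it is not the driver's ixgbe_fc_* API.

#include <stdio.h>

enum fc_mode { FC_NONE, FC_FULL, FC_PFC };		/* stand-ins */
enum mac_type { MAC_82598EB, MAC_82599EB, MAC_X540 };

struct fc_state {
	enum mac_type mac;
	enum fc_mode current_mode;
	enum fc_mode requested_mode;
	enum fc_mode last_lfc_mode;
};

static void apply_pfc_setting(struct fc_state *fc, int pfc_enable)
{
	if (pfc_enable) {
		/* Save the link flow-control mode before switching to PFC */
		if ((fc->mac == MAC_82599EB || fc->mac == MAC_X540) &&
		    fc->current_mode != FC_PFC)
			fc->last_lfc_mode = fc->current_mode;
		fc->requested_mode = FC_PFC;
	} else {
		/* 82598 drops to no flow control; newer parts restore LFC */
		fc->requested_mode = (fc->mac == MAC_82598EB) ?
				     FC_NONE : fc->last_lfc_mode;
	}
}

int main(void)
{
	struct fc_state fc = { MAC_82599EB, FC_FULL, FC_FULL, FC_FULL };

	apply_pfc_setting(&fc, 1);
	printf("PFC on:  requested=%d saved_lfc=%d\n",
	       (int)fc.requested_mode, (int)fc.last_lfc_mode);
	apply_pfc_setting(&fc, 0);
	printf("PFC off: requested=%d\n", (int)fc.requested_mode);
	return 0;
}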