Example #1
/**
 * e1000e_ptp_init - initialize PTP for devices which support it
 * @adapter: board private structure
 *
 * This function performs the required steps for enabling PTP support.
 * If PTP support has already been loaded it simply calls the cyclecounter
 * init routine and exits.
 **/
void e1000e_ptp_init(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	adapter->ptp_clock = NULL;

	if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
		return;

	adapter->ptp_clock_info = e1000e_ptp_clock_info;

	snprintf(adapter->ptp_clock_info.name,
		 sizeof(adapter->ptp_clock_info.name), "%pm",
		 adapter->netdev->perm_addr);

	switch (hw->mac.type) {
	case e1000_pch2lan:
	case e1000_pch_lpt:
	case e1000_pch_spt:
		if (((hw->mac.type != e1000_pch_lpt) &&
		     (hw->mac.type != e1000_pch_spt)) ||
		    (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
			adapter->ptp_clock_info.max_adj = 24000000 - 1;
			break;
		}
		/* fall-through */
	case e1000_82574:
	case e1000_82583:
		adapter->ptp_clock_info.max_adj = 600000000 - 1;
		break;
	default:
		break;
	}

#ifdef CONFIG_E1000E_HWTS
	/* CPU must have ART and GBe must be from Sunrise Point or greater */
	if (hw->mac.type >= e1000_pch_spt && boot_cpu_has(X86_FEATURE_ART))
		adapter->ptp_clock_info.getcrosststamp =
			e1000e_phc_getcrosststamp;
#endif /* CONFIG_E1000E_HWTS */

	INIT_DELAYED_WORK(&adapter->systim_overflow_work,
			  e1000e_systim_overflow_work);

	schedule_delayed_work(&adapter->systim_overflow_work,
			      E1000_SYSTIM_OVERFLOW_PERIOD);

	adapter->ptp_clock = ptp_clock_register(&adapter->ptp_clock_info,
						&adapter->pdev->dev);
	if (IS_ERR(adapter->ptp_clock)) {
		adapter->ptp_clock = NULL;
		e_err("ptp_clock_register failed\n");
	} else if (adapter->ptp_clock) {
		e_info("registered PHC clock\n");
	}
}
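The init path above registers a PHC and schedules the SYSTIM overflow worker, so a matching teardown has to cancel that work and unregister the clock. The corresponding e1000e_ptp_remove() is not included in these examples; the following is a minimal sketch of such a teardown, assuming only the adapter fields and helpers already used above.

void e1000e_ptp_remove(struct e1000_adapter *adapter)
{
	if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
		return;

	/* stop the periodic SYSTIM overflow work scheduled at init time */
	cancel_delayed_work_sync(&adapter->systim_overflow_work);

	/* unregister the PHC only if registration actually succeeded */
	if (adapter->ptp_clock) {
		ptp_clock_unregister(adapter->ptp_clock);
		adapter->ptp_clock = NULL;
		e_info("removed PHC\n");
	}
}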
Example #2
static int e1000_set_settings(struct net_device *netdev,
			      struct ethtool_cmd *ecmd)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/*
	 * When SoL/IDER sessions are active, autoneg/speed/duplex
	 * cannot be changed
	 */
	if (e1000_check_reset_block(hw)) {
		e_err("Cannot change link characteristics when SoL/IDER is "
		      "active.\n");
		return -EINVAL;
	}

	while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
		msleep(1);

	if (ecmd->autoneg == AUTONEG_ENABLE) {
		hw->mac.autoneg = 1;
		if (hw->phy.media_type == e1000_media_type_fiber)
			hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full |
						     ADVERTISED_FIBRE |
						     ADVERTISED_Autoneg;
		else
			hw->phy.autoneg_advertised = ecmd->advertising |
						     ADVERTISED_TP |
						     ADVERTISED_Autoneg;
		ecmd->advertising = hw->phy.autoneg_advertised;
		if (adapter->fc_autoneg)
			hw->fc.requested_mode = e1000_fc_default;
	} else {
		if (e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) {
			clear_bit(__E1000_RESETTING, &adapter->state);
			return -EINVAL;
		}
	}

	/* reset the link */

	if (netif_running(adapter->netdev)) {
		e1000e_down(adapter);
		e1000e_up(adapter);
	} else {
		e1000e_reset(adapter);
	}

	clear_bit(__E1000_RESETTING, &adapter->state);
	return 0;
}
Example #3
File: ethtool.c  Project: A-K/linux
static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data,
			      int reg, u32 mask, u32 write)
{
	u32 val;
	__ew32(&adapter->hw, reg, write & mask);
	val = __er32(&adapter->hw, reg);
	if ((write & mask) != (val & mask)) {
		e_err("set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
		      reg, (val & mask), (write & mask));
		*data = reg;
		return true;
	}
	return false;
}
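Example #10 below exercises this helper through a REG_SET_AND_CHECK() macro that is not reproduced in these excerpts. A plausible definition, assuming the macro captures the local `adapter` and `data` variables of the calling test function and aborts that test on the first failure, looks like this:

#define REG_SET_AND_CHECK(reg, mask, write)                                  \
	do {                                                                 \
		if (reg_set_and_check(adapter, data, reg, mask, write))      \
			return 1;                                            \
	} while (0)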
Example #4
static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
{
	struct e1000_mac_info *mac = &adapter->hw.mac;

	mac->autoneg = 0;

	/* Fiber NICs only allow 1000 Mbps full duplex */
	if ((adapter->hw.phy.media_type == e1000_media_type_fiber) &&
	    spddplx != (SPEED_1000 + DUPLEX_FULL)) {
		e_err("Unsupported Speed/Duplex configuration\n");
		return -EINVAL;
	}

	switch (spddplx) {
	case SPEED_10 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_10_HALF;
		break;
	case SPEED_10 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_10_FULL;
		break;
	case SPEED_100 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_100_HALF;
		break;
	case SPEED_100 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_100_FULL;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		mac->autoneg = 1;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		e_err("Unsupported Speed/Duplex configuration\n");
		return -EINVAL;
	}
	return 0;
}
Example #5
File: ethtool.c  Project: A-K/linux
static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
{
	struct e1000_mac_info *mac = &adapter->hw.mac;

	mac->autoneg = 0;

	/* Make sure dplx is at most 1 bit and lsb of speed is not set
	 * for the switch() below to work
	 */
	if ((spd & 1) || (dplx & ~1))
		goto err_inval;

	/* Fiber NICs only allow 1000 Mbps full duplex */
	if ((adapter->hw.phy.media_type == e1000_media_type_fiber) &&
	    (spd != SPEED_1000 || dplx != DUPLEX_FULL)) {
		goto err_inval;
	}

	switch (spd + dplx) {
	case SPEED_10 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_10_HALF;
		break;
	case SPEED_10 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_10_FULL;
		break;
	case SPEED_100 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_100_HALF;
		break;
	case SPEED_100 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_100_FULL;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		mac->autoneg = 1;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		goto err_inval;
	}

	/* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
	adapter->hw.phy.mdix = AUTO_ALL_MODES;

	return 0;

err_inval:
	e_err("Unsupported Speed/Duplex configuration\n");
	return -EINVAL;
}
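The early check `(spd & 1) || (dplx & ~1)` is what keeps `switch (spd + dplx)` unambiguous: the ethtool speed values (SPEED_10 = 10, SPEED_100 = 100, SPEED_1000 = 1000) are all even and the duplex values (DUPLEX_HALF = 0, DUPLEX_FULL = 1) fit in a single bit, so every accepted pair sums to a distinct case label (10, 11, 100, 101, 1000, 1001). A standalone illustration, using local constants with the same values as the ethtool ones:

#include <stdio.h>

/* Local copies of the relevant ethtool constants (illustration only). */
enum { SPEED_10 = 10, SPEED_100 = 100, SPEED_1000 = 1000 };
enum { DUPLEX_HALF = 0, DUPLEX_FULL = 1 };

int main(void)
{
	const unsigned int spd[] = { SPEED_10, SPEED_100, SPEED_1000 };
	const unsigned int dplx[] = { DUPLEX_HALF, DUPLEX_FULL };

	/* Even speeds plus a one-bit duplex give six distinct sums. */
	for (int i = 0; i < 3; i++)
		for (int j = 0; j < 2; j++)
			printf("spd + dplx = %u\n", spd[i] + dplx[j]);
	return 0;
}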
Example #6
/**
 * e1000e_ptp_init - initialize PTP for devices which support it
 * @adapter: board private structure
 *
 * This function performs the required steps for enabling PTP support.
 * If PTP support has already been loaded it simply calls the cyclecounter
 * init routine and exits.
 **/
void e1000e_ptp_init(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	adapter->ptp_clock = NULL;

	if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
		return;

	adapter->ptp_clock_info = e1000e_ptp_clock_info;

	snprintf(adapter->ptp_clock_info.name,
		 sizeof(adapter->ptp_clock_info.name), "%pm",
		 adapter->netdev->perm_addr);

	switch (hw->mac.type) {
	case e1000_pch2lan:
	case e1000_pch_lpt:
	case e1000_pch_spt:
		if (((hw->mac.type != e1000_pch_lpt) &&
		     (hw->mac.type != e1000_pch_spt)) ||
		    (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
			adapter->ptp_clock_info.max_adj = 24000000 - 1;
			break;
		}
		/* fall-through */
	case e1000_82574:
	case e1000_82583:
		adapter->ptp_clock_info.max_adj = 600000000 - 1;
		break;
	default:
		break;
	}

	INIT_DELAYED_WORK(&adapter->systim_overflow_work,
			  e1000e_systim_overflow_work);

	schedule_delayed_work(&adapter->systim_overflow_work,
			      E1000_SYSTIM_OVERFLOW_PERIOD);

	adapter->ptp_clock = ptp_clock_register(&adapter->ptp_clock_info,
						pci_dev_to_dev(adapter->pdev));
	if (IS_ERR(adapter->ptp_clock)) {
		adapter->ptp_clock = NULL;
		e_err("ptp_clock_register failed\n");
	} else if (adapter->ptp_clock) {
		e_info("registered PHC clock\n");
	}
}
Example #7
File: ethtool.c  Project: A-K/linux
static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data,
			     int reg, int offset, u32 mask, u32 write)
{
	u32 pat, val;
	static const u32 test[] = {
		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
	for (pat = 0; pat < ARRAY_SIZE(test); pat++) {
		E1000_WRITE_REG_ARRAY(&adapter->hw, reg, offset,
				      (test[pat] & write));
		val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset);
		if (val != (test[pat] & write & mask)) {
			e_err("pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
			      reg + offset, val, (test[pat] & write & mask));
			*data = reg;
			return true;
		}
	}
	return false;
}
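Example #10 drives this helper through the REG_PATTERN_TEST() and REG_PATTERN_TEST_ARRAY() macros, whose definitions are not included in these excerpts. A plausible shape for them, assuming the same capture of `adapter` and `data` as with REG_SET_AND_CHECK() above:

#define REG_PATTERN_TEST_ARRAY(reg, offset, mask, write)                     \
	do {                                                                 \
		if (reg_pattern_test(adapter, data, reg, offset,             \
				     mask, write))                           \
			return 1;                                            \
	} while (0)

#define REG_PATTERN_TEST(reg, mask, write)                                   \
	REG_PATTERN_TEST_ARRAY(reg, 0, mask, write)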
Example #8
/**
 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int vector, v_budget, err;

	if (!(adapter->flags & IXGBE_FLAG_MSIX_CAPABLE))
		goto try_msi;
	/*
	 * It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPUs.  So let's be conservative and only ask for
	 * (roughly) the same number of vectors as there are CPUs.
	 * The default is to use pairs of vectors.
	 */
	v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
	v_budget = min_t(int, v_budget, num_online_cpus());
	v_budget += NON_Q_VECTORS;

	/*
	 * At the same time, hardware can only support a maximum of
	 * hw.mac->max_msix_vectors vectors.  With features
	 * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
	 * descriptor queues supported by our device.  Thus, we cap it off in
	 * those rare cases where the cpu count also exceeds our vector limit.
	 */
	v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors);

	/* A failure in MSI-X entry allocation isn't fatal, but it does
	 * mean we disable MSI-X capabilities of the adapter. */
	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (adapter->msix_entries) {
		for (vector = 0; vector < v_budget; vector++)
			adapter->msix_entries[vector].entry = vector;

		ixgbe_acquire_msix_vectors(adapter, v_budget);

		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
			return;
	}

try_msi:
	/* disable DCB if number of TCs exceeds 1 */
	if (netdev_get_num_tc(adapter->netdev) > 1) {
		e_err(probe, "num TCs exceeds number of queues - disabling DCB\n");
		netdev_reset_tc(adapter->netdev);

		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			adapter->hw.fc.requested_mode = adapter->last_lfc_mode;

		adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
		adapter->temp_dcb_cfg.pfc_mode_enable = false;
		adapter->dcb_cfg.pfc_mode_enable = false;
	}
	adapter->dcb_cfg.num_tcs.pg_tcs = 1;
	adapter->dcb_cfg.num_tcs.pfc_tcs = 1;

	/* disable SR-IOV */
	ixgbe_disable_sriov(adapter);

	/* disable RSS */
	adapter->ring_feature[RING_F_RSS].limit = 1;

	ixgbe_set_num_queues(adapter);
	adapter->num_q_vectors = 1;
	if (!(adapter->flags & IXGBE_FLAG_MSI_CAPABLE))
		return;

	err = pci_enable_msi(adapter->pdev);
	if (err) {
		netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
			     "Unable to allocate MSI interrupt, "
			     "falling back to legacy.  Error: %d\n", err);
		return;
	}
	adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
}
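As a concrete illustration of the vector budget above (the numbers are chosen for illustration, not taken from the code): on a host with 8 online CPUs, 16 Rx queues, 16 Tx queues and NON_Q_VECTORS equal to 1, v_budget becomes min(max(16, 16), 8) + 1 = 9, and is then clamped to hw->mac.max_msix_vectors if the hardware advertises fewer than 9 MSI-X vectors.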
Example #9
/**
 * ixgbe_fcoe_ddp_setup - called to set up ddp context
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 * @target_mode: 1 to set up DDP in target mode, 0 to set up initiator mode
 *
 * Returns : 1 for success and 0 for no ddp
 */
static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
				struct scatterlist *sgl, unsigned int sgc,
				int target_mode)
{
	struct ixgbe_adapter *adapter;
	struct ixgbe_hw *hw;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_fcoe_ddp *ddp;
	struct ixgbe_fcoe_ddp_pool *ddp_pool;
	struct scatterlist *sg;
	unsigned int i, j, dmacount;
	unsigned int len;
	static const unsigned int bufflen = IXGBE_FCBUFF_MIN;
	unsigned int firstoff = 0;
	unsigned int lastsize;
	unsigned int thisoff = 0;
	unsigned int thislen = 0;
	u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;
	dma_addr_t addr = 0;

	if (!netdev || !sgl)
		return 0;

	adapter = netdev_priv(netdev);
	if (xid >= IXGBE_FCOE_DDP_MAX) {
		e_warn(drv, "xid=0x%x out-of-range\n", xid);
		return 0;
	}

	/* no DDP if we are already down or resetting */
	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
	    test_bit(__IXGBE_RESETTING, &adapter->state))
		return 0;

	fcoe = &adapter->fcoe;
	ddp = &fcoe->ddp[xid];
	if (ddp->sgl) {
		e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
		      xid, ddp->sgl, ddp->sgc);
		return 0;
	}
	ixgbe_fcoe_clear_ddp(ddp);

	if (!fcoe->ddp_pool) {
		e_warn(drv, "No ddp_pool resources allocated\n");
		return 0;
	}

	ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu());
	if (!ddp_pool->pool) {
		e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
		goto out_noddp;
	}

	/* setup dma from scsi command sgl */
	dmacount = dma_map_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);
	if (dmacount == 0) {
		e_err(drv, "xid 0x%x DMA map error\n", xid);
		goto out_noddp;
	}

	/* alloc the udl from per cpu ddp pool */
	ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp);
	if (!ddp->udl) {
		e_err(drv, "failed allocated ddp context\n");
		goto out_noddp_unmap;
	}
	ddp->pool = ddp_pool->pool;
	ddp->sgl = sgl;
	ddp->sgc = sgc;

	j = 0;
	for_each_sg(sgl, sg, dmacount, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);
		while (len) {
			/* max number of buffers allowed in one DDP context */
			if (j >= IXGBE_BUFFCNT_MAX) {
				ddp_pool->noddp++;
				goto out_noddp_free;
			}

			/* get the offset and length of the current buffer */
			thisoff = addr & ((dma_addr_t)bufflen - 1);
			thislen = min((bufflen - thisoff), len);
			/*
			 * all but the 1st buffer (j == 0)
			 * must be aligned on bufflen
			 */
			if ((j != 0) && (thisoff))
				goto out_noddp_free;
			/*
			 * all but the last buffer
			 * ((i == (dmacount - 1)) && (thislen == len))
			 * must end at bufflen
			 */
			if (((i != (dmacount - 1)) || (thislen != len))
			    && ((thislen + thisoff) != bufflen))
				goto out_noddp_free;

			ddp->udl[j] = (u64)(addr - thisoff);
			/* only the first buffer may have a non-zero offset */
			if (j == 0)
				firstoff = thisoff;
			len -= thislen;
			addr += thislen;
			j++;
		}
	}
Example #10
File: ethtool.c  Project: A-K/linux
static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_mac_info *mac = &adapter->hw.mac;
	u32 value;
	u32 before;
	u32 after;
	u32 i;
	u32 toggle;
	u32 mask;
	u32 wlock_mac = 0;

	/*
	 * The status register is Read Only, so a write should fail.
	 * Some bits that get toggled are ignored.
	 */
	switch (mac->type) {
	/* there are several bits on newer hardware that are r/w */
	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
		toggle = 0x7FFFF3FF;
		break;
	default:
		toggle = 0x7FFFF033;
		break;
	}

	before = er32(STATUS);
	value = (er32(STATUS) & toggle);
	ew32(STATUS, toggle);
	after = er32(STATUS) & toggle;
	if (value != after) {
		e_err("failed STATUS register test got: 0x%08X expected: 0x%08X\n",
		      after, value);
		*data = 1;
		return 1;
	}
	/* restore previous status */
	ew32(STATUS, before);

	if (!(adapter->flags & FLAG_IS_ICH)) {
		REG_PATTERN_TEST(E1000_FCAL, 0xFFFFFFFF, 0xFFFFFFFF);
		REG_PATTERN_TEST(E1000_FCAH, 0x0000FFFF, 0xFFFFFFFF);
		REG_PATTERN_TEST(E1000_FCT, 0x0000FFFF, 0xFFFFFFFF);
		REG_PATTERN_TEST(E1000_VET, 0x0000FFFF, 0xFFFFFFFF);
	}

	REG_PATTERN_TEST(E1000_RDTR, 0x0000FFFF, 0xFFFFFFFF);
	REG_PATTERN_TEST(E1000_RDBAH(0), 0xFFFFFFFF, 0xFFFFFFFF);
	REG_PATTERN_TEST(E1000_RDLEN(0), 0x000FFF80, 0x000FFFFF);
	REG_PATTERN_TEST(E1000_RDH(0), 0x0000FFFF, 0x0000FFFF);
	REG_PATTERN_TEST(E1000_RDT(0), 0x0000FFFF, 0x0000FFFF);
	REG_PATTERN_TEST(E1000_FCRTH, 0x0000FFF8, 0x0000FFF8);
	REG_PATTERN_TEST(E1000_FCTTV, 0x0000FFFF, 0x0000FFFF);
	REG_PATTERN_TEST(E1000_TIPG, 0x3FFFFFFF, 0x3FFFFFFF);
	REG_PATTERN_TEST(E1000_TDBAH(0), 0xFFFFFFFF, 0xFFFFFFFF);
	REG_PATTERN_TEST(E1000_TDLEN(0), 0x000FFF80, 0x000FFFFF);

	REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x00000000);

	before = ((adapter->flags & FLAG_IS_ICH) ? 0x06C3B33E : 0x06DFB3FE);
	REG_SET_AND_CHECK(E1000_RCTL, before, 0x003FFFFB);
	REG_SET_AND_CHECK(E1000_TCTL, 0xFFFFFFFF, 0x00000000);

	REG_SET_AND_CHECK(E1000_RCTL, before, 0xFFFFFFFF);
	REG_PATTERN_TEST(E1000_RDBAL(0), 0xFFFFFFF0, 0xFFFFFFFF);
	if (!(adapter->flags & FLAG_IS_ICH))
		REG_PATTERN_TEST(E1000_TXCW, 0xC000FFFF, 0x0000FFFF);
	REG_PATTERN_TEST(E1000_TDBAL(0), 0xFFFFFFF0, 0xFFFFFFFF);
	REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF);
	mask = 0x8003FFFF;
	switch (mac->type) {
	case e1000_ich10lan:
	case e1000_pchlan:
	case e1000_pch2lan:
	case e1000_pch_lpt:
		mask |= (1 << 18);
		break;
	default:
		break;
	}

	if (mac->type == e1000_pch_lpt)
		wlock_mac = (er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK) >>
		    E1000_FWSM_WLOCK_MAC_SHIFT;

	for (i = 0; i < mac->rar_entry_count; i++) {
		/* Cannot test write-protected SHRAL[n] registers */
		if ((wlock_mac == 1) || (wlock_mac && (i > wlock_mac)))
			continue;

		REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1),
				       mask, 0xFFFFFFFF);
	}

	for (i = 0; i < mac->mta_reg_count; i++)
		REG_PATTERN_TEST_ARRAY(E1000_MTA, i, 0xFFFFFFFF, 0xFFFFFFFF);

	*data = 0;

	return 0;
}
Example #11
File: ethtool.c  Project: A-K/linux
static int e1000_set_settings(struct net_device *netdev,
			      struct ethtool_cmd *ecmd)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/*
	 * When SoL/IDER sessions are active, autoneg/speed/duplex
	 * cannot be changed
	 */
	if (hw->phy.ops.check_reset_block &&
	    hw->phy.ops.check_reset_block(hw)) {
		e_err("Cannot change link characteristics when SoL/IDER is active.\n");
		return -EINVAL;
	}

	/*
	 * MDI setting is only allowed when autoneg enabled because
	 * some hardware doesn't allow MDI setting when speed or
	 * duplex is forced.
	 */
	if (ecmd->eth_tp_mdix_ctrl) {
		if (hw->phy.media_type != e1000_media_type_copper)
			return -EOPNOTSUPP;

		if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
		    (ecmd->autoneg != AUTONEG_ENABLE)) {
			e_err("forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
			return -EINVAL;
		}
	}

	while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	if (ecmd->autoneg == AUTONEG_ENABLE) {
		hw->mac.autoneg = 1;
		if (hw->phy.media_type == e1000_media_type_fiber)
			hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full |
						     ADVERTISED_FIBRE |
						     ADVERTISED_Autoneg;
		else
			hw->phy.autoneg_advertised = ecmd->advertising |
						     ADVERTISED_TP |
						     ADVERTISED_Autoneg;
		ecmd->advertising = hw->phy.autoneg_advertised;
		if (adapter->fc_autoneg)
			hw->fc.requested_mode = e1000_fc_default;
	} else {
		u32 speed = ethtool_cmd_speed(ecmd);
		/* calling this overrides forced MDI setting */
		if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) {
			clear_bit(__E1000_RESETTING, &adapter->state);
			return -EINVAL;
		}
	}

	/* MDI-X => 2; MDI => 1; Auto => 3 */
	if (ecmd->eth_tp_mdix_ctrl) {
		/*
		 * fix up the value for auto (3 => 0) as zero is mapped
		 * internally to auto
		 */
		if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
			hw->phy.mdix = AUTO_ALL_MODES;
		else
			hw->phy.mdix = ecmd->eth_tp_mdix_ctrl;
	}

	/* reset the link */

	if (netif_running(adapter->netdev)) {
		e1000e_down(adapter);
		e1000e_up(adapter);
	} else {
		e1000e_reset(adapter);
	}

	clear_bit(__E1000_RESETTING, &adapter->state);
	return 0;
}
Example #12
static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
{
	u8 err = 0;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (state > 0) {
		/* Turn on DCB */
		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
			goto out;

		if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
			e_err(drv, "Enable failed, needs MSI-X\n");
			err = 1;
			goto out;
		}

		if (netif_running(netdev))
			netdev->netdev_ops->ndo_stop(netdev);
		ixgbe_clear_interrupt_scheme(adapter);

		switch (adapter->hw.mac.type) {
		case ixgbe_mac_82598EB:
			adapter->last_lfc_mode = adapter->hw.fc.current_mode;
			adapter->hw.fc.requested_mode = ixgbe_fc_none;
			break;
		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
			adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
			adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
			break;
		default:
			break;
		}

		adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
		if (!netdev_get_num_tc(netdev))
			ixgbe_setup_tc(netdev, MAX_TRAFFIC_CLASS);

		ixgbe_init_interrupt_scheme(adapter);
		if (netif_running(netdev))
			netdev->netdev_ops->ndo_open(netdev);
	} else {
		/* Turn off DCB */
		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
			if (netif_running(netdev))
				netdev->netdev_ops->ndo_stop(netdev);
			ixgbe_clear_interrupt_scheme(adapter);

			adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
			adapter->temp_dcb_cfg.pfc_mode_enable = false;
			adapter->dcb_cfg.pfc_mode_enable = false;
			adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
			switch (adapter->hw.mac.type) {
			case ixgbe_mac_82599EB:
			case ixgbe_mac_X540:
				adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
				break;
			default:
				break;
			}

			ixgbe_setup_tc(netdev, 0);

			ixgbe_init_interrupt_scheme(adapter);
			if (netif_running(netdev))
				netdev->netdev_ops->ndo_open(netdev);
		}
	}
out:
	return err;
}
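This callback only takes effect once it is wired into the device's DCB netlink ops table, which is not part of these excerpts. A minimal sketch of that hookup, assuming the standard struct dcbnl_rtnl_ops from include/net/dcbnl.h and showing only this one callback:

#include <net/dcbnl.h>

/* Sketch: the real ixgbe ops table registers many more callbacks. */
static const struct dcbnl_rtnl_ops ixgbe_dcbnl_ops = {
	.setstate	= ixgbe_dcbnl_set_state,
};

/* During probe the driver would then attach it to the netdev:
 *	netdev->dcbnl_ops = &ixgbe_dcbnl_ops;
 */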
/**
 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int err = 0;
	int vector, v_budget;

	/*
	 * It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPUs.  So let's be conservative and only ask for
	 * (roughly) the same number of vectors as there are CPUs.
	 * The default is to use pairs of vectors.
	 */
	v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
	v_budget = min_t(int, v_budget, num_online_cpus());
	v_budget += NON_Q_VECTORS;

	/*
	 * At the same time, hardware can only support a maximum of
	 * hw.mac->max_msix_vectors vectors.  With features
	 * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
	 * descriptor queues supported by our device.  Thus, we cap it off in
	 * those rare cases where the cpu count also exceeds our vector limit.
	 */
	v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors);

	/* A failure in MSI-X entry allocation isn't fatal, but it does
	 * mean we disable MSI-X capabilities of the adapter. */
	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (adapter->msix_entries) {
		for (vector = 0; vector < v_budget; vector++)
			adapter->msix_entries[vector].entry = vector;

		ixgbe_acquire_msix_vectors(adapter, v_budget);

		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
			goto out;
	}

	adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
	adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
		e_err(probe,
		      "ATR is not supported while multiple "
		      "queues are disabled.  Disabling Flow Director\n");
	}
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
	adapter->atr_sample_rate = 0;
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		ixgbe_disable_sriov(adapter);

	err = ixgbe_set_num_queues(adapter);
	if (err)
		return err;

	err = pci_enable_msi(adapter->pdev);
	if (!err) {
		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
	} else {
		netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
			     "Unable to allocate MSI interrupt, "
			     "falling back to legacy.  Error: %d\n", err);
		/* reset err */
		err = 0;
	}

out:
	return err;
}