Example #1
static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
			      unsigned int loopback_modes)
{
	enum efx_loopback_mode mode;
	struct efx_loopback_state *state;
	struct efx_tx_queue *tx_queue;
	int rc = 0;

	/* Set the port loopback_selftest member. From this point on
	 * all received packets will be dropped. Mark the state as
	 * "flushing" so all inflight packets are dropped */
	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (state == NULL)
		return -ENOMEM;
	BUG_ON(efx->loopback_selftest);
	state->flush = true;
	efx->loopback_selftest = state;

	/* Test all supported loopback modes */
	for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
		if (!(loopback_modes & (1 << mode)))
			continue;

		/* Move the port into the specified loopback mode. */
		state->flush = true;
		mutex_lock(&efx->mac_lock);
		efx->loopback_mode = mode;
		rc = __efx_reconfigure_port(efx);
		mutex_unlock(&efx->mac_lock);
		if (rc) {
			EFX_ERR(efx, "unable to move into %s loopback\n",
				LOOPBACK_MODE(efx));
			goto out;
		}

		rc = efx_wait_for_link(efx);
		if (rc) {
			EFX_ERR(efx, "loopback %s never came up\n",
				LOOPBACK_MODE(efx));
			goto out;
		}

		/* Test every TX queue */
		efx_for_each_tx_queue(tx_queue, efx) {
			state->offload_csum = (tx_queue->queue ==
					       EFX_TX_QUEUE_OFFLOAD_CSUM);
			rc = efx_test_loopback(tx_queue,
					       &tests->loopback[mode]);
			if (rc)
				goto out;
		}
	}
Example #2
/* Wait for SPI command completion */
static int falcon_spi_wait(struct efx_nic *efx)
{
	/* Most commands will finish quickly, so we start polling at
	 * very short intervals.  Sometimes the command may have to
	 * wait for VPD or expansion ROM access outside of our
	 * control, so we allow up to 100 ms. */
	unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 10);
	int i;

	for (i = 0; i < 10; i++) {
		if (!falcon_spi_poll(efx))
			return 0;
		udelay(10);
	}

	for (;;) {
		if (!falcon_spi_poll(efx))
			return 0;
		if (time_after_eq(jiffies, timeout)) {
			EFX_ERR(efx, "timed out waiting for SPI\n");
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	}
}
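
falcon_spi_wait() above spins with udelay() for a few iterations before dropping into sleeping polls against a jiffies deadline. The following is a minimal userspace sketch of that same two-phase wait pattern; wait_ready() and its poll_ready() callback are hypothetical names, not part of the driver.

#include <errno.h>
#include <stdbool.h>
#include <time.h>
#include <unistd.h>

/* Two-phase wait: spin briefly for operations that usually finish fast,
 * then sleep-poll against a deadline.  poll_ready() is a hypothetical
 * status callback standing in for falcon_spi_poll(). */
static int wait_ready(bool (*poll_ready)(void *ctx), void *ctx,
		      unsigned int spin_us, unsigned int timeout_ms)
{
	struct timespec start, now;
	long elapsed_ms;
	int i;

	/* Phase 1: short busy-poll, like the udelay() loop above */
	for (i = 0; i < 10; i++) {
		if (poll_ready(ctx))
			return 0;
		usleep(spin_us);
	}

	/* Phase 2: sleep-poll until the deadline, like the jiffies loop */
	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if (poll_ready(ctx))
			return 0;
		clock_gettime(CLOCK_MONOTONIC, &now);
		elapsed_ms = (now.tv_sec - start.tv_sec) * 1000L +
			     (now.tv_nsec - start.tv_nsec) / 1000000L;
		if (elapsed_ms > (long)timeout_ms)
			return -ETIMEDOUT;
		usleep(1000);		/* roughly one scheduler tick */
	}
}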
Example #3
/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
				unsigned int index)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;
	unsigned int mask = tx_queue->efx->type->txd_ring_mask;

	stop_index = (index + 1) & mask;
	read_ptr = tx_queue->read_count & mask;

	while (read_ptr != stop_index) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
		if (unlikely(buffer->len == 0)) {
			EFX_ERR(tx_queue->efx, "TX queue %d spurious TX "
				"completion id %x\n", tx_queue->queue,
				read_ptr);
			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		efx_dequeue_buffer(tx_queue, buffer);
		buffer->continuation = true;
		buffer->len = 0;

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & mask;
	}
}
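
efx_dequeue_buffers() assumes the descriptor ring size is a power of two, so the free-running read_count only needs read_count & mask to fold back into a valid ring index. A tiny standalone demonstration of that wrap-around (hypothetical ring size, not driver code):

#include <stdio.h>

#define RING_SIZE 8			/* must be a power of two */
#define RING_MASK (RING_SIZE - 1)

int main(void)
{
	unsigned int read_count;

	/* A free-running counter masked down to a ring index never needs
	 * an explicit wrap test; the counter keeps incrementing while the
	 * mask confines the index to 0..RING_SIZE-1. */
	for (read_count = 5; read_count < 14; read_count++)
		printf("read_count=%u -> ring index %u\n",
		       read_count, read_count & RING_MASK);
	return 0;
}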
Example #4
static int efx_check_lm87(struct efx_nic *efx, unsigned mask)
{
	struct i2c_client *client = falcon_board(efx)->hwmon_client;
	s32 alarms1, alarms2;

	/* If link is up then do not monitor temperature */
	if (EFX_WORKAROUND_7884(efx) && efx->link_state.up)
		return 0;

	alarms1 = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1);
	alarms2 = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2);
	if (alarms1 < 0)
		return alarms1;
	if (alarms2 < 0)
		return alarms2;
	alarms1 &= mask;
	alarms2 &= mask >> 8;
	if (alarms1 || alarms2) {
		EFX_ERR(efx,
			"LM87 detected a hardware failure (status %02x:%02x)"
			"%s%s\n",
			alarms1, alarms2,
			(alarms1 & LM87_ALARM_TEMP_INT) ? " INTERNAL" : "",
			(alarms1 & LM87_ALARM_TEMP_EXT1) ? " EXTERNAL" : "");
		return -ERANGE;
	}

	return 0;
}
Example #5
int falcon_reset_xaui(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t reg;
	int count;

	/* Don't fetch MAC statistics over an XMAC reset */
	WARN_ON(nic_data->stats_disable_count == 0);

	/* Start reset sequence */
	EFX_POPULATE_OWORD_1(reg, FRF_AB_XX_RST_XX_EN, 1);
	efx_writeo(efx, &reg, FR_AB_XX_PWR_RST);

	/* Wait up to 10 ms for completion, then reinitialise */
	for (count = 0; count < 1000; count++) {
		efx_reado(efx, &reg, FR_AB_XX_PWR_RST);
		if (EFX_OWORD_FIELD(reg, FRF_AB_XX_RST_XX_EN) == 0 &&
		    EFX_OWORD_FIELD(reg, FRF_AB_XX_SD_RST_ACT) == 0) {
			falcon_setup_xaui(efx);
			return 0;
		}
		udelay(10);
	}
	EFX_ERR(efx, "timed out waiting for XAUI/XGXS reset\n");
	return -ETIMEDOUT;
}
Example #6
/* Reset the PHYXS MMD. This is documented (for the Quake PHY) as doing
 * a complete soft reset.
 */
static int xfp_reset_phy(struct efx_nic *efx)
{
	int rc;

	rc = mdio_clause45_reset_mmd(efx, MDIO_MMD_PHYXS,
				     XFP_MAX_RESET_TIME / XFP_RESET_WAIT,
				     XFP_RESET_WAIT);
	if (rc < 0)
		goto fail;

	/* Wait 250ms for the PHY to complete bootup */
	msleep(250);

	/* Check that all the MMDs we expect are present and responding. We
	 * expect faults on some if the link is down, but not on the PHY XS */
	rc = mdio_clause45_check_mmds(efx, XFP_REQUIRED_DEVS,
				      MDIO_MMDREG_DEVS_PHYXS);
	if (rc < 0)
		goto fail;

	efx->board_info.init_leds(efx);

	return rc;

 fail:
	EFX_ERR(efx, "XFP: reset timed out!\n");
	return rc;
}
Example #7
/*
 * Allocate a page worth of efx_tso_header structures, and string them
 * into the tx_queue->tso_headers_free linked list. Return 0 or -ENOMEM.
 */
static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
{
	struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
	struct efx_tso_header *tsoh;
	dma_addr_t dma_addr;
	u8 *base_kva, *kva;

	base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr);
	if (base_kva == NULL) {
		EFX_ERR(tx_queue->efx, "Unable to allocate page for TSO"
			" headers\n");
		return -ENOMEM;
	}

	/* pci_alloc_consistent() allocates pages. */
	EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u));

	for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) {
		tsoh = (struct efx_tso_header *)kva;
		tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva);
		tsoh->next = tx_queue->tso_headers_free;
		tx_queue->tso_headers_free = tsoh;
	}

	return 0;
}
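
efx_tsoh_block_alloc() carves one DMA-coherent page into fixed-size header slots and threads each slot onto a singly linked free list. Below is a userspace sketch of the same carve-and-link idea, using plain malloc() in place of pci_alloc_consistent() and hypothetical slot types; the block is deliberately never freed in this sketch.

#include <stdlib.h>

#define BLOCK_SIZE 4096
#define SLOT_SIZE  128			/* must divide BLOCK_SIZE evenly */

struct slot {
	struct slot *next;		/* free-list link lives inside the slot */
};

/* Carve one block into SLOT_SIZE pieces and push them onto *free_list.
 * Returns 0 on success, -1 if the block allocation fails. */
static int slot_block_alloc(struct slot **free_list)
{
	char *base = malloc(BLOCK_SIZE);
	char *p;

	if (!base)
		return -1;

	for (p = base; p < base + BLOCK_SIZE; p += SLOT_SIZE) {
		struct slot *s = (struct slot *)p;

		s->next = *free_list;
		*free_list = s;
	}
	return 0;
}

int main(void)
{
	struct slot *free_list = NULL;

	return slot_block_alloc(&free_list);
}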
Example #8
static int efx_test_mdio(struct efx_nic *efx, struct efx_self_tests *tests)
{
	int rc = 0;
	int devad = __ffs(efx->mdio.mmds);
	u16 physid1, physid2;

	if (efx->phy_type == PHY_TYPE_NONE)
		return 0;

	mutex_lock(&efx->mac_lock);
	tests->mdio = -1;

	physid1 = efx_mdio_read(efx, devad, MDIO_DEVID1);
	physid2 = efx_mdio_read(efx, devad, MDIO_DEVID2);

	if ((physid1 == 0x0000) || (physid1 == 0xffff) ||
	    (physid2 == 0x0000) || (physid2 == 0xffff)) {
		EFX_ERR(efx, "no MDIO PHY present with ID %d\n",
			efx->mdio.prtad);
		rc = -EINVAL;
		goto out;
	}

	if (EFX_IS10G(efx)) {
		rc = efx_mdio_check_mmds(efx, efx->mdio.mmds, 0);
		if (rc)
			goto out;
	}

out:
	mutex_unlock(&efx->mac_lock);
	tests->mdio = rc ? -1 : 1;
	return rc;
}
Example #9
/* Extract non-volatile configuration */
static int falcon_probe_nvconfig(struct efx_nic *efx)
{
	struct falcon_nvconfig *nvconfig;
	int board_rev;
	int rc;

	nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL);
	if (!nvconfig)
		return -ENOMEM;

	rc = falcon_read_nvram(efx, nvconfig);
	if (rc == -EINVAL) {
		EFX_ERR(efx, "NVRAM is invalid therefore using defaults\n");
		efx->phy_type = PHY_TYPE_NONE;
		efx->mdio.prtad = MDIO_PRTAD_NONE;
		board_rev = 0;
		rc = 0;
	} else if (rc) {
		goto fail1;
	} else {
		struct falcon_nvconfig_board_v2 *v2 = &nvconfig->board_v2;
		struct falcon_nvconfig_board_v3 *v3 = &nvconfig->board_v3;

		efx->phy_type = v2->port0_phy_type;
		efx->mdio.prtad = v2->port0_phy_addr;
		board_rev = le16_to_cpu(v2->board_revision);

		if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
			rc = falcon_spi_device_init(
				efx, &efx->spi_flash, FFE_AB_SPI_DEVICE_FLASH,
				le32_to_cpu(v3->spi_device_type
					    [FFE_AB_SPI_DEVICE_FLASH]));
			if (rc)
				goto fail2;
			rc = falcon_spi_device_init(
				efx, &efx->spi_eeprom, FFE_AB_SPI_DEVICE_EEPROM,
				le32_to_cpu(v3->spi_device_type
					    [FFE_AB_SPI_DEVICE_EEPROM]));
			if (rc)
				goto fail2;
		}
	}

	/* Read the MAC addresses */
	memcpy(efx->mac_address, nvconfig->mac_address[0], ETH_ALEN);

	EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad);

	falcon_probe_board(efx, board_rev);

	kfree(nvconfig);
	return 0;

 fail2:
	falcon_remove_spi_devices(efx);
 fail1:
	kfree(nvconfig);
	return rc;
}
Example #10
static int efx_end_loopback(struct efx_tx_queue *tx_queue,
			    struct efx_loopback_self_tests *lb_tests)
{
	struct efx_nic *efx = tx_queue->efx;
	struct efx_loopback_state *state = efx->loopback_selftest;
	struct sk_buff *skb;
	int tx_done = 0, rx_good, rx_bad;
	int i, rc = 0;

	if (efx_dev_registered(efx))
		netif_tx_lock_bh(efx->net_dev);

	/* Count the number of tx completions, and decrement the refcnt. Any
	 * skbs not already completed will be freed when the queue is flushed */
	for (i = 0; i < state->packet_count; i++) {
		skb = state->skbs[i];
		if (skb && !skb_shared(skb))
			++tx_done;
		dev_kfree_skb_any(skb);
	}

	if (efx_dev_registered(efx))
		netif_tx_unlock_bh(efx->net_dev);

	/* Check TX completion and received packet counts */
	rx_good = atomic_read(&state->rx_good);
	rx_bad = atomic_read(&state->rx_bad);
	if (tx_done != state->packet_count) {
		/* Don't free the skbs; they will be picked up on TX
		 * overflow or channel teardown.
		 */
		EFX_ERR(efx, "TX queue %d saw only %d out of an expected %d "
			"TX completion events in %s loopback test\n",
			tx_queue->queue, tx_done, state->packet_count,
			LOOPBACK_MODE(efx));
		rc = -ETIMEDOUT;
		/* Allow to fall through so we see the RX errors as well */
	}

	/* We may always be up to a flush away from our desired packet total */
	if (rx_good != state->packet_count) {
		EFX_LOG(efx, "TX queue %d saw only %d out of an expected %d "
			"received packets in %s loopback test\n",
			tx_queue->queue, rx_good, state->packet_count,
			LOOPBACK_MODE(efx));
		rc = -ETIMEDOUT;
		/* Fall through */
	}

	/* Update loopback test structure */
	lb_tests->tx_sent[tx_queue->queue] += state->packet_count;
	lb_tests->tx_done[tx_queue->queue] += tx_done;
	lb_tests->rx_good += rx_good;
	lb_tests->rx_bad += rx_bad;

	return rc;
}
Example #11
static void efx_mtd_sync(struct mtd_info *mtd)
{
	struct efx_mtd *efx_mtd = mtd->priv;
	struct efx_nic *efx = efx_mtd->efx;
	int rc;

	rc = efx_mtd->ops->sync(mtd);
	if (rc)
		EFX_ERR(efx, "%s sync failed (%d)\n", efx_mtd->name, rc);
}
Example #12
/* This call is responsible for hooking in the MAC and PHY operations */
static int falcon_probe_port(struct efx_nic *efx)
{
	int rc;

	switch (efx->phy_type) {
	case PHY_TYPE_SFX7101:
		efx->phy_op = &falcon_sfx7101_phy_ops;
		break;
	case PHY_TYPE_SFT9001A:
	case PHY_TYPE_SFT9001B:
		efx->phy_op = &falcon_sft9001_phy_ops;
		break;
	case PHY_TYPE_QT2022C2:
	case PHY_TYPE_QT2025C:
		efx->phy_op = &falcon_qt202x_phy_ops;
		break;
	default:
		EFX_ERR(efx, "Unknown PHY type %d\n",
			efx->phy_type);
		return -ENODEV;
	}

	/* Fill out MDIO structure and loopback modes */
	efx->mdio.mdio_read = falcon_mdio_read;
	efx->mdio.mdio_write = falcon_mdio_write;
	rc = efx->phy_op->probe(efx);
	if (rc != 0)
		return rc;

	/* Initial assumption */
	efx->link_state.speed = 10000;
	efx->link_state.fd = true;

	/* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
	else
		efx->wanted_fc = EFX_FC_RX;

	/* Allocate buffer for stats */
	rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
				  FALCON_MAC_STATS_SIZE);
	if (rc)
		return rc;
	EFX_LOG(efx, "stats buffer at %llx (virt %p phys %llx)\n",
		(u64)efx->stats_buffer.dma_addr,
		efx->stats_buffer.addr,
		(u64)virt_to_phys(efx->stats_buffer.addr));

	return 0;
}
Example #13
/* Wait for GMII access to complete */
static int falcon_gmii_wait(struct efx_nic *efx)
{
	efx_oword_t md_stat;
	int count;

	/* wait up to 50ms - taken max from datasheet */
	for (count = 0; count < 5000; count++) {
		efx_reado(efx, &md_stat, FR_AB_MD_STAT);
		if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0) {
			if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_LNFL) != 0 ||
			    EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSERR) != 0) {
				EFX_ERR(efx, "error from GMII access "
					EFX_OWORD_FMT"\n",
					EFX_OWORD_VAL(md_stat));
				return -EIO;
			}
			return 0;
		}
		udelay(10);
	}
	EFX_ERR(efx, "timed out waiting for GMII\n");
	return -ETIMEDOUT;
}
Example #14
static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	struct efx_loopback_state *state = efx->loopback_selftest;
	struct efx_loopback_payload *payload;
	struct sk_buff *skb;
	int i;
	netdev_tx_t rc;

	/* Transmit N copies of buffer */
	for (i = 0; i < state->packet_count; i++) {
		/* Allocate an skb, holding an extra reference for
		 * transmit completion counting */
		skb = alloc_skb(sizeof(state->payload), GFP_KERNEL);
		if (!skb)
			return -ENOMEM;
		state->skbs[i] = skb;
		skb_get(skb);

		/* Copy the payload in, incrementing the source address to
		 * exercise the RSS vectors */
		payload = ((struct efx_loopback_payload *)
			   skb_put(skb, sizeof(state->payload)));
		memcpy(payload, &state->payload, sizeof(state->payload));
		payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2));

		/* Ensure everything we've written is visible to the
		 * interrupt handler. */
		smp_wmb();

		if (efx_dev_registered(efx))
			netif_tx_lock_bh(efx->net_dev);
		rc = efx_enqueue_skb(tx_queue, skb);
		if (efx_dev_registered(efx))
			netif_tx_unlock_bh(efx->net_dev);

		if (rc != NETDEV_TX_OK) {
			EFX_ERR(efx, "TX queue %d could not transmit packet %d "
				"of %d in %s loopback test\n", tx_queue->queue,
				i + 1, state->packet_count, LOOPBACK_MODE(efx));

			/* Defer cleaning up the other skbs for the caller */
			kfree_skb(skb);
			return -EPIPE;
		}
	}

	return 0;
}
Example #15
static void falcon_stats_complete(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;

	if (!nic_data->stats_pending)
		return;

	nic_data->stats_pending = 0;
	if (*nic_data->stats_dma_done == FALCON_STATS_DONE) {
		rmb(); /* read the done flag before the stats */
		efx->mac_op->update_stats(efx);
	} else {
		EFX_ERR(efx, "timed out waiting for statistics\n");
	}
}
Example #16
File: boards.c Project: 274914765/C
int efx_set_board_info(struct efx_nic *efx, u16 revision_info)
{
    int rc = 0;
    struct efx_board_data *data;

    if (BOARD_TYPE(revision_info) >= EFX_BOARD_MAX) {
        EFX_ERR(efx, "squashing unknown board type %d\n",
            BOARD_TYPE(revision_info));
        revision_info = 0;
    }

    if (BOARD_TYPE(revision_info) == 0) {
        efx->board_info.major = 0;
        efx->board_info.minor = 0;
        /* For early boards that don't have revision info, there is
         * only 1 board for each PHY type, so we can work it out, with
         * the exception of the PHY-less boards. */
        switch (efx->phy_type) {
        case PHY_TYPE_10XPRESS:
            efx->board_info.type = EFX_BOARD_SFE4001;
            break;
        case PHY_TYPE_XFP:
            efx->board_info.type = EFX_BOARD_SFE4002;
            break;
        default:
            efx->board_info.type = 0;
            break;
        }
    } else {
        efx->board_info.type = BOARD_TYPE(revision_info);
        efx->board_info.major = BOARD_MAJOR(revision_info);
        efx->board_info.minor = BOARD_MINOR(revision_info);
    }

    data = &board_data[efx->board_info.type];

    /* Report the board model number or generic type for recognisable
     * boards. */
    if (efx->board_info.type != 0)
        EFX_INFO(efx, "board is %s rev %c%d\n",
             (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC)
             ? data->ref_model : data->gen_type,
             'A' + efx->board_info.major, efx->board_info.minor);

    efx->board_info.init = data->init;

    return rc;
}
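
BOARD_TYPE(), BOARD_MAJOR() and BOARD_MINOR() above unpack fields from the 16-bit revision_info word. The actual bit layout is not shown in this listing; the sketch below assumes a purely illustrative layout (type in bits 15:8, major in 7:4, minor in 3:0) just to show the decode step.

#include <stdint.h>
#include <stdio.h>

/* Assumed layout, for illustration only: [15:8] type, [7:4] major, [3:0] minor */
#define BOARD_TYPE(ri)  (((ri) >> 8) & 0xff)
#define BOARD_MAJOR(ri) (((ri) >> 4) & 0x0f)
#define BOARD_MINOR(ri) ((ri) & 0x0f)

int main(void)
{
	uint16_t revision_info = 0x0212;	/* type 2, rev B2 under this layout */

	printf("type %u rev %c%u\n",
	       (unsigned)BOARD_TYPE(revision_info),
	       'A' + BOARD_MAJOR(revision_info),
	       (unsigned)BOARD_MINOR(revision_info));
	return 0;
}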
Example #17
/**************************************************************************
 *
 * MAC operations
 *
 *************************************************************************/
static int falcon_reset_xmac(struct efx_nic *efx)
{
	efx_oword_t reg;
	int count;

	EFX_POPULATE_OWORD_1(reg, XM_CORE_RST, 1);
	falcon_write(efx, &reg, XM_GLB_CFG_REG);

	for (count = 0; count < 10000; count++) {	/* wait up to 100ms */
		falcon_read(efx, &reg, XM_GLB_CFG_REG);
		if (EFX_OWORD_FIELD(reg, XM_CORE_RST) == 0)
			return 0;
		udelay(10);
	}

	EFX_ERR(efx, "timed out waiting for XMAC core reset\n");
	return -ETIMEDOUT;
}
Example #18
static void sfx7101_check_bad_lp(struct efx_nic *efx, bool link_ok)
{
	struct tenxpress_phy_data *pd = efx->phy_data;
	int phy_id = efx->mii.phy_id;
	bool bad_lp;
	int reg;

	if (link_ok) {
		bad_lp = false;
	} else {
		/* Check that AN has started but not completed. */
		reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN,
					 MDIO_AN_STATUS);
		if (!(reg & (1 << MDIO_AN_STATUS_LP_AN_CAP_LBN)))
			return; /* LP status is unknown */
		bad_lp = !(reg & (1 << MDIO_AN_STATUS_AN_DONE_LBN));
		if (bad_lp)
			pd->bad_lp_tries++;
	}

	/* Nothing to do if all is well and was previously so. */
	if (!pd->bad_lp_tries)
		return;

	/* Use the RX (red) LED as an error indicator once we've seen AN
	 * failure several times in a row, and also log a message. */
	if (!bad_lp || pd->bad_lp_tries == MAX_BAD_LP_TRIES) {
		reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
					 PMA_PMD_LED_OVERR_REG);
		reg &= ~(PMA_PMD_LED_MASK << PMA_PMD_LED_RX_LBN);
		if (!bad_lp) {
			reg |= PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN;
		} else {
			reg |= PMA_PMD_LED_FLASH << PMA_PMD_LED_RX_LBN;
			EFX_ERR(efx, "appears to be plugged into a port"
				" that is not 10GBASE-T capable. The PHY"
				" supports 10GBASE-T ONLY, so no link can"
				" be established\n");
		}
		mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD,
				    PMA_PMD_LED_OVERR_REG, reg);
		pd->bad_lp_tries = bad_lp;
	}
}
Example #19
/* Reset the PMA/PMD MMD. The documentation is explicit that this does a
 * global reset (it's less clear what reset of other MMDs does). */
static int txc_reset_phy(struct efx_nic *efx)
{
	int rc = efx_mdio_reset_mmd(efx, MDIO_MMD_PMAPMD,
				    TXC_MAX_RESET_TIME / TXC_RESET_WAIT,
				    TXC_RESET_WAIT);
	if (rc < 0)
		goto fail;

	/* Check that all the MMDs we expect are present and responding. */
	rc = efx_mdio_check_mmds(efx, TXC_REQUIRED_DEVS, 0);
	if (rc < 0)
		goto fail;

	return 0;

 fail:
	EFX_ERR(efx, TXCNAME ": reset timed out!\n");
	return rc;
}
Example #20
int falcon_reset_xaui(struct efx_nic *efx)
{
	efx_oword_t reg;
	int count;

	EFX_POPULATE_DWORD_1(reg, XX_RST_XX_EN, 1);
	falcon_write(efx, &reg, XX_PWR_RST_REG);

	/* Give some time for the link to establish */
	for (count = 0; count < 1000; count++) { /* wait up to 10ms */
		falcon_read(efx, &reg, XX_PWR_RST_REG);
		if (EFX_OWORD_FIELD(reg, XX_RST_XX_EN) == 0) {
			falcon_setup_xaui(efx);
			return 0;
		}
		udelay(10);
	}
	EFX_ERR(efx, "timed out waiting for XAUI/XGXS reset\n");
	return -ETIMEDOUT;
}
Example #21
int falcon_reset_xaui(struct efx_nic *efx)
{
	efx_oword_t reg;
	int count;

	/* Start reset sequence */
	EFX_POPULATE_DWORD_1(reg, XX_RST_XX_EN, 1);
	falcon_write(efx, &reg, XX_PWR_RST_REG);

	/* Wait up to 10 ms for completion, then reinitialise */
	for (count = 0; count < 1000; count++) {
		falcon_read(efx, &reg, XX_PWR_RST_REG);
		if (EFX_OWORD_FIELD(reg, XX_RST_XX_EN) == 0 &&
		    EFX_OWORD_FIELD(reg, XX_SD_RST_ACT) == 0) {
			falcon_setup_xaui(efx);
			return 0;
		}
		udelay(10);
	}
	EFX_ERR(efx, "timed out waiting for XAUI/XGXS reset\n");
	return -ETIMEDOUT;
}
Example #22
int falcon_reset_xaui(struct efx_nic *efx)
{
	efx_oword_t reg;
	int count;

	
	EFX_POPULATE_DWORD_1(reg, XX_RST_XX_EN, 1);
	falcon_write(efx, &reg, XX_PWR_RST_REG);

	
	for (count = 0; count < 1000; count++) {
		falcon_read(efx, &reg, XX_PWR_RST_REG);
		if (EFX_OWORD_FIELD(reg, XX_RST_XX_EN) == 0 &&
		    EFX_OWORD_FIELD(reg, XX_SD_RST_ACT) == 0) {
			falcon_setup_xaui(efx);
			return 0;
		}
		udelay(10);
	}
	EFX_ERR(efx, "timed out waiting for XAUI/XGXS reset\n");
	return -ETIMEDOUT;
}
Example #23
/* Test generation and receipt of interrupts */
static int efx_test_interrupts(struct efx_nic *efx,
			       struct efx_self_tests *tests)
{
	struct efx_channel *channel;

	EFX_LOG(efx, "testing interrupts\n");
	tests->interrupt = -1;

	/* Reset interrupt flag */
	efx->last_irq_cpu = -1;
	smp_wmb();

	/* ACK each interrupting event queue. Receiving an interrupt due to
	 * traffic before a test event is raised is considered a pass */
	efx_for_each_channel(channel, efx) {
		if (channel->work_pending)
			efx_process_channel_now(channel);
		if (efx->last_irq_cpu >= 0)
			goto success;
	}

	efx_nic_generate_interrupt(efx);

	/* Wait for arrival of test interrupt. */
	EFX_LOG(efx, "waiting for test interrupt\n");
	schedule_timeout_uninterruptible(HZ / 10);
	if (efx->last_irq_cpu >= 0)
		goto success;

	EFX_ERR(efx, "timed out waiting for interrupt\n");
	return -ETIMEDOUT;

 success:
	EFX_LOG(efx, "%s test interrupt seen on CPU%d\n", INT_MODE(efx),
		efx->last_irq_cpu);
	tests->interrupt = 1;
	return 0;
}
Example #24
/* Wait up to 10 ms for buffered write completion */
int
falcon_spi_wait_write(struct efx_nic *efx, const struct efx_spi_device *spi)
{
	unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100);
	u8 status;
	int rc;

	for (;;) {
		rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
				    &status, sizeof(status));
		if (rc)
			return rc;
		if (!(status & SPI_STATUS_NRDY))
			return 0;
		if (time_after_eq(jiffies, timeout)) {
			EFX_ERR(efx, "SPI write timeout on device %d"
				" last status=0x%02x\n",
				spi->device_id, status);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	}
}
Example #25
void falcon_probe_board(struct efx_nic *efx, u16 revision_info)
{
	struct falcon_board *board = falcon_board(efx);
	u8 type_id = FALCON_BOARD_TYPE(revision_info);
	int i;

	board->major = FALCON_BOARD_MAJOR(revision_info);
	board->minor = FALCON_BOARD_MINOR(revision_info);

	for (i = 0; i < ARRAY_SIZE(board_types); i++)
		if (board_types[i].id == type_id)
			board->type = &board_types[i];

	if (board->type) {
		EFX_INFO(efx, "board is %s rev %c%d\n",
			 (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC)
			 ? board->type->ref_model : board->type->gen_type,
			 'A' + board->major, board->minor);
	} else {
		EFX_ERR(efx, "unknown board type %d\n", type_id);
		board->type = &falcon_dummy_board;
	}
}
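
falcon_probe_board() scans a small board_types[] table and falls back to falcon_dummy_board when the ID is unknown, so later code can always call through board->type without NULL checks. A minimal sketch of that lookup-with-fallback pattern, with a hypothetical table and entry type:

#include <stddef.h>

struct board_type {
	int id;
	const char *name;
};

static const struct board_type board_types[] = {
	{ 1, "SFE4001" },
	{ 2, "SFE4002" },
};

static const struct board_type dummy_board = { 0, "unknown" };

/* Return a matching entry, or a safe dummy so callers never see NULL. */
static const struct board_type *lookup_board(int type_id)
{
	size_t i;

	for (i = 0; i < sizeof(board_types) / sizeof(board_types[0]); i++)
		if (board_types[i].id == type_id)
			return &board_types[i];
	return &dummy_board;
}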
Example #26
int efx_i2c_check_presence(struct efx_i2c_interface *i2c, u8 device_id)
{
	int rc;

	/* If someone is driving the bus low we just give up. */
	if (getsda(i2c) == 0 || getscl(i2c) == 0) {
		EFX_ERR(i2c->efx, "%s someone is holding the I2C bus low."
			" Giving up.\n", __func__);
		return -EFAULT;
	}

	/* Pretend to initiate a device write */
	i2c_start(i2c);
	rc = i2c_send_byte(i2c, i2c_write_cmd(device_id));
	if (rc)
		goto out;

 out:
	i2c_stop(i2c);
	i2c_release(i2c);

	return rc;
}
Example #27
void efx_set_board_info(struct efx_nic *efx, u16 revision_info)
{
	struct efx_board_data *data = NULL;
	int i;

	efx->board_info.type = BOARD_TYPE(revision_info);
	efx->board_info.major = BOARD_MAJOR(revision_info);
	efx->board_info.minor = BOARD_MINOR(revision_info);

	for (i = 0; i < ARRAY_SIZE(board_data); i++)
		if (board_data[i].type == efx->board_info.type)
			data = &board_data[i];

	if (data) {
		EFX_INFO(efx, "board is %s rev %c%d\n",
			 (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC)
			 ? data->ref_model : data->gen_type,
			 'A' + efx->board_info.major, efx->board_info.minor);
		efx->board_info.init = data->init;
	} else {
		EFX_ERR(efx, "unknown board type %d\n", efx->board_info.type);
	}
}
Example #28
/* Zeroes out the SRAM contents.  This routine must be called in
 * process context and is allowed to sleep.
 */
static int falcon_reset_sram(struct efx_nic *efx)
{
	efx_oword_t srm_cfg_reg_ker, gpio_cfg_reg_ker;
	int count;

	/* Set the SRAM wake/sleep GPIO appropriately. */
	efx_reado(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
	EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OEN, 1);
	EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OUT, 1);
	efx_writeo(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);

	/* Initiate SRAM reset */
	EFX_POPULATE_OWORD_2(srm_cfg_reg_ker,
			     FRF_AZ_SRM_INIT_EN, 1,
			     FRF_AZ_SRM_NB_SZ, 0);
	efx_writeo(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);

	/* Wait for SRAM reset to complete */
	count = 0;
	do {
		EFX_LOG(efx, "waiting for SRAM reset (attempt %d)...\n", count);

		/* SRAM reset is slow; expect around 16ms */
		schedule_timeout_uninterruptible(HZ / 50);

		/* Check for reset complete */
		efx_reado(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
		if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN)) {
			EFX_LOG(efx, "SRAM reset complete\n");

			return 0;
		}
	} while (++count < 20);	/* wait up to 0.4 sec */

	EFX_ERR(efx, "timed out waiting for SRAM reset\n");
	return -ETIMEDOUT;
}
Example #29
static void falcon_monitor(struct efx_nic *efx)
{
	bool link_changed;
	int rc;

	BUG_ON(!mutex_is_locked(&efx->mac_lock));

	rc = falcon_board(efx)->type->monitor(efx);
	if (rc) {
		EFX_ERR(efx, "Board sensor %s; shutting down PHY\n",
			(rc == -ERANGE) ? "reported fault" : "failed");
		efx->phy_mode |= PHY_MODE_LOW_POWER;
		rc = __efx_reconfigure_port(efx);
		WARN_ON(rc);
	}

	if (LOOPBACK_INTERNAL(efx))
		link_changed = falcon_loopback_link_poll(efx);
	else
		link_changed = efx->phy_op->poll(efx);

	if (link_changed) {
		falcon_stop_nic_stats(efx);
		falcon_deconfigure_mac_wrapper(efx);

		falcon_switch_mac(efx);
		rc = efx->mac_op->reconfigure(efx);
		BUG_ON(rc);

		falcon_start_nic_stats(efx);

		efx_link_status_changed(efx);
	}

	if (EFX_IS10G(efx))
		falcon_poll_xmac(efx);
}
Example #30
static int efx_spi_slow_wait(struct efx_mtd *efx_mtd, bool uninterruptible)
{
	const struct efx_spi_device *spi = efx_mtd->spi;
	struct efx_nic *efx = efx_mtd->efx;
	u8 status;
	int rc, i;

	/* Wait up to 4s for flash/EEPROM to finish a slow operation. */
	for (i = 0; i < 40; i++) {
		__set_current_state(uninterruptible ?
				    TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE);
		schedule_timeout(HZ / 10);
		rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
				    &status, sizeof(status));
		if (rc)
			return rc;
		if (!(status & SPI_STATUS_NRDY))
			return 0;
		if (signal_pending(current))
			return -EINTR;
	}
	EFX_ERR(efx, "timed out waiting for %s\n", efx_mtd->name);
	return -ETIMEDOUT;
}