Example #1
static int
_sfxge_ev_qctor(sfxge_t *sp, sfxge_evq_t *sep, int kmflags, uint16_t evq_size)
{
	efsys_mem_t *esmp = &(sep->se_mem);
	sfxge_dma_buffer_attr_t dma_attr;
	int rc;

	/* Compile-time structure layout checks */
	EFX_STATIC_ASSERT(sizeof (sep->__se_u1.__se_s1) <=
	    sizeof (sep->__se_u1.__se_pad));
	EFX_STATIC_ASSERT(sizeof (sep->__se_u2.__se_s2) <=
	    sizeof (sep->__se_u2.__se_pad));
	EFX_STATIC_ASSERT(sizeof (sep->__se_u3.__se_s3) <=
	    sizeof (sep->__se_u3.__se_pad));

	bzero(sep, sizeof (sfxge_evq_t));

	sep->se_sp = sp;

	dma_attr.sdba_dip	 = sp->s_dip;
	dma_attr.sdba_dattrp	 = &sfxge_evq_dma_attr;
	dma_attr.sdba_callback	 = (kmflags == KM_SLEEP) ?
	    DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
	dma_attr.sdba_length	 = EFX_EVQ_SIZE(evq_size);
	dma_attr.sdba_memflags	 = DDI_DMA_CONSISTENT;
	dma_attr.sdba_devaccp	 = &sfxge_evq_devacc;
	dma_attr.sdba_bindflags	 = DDI_DMA_READ | DDI_DMA_CONSISTENT;
	dma_attr.sdba_maxcookies = 1;
	dma_attr.sdba_zeroinit	 = B_FALSE;

	if ((rc = sfxge_dma_buffer_create(esmp, &dma_attr)) != 0)
		goto fail1;

	/* Allocate some buffer table entries */
	if ((rc = sfxge_sram_buf_tbl_alloc(sp, EFX_EVQ_NBUFS(evq_size),
	    &(sep->se_id))) != 0)
		goto fail2;

	sep->se_stpp = &(sep->se_stp);

	return (0);

fail2:
	DTRACE_PROBE(fail2);

	/* Tear down DMA setup */
	esmp->esm_addr = 0;
	sfxge_dma_buffer_destroy(esmp);

fail1:
	DTRACE_PROBE1(fail1, int, rc);

	sep->se_sp = NULL;

	SFXGE_OBJ_CHECK(sep, sfxge_evq_t);

	return (-1);
}
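
Every example in this list leans on EFX_STATIC_ASSERT to turn structure layout and protocol-constant assumptions into compile-time failures rather than runtime surprises. As a rough sketch of the idiom (the real macro is defined in the efx common headers and may differ in detail), a compile-time assert of this kind can be built from the negative-array-size trick; like EFX_STATIC_ASSERT in the examples, it is meant to be used inside a function body.

/*
 * Illustrative only: a negative-array-size compile-time assert in the
 * style of EFX_STATIC_ASSERT. A false (constant) condition gives the
 * array a size of -1, so the translation unit fails to compile.
 */
#define	MY_STATIC_ASSERT(_cond) \
	((void)sizeof (char[(_cond) ? 1 : -1]))
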
Example #2
	__checkReturn			int
siena_mac_stats_upload(
	__in				efx_nic_t *enp,
	__in				efsys_mem_t *esmp)
{
	uint8_t payload[MC_CMD_MAC_STATS_IN_LEN];
	efx_mcdi_req_t req;
	size_t bytes;
	int rc;

	EFX_STATIC_ASSERT(MC_CMD_MAC_NSTATS * sizeof (uint64_t) <=
	    EFX_MAC_STATS_SIZE);

	bytes = MC_CMD_MAC_NSTATS * sizeof (uint64_t);

	req.emr_cmd = MC_CMD_MAC_STATS;
	req.emr_in_buf = payload;
	req.emr_in_length = sizeof (payload);
	EFX_STATIC_ASSERT(MC_CMD_MAC_STATS_OUT_DMA_LEN == 0);
	req.emr_out_buf = NULL;
	req.emr_out_length = 0;

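	/* DMA address of the statistics buffer, split into 32-bit halves */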
	MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_ADDR_LO,
			    EFSYS_MEM_ADDR(esmp) & 0xffffffff);
	MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_ADDR_HI,
			    EFSYS_MEM_ADDR(esmp) >> 32);
	MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_LEN, bytes);

	/*
	 * The MC DMAs aggregate statistics for our convenience, so we can
	 * avoid having to pull the statistics buffer into the cache to
	 * maintain cumulative statistics.
	 */
	MCDI_IN_POPULATE_DWORD_3(req, MAC_STATS_IN_CMD,
				    MAC_STATS_IN_DMA, 1,
				    MAC_STATS_IN_CLEAR, 0,
				    MAC_STATS_IN_PERIODIC_CHANGE, 0);

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
Example #3
	__checkReturn	efx_rc_t
ef10_filter_init(
	__in		efx_nic_t *enp)
{
	efx_rc_t rc;
	ef10_filter_table_t *eftp;

	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
		    enp->en_family == EFX_FAMILY_MEDFORD);

#define	MATCH_MASK(match) (EFX_MASK32(match) << EFX_LOW_BIT(match))
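	/* EFX_FILTER_MATCH_* flags must equal the MCDI match field masks */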
	EFX_STATIC_ASSERT(EFX_FILTER_MATCH_REM_HOST ==
	    MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_SRC_IP));
	EFX_STATIC_ASSERT(EFX_FILTER_MATCH_LOC_HOST ==
	    MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_DST_IP));
	EFX_STATIC_ASSERT(EFX_FILTER_MATCH_REM_MAC ==
	    MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC));
	EFX_STATIC_ASSERT(EFX_FILTER_MATCH_REM_PORT ==
	    MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT));
	EFX_STATIC_ASSERT(EFX_FILTER_MATCH_LOC_MAC ==
	    MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_DST_MAC));
	EFX_STATIC_ASSERT(EFX_FILTER_MATCH_LOC_PORT ==
	    MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_DST_PORT));
	EFX_STATIC_ASSERT(EFX_FILTER_MATCH_ETHER_TYPE ==
	    MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE));
	EFX_STATIC_ASSERT(EFX_FILTER_MATCH_INNER_VID ==
	    MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN));
	EFX_STATIC_ASSERT(EFX_FILTER_MATCH_OUTER_VID ==
	    MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN));
	EFX_STATIC_ASSERT(EFX_FILTER_MATCH_IP_PROTO ==
	    MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO));
#undef MATCH_MASK

	EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (ef10_filter_table_t), eftp);

	if (!eftp) {
		rc = ENOMEM;
		goto fail1;
	}

	enp->en_filter.ef_ef10_filter_table = eftp;

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
Example #4
	__checkReturn			int
siena_mac_stats_clear(
	__in				efx_nic_t *enp)
{
	uint8_t payload[MC_CMD_MAC_STATS_IN_LEN];
	efx_mcdi_req_t req;
	int rc;

	req.emr_cmd = MC_CMD_MAC_STATS;
	req.emr_in_buf = payload;
	req.emr_in_length = sizeof (payload);
	EFX_STATIC_ASSERT(MC_CMD_MAC_STATS_OUT_DMA_LEN == 0);
	req.emr_out_buf = NULL;
	req.emr_out_length = 0;

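	/* Clear the MAC statistics counters: no DMA, no periodic reporting */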
	MCDI_IN_POPULATE_DWORD_3(req, MAC_STATS_IN_CMD,
				    MAC_STATS_IN_DMA, 0,
				    MAC_STATS_IN_CLEAR, 1,
				    MAC_STATS_IN_PERIODIC_CHANGE, 0);

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
Example #5
	__checkReturn		int
siena_nvram_partn_lock(
	__in			efx_nic_t *enp,
	__in			unsigned int partn)
{
	efx_mcdi_req_t req;
	uint8_t payload[MC_CMD_NVRAM_UPDATE_START_IN_LEN];
	int rc;

	req.emr_cmd = MC_CMD_NVRAM_UPDATE_START;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_NVRAM_UPDATE_START_IN_LEN;
	EFX_STATIC_ASSERT(MC_CMD_NVRAM_UPDATE_START_OUT_LEN == 0);
	req.emr_out_buf = NULL;
	req.emr_out_length = 0;

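	/* Select the NVRAM partition to be locked for update */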
	MCDI_IN_SET_DWORD(req, NVRAM_UPDATE_START_IN_TYPE, partn);

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
Example #6
	__checkReturn	efx_rc_t
efx_nic_probe(
	__in		efx_nic_t *enp,
	__in		efx_fw_variant_t efv)
{
	const efx_nic_ops_t *enop;
	efx_rc_t rc;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
#if EFSYS_OPT_MCDI
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
#endif	/* EFSYS_OPT_MCDI */
	EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_PROBE));

	/* Ensure FW variant codes match with MC_CMD_FW codes */
	EFX_STATIC_ASSERT(EFX_FW_VARIANT_FULL_FEATURED ==
	    MC_CMD_FW_FULL_FEATURED);
	EFX_STATIC_ASSERT(EFX_FW_VARIANT_LOW_LATENCY ==
	    MC_CMD_FW_LOW_LATENCY);
	EFX_STATIC_ASSERT(EFX_FW_VARIANT_PACKED_STREAM ==
	    MC_CMD_FW_PACKED_STREAM);
	EFX_STATIC_ASSERT(EFX_FW_VARIANT_HIGH_TX_RATE ==
	    MC_CMD_FW_HIGH_TX_RATE);
	EFX_STATIC_ASSERT(EFX_FW_VARIANT_PACKED_STREAM_HASH_MODE_1 ==
	    MC_CMD_FW_PACKED_STREAM_HASH_MODE_1);
	EFX_STATIC_ASSERT(EFX_FW_VARIANT_RULES_ENGINE ==
	    MC_CMD_FW_RULES_ENGINE);
	EFX_STATIC_ASSERT(EFX_FW_VARIANT_DPDK ==
	    MC_CMD_FW_DPDK);
	EFX_STATIC_ASSERT(EFX_FW_VARIANT_DONT_CARE ==
	    (int)MC_CMD_FW_DONT_CARE);

	enop = enp->en_enop;
	enp->efv = efv;

	if ((rc = enop->eno_probe(enp)) != 0)
		goto fail1;

	if ((rc = efx_phy_probe(enp)) != 0)
		goto fail2;

	enp->en_mod_flags |= EFX_MOD_PROBE;

	return (0);

fail2:
	EFSYS_PROBE(fail2);

	enop->eno_unprobe(enp);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
Example #7
static	__checkReturn	uint32_t
ef10_filter_hash(
	__in		efx_filter_spec_t *spec)
{
	EFX_STATIC_ASSERT((sizeof (efx_filter_spec_t) % sizeof (uint32_t))
			    == 0);
	EFX_STATIC_ASSERT((EFX_FIELD_OFFSET(efx_filter_spec_t, efs_outer_vid) %
			    sizeof (uint32_t)) == 0);

	/*
	 * As the area of the efx_filter_spec_t we need to hash is DWORD
	 * aligned and an exact number of DWORDs in size we can use the
	 * optimised efx_hash_dwords() rather than efx_hash_bytes()
	 */
	return (efx_hash_dwords((const uint32_t *)&spec->efs_outer_vid,
			(sizeof (efx_filter_spec_t) -
			EFX_FIELD_OFFSET(efx_filter_spec_t, efs_outer_vid)) /
			sizeof (uint32_t), 0));
}
Example #8
	__checkReturn	efx_rc_t
efx_filter_init(
	__in		efx_nic_t *enp)
{
	const efx_filter_ops_t *efop;
	efx_rc_t rc;

	/* Check that efx_filter_spec_t is 64 bytes. */
	EFX_STATIC_ASSERT(sizeof (efx_filter_spec_t) == 64);

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
	EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_FILTER));

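	/* Select the filter ops appropriate to the controller family */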
	switch (enp->en_family) {
#if EFSYS_OPT_SIENA
	case EFX_FAMILY_SIENA:
		efop = &__efx_filter_siena_ops;
		break;
#endif /* EFSYS_OPT_SIENA */

#if EFSYS_OPT_HUNTINGTON
	case EFX_FAMILY_HUNTINGTON:
		efop = &__efx_filter_ef10_ops;
		break;
#endif /* EFSYS_OPT_HUNTINGTON */

#if EFSYS_OPT_MEDFORD
	case EFX_FAMILY_MEDFORD:
		efop = &__efx_filter_ef10_ops;
		break;
#endif /* EFSYS_OPT_MEDFORD */

	default:
		EFSYS_ASSERT(0);
		rc = ENOTSUP;
		goto fail1;
	}

	if ((rc = efop->efo_init(enp)) != 0)
		goto fail2;

	enp->en_efop = efop;
	enp->en_mod_flags |= EFX_MOD_FILTER;
	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	enp->en_efop = NULL;
	enp->en_mod_flags &= ~EFX_MOD_FILTER;
	return (rc);
}
Example #9
	__checkReturn	int
siena_phy_verify(
	__in		efx_nic_t *enp)
{
	efx_mcdi_req_t req;
	uint8_t outbuf[MC_CMD_GET_PHY_STATE_OUT_LEN];
	uint32_t state;
	int rc;

	req.emr_cmd = MC_CMD_GET_PHY_STATE;
	EFX_STATIC_ASSERT(MC_CMD_GET_PHY_STATE_IN_LEN == 0);
	req.emr_in_buf = NULL;
	req.emr_in_length = 0;
	req.emr_out_buf = outbuf;
	req.emr_out_length = sizeof (outbuf);

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	if (req.emr_out_length_used < MC_CMD_GET_PHY_STATE_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail2;
	}

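	/*
	 * Any state other than OK fails verification; a ZOMBIE state just
	 * skips the mc_pcol_error probe.
	 */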
	state = MCDI_OUT_DWORD(req, GET_PHY_STATE_OUT_STATE);
	if (state != MC_CMD_PHY_STATE_OK) {
		if (state != MC_CMD_PHY_STATE_ZOMBIE)
			EFSYS_PROBE1(mc_pcol_error, int, state);
		rc = ENOTACTIVE;
		goto fail3;
	}

	return (0);

fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
Example #10
	__checkReturn	efx_rc_t
siena_nic_reset(
	__in		efx_nic_t *enp)
{
	efx_mcdi_req_t req;
	efx_rc_t rc;

	EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);

	/* siena_nic_reset() is called to recover from BADASSERT failures. */
	if ((rc = efx_mcdi_read_assertion(enp)) != 0)
		goto fail1;
	if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
		goto fail2;

	/*
	 * Bug24908: ENTITY_RESET_IN_LEN is non zero but zero may be supplied
	 * for backwards compatibility with PORT_RESET_IN_LEN.
	 */
	EFX_STATIC_ASSERT(MC_CMD_ENTITY_RESET_OUT_LEN == 0);

	req.emr_cmd = MC_CMD_ENTITY_RESET;
	req.emr_in_buf = NULL;
	req.emr_in_length = 0;
	req.emr_out_buf = NULL;
	req.emr_out_length = 0;

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail3;
	}

	return (0);

fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
Example #11
			void
efx_intr_status_line(
	__in		efx_nic_t *enp,
	__out		boolean_t *fatalp,
	__out		uint32_t *qmaskp)
{
	efx_intr_t *eip = &(enp->en_intr);
	efx_dword_t dword;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);

	/* Ensure Huntington and Falcon/Siena ISR at same location */
	EFX_STATIC_ASSERT(FR_BZ_INT_ISR0_REG_OFST ==
	    ER_DZ_BIU_INT_ISR_REG_OFST);

	/*
	 * Read the queue mask and implicitly acknowledge the
	 * interrupt.
	 */
	EFX_BAR_READD(enp, FR_BZ_INT_ISR0_REG, &dword, B_FALSE);
	*qmaskp = EFX_DWORD_FIELD(dword, EFX_DWORD_0);

	EFSYS_PROBE1(qmask, uint32_t, *qmaskp);

#if EFSYS_OPT_HUNTINGTON
	if (enp->en_family == EFX_FAMILY_HUNTINGTON) {
		/* Huntington reports fatal errors via events */
		*fatalp = B_FALSE;
		return;
	}
#endif
	if (*qmaskp & (1U << eip->ei_level))
		*fatalp = falconsiena_intr_check_fatal(enp);
	else
		*fatalp = B_FALSE;
}
Example #12
	__checkReturn	efx_rc_t
hunt_board_cfg(
	__in		efx_nic_t *enp)
{
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	efx_port_t *epp = &(enp->en_port);
	uint32_t flags;
	uint32_t sysclk, dpcpu_clk;
	uint32_t bandwidth;
	efx_rc_t rc;

	/*
	 * Enable firmware workarounds for hardware errata.
	 * Expected responses are:
	 *  - 0 (zero):
	 *	Success: workaround enabled or disabled as requested.
	 *  - MC_CMD_ERR_ENOSYS (reported as ENOTSUP):
	 *	Firmware does not support the MC_CMD_WORKAROUND request.
	 *	(assume that the workaround is not supported).
	 *  - MC_CMD_ERR_ENOENT (reported as ENOENT):
	 *	Firmware does not support the requested workaround.
	 *  - MC_CMD_ERR_EPERM  (reported as EACCES):
	 *	Unprivileged function cannot enable/disable workarounds.
	 *
	 * See efx_mcdi_request_errcode() for MCDI error translations.
	 */

	/*
	 * If the bug35388 workaround is enabled, then use an indirect access
	 * method to avoid unsafe EVQ writes.
	 */
	rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG35388, B_TRUE,
	    NULL);
	if ((rc == 0) || (rc == EACCES))
		encp->enc_bug35388_workaround = B_TRUE;
	else if ((rc == ENOTSUP) || (rc == ENOENT))
		encp->enc_bug35388_workaround = B_FALSE;
	else
		goto fail1;

	/*
	 * If the bug41750 workaround is enabled, then do not test interrupts,
	 * as the test will fail (seen with Greenport controllers).
	 */
	rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG41750, B_TRUE,
	    NULL);
	if (rc == 0) {
		encp->enc_bug41750_workaround = B_TRUE;
	} else if (rc == EACCES) {
		/* Assume a controller with 40G ports needs the workaround. */
		if (epp->ep_default_adv_cap_mask & EFX_PHY_CAP_40000FDX)
			encp->enc_bug41750_workaround = B_TRUE;
		else
			encp->enc_bug41750_workaround = B_FALSE;
	} else if ((rc == ENOTSUP) || (rc == ENOENT)) {
		encp->enc_bug41750_workaround = B_FALSE;
	} else {
		goto fail2;
	}
	if (EFX_PCI_FUNCTION_IS_VF(encp)) {
		/* Interrupt testing does not work for VFs. See bug50084. */
		encp->enc_bug41750_workaround = B_TRUE;
	}

	/*
	 * If the bug26807 workaround is enabled, then firmware has enabled
	 * support for chained multicast filters. Firmware will reset (FLR)
	 * functions which have filters in the hardware filter table when the
	 * workaround is enabled/disabled.
	 *
	 * We must recheck if the workaround is enabled after inserting the
	 * first hardware filter, in case it has been changed since this check.
	 */
	rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG26807,
	    B_TRUE, &flags);
	if (rc == 0) {
		encp->enc_bug26807_workaround = B_TRUE;
		if (flags & (1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN)) {
			/*
			 * Other functions had installed filters before the
			 * workaround was enabled, and they have been reset
			 * by firmware.
			 */
			EFSYS_PROBE(bug26807_workaround_flr_done);
			/* FIXME: bump MC warm boot count ? */
		}
	} else if (rc == EACCES) {
		/*
		 * Unprivileged functions cannot enable the workaround in older
		 * firmware.
		 */
		encp->enc_bug26807_workaround = B_FALSE;
	} else if ((rc == ENOTSUP) || (rc == ENOENT)) {
		encp->enc_bug26807_workaround = B_FALSE;
	} else {
		goto fail3;
	}

	/* Get clock frequencies (in MHz). */
	if ((rc = efx_mcdi_get_clock(enp, &sysclk, &dpcpu_clk)) != 0)
		goto fail4;

	/*
	 * The Huntington timer quantum is 1536 sysclk cycles, documented for
	 * the EV_TMR_VAL field of EV_TIMER_TBL. Scale for MHz and ns units.
	 */
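	/* e.g. an 800 MHz sysclk gives a 1536000 / 800 = 1920 ns quantum */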
	encp->enc_evq_timer_quantum_ns = 1536000UL / sysclk; /* 1536 cycles */
	if (encp->enc_bug35388_workaround) {
		encp->enc_evq_timer_max_us = (encp->enc_evq_timer_quantum_ns <<
		    ERF_DD_EVQ_IND_TIMER_VAL_WIDTH) / 1000;
	} else {
		encp->enc_evq_timer_max_us = (encp->enc_evq_timer_quantum_ns <<
		    FRF_CZ_TC_TIMER_VAL_WIDTH) / 1000;
	}

	encp->enc_bug61265_workaround = B_FALSE; /* Medford only */

	/* Checksums for TSO sends can be incorrect on Huntington. */
	encp->enc_bug61297_workaround = B_TRUE;

	/* Alignment for receive packet DMA buffers */
	encp->enc_rx_buf_align_start = 1;
	encp->enc_rx_buf_align_end = 64; /* RX DMA end padding */

	/*
	 * The workaround for bug35388 uses the top bit of transmit queue
	 * descriptor writes, preventing the use of 4096 descriptor TXQs.
	 */
	encp->enc_txq_max_ndescs = encp->enc_bug35388_workaround ? 2048 : 4096;

	EFX_STATIC_ASSERT(HUNT_PIOBUF_NBUFS <= EF10_MAX_PIOBUF_NBUFS);
	encp->enc_piobuf_limit = HUNT_PIOBUF_NBUFS;
	encp->enc_piobuf_size = HUNT_PIOBUF_SIZE;
	encp->enc_piobuf_min_alloc_size = HUNT_MIN_PIO_ALLOC_SIZE;

	if ((rc = hunt_nic_get_required_pcie_bandwidth(enp, &bandwidth)) != 0)
		goto fail5;
	encp->enc_required_pcie_bandwidth_mbps = bandwidth;

	/* All Huntington devices have a PCIe Gen3, 8 lane connector */
	encp->enc_max_pcie_link_gen = EFX_PCIE_LINK_SPEED_GEN3;

	return (0);

fail5:
	EFSYS_PROBE(fail5);
fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
Example #13
	__checkReturn	efx_rc_t
siena_nic_register_test(
	__in		efx_nic_t *enp)
{
	efx_register_set_t *rsp;
	const uint32_t *dwordp;
	unsigned int nitems;
	unsigned int count;
	efx_rc_t rc;

	/* Fill out the register mask entries */
	EFX_STATIC_ASSERT(EFX_ARRAY_SIZE(__siena_register_masks)
		    == EFX_ARRAY_SIZE(__siena_registers) * 4);

	nitems = EFX_ARRAY_SIZE(__siena_registers);
	dwordp = __siena_register_masks;
	for (count = 0; count < nitems; ++count) {
		rsp = __siena_registers + count;
		rsp->mask.eo_u32[0] = *dwordp++;
		rsp->mask.eo_u32[1] = *dwordp++;
		rsp->mask.eo_u32[2] = *dwordp++;
		rsp->mask.eo_u32[3] = *dwordp++;
	}

	/* Fill out the register table entries */
	EFX_STATIC_ASSERT(EFX_ARRAY_SIZE(__siena_table_masks)
		    == EFX_ARRAY_SIZE(__siena_tables) * 4);

	nitems = EFX_ARRAY_SIZE(__siena_tables);
	dwordp = __siena_table_masks;
	for (count = 0; count < nitems; ++count) {
		rsp = __siena_tables + count;
		rsp->mask.eo_u32[0] = *dwordp++;
		rsp->mask.eo_u32[1] = *dwordp++;
		rsp->mask.eo_u32[2] = *dwordp++;
		rsp->mask.eo_u32[3] = *dwordp++;
	}

	if ((rc = efx_nic_test_registers(enp, __siena_registers,
	    EFX_ARRAY_SIZE(__siena_registers))) != 0)
		goto fail1;

	if ((rc = efx_nic_test_tables(enp, __siena_tables,
	    EFX_PATTERN_BYTE_ALTERNATE,
	    EFX_ARRAY_SIZE(__siena_tables))) != 0)
		goto fail2;

	if ((rc = efx_nic_test_tables(enp, __siena_tables,
	    EFX_PATTERN_BYTE_CHANGING,
	    EFX_ARRAY_SIZE(__siena_tables))) != 0)
		goto fail3;

	if ((rc = efx_nic_test_tables(enp, __siena_tables,
	    EFX_PATTERN_BIT_SWEEP, EFX_ARRAY_SIZE(__siena_tables))) != 0)
		goto fail4;

	return (0);

fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
Example #14
static	__checkReturn	efx_rc_t
efx_mcdi_filter_op_add(
	__in		efx_nic_t *enp,
	__in		efx_filter_spec_t *spec,
	__in		unsigned int filter_op,
	__inout		ef10_filter_handle_t *handle)
{
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FILTER_OP_V3_IN_LEN,
		MC_CMD_FILTER_OP_EXT_OUT_LEN);
	efx_filter_match_flags_t match_flags;
	efx_rc_t rc;

	req.emr_cmd = MC_CMD_FILTER_OP;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_FILTER_OP_V3_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_FILTER_OP_EXT_OUT_LEN;

	/*
	 * Remove match flag for encapsulated filters that does not correspond
	 * to the MCDI match flags
	 */
	match_flags = spec->efs_match_flags & ~EFX_FILTER_MATCH_ENCAP_TYPE;

	switch (filter_op) {
	case MC_CMD_FILTER_OP_IN_OP_REPLACE:
		MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_HANDLE_LO,
		    handle->efh_lo);
		MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_HANDLE_HI,
		    handle->efh_hi);
		/* Fall through */
	case MC_CMD_FILTER_OP_IN_OP_INSERT:
	case MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE:
		MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_OP, filter_op);
		break;
	default:
		EFSYS_ASSERT(0);
		rc = EINVAL;
		goto fail1;
	}

	MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_PORT_ID,
	    EVB_PORT_ID_ASSIGNED);
	MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_MATCH_FIELDS,
	    match_flags);
	if (spec->efs_dmaq_id == EFX_FILTER_SPEC_RX_DMAQ_ID_DROP) {
		MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_RX_DEST,
		    MC_CMD_FILTER_OP_EXT_IN_RX_DEST_DROP);
	} else {
		MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_RX_DEST,
		    MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST);
		MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_RX_QUEUE,
		    spec->efs_dmaq_id);
	}

#if EFSYS_OPT_RX_SCALE
	if (spec->efs_flags & EFX_FILTER_FLAG_RX_RSS) {
		uint32_t rss_context;

		if (spec->efs_rss_context == EFX_RSS_CONTEXT_DEFAULT)
			rss_context = enp->en_rss_context;
		else
			rss_context = spec->efs_rss_context;
		MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_RX_CONTEXT,
		    rss_context);
	}
#endif

	MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_RX_MODE,
	    spec->efs_flags & EFX_FILTER_FLAG_RX_RSS ?
	    MC_CMD_FILTER_OP_EXT_IN_RX_MODE_RSS :
	    MC_CMD_FILTER_OP_EXT_IN_RX_MODE_SIMPLE);
	MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_TX_DEST,
	    MC_CMD_FILTER_OP_EXT_IN_TX_DEST_DEFAULT);

	if (filter_op != MC_CMD_FILTER_OP_IN_OP_REPLACE) {
		/*
		 * NOTE: Unlike most MCDI requests, the filter fields
		 * are presented in network (big endian) byte order.
		 */
		memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_EXT_IN_SRC_MAC),
		    spec->efs_rem_mac, EFX_MAC_ADDR_LEN);
		memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_EXT_IN_DST_MAC),
		    spec->efs_loc_mac, EFX_MAC_ADDR_LEN);

		MCDI_IN_SET_WORD(req, FILTER_OP_EXT_IN_SRC_PORT,
		    __CPU_TO_BE_16(spec->efs_rem_port));
		MCDI_IN_SET_WORD(req, FILTER_OP_EXT_IN_DST_PORT,
		    __CPU_TO_BE_16(spec->efs_loc_port));

		MCDI_IN_SET_WORD(req, FILTER_OP_EXT_IN_ETHER_TYPE,
		    __CPU_TO_BE_16(spec->efs_ether_type));

		MCDI_IN_SET_WORD(req, FILTER_OP_EXT_IN_INNER_VLAN,
		    __CPU_TO_BE_16(spec->efs_inner_vid));
		MCDI_IN_SET_WORD(req, FILTER_OP_EXT_IN_OUTER_VLAN,
		    __CPU_TO_BE_16(spec->efs_outer_vid));

		/* IP protocol (in low byte, high byte is zero) */
		MCDI_IN_SET_BYTE(req, FILTER_OP_EXT_IN_IP_PROTO,
		    spec->efs_ip_proto);

		EFX_STATIC_ASSERT(sizeof (spec->efs_rem_host) ==
		    MC_CMD_FILTER_OP_EXT_IN_SRC_IP_LEN);
		EFX_STATIC_ASSERT(sizeof (spec->efs_loc_host) ==
		    MC_CMD_FILTER_OP_EXT_IN_DST_IP_LEN);

		memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_EXT_IN_SRC_IP),
		    &spec->efs_rem_host.eo_byte[0],
		    MC_CMD_FILTER_OP_EXT_IN_SRC_IP_LEN);
		memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_EXT_IN_DST_IP),
		    &spec->efs_loc_host.eo_byte[0],
		    MC_CMD_FILTER_OP_EXT_IN_DST_IP_LEN);

		/*
		 * On Medford, filters for encapsulated packets match based on
		 * the ether type and IP protocol in the outer frame.  In
		 * addition we need to fill in the VNI or VSID type field.
		 */
		switch (spec->efs_encap_type) {
		case EFX_TUNNEL_PROTOCOL_NONE:
			break;
		case EFX_TUNNEL_PROTOCOL_VXLAN:
		case EFX_TUNNEL_PROTOCOL_GENEVE:
			MCDI_IN_POPULATE_DWORD_1(req,
			    FILTER_OP_EXT_IN_VNI_OR_VSID,
			    FILTER_OP_EXT_IN_VNI_TYPE,
			    spec->efs_encap_type == EFX_TUNNEL_PROTOCOL_VXLAN ?
				    MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN :
				    MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE);
			break;
		case EFX_TUNNEL_PROTOCOL_NVGRE:
			MCDI_IN_POPULATE_DWORD_1(req,
			    FILTER_OP_EXT_IN_VNI_OR_VSID,
			    FILTER_OP_EXT_IN_VSID_TYPE,
			    MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_NVGRE);
			break;
		default:
			EFSYS_ASSERT(0);
			rc = EINVAL;
			goto fail2;
		}

		memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_EXT_IN_VNI_OR_VSID),
		    spec->efs_vni_or_vsid, EFX_VNI_OR_VSID_LEN);

		memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_EXT_IN_IFRM_DST_MAC),
		    spec->efs_ifrm_loc_mac, EFX_MAC_ADDR_LEN);
	}

	/*
	 * Set the "MARK" or "FLAG" action for all packets matching this filter
	 * if necessary (only useful with equal stride packed stream Rx mode
	 * which provides the information in the pseudo-header).
	 * These actions require MC_CMD_FILTER_OP_V3_IN msgrequest.
	 */
	if ((spec->efs_flags & EFX_FILTER_FLAG_ACTION_MARK) &&
	    (spec->efs_flags & EFX_FILTER_FLAG_ACTION_FLAG)) {
		rc = EINVAL;
		goto fail3;
	}
	if (spec->efs_flags & EFX_FILTER_FLAG_ACTION_MARK) {
		MCDI_IN_SET_DWORD(req, FILTER_OP_V3_IN_MATCH_ACTION,
		    MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_MARK);
		MCDI_IN_SET_DWORD(req, FILTER_OP_V3_IN_MATCH_MARK_VALUE,
		    spec->efs_mark);
	} else if (spec->efs_flags & EFX_FILTER_FLAG_ACTION_FLAG) {
		MCDI_IN_SET_DWORD(req, FILTER_OP_V3_IN_MATCH_ACTION,
		    MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_FLAG);
	}

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail4;
	}

	if (req.emr_out_length_used < MC_CMD_FILTER_OP_EXT_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail5;
	}

	handle->efh_lo = MCDI_OUT_DWORD(req, FILTER_OP_EXT_OUT_HANDLE_LO);
	handle->efh_hi = MCDI_OUT_DWORD(req, FILTER_OP_EXT_OUT_HANDLE_HI);

	return (0);

fail5:
	EFSYS_PROBE(fail5);
fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
Example #15
	__checkReturn	int
siena_mac_reconfigure(
	__in		efx_nic_t *enp)
{
	efx_port_t *epp = &(enp->en_port);
	uint8_t payload[MAX(MC_CMD_SET_MAC_IN_LEN,
			    MC_CMD_SET_MCAST_HASH_IN_LEN)];
	efx_mcdi_req_t req;
	unsigned int fcntl;
	int rc;

	req.emr_cmd = MC_CMD_SET_MAC;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_SET_MAC_IN_LEN;
	EFX_STATIC_ASSERT(MC_CMD_SET_MAC_OUT_LEN == 0);
	req.emr_out_buf = NULL;
	req.emr_out_length = 0;

	MCDI_IN_SET_DWORD(req, SET_MAC_IN_MTU, epp->ep_mac_pdu);
	MCDI_IN_SET_DWORD(req, SET_MAC_IN_DRAIN, epp->ep_mac_drain ? 1 : 0);
	EFX_MAC_ADDR_COPY(MCDI_IN2(req, uint8_t, SET_MAC_IN_ADDR),
			    epp->ep_mac_addr);
	MCDI_IN_POPULATE_DWORD_2(req, SET_MAC_IN_REJECT,
				    SET_MAC_IN_REJECT_UNCST, !epp->ep_unicst,
				    SET_MAC_IN_REJECT_BRDCST, !epp->ep_brdcst);

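	/* Map the port flow control settings onto the MCDI FCNTL value */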
	if (epp->ep_fcntl_autoneg)
		/* efx_fcntl_set() has already set the phy capabilities */
		fcntl = MC_CMD_FCNTL_AUTO;
	else if (epp->ep_fcntl & EFX_FCNTL_RESPOND)
		fcntl = (epp->ep_fcntl & EFX_FCNTL_GENERATE)
			? MC_CMD_FCNTL_BIDIR
			: MC_CMD_FCNTL_RESPOND;
	else
		fcntl = MC_CMD_FCNTL_OFF;

	MCDI_IN_SET_DWORD(req, SET_MAC_IN_FCNTL, fcntl);

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	/* Push multicast hash. Set the broadcast bit (0xff) appropriately */
	req.emr_cmd = MC_CMD_SET_MCAST_HASH;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_SET_MCAST_HASH_IN_LEN;
	EFX_STATIC_ASSERT(MC_CMD_SET_MCAST_HASH_OUT_LEN == 0);
	req.emr_out_buf = NULL;
	req.emr_out_length = 0;

	memcpy(MCDI_IN2(req, uint8_t, SET_MCAST_HASH_IN_HASH0),
	    epp->ep_multicst_hash, sizeof (epp->ep_multicst_hash));
	if (epp->ep_brdcst)
		EFX_SET_OWORD_BIT(*MCDI_IN2(req, efx_oword_t,
		    SET_MCAST_HASH_IN_HASH1), 0x7f);

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail2;
	}

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
Example #16
	__checkReturn	int
siena_phy_reconfigure(
	__in		efx_nic_t *enp)
{
	efx_port_t *epp = &(enp->en_port);
	efx_mcdi_req_t req;
	uint8_t payload[MAX(MC_CMD_SET_ID_LED_IN_LEN,
			    MC_CMD_SET_LINK_IN_LEN)];
	uint32_t cap_mask;
	unsigned int led_mode;
	unsigned int speed;
	int rc;

	req.emr_cmd = MC_CMD_SET_LINK;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_SET_LINK_IN_LEN;
	EFX_STATIC_ASSERT(MC_CMD_SET_LINK_OUT_LEN == 0);
	req.emr_out_buf = NULL;
	req.emr_out_length = 0;

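	/* Advertise the PHY capabilities enabled in the port's cap mask */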
	cap_mask = epp->ep_adv_cap_mask;
	MCDI_IN_POPULATE_DWORD_10(req, SET_LINK_IN_CAP,
		PHY_CAP_10HDX, (cap_mask >> EFX_PHY_CAP_10HDX) & 0x1,
		PHY_CAP_10FDX, (cap_mask >> EFX_PHY_CAP_10FDX) & 0x1,
		PHY_CAP_100HDX, (cap_mask >> EFX_PHY_CAP_100HDX) & 0x1,
		PHY_CAP_100FDX, (cap_mask >> EFX_PHY_CAP_100FDX) & 0x1,
		PHY_CAP_1000HDX, (cap_mask >> EFX_PHY_CAP_1000HDX) & 0x1,
		PHY_CAP_1000FDX, (cap_mask >> EFX_PHY_CAP_1000FDX) & 0x1,
		PHY_CAP_10000FDX, (cap_mask >> EFX_PHY_CAP_10000FDX) & 0x1,
		PHY_CAP_PAUSE, (cap_mask >> EFX_PHY_CAP_PAUSE) & 0x1,
		PHY_CAP_ASYM, (cap_mask >> EFX_PHY_CAP_ASYM) & 0x1,
		PHY_CAP_AN, (cap_mask >> EFX_PHY_CAP_AN) & 0x1);

#if EFSYS_OPT_LOOPBACK
	MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_MODE,
		    epp->ep_loopback_type);
	switch (epp->ep_loopback_link_mode) {
	case EFX_LINK_100FDX:
		speed = 100;
		break;
	case EFX_LINK_1000FDX:
		speed = 1000;
		break;
	case EFX_LINK_10000FDX:
		speed = 10000;
		break;
	default:
		speed = 0;
	}
#else
	MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_MODE,
	    MC_CMD_LOOPBACK_NONE);
	speed = 0;
#endif	/* EFSYS_OPT_LOOPBACK */
	MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_SPEED, speed);

#if EFSYS_OPT_PHY_FLAGS
	MCDI_IN_SET_DWORD(req, SET_LINK_IN_FLAGS, epp->ep_phy_flags);
#else
	MCDI_IN_SET_DWORD(req, SET_LINK_IN_FLAGS, 0);
#endif	/* EFSYS_OPT_PHY_FLAGS */

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	/* And set the blink mode */
	req.emr_cmd = MC_CMD_SET_ID_LED;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_SET_ID_LED_IN_LEN;
	EFX_STATIC_ASSERT(MC_CMD_SET_ID_LED_OUT_LEN == 0);
	req.emr_out_buf = NULL;
	req.emr_out_length = 0;

#if EFSYS_OPT_PHY_LED_CONTROL
	switch (epp->ep_phy_led_mode) {
	case EFX_PHY_LED_DEFAULT:
		led_mode = MC_CMD_LED_DEFAULT;
		break;
	case EFX_PHY_LED_OFF:
		led_mode = MC_CMD_LED_OFF;
		break;
	case EFX_PHY_LED_ON:
		led_mode = MC_CMD_LED_ON;
		break;
	default:
		EFSYS_ASSERT(0);
		led_mode = MC_CMD_LED_DEFAULT;
	}

	MCDI_IN_SET_DWORD(req, SET_ID_LED_IN_STATE, led_mode);
#else
	MCDI_IN_SET_DWORD(req, SET_ID_LED_IN_STATE, MC_CMD_LED_DEFAULT);
#endif	/* EFSYS_OPT_PHY_LED_CONTROL */

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail2;
	}

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
Example #17
static	__checkReturn	efx_rc_t
efx_mcdi_filter_op_add(
	__in		efx_nic_t *enp,
	__in		efx_filter_spec_t *spec,
	__in		unsigned int filter_op,
	__inout		ef10_filter_handle_t *handle)
{
	efx_mcdi_req_t req;
	uint8_t payload[MAX(MC_CMD_FILTER_OP_IN_LEN,
			    MC_CMD_FILTER_OP_OUT_LEN)];
	uint32_t match_fields = 0;
	efx_rc_t rc;

	memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_FILTER_OP;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_FILTER_OP_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_FILTER_OP_OUT_LEN;

	switch (filter_op) {
	case MC_CMD_FILTER_OP_IN_OP_REPLACE:
		MCDI_IN_SET_DWORD(req, FILTER_OP_IN_HANDLE_LO,
		    handle->efh_lo);
		MCDI_IN_SET_DWORD(req, FILTER_OP_IN_HANDLE_HI,
		    handle->efh_hi);
		/* Fall through */
	case MC_CMD_FILTER_OP_IN_OP_INSERT:
	case MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE:
		MCDI_IN_SET_DWORD(req, FILTER_OP_IN_OP, filter_op);
		break;
	default:
		EFSYS_ASSERT(0);
		rc = EINVAL;
		goto fail1;
	}

	if (spec->efs_match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) {
		/*
		 * The LOC_MAC_IG match flag can represent unknown unicast
		 *  or multicast filters - use the MAC address to distinguish
		 *  them.
		 */
		if (EFX_MAC_ADDR_IS_MULTICAST(spec->efs_loc_mac))
			match_fields |= 1U <<
				MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN;
		else
			match_fields |= 1U <<
				MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
	}

	match_fields |= spec->efs_match_flags & (~EFX_FILTER_MATCH_LOC_MAC_IG);

	MCDI_IN_SET_DWORD(req, FILTER_OP_IN_PORT_ID,
	    EVB_PORT_ID_ASSIGNED);
	MCDI_IN_SET_DWORD(req, FILTER_OP_IN_MATCH_FIELDS,
	    match_fields);
	MCDI_IN_SET_DWORD(req, FILTER_OP_IN_RX_DEST,
	    MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
	MCDI_IN_SET_DWORD(req, FILTER_OP_IN_RX_QUEUE,
	    spec->efs_dmaq_id);
	if (spec->efs_flags & EFX_FILTER_FLAG_RX_RSS) {
		MCDI_IN_SET_DWORD(req, FILTER_OP_IN_RX_CONTEXT,
		    spec->efs_rss_context);
	}
	MCDI_IN_SET_DWORD(req, FILTER_OP_IN_RX_MODE,
	    spec->efs_flags & EFX_FILTER_FLAG_RX_RSS ?
	    MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
	    MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
	MCDI_IN_SET_DWORD(req, FILTER_OP_IN_TX_DEST,
	    MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);

	if (filter_op != MC_CMD_FILTER_OP_IN_OP_REPLACE) {
		/*
		 * NOTE: Unlike most MCDI requests, the filter fields
		 * are presented in network (big endian) byte order.
		 */
		memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_IN_SRC_MAC),
		    spec->efs_rem_mac, EFX_MAC_ADDR_LEN);
		memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_IN_DST_MAC),
		    spec->efs_loc_mac, EFX_MAC_ADDR_LEN);

		MCDI_IN_SET_WORD(req, FILTER_OP_IN_SRC_PORT,
		    __CPU_TO_BE_16(spec->efs_rem_port));
		MCDI_IN_SET_WORD(req, FILTER_OP_IN_DST_PORT,
		    __CPU_TO_BE_16(spec->efs_loc_port));

		MCDI_IN_SET_WORD(req, FILTER_OP_IN_ETHER_TYPE,
		    __CPU_TO_BE_16(spec->efs_ether_type));

		MCDI_IN_SET_WORD(req, FILTER_OP_IN_INNER_VLAN,
		    __CPU_TO_BE_16(spec->efs_inner_vid));
		MCDI_IN_SET_WORD(req, FILTER_OP_IN_OUTER_VLAN,
		    __CPU_TO_BE_16(spec->efs_outer_vid));

		/* IP protocol (in low byte, high byte is zero) */
		MCDI_IN_SET_BYTE(req, FILTER_OP_IN_IP_PROTO,
		    spec->efs_ip_proto);

		EFX_STATIC_ASSERT(sizeof (spec->efs_rem_host) ==
		    MC_CMD_FILTER_OP_IN_SRC_IP_LEN);
		EFX_STATIC_ASSERT(sizeof (spec->efs_loc_host) ==
		    MC_CMD_FILTER_OP_IN_DST_IP_LEN);

		memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_IN_SRC_IP),
		    &spec->efs_rem_host.eo_byte[0],
		    MC_CMD_FILTER_OP_IN_SRC_IP_LEN);
		memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_IN_DST_IP),
		    &spec->efs_loc_host.eo_byte[0],
		    MC_CMD_FILTER_OP_IN_DST_IP_LEN);
	}

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail2;
	}

	if (req.emr_out_length_used < MC_CMD_FILTER_OP_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail3;
	}

	handle->efh_lo = MCDI_OUT_DWORD(req, FILTER_OP_OUT_HANDLE_LO);
	handle->efh_hi = MCDI_OUT_DWORD(req, FILTER_OP_OUT_HANDLE_HI);

	return (0);

fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
Example #18
	__checkReturn	int
hunt_phy_get_link(
	__in		efx_nic_t *enp,
	__out		hunt_link_state_t *hlsp)
{
	/*
	 * TBD: consider common Siena/Hunt function: Hunt is very similar
	 * (at least for now; not clear that the loopbacks should necessarily
	 * be quite the same...)
	 */

	efx_mcdi_req_t req;
	uint8_t payload[MAX(MC_CMD_GET_LINK_IN_LEN,
			    MC_CMD_GET_LINK_OUT_LEN)];
	int rc;

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_GET_LINK;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_GET_LINK_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_GET_LINK_OUT_LEN;

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	if (req.emr_out_length_used < MC_CMD_GET_LINK_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail2;
	}

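	/* Decode our advertised and the link partner's capability masks */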
	hunt_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_CAP),
			    &hlsp->hls_adv_cap_mask);
	hunt_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_LP_CAP),
			    &hlsp->hls_lp_cap_mask);

	hunt_phy_decode_link_mode(enp, MCDI_OUT_DWORD(req, GET_LINK_OUT_FLAGS),
			    MCDI_OUT_DWORD(req, GET_LINK_OUT_LINK_SPEED),
			    MCDI_OUT_DWORD(req, GET_LINK_OUT_FCNTL),
			    &hlsp->hls_link_mode, &hlsp->hls_fcntl);

#if EFSYS_OPT_LOOPBACK
	/* Assert the MC_CMD_LOOPBACK and EFX_LOOPBACK namespace agree */
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_NONE == EFX_LOOPBACK_OFF);
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_DATA == EFX_LOOPBACK_DATA);
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMAC == EFX_LOOPBACK_GMAC);
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGMII == EFX_LOOPBACK_XGMII);
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGXS == EFX_LOOPBACK_XGXS);
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI == EFX_LOOPBACK_XAUI);
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII == EFX_LOOPBACK_GMII);
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII == EFX_LOOPBACK_SGMII);
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGBR == EFX_LOOPBACK_XGBR);
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI == EFX_LOOPBACK_XFI);
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_FAR == EFX_LOOPBACK_XAUI_FAR);
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII_FAR == EFX_LOOPBACK_GMII_FAR);
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII_FAR == EFX_LOOPBACK_SGMII_FAR);
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_FAR == EFX_LOOPBACK_XFI_FAR);
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GPHY == EFX_LOOPBACK_GPHY);
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PHYXS == EFX_LOOPBACK_PHY_XS);
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PCS == EFX_LOOPBACK_PCS);
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMAPMD == EFX_LOOPBACK_PMA_PMD);

	hlsp->hls_loopback = MCDI_OUT_DWORD(req, GET_LINK_OUT_LOOPBACK_MODE);
#endif	/* EFSYS_OPT_LOOPBACK */

	hlsp->hls_mac_up = MCDI_OUT_DWORD(req, GET_LINK_OUT_MAC_FAULT) == 0;

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
Example #19
	__checkReturn	efx_rc_t
medford_board_cfg(
	__in		efx_nic_t *enp)
{
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	uint32_t sysclk, dpcpu_clk;
	uint32_t end_padding;
	uint32_t bandwidth;
	efx_rc_t rc;

	/*
	 * Enable firmware workarounds for hardware errata.
	 * Expected responses are:
	 *  - 0 (zero):
	 *	Success: workaround enabled or disabled as requested.
	 *  - MC_CMD_ERR_ENOSYS (reported as ENOTSUP):
	 *	Firmware does not support the MC_CMD_WORKAROUND request.
	 *	(assume that the workaround is not supported).
	 *  - MC_CMD_ERR_ENOENT (reported as ENOENT):
	 *	Firmware does not support the requested workaround.
	 *  - MC_CMD_ERR_EPERM  (reported as EACCES):
	 *	Unprivileged function cannot enable/disable workarounds.
	 *
	 * See efx_mcdi_request_errcode() for MCDI error translations.
	 */


	if (EFX_PCI_FUNCTION_IS_VF(encp)) {
		/*
		 * Interrupt testing does not work for VFs. See bug50084 and
		 * bug71432 comment 21.
		 */
		encp->enc_bug41750_workaround = B_TRUE;
	}

	/* Chained multicast is always enabled on Medford */
	encp->enc_bug26807_workaround = B_TRUE;

	/*
	 * If the bug61265 workaround is enabled, then interrupt holdoff timers
	 * cannot be controlled by timer table writes, so MCDI must be used
	 * (timer table writes can still be used for wakeup timers).
	 */
	rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG61265, B_TRUE,
	    NULL);
	if ((rc == 0) || (rc == EACCES))
		encp->enc_bug61265_workaround = B_TRUE;
	else if ((rc == ENOTSUP) || (rc == ENOENT))
		encp->enc_bug61265_workaround = B_FALSE;
	else
		goto fail1;

	/* Checksums for TSO sends can be incorrect on Medford. */
	encp->enc_bug61297_workaround = B_TRUE;

	/* Get clock frequencies (in MHz). */
	if ((rc = efx_mcdi_get_clock(enp, &sysclk, &dpcpu_clk)) != 0)
		goto fail2;

	/*
	 * The Medford timer quantum is 1536 dpcpu_clk cycles, documented for
	 * the EV_TMR_VAL field of EV_TIMER_TBL. Scale for MHz and ns units.
	 */
	encp->enc_evq_timer_quantum_ns = 1536000UL / dpcpu_clk; /* 1536 cycles */
	encp->enc_evq_timer_max_us = (encp->enc_evq_timer_quantum_ns <<
		    FRF_CZ_TC_TIMER_VAL_WIDTH) / 1000;

	/* Alignment for receive packet DMA buffers */
	encp->enc_rx_buf_align_start = 1;

	/* Get the RX DMA end padding alignment configuration */
	if ((rc = efx_mcdi_get_rxdp_config(enp, &end_padding)) != 0) {
		if (rc != EACCES)
			goto fail3;

		/* Assume largest tail padding size supported by hardware */
		end_padding = 256;
	}
	encp->enc_rx_buf_align_end = end_padding;

	/*
	 * The maximum supported transmit queue size is 2048. TXQs with 4096
	 * descriptors are not supported as the top bit is used for vfifo
	 * stuffing.
	 */
	encp->enc_txq_max_ndescs = 2048;

	EFX_STATIC_ASSERT(MEDFORD_PIOBUF_NBUFS <= EF10_MAX_PIOBUF_NBUFS);
	encp->enc_piobuf_limit = MEDFORD_PIOBUF_NBUFS;
	encp->enc_piobuf_size = MEDFORD_PIOBUF_SIZE;
	encp->enc_piobuf_min_alloc_size = MEDFORD_MIN_PIO_ALLOC_SIZE;

	/*
	 * Medford stores a single global copy of VPD, not per-PF as on
	 * Huntington.
	 */
	encp->enc_vpd_is_global = B_TRUE;

	rc = medford_nic_get_required_pcie_bandwidth(enp, &bandwidth);
	if (rc != 0)
		goto fail4;
	encp->enc_required_pcie_bandwidth_mbps = bandwidth;
	encp->enc_max_pcie_link_gen = EFX_PCIE_LINK_SPEED_GEN3;

	return (0);

fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
Example #20
	__checkReturn	efx_rc_t
siena_phy_get_link(
	__in		efx_nic_t *enp,
	__out		siena_link_state_t *slsp)
{
	efx_mcdi_req_t req;
	uint8_t payload[MAX(MC_CMD_GET_LINK_IN_LEN,
			    MC_CMD_GET_LINK_OUT_LEN)];
	efx_rc_t rc;

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_GET_LINK;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_GET_LINK_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_GET_LINK_OUT_LEN;

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	if (req.emr_out_length_used < MC_CMD_GET_LINK_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail2;
	}

	siena_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_CAP),
			    &slsp->sls_adv_cap_mask);
	siena_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_LP_CAP),
			    &slsp->sls_lp_cap_mask);

	siena_phy_decode_link_mode(enp, MCDI_OUT_DWORD(req, GET_LINK_OUT_FLAGS),
			    MCDI_OUT_DWORD(req, GET_LINK_OUT_LINK_SPEED),
			    MCDI_OUT_DWORD(req, GET_LINK_OUT_FCNTL),
			    &slsp->sls_link_mode, &slsp->sls_fcntl);

#if EFSYS_OPT_LOOPBACK
	/* Assert the MC_CMD_LOOPBACK and EFX_LOOPBACK namespace agree */
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_NONE == EFX_LOOPBACK_OFF);
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_DATA == EFX_LOOPBACK_DATA);
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMAC == EFX_LOOPBACK_GMAC);
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGMII == EFX_LOOPBACK_XGMII);
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGXS == EFX_LOOPBACK_XGXS);
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI == EFX_LOOPBACK_XAUI);
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII == EFX_LOOPBACK_GMII);
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII == EFX_LOOPBACK_SGMII);
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGBR == EFX_LOOPBACK_XGBR);
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI == EFX_LOOPBACK_XFI);
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_FAR == EFX_LOOPBACK_XAUI_FAR);
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII_FAR == EFX_LOOPBACK_GMII_FAR);
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII_FAR == EFX_LOOPBACK_SGMII_FAR);
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_FAR == EFX_LOOPBACK_XFI_FAR);
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GPHY == EFX_LOOPBACK_GPHY);
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PHYXS == EFX_LOOPBACK_PHY_XS);
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PCS == EFX_LOOPBACK_PCS);
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMAPMD == EFX_LOOPBACK_PMA_PMD);

	slsp->sls_loopback = MCDI_OUT_DWORD(req, GET_LINK_OUT_LOOPBACK_MODE);
#endif	/* EFSYS_OPT_LOOPBACK */

	slsp->sls_mac_up = MCDI_OUT_DWORD(req, GET_LINK_OUT_MAC_FAULT) == 0;

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
Example #21
static	__checkReturn	efx_rc_t
efx_mcdi_get_parser_disp_info(
	__in		efx_nic_t *enp,
	__out		uint32_t *list,
	__out		size_t *length)
{
	efx_mcdi_req_t req;
	uint8_t payload[MAX(MC_CMD_GET_PARSER_DISP_INFO_IN_LEN,
			    MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX)];
	efx_rc_t rc;
	uint32_t i;
	boolean_t support_unknown_ucast = B_FALSE;
	boolean_t support_unknown_mcast = B_FALSE;

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_GET_PARSER_DISP_INFO;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_GET_PARSER_DISP_INFO_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX;

	MCDI_IN_SET_DWORD(req, GET_PARSER_DISP_INFO_OUT_OP,
	    MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES);

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	*length = MCDI_OUT_DWORD(req,
	    GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES);

	if (req.emr_out_length_used <
	    MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(*length)) {
		rc = EMSGSIZE;
		goto fail2;
	}

	memcpy(list,
	    MCDI_OUT2(req,
	    uint32_t,
	    GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES),
	    (*length) * sizeof (uint32_t));
	EFX_STATIC_ASSERT(sizeof (uint32_t) ==
	    MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_LEN);

	/*
	 * Remove UNKNOWN UCAST and MCAST flags, and if both are present, change
	 * the lower priority one to LOC_MAC_IG.
	 */
	for (i = 0; i < *length; i++) {
		if (list[i] & MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN) {
			list[i] &=
			(~MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN);
			support_unknown_ucast = B_TRUE;
		}
		if (list[i] & MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN) {
			list[i] &=
			(~MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN);
			support_unknown_mcast = B_TRUE;
		}

		if (support_unknown_ucast && support_unknown_mcast) {
			list[i] &= EFX_FILTER_MATCH_LOC_MAC_IG;
			break;
		}
	}

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}