Example #1
	__checkReturn	int
efx_intr_init(
	__in		efx_nic_t *enp,
	__in		efx_intr_type_t type,
	__in		efsys_mem_t *esmp)
{
	efx_intr_t *eip = &(enp->en_intr);
	efx_oword_t oword;
	int rc;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);

	if (enp->en_mod_flags & EFX_MOD_INTR) {
		rc = EINVAL;
		goto fail1;
	}

	enp->en_mod_flags |= EFX_MOD_INTR;

	eip->ei_type = type;
	eip->ei_esmp = esmp;

	/*
	 * bug17213 workaround.
	 *
	 * Under legacy interrupts, don't share a level between fatal
	 * interrupts and event queue interrupts. Under MSI-X, they
	 * must share, or we won't get an interrupt.
	 */
	if (enp->en_family == EFX_FAMILY_SIENA &&
	    eip->ei_type == EFX_INTR_LINE)
		eip->ei_level = 0x1f;
	else
		eip->ei_level = 0;

	/* Enable all the genuinely fatal interrupts */
	EFX_SET_OWORD(oword);
	EFX_SET_OWORD_FIELD(oword, FRF_AZ_ILL_ADR_INT_KER_EN, 0);
	EFX_SET_OWORD_FIELD(oword, FRF_AZ_RBUF_OWN_INT_KER_EN, 0);
	EFX_SET_OWORD_FIELD(oword, FRF_AZ_TBUF_OWN_INT_KER_EN, 0);
	if (enp->en_family >= EFX_FAMILY_SIENA)
		EFX_SET_OWORD_FIELD(oword, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 0);
	EFX_BAR_WRITEO(enp, FR_AZ_FATAL_INTR_REG_KER, &oword);

	/* Set up the interrupt address register */
	EFX_POPULATE_OWORD_3(oword,
	    FRF_AZ_NORM_INT_VEC_DIS_KER, (type == EFX_INTR_MESSAGE) ? 1 : 0,
	    FRF_AZ_INT_ADR_KER_DW0, EFSYS_MEM_ADDR(esmp) & 0xffffffff,
	    FRF_AZ_INT_ADR_KER_DW1, EFSYS_MEM_ADDR(esmp) >> 32);
	EFX_BAR_WRITEO(enp, FR_AZ_INT_ADR_REG_KER, &oword);

	return (0);

fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
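The interrupt address register programmed above takes the ISR status buffer address as two 32-bit halves: DW0 carries bits 31..0 and DW1 carries bits 63..32. The standalone sketch below shows the same split in plain C, without the libefx macros; the function and variable names are illustrative only.

#include <stdint.h>

/*
 * Illustrative only: split a 64-bit DMA address into the two 32-bit
 * halves written to FRF_AZ_INT_ADR_KER_DW0/DW1 above.
 */
static void
split_dma_addr(uint64_t addr, uint32_t *dw0, uint32_t *dw1)
{
	*dw0 = (uint32_t)(addr & 0xffffffff);	/* bits 31..0 */
	*dw1 = (uint32_t)(addr >> 32);		/* bits 63..32 */
}
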
Example #2
	__checkReturn	efx_rc_t
ef10_mcdi_init(
	__in		efx_nic_t *enp,
	__in		const efx_mcdi_transport_t *emtp)
{
	efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
	efsys_mem_t *esmp = emtp->emt_dma_mem;
	efx_dword_t dword;
	efx_rc_t rc;

	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
	    enp->en_family == EFX_FAMILY_MEDFORD ||
	    enp->en_family == EFX_FAMILY_MEDFORD2);
	EFSYS_ASSERT(enp->en_features & EFX_FEATURE_MCDI_DMA);

	/*
	 * All EF10 firmware supports MCDIv2 and MCDIv1.
	 * Medford BootROM supports MCDIv2 and MCDIv1.
	 * Huntington BootROM supports MCDIv1 only.
	 */
	emip->emi_max_version = 2;

	/* A host DMA buffer is required for EF10 MCDI */
	if (esmp == NULL) {
		rc = EINVAL;
		goto fail1;
	}

	/*
	 * Ensure that the MC doorbell is in a known state before issuing MCDI
	 * commands. The recovery algorithm requires the MC command buffer
	 * to be 256-byte aligned. See bug24769.
	 */
	if ((EFSYS_MEM_ADDR(esmp) & 0xFF) != 0) {
		rc = EINVAL;
		goto fail2;
	}
	EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0, 1);
	EFX_BAR_WRITED(enp, ER_DZ_MC_DB_HWRD_REG, &dword, B_FALSE);

	/* Save initial MC reboot status */
	(void) ef10_mcdi_poll_reboot(enp);

	/* Start a new epoch (allow fresh MCDI requests to succeed) */
	efx_mcdi_new_epoch(enp);

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
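The check on EFSYS_MEM_ADDR(esmp) above rejects any MCDI DMA buffer whose low eight address bits are non-zero, because the doorbell recovery algorithm assumes 256-byte alignment. A minimal standalone restatement of that predicate (the helper name is hypothetical):

#include <stdint.h>

/*
 * Illustrative only: an EF10 MCDI command buffer must start on a
 * 256-byte boundary, i.e. the low 8 bits of its DMA address are 0.
 */
static int
mcdi_buf_is_aligned(uint64_t dma_addr)
{
	return ((dma_addr & 0xFF) == 0);
}
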
Example #3
	__checkReturn			int
siena_mac_stats_periodic(
	__in				efx_nic_t *enp,
	__in				efsys_mem_t *esmp,
	__in				uint16_t period,
	__in				boolean_t events)
{
	uint8_t payload[MC_CMD_MAC_STATS_IN_LEN];
	efx_mcdi_req_t req;
	size_t bytes;
	int rc;

	bytes = MC_CMD_MAC_NSTATS * sizeof (uint64_t);

	req.emr_cmd = MC_CMD_MAC_STATS;
	req.emr_in_buf = payload;
	req.emr_in_length = sizeof (payload);
	EFX_STATIC_ASSERT(MC_CMD_MAC_STATS_OUT_DMA_LEN == 0);
	req.emr_out_buf = NULL;
	req.emr_out_length = 0;

	MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_ADDR_LO,
			    EFSYS_MEM_ADDR(esmp) & 0xffffffff);
	MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_ADDR_HI,
			    EFSYS_MEM_ADDR(esmp) >> 32);
	MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_LEN, bytes);

	/*
	 * The MC DMAs aggregate statistics for our convenience, so we can
	 * avoid having to pull the statistics buffer into the cache to
	 * maintain cumulative statistics.
	 */
	MCDI_IN_POPULATE_DWORD_6(req, MAC_STATS_IN_CMD,
			    MAC_STATS_IN_DMA, 0,
			    MAC_STATS_IN_CLEAR, 0,
			    MAC_STATS_IN_PERIODIC_CHANGE, 1,
			    MAC_STATS_IN_PERIODIC_ENABLE, period ? 1 : 0,
			    MAC_STATS_IN_PERIODIC_NOEVENT, events ? 0 : 1,
			    MAC_STATS_IN_PERIOD_MS, period);

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
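The statistics DMA buffer handed in as esmp is only described to the MC here; the caller must already have sized it for the whole statistics block. A sketch of that sizing, assuming (as the bytes calculation above does) one 64-bit counter per statistic; EXAMPLE_MAC_NSTATS is a placeholder, the real count comes from MC_CMD_MAC_NSTATS in the firmware headers.

#include <stddef.h>
#include <stdint.h>

/* Placeholder for MC_CMD_MAC_NSTATS; illustration only. */
#define EXAMPLE_MAC_NSTATS	64

/* Minimum size of the DMA buffer the MC will write statistics into. */
static size_t
mac_stats_buf_size(void)
{
	return (EXAMPLE_MAC_NSTATS * sizeof (uint64_t));
}
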
Example #4
			void
ef10_mcdi_send_request(
	__in		efx_nic_t *enp,
	__in		void *hdrp,
	__in		size_t hdr_len,
	__in		void *sdup,
	__in		size_t sdu_len)
{
	const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
	efsys_mem_t *esmp = emtp->emt_dma_mem;
	efx_dword_t dword;
	unsigned int pos;

	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
		    enp->en_family == EFX_FAMILY_MEDFORD);

	/* Write the header */
	for (pos = 0; pos < hdr_len; pos += sizeof (efx_dword_t)) {
		dword = *(efx_dword_t *)((uint8_t *)hdrp + pos);
		EFSYS_MEM_WRITED(esmp, pos, &dword);
	}

	/* Write the payload */
	for (pos = 0; pos < sdu_len; pos += sizeof (efx_dword_t)) {
		dword = *(efx_dword_t *)((uint8_t *)sdup + pos);
		EFSYS_MEM_WRITED(esmp, hdr_len + pos, &dword);
	}

	/* Guarantee ordering of memory (MCDI request) and PIO (MC doorbell) */
	EFSYS_DMA_SYNC_FOR_DEVICE(esmp, 0, hdr_len + sdu_len);
	EFSYS_PIO_WRITE_BARRIER();

	/* Ring the doorbell to post the command DMA address to the MC */
	EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0,
	    EFSYS_MEM_ADDR(esmp) >> 32);
	EFX_BAR_WRITED(enp, ER_DZ_MC_DB_LWRD_REG, &dword, B_FALSE);

	EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0,
	    EFSYS_MEM_ADDR(esmp) & 0xffffffff);
	EFX_BAR_WRITED(enp, ER_DZ_MC_DB_HWRD_REG, &dword, B_FALSE);
}
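The EFSYS_DMA_SYNC_FOR_DEVICE()/EFSYS_PIO_WRITE_BARRIER() pair above exists so the request body is visible to the MC before the doorbell write that tells it to fetch the request. The ordering requirement can be sketched in portable C11 with a release fence; this is an analogue of the idea only, since the real macros are platform hooks supplied by the environment (cache/DMA syncs and I/O barriers).

#include <stdatomic.h>
#include <stdint.h>

/*
 * Illustrative analogue only: publish a request buffer and then ring
 * a doorbell, ensuring the buffer writes are ordered before the
 * doorbell write.
 */
static void
post_request(volatile uint32_t *doorbell, uint32_t *req_buf,
    const uint32_t *req, unsigned int ndwords)
{
	unsigned int i;

	/* Fill the buffer the device will DMA the request from. */
	for (i = 0; i < ndwords; i++)
		req_buf[i] = req[i];

	/* Order the buffer writes before the doorbell write. */
	atomic_thread_fence(memory_order_release);

	/* Only now notify the device that the request is ready. */
	*doorbell = 1;
}
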
Example #5
static	__checkReturn	efx_rc_t
siena_intr_init(
	__in		efx_nic_t *enp,
	__in		efx_intr_type_t type,
	__in		efsys_mem_t *esmp)
{
	efx_intr_t *eip = &(enp->en_intr);
	efx_oword_t oword;

	/*
	 * bug17213 workaround.
	 *
	 * Under legacy interrupts, don't share a level between fatal
	 * interrupts and event queue interrupts. Under MSI-X, they
	 * must share, or we won't get an interrupt.
	 */
	if (enp->en_family == EFX_FAMILY_SIENA &&
	    eip->ei_type == EFX_INTR_LINE)
		eip->ei_level = 0x1f;
	else
		eip->ei_level = 0;

	/* Enable all the genuinely fatal interrupts */
	EFX_SET_OWORD(oword);
	EFX_SET_OWORD_FIELD(oword, FRF_AZ_ILL_ADR_INT_KER_EN, 0);
	EFX_SET_OWORD_FIELD(oword, FRF_AZ_RBUF_OWN_INT_KER_EN, 0);
	EFX_SET_OWORD_FIELD(oword, FRF_AZ_TBUF_OWN_INT_KER_EN, 0);
	if (enp->en_family >= EFX_FAMILY_SIENA)
		EFX_SET_OWORD_FIELD(oword, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 0);
	EFX_BAR_WRITEO(enp, FR_AZ_FATAL_INTR_REG_KER, &oword);

	/* Set up the interrupt address register */
	EFX_POPULATE_OWORD_3(oword,
	    FRF_AZ_NORM_INT_VEC_DIS_KER, (type == EFX_INTR_MESSAGE) ? 1 : 0,
	    FRF_AZ_INT_ADR_KER_DW0, EFSYS_MEM_ADDR(esmp) & 0xffffffff,
	    FRF_AZ_INT_ADR_KER_DW1, EFSYS_MEM_ADDR(esmp) >> 32);
	EFX_BAR_WRITEO(enp, FR_AZ_INT_ADR_REG_KER, &oword);

	return (0);
}
Example #6
static	__checkReturn	efx_rc_t
efx_mcdi_init_evq(
	__in		efx_nic_t *enp,
	__in		unsigned int instance,
	__in		efsys_mem_t *esmp,
	__in		size_t nevs,
	__in		uint32_t irq,
	__out_opt	uint32_t *irqp)
{
	efx_mcdi_req_t req;
	uint8_t payload[
	    MAX(MC_CMD_INIT_EVQ_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
		MC_CMD_INIT_EVQ_OUT_LEN)];
	efx_qword_t *dma_addr;
	uint64_t addr;
	int npages;
	int i;
	int supports_rx_batching;
	efx_rc_t rc;

	npages = EFX_EVQ_NBUFS(nevs);
	if (MC_CMD_INIT_EVQ_IN_LEN(npages) > MC_CMD_INIT_EVQ_IN_LENMAX) {
		rc = EINVAL;
		goto fail1;
	}

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_INIT_EVQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_INIT_EVQ_IN_LEN(npages);
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_INIT_EVQ_OUT_LEN;

	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_SIZE, nevs);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_INSTANCE, instance);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_IRQ_NUM, irq);

	/*
	 * On Huntington, RX and TX event batching can only be requested
	 * together (even if the datapath firmware doesn't actually support
	 * RX batching).
	 * Cut through is incompatible with RX batching, so enabling cut
	 * through disables RX batching (but it does not affect TX batching).
	 *
	 * So always enable RX and TX event batching, and enable cut through
	 * if RX event batching isn't supported (i.e. on low latency firmware).
	 */
	supports_rx_batching = enp->en_nic_cfg.enc_rx_batching_enabled ? 1 : 0;
	MCDI_IN_POPULATE_DWORD_6(req, INIT_EVQ_IN_FLAGS,
	    INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
	    INIT_EVQ_IN_FLAG_RPTR_DOS, 0,
	    INIT_EVQ_IN_FLAG_INT_ARMD, 0,
	    INIT_EVQ_IN_FLAG_CUT_THRU, !supports_rx_batching,
	    INIT_EVQ_IN_FLAG_RX_MERGE, 1,
	    INIT_EVQ_IN_FLAG_TX_MERGE, 1);

	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE,
	    MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, 0);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, 0);

	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_MODE,
	    MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_THRSHLD, 0);

	dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_IN_DMA_ADDR);
	addr = EFSYS_MEM_ADDR(esmp);

	for (i = 0; i < npages; i++) {
		EFX_POPULATE_QWORD_2(*dma_addr,
		    EFX_DWORD_1, (uint32_t)(addr >> 32),
		    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));

		dma_addr++;
		addr += EFX_BUF_SIZE;
	}

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail2;
	}

	if (req.emr_out_length_used < MC_CMD_INIT_EVQ_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail3;
	}

	if (irqp != NULL)
		*irqp = MCDI_OUT_DWORD(req, INIT_EVQ_OUT_IRQ);

	return (0);

fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
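INIT_EVQ_IN_DMA_ADDR is an array of 64-bit addresses, one entry per buffer-sized page backing the event queue, so the loop above walks the contiguous queue memory in EFX_BUF_SIZE steps. A standalone sketch of that walk, assuming 4 KiB buffers (the constant name is illustrative):

#include <stdint.h>

/* Assumption for illustration: queue buffers are 4 KiB. */
#define EXAMPLE_BUF_SIZE	4096u

/*
 * Fill addrs[0..npages-1] with the DMA address of each buffer-sized
 * page of a physically contiguous queue starting at base.
 */
static void
fill_dma_addrs(uint64_t base, uint64_t *addrs, int npages)
{
	int i;

	for (i = 0; i < npages; i++)
		addrs[i] = base + (uint64_t)i * EXAMPLE_BUF_SIZE;
}
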
Example #7
static	__checkReturn	efx_rc_t
efx_mcdi_init_rxq(
	__in		efx_nic_t *enp,
	__in		uint32_t size,
	__in		uint32_t target_evq,
	__in		uint32_t label,
	__in		uint32_t instance,
	__in		efsys_mem_t *esmp,
	__in		boolean_t disable_scatter)
{
	efx_mcdi_req_t req;
	uint8_t payload[
	    MAX(MC_CMD_INIT_RXQ_IN_LEN(EFX_RXQ_NBUFS(EFX_RXQ_MAXNDESCS)),
		MC_CMD_INIT_RXQ_OUT_LEN)];
	int npages = EFX_RXQ_NBUFS(size);
	int i;
	efx_qword_t *dma_addr;
	uint64_t addr;
	efx_rc_t rc;

	EFSYS_ASSERT3U(size, <=, EFX_RXQ_MAXNDESCS);

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_INIT_RXQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_INIT_RXQ_IN_LEN(npages);
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_INIT_RXQ_OUT_LEN;

	MCDI_IN_SET_DWORD(req, INIT_RXQ_IN_SIZE, size);
	MCDI_IN_SET_DWORD(req, INIT_RXQ_IN_TARGET_EVQ, target_evq);
	MCDI_IN_SET_DWORD(req, INIT_RXQ_IN_LABEL, label);
	MCDI_IN_SET_DWORD(req, INIT_RXQ_IN_INSTANCE, instance);
	MCDI_IN_POPULATE_DWORD_6(req, INIT_RXQ_IN_FLAGS,
			    INIT_RXQ_IN_FLAG_BUFF_MODE, 0,
			    INIT_RXQ_IN_FLAG_HDR_SPLIT, 0,
			    INIT_RXQ_IN_FLAG_TIMESTAMP, 0,
			    INIT_RXQ_IN_CRC_MODE, 0,
			    INIT_RXQ_IN_FLAG_PREFIX, 1,
			    INIT_RXQ_IN_FLAG_DISABLE_SCATTER, disable_scatter);
	MCDI_IN_SET_DWORD(req, INIT_RXQ_IN_OWNER_ID, 0);
	MCDI_IN_SET_DWORD(req, INIT_RXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);

	dma_addr = MCDI_IN2(req, efx_qword_t, INIT_RXQ_IN_DMA_ADDR);
	addr = EFSYS_MEM_ADDR(esmp);

	for (i = 0; i < npages; i++) {
		EFX_POPULATE_QWORD_2(*dma_addr,
		    EFX_DWORD_1, (uint32_t)(addr >> 32),
		    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));

		dma_addr++;
		addr += EFX_BUF_SIZE;
	}

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
Example #8
static	__checkReturn	efx_rc_t
efx_mcdi_init_evq_v2(
	__in		efx_nic_t *enp,
	__in		unsigned int instance,
	__in		efsys_mem_t *esmp,
	__in		size_t nevs,
	__in		uint32_t irq,
	__in		uint32_t us,
	__in		uint32_t flags)
{
	efx_mcdi_req_t req;
	uint8_t payload[
		MAX(MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
		    MC_CMD_INIT_EVQ_V2_OUT_LEN)];
	boolean_t interrupting;
	unsigned int evq_type;
	efx_qword_t *dma_addr;
	uint64_t addr;
	int npages;
	int i;
	efx_rc_t rc;

	npages = EFX_EVQ_NBUFS(nevs);
	if (MC_CMD_INIT_EVQ_V2_IN_LEN(npages) > MC_CMD_INIT_EVQ_V2_IN_LENMAX) {
		rc = EINVAL;
		goto fail1;
	}

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_INIT_EVQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_INIT_EVQ_V2_IN_LEN(npages);
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_INIT_EVQ_V2_OUT_LEN;

	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_SIZE, nevs);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_INSTANCE, instance);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_IRQ_NUM, irq);

	interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
	    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);

	switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) {
	case EFX_EVQ_FLAGS_TYPE_AUTO:
		evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO;
		break;
	case EFX_EVQ_FLAGS_TYPE_THROUGHPUT:
		evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_THROUGHPUT;
		break;
	case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY:
		evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LOW_LATENCY;
		break;
	default:
		rc = EINVAL;
		goto fail2;
	}
	MCDI_IN_POPULATE_DWORD_4(req, INIT_EVQ_V2_IN_FLAGS,
	    INIT_EVQ_V2_IN_FLAG_INTERRUPTING, interrupting,
	    INIT_EVQ_V2_IN_FLAG_RPTR_DOS, 0,
	    INIT_EVQ_V2_IN_FLAG_INT_ARMD, 0,
	    INIT_EVQ_V2_IN_FLAG_TYPE, evq_type);

	/* If the moderation period is zero, disable the timer */
	if (us == 0) {
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, 0);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, 0);
	} else {
		unsigned int ticks;

		if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
			goto fail3;

		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, ticks);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, ticks);
	}

	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_MODE,
	    MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_THRSHLD, 0);

	dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_V2_IN_DMA_ADDR);
	addr = EFSYS_MEM_ADDR(esmp);

	for (i = 0; i < npages; i++) {
		EFX_POPULATE_QWORD_2(*dma_addr,
		    EFX_DWORD_1, (uint32_t)(addr >> 32),
		    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));

		dma_addr++;
		addr += EFX_BUF_SIZE;
	}

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail4;
	}

	if (req.emr_out_length_used < MC_CMD_INIT_EVQ_V2_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail5;
	}

	/* NOTE: ignore the returned IRQ param as firmware does not set it. */

	EFSYS_PROBE1(mcdi_evq_flags, uint32_t,
		    MCDI_OUT_DWORD(req, INIT_EVQ_V2_OUT_FLAGS));

	return (0);

fail5:
	EFSYS_PROBE(fail5);
fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
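When us is non-zero, the moderation period is first converted to hardware timer ticks by efx_ev_usecs_to_ticks() and the same tick count is used for TMR_LOAD and TMR_RELOAD. One plausible shape for such a conversion is sketched below; the quantum_ns parameter is an assumption for illustration, not the libefx interface, and the real helper also range-checks the result.

#include <stdint.h>

/*
 * Illustrative only: convert a moderation period in microseconds to
 * timer ticks, rounding up, for a hardware timer with a quantum of
 * quantum_ns nanoseconds.
 */
static unsigned int
usecs_to_ticks(unsigned int us, unsigned int quantum_ns)
{
	uint64_t ns = (uint64_t)us * 1000;

	return ((unsigned int)((ns + quantum_ns - 1) / quantum_ns));
}
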
Example #9
static	__checkReturn	efx_rc_t
efx_mcdi_init_evq(
	__in		efx_nic_t *enp,
	__in		unsigned int instance,
	__in		efsys_mem_t *esmp,
	__in		size_t nevs,
	__in		uint32_t irq,
	__in		uint32_t us,
	__in		uint32_t flags,
	__in		boolean_t low_latency)
{
	efx_mcdi_req_t req;
	uint8_t payload[
	    MAX(MC_CMD_INIT_EVQ_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
		MC_CMD_INIT_EVQ_OUT_LEN)];
	efx_qword_t *dma_addr;
	uint64_t addr;
	int npages;
	int i;
	boolean_t interrupting;
	int ev_cut_through;
	efx_rc_t rc;

	npages = EFX_EVQ_NBUFS(nevs);
	if (MC_CMD_INIT_EVQ_IN_LEN(npages) > MC_CMD_INIT_EVQ_IN_LENMAX) {
		rc = EINVAL;
		goto fail1;
	}

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_INIT_EVQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_INIT_EVQ_IN_LEN(npages);
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_INIT_EVQ_OUT_LEN;

	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_SIZE, nevs);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_INSTANCE, instance);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_IRQ_NUM, irq);

	interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
	    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);

	/*
	 * On Huntington, RX and TX event batching can only be requested
	 * together (even if the datapath firmware doesn't actually support
	 * RX batching). If event cut through is enabled, no RX batching
	 * will occur.
	 *
	 * So always enable RX and TX event batching, and enable event cut
	 * through if we want low latency operation.
	 */
	switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) {
	case EFX_EVQ_FLAGS_TYPE_AUTO:
		ev_cut_through = low_latency ? 1 : 0;
		break;
	case EFX_EVQ_FLAGS_TYPE_THROUGHPUT:
		ev_cut_through = 0;
		break;
	case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY:
		ev_cut_through = 1;
		break;
	default:
		rc = EINVAL;
		goto fail2;
	}
	MCDI_IN_POPULATE_DWORD_6(req, INIT_EVQ_IN_FLAGS,
	    INIT_EVQ_IN_FLAG_INTERRUPTING, interrupting,
	    INIT_EVQ_IN_FLAG_RPTR_DOS, 0,
	    INIT_EVQ_IN_FLAG_INT_ARMD, 0,
	    INIT_EVQ_IN_FLAG_CUT_THRU, ev_cut_through,
	    INIT_EVQ_IN_FLAG_RX_MERGE, 1,
	    INIT_EVQ_IN_FLAG_TX_MERGE, 1);

	/* If the moderation period is zero, disable the timer */
	if (us == 0) {
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, 0);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, 0);
	} else {
		unsigned int ticks;

		if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
			goto fail3;

		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, ticks);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, ticks);
	}

	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_MODE,
	    MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_THRSHLD, 0);

	dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_IN_DMA_ADDR);
	addr = EFSYS_MEM_ADDR(esmp);

	for (i = 0; i < npages; i++) {
		EFX_POPULATE_QWORD_2(*dma_addr,
		    EFX_DWORD_1, (uint32_t)(addr >> 32),
		    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));

		dma_addr++;
		addr += EFX_BUF_SIZE;
	}

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail4;
	}

	if (req.emr_out_length_used < MC_CMD_INIT_EVQ_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail5;
	}

	/* NOTE: ignore the returned IRQ param as firmware does not set it. */

	return (0);

fail5:
	EFSYS_PROBE(fail5);
fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
Example #10
static	__checkReturn	efx_rc_t
efx_mcdi_init_txq(
	__in		efx_nic_t *enp,
	__in		uint32_t size,
	__in		uint32_t target_evq,
	__in		uint32_t label,
	__in		uint32_t instance,
	__in		uint16_t flags,
	__in		efsys_mem_t *esmp)
{
	efx_mcdi_req_t req;
	uint8_t payload[MAX(MC_CMD_INIT_TXQ_IN_LEN(EFX_TXQ_MAX_BUFS),
			    MC_CMD_INIT_TXQ_OUT_LEN)];
	efx_qword_t *dma_addr;
	uint64_t addr;
	int npages;
	int i;
	efx_rc_t rc;

	EFSYS_ASSERT(EFX_TXQ_MAX_BUFS >=
	    EFX_TXQ_NBUFS(enp->en_nic_cfg.enc_txq_max_ndescs));

	npages = EFX_TXQ_NBUFS(size);
	if (npages > MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM) {
		rc = EINVAL;
		goto fail1;
	}

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_INIT_TXQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_INIT_TXQ_IN_LEN(npages);
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_INIT_TXQ_OUT_LEN;

	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_SIZE, size);
	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_TARGET_EVQ, target_evq);
	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_LABEL, label);
	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_INSTANCE, instance);

	MCDI_IN_POPULATE_DWORD_7(req, INIT_TXQ_IN_FLAGS,
	    INIT_TXQ_IN_FLAG_BUFF_MODE, 0,
	    INIT_TXQ_IN_FLAG_IP_CSUM_DIS,
	    (flags & EFX_TXQ_CKSUM_IPV4) ? 0 : 1,
	    INIT_TXQ_IN_FLAG_TCP_CSUM_DIS,
	    (flags & EFX_TXQ_CKSUM_TCPUDP) ? 0 : 1,
	    INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, (flags & EFX_TXQ_FATSOV2) ? 1 : 0,
	    INIT_TXQ_IN_FLAG_TCP_UDP_ONLY, 0,
	    INIT_TXQ_IN_CRC_MODE, 0,
	    INIT_TXQ_IN_FLAG_TIMESTAMP, 0);

	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_OWNER_ID, 0);
	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);

	dma_addr = MCDI_IN2(req, efx_qword_t, INIT_TXQ_IN_DMA_ADDR);
	addr = EFSYS_MEM_ADDR(esmp);

	for (i = 0; i < npages; i++) {
		EFX_POPULATE_QWORD_2(*dma_addr,
		    EFX_DWORD_1, (uint32_t)(addr >> 32),
		    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));

		dma_addr++;
		addr += EFX_BUF_SIZE;
	}

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail2;
	}

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
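The INIT_TXQ flags are expressed as checksum *disable* bits, while the libefx flags say which offloads the caller wants enabled, hence the inverted ternaries above. A standalone sketch of that inversion; the EXAMPLE_* flag values are stand-ins for EFX_TXQ_CKSUM_IPV4 and EFX_TXQ_CKSUM_TCPUDP:

#include <stdint.h>

/* Stand-ins for EFX_TXQ_CKSUM_IPV4 / EFX_TXQ_CKSUM_TCPUDP. */
#define EXAMPLE_CKSUM_IPV4	0x0001u
#define EXAMPLE_CKSUM_TCPUDP	0x0002u

/*
 * An offload the caller requested maps to a 0 "disable" bit in the
 * MCDI request; an absent offload maps to 1.
 */
static void
cksum_flags_to_disable_bits(uint16_t flags,
    unsigned int *ip_csum_dis, unsigned int *tcp_csum_dis)
{
	*ip_csum_dis = (flags & EXAMPLE_CKSUM_IPV4) ? 0 : 1;
	*tcp_csum_dis = (flags & EXAMPLE_CKSUM_TCPUDP) ? 0 : 1;
}
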
Example #11
0
			void
hunt_mcdi_request_copyin(
	__in		efx_nic_t *enp,
	__in		efx_mcdi_req_t *emrp,
	__in		unsigned int seq,
	__in		boolean_t ev_cpl,
	__in		boolean_t new_epoch)
{
	const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
	efsys_mem_t *esmp = emtp->emt_dma_mem;
	efx_mcdi_header_type_t hdr_type;
	efx_dword_t dword;
	efx_dword_t hdr[2];
	unsigned int xflags;
	unsigned int pos;
	size_t offset;

	EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_HUNTINGTON);

	xflags = 0;
	if (ev_cpl)
		xflags |= MCDI_HEADER_XFLAGS_EVREQ;

	offset = 0;

	hdr_type = EFX_MCDI_HEADER_TYPE(emrp->emr_cmd,
	    MAX(emrp->emr_in_length, emrp->emr_out_length));

	if (hdr_type == EFX_MCDI_HEADER_TYPE_V2) {
		/* Construct MCDI v2 header */
		EFX_POPULATE_DWORD_8(hdr[0],
		    MCDI_HEADER_CODE, MC_CMD_V2_EXTN,
		    MCDI_HEADER_RESYNC, 1,
		    MCDI_HEADER_DATALEN, 0,
		    MCDI_HEADER_SEQ, seq,
		    MCDI_HEADER_NOT_EPOCH, new_epoch ? 0 : 1,
		    MCDI_HEADER_ERROR, 0,
		    MCDI_HEADER_RESPONSE, 0,
		    MCDI_HEADER_XFLAGS, xflags);
		EFSYS_MEM_WRITED(esmp, offset, &hdr[0]);
		offset += sizeof (efx_dword_t);

		EFX_POPULATE_DWORD_2(hdr[1],
		    MC_CMD_V2_EXTN_IN_EXTENDED_CMD, emrp->emr_cmd,
		    MC_CMD_V2_EXTN_IN_ACTUAL_LEN, emrp->emr_in_length);
		EFSYS_MEM_WRITED(esmp, offset, &hdr[1]);
		offset += sizeof (efx_dword_t);
	} else {
		/* Construct MCDI v1 header */
		EFX_POPULATE_DWORD_8(hdr[0],
		    MCDI_HEADER_CODE, emrp->emr_cmd,
		    MCDI_HEADER_RESYNC, 1,
		    MCDI_HEADER_DATALEN, emrp->emr_in_length,
		    MCDI_HEADER_SEQ, seq,
		    MCDI_HEADER_NOT_EPOCH, new_epoch ? 0 : 1,
		    MCDI_HEADER_ERROR, 0,
		    MCDI_HEADER_RESPONSE, 0,
		    MCDI_HEADER_XFLAGS, xflags);
		EFSYS_MEM_WRITED(esmp, 0, &hdr[0]);
		offset += sizeof (efx_dword_t);
	}

#if EFSYS_OPT_MCDI_LOGGING
	if (emtp->emt_logger != NULL) {
		emtp->emt_logger(emtp->emt_context, EFX_LOG_MCDI_REQUEST,
		    &hdr, offset,
		    emrp->emr_in_buf, emrp->emr_in_length);
	}
#endif /* EFSYS_OPT_MCDI_LOGGING */

	/* Construct the payload */
	for (pos = 0; pos < emrp->emr_in_length; pos += sizeof (efx_dword_t)) {
		memcpy(&dword, MCDI_IN(*emrp, efx_dword_t, pos),
		    MIN(sizeof (dword), emrp->emr_in_length - pos));
		EFSYS_MEM_WRITED(esmp, offset + pos, &dword);
	}

	/* Ring the doorbell to post the command DMA address to the MC */
	EFSYS_ASSERT((EFSYS_MEM_ADDR(esmp) & 0xFF) == 0);

	/* Guarantee ordering of memory (MCDI request) and PIO (MC doorbell) */
	EFSYS_DMA_SYNC_FOR_DEVICE(esmp, 0, offset + emrp->emr_in_length);
	EFSYS_PIO_WRITE_BARRIER();

	EFX_POPULATE_DWORD_1(dword,
	    EFX_DWORD_0, EFSYS_MEM_ADDR(esmp) >> 32);
	EFX_BAR_WRITED(enp, ER_DZ_MC_DB_LWRD_REG, &dword, B_FALSE);

	EFX_POPULATE_DWORD_1(dword,
	    EFX_DWORD_0, EFSYS_MEM_ADDR(esmp) & 0xffffffff);
	EFX_BAR_WRITED(enp, ER_DZ_MC_DB_HWRD_REG, &dword, B_FALSE);
}