Example #1
static int
sfxge_ev_qinit(struct sfxge_softc *sc, unsigned int index)
{
	struct sfxge_evq *evq;
	efsys_mem_t *esmp;
	int rc;

	KASSERT(index < SFXGE_RX_SCALE_MAX, ("index >= SFXGE_RX_SCALE_MAX"));

	evq = malloc(sizeof(struct sfxge_evq), M_SFXGE, M_ZERO | M_WAITOK);
	evq->sc = sc;
	evq->index = index;
	sc->evq[index] = evq;
	esmp = &evq->mem;

	/* Initialise TX completion list */
	evq->txqs = &evq->txq;

	/* Allocate DMA space. */
	if ((rc = sfxge_dma_alloc(sc, EFX_EVQ_SIZE(SFXGE_NEVS), esmp)) != 0)
		return (rc);

	/* Allocate buffer table entries. */
	sfxge_sram_buf_tbl_alloc(sc, EFX_EVQ_NBUFS(SFXGE_NEVS),
				 &evq->buf_base_id);

	mtx_init(&evq->lock, "evq", NULL, MTX_DEF);

	evq->init_state = SFXGE_EVQ_INITIALIZED;

	return (0);
}
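
A minimal caller sketch may help show where sfxge_ev_qinit() fits in driver bring-up. This is an assumption modelled on the usual FreeBSD unwind pattern, not code from the driver; sc->intr.n_alloc (the interrupt count) and sfxge_ev_qfini() (the matching teardown helper) are assumed names.

static int
sfxge_ev_init_queues(struct sfxge_softc *sc)
{
	int index;
	int rc;

	/* One event queue per allocated interrupt. */
	for (index = 0; index < sc->intr.n_alloc; index++) {
		if ((rc = sfxge_ev_qinit(sc, index)) != 0)
			goto fail;
	}

	return (0);

fail:
	/* Unwind the queues that were already initialized. */
	while (--index >= 0)
		sfxge_ev_qfini(sc, index);

	return (rc);
}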
Example #2
static void
sfxge_ev_qstop(struct sfxge_softc *sc, unsigned int index)
{
	struct sfxge_evq *evq;

	evq = sc->evq[index];

	KASSERT(evq->init_state == SFXGE_EVQ_STARTED,
	    ("evq->init_state != SFXGE_EVQ_STARTED"));

	SFXGE_EVQ_LOCK(evq);
	evq->init_state = SFXGE_EVQ_INITIALIZED;
	evq->read_ptr = 0;
	evq->exception = B_FALSE;

#if EFSYS_OPT_QSTATS
	/* Add event counts before discarding the common evq state */
	efx_ev_qstats_update(evq->common, sc->ev_stats);
#endif

	efx_ev_qdestroy(evq->common);
	efx_sram_buf_tbl_clear(sc->enp, evq->buf_base_id,
	    EFX_EVQ_NBUFS(evq->entries));
	SFXGE_EVQ_UNLOCK(evq);
}
Example #3
static int
_sfxge_ev_qctor(sfxge_t *sp, sfxge_evq_t *sep, int kmflags, uint16_t evq_size)
{
	efsys_mem_t *esmp = &(sep->se_mem);
	sfxge_dma_buffer_attr_t dma_attr;
	int rc;

	/* Compile-time structure layout checks */
	EFX_STATIC_ASSERT(sizeof (sep->__se_u1.__se_s1) <=
	    sizeof (sep->__se_u1.__se_pad));
	EFX_STATIC_ASSERT(sizeof (sep->__se_u2.__se_s2) <=
	    sizeof (sep->__se_u2.__se_pad));
	EFX_STATIC_ASSERT(sizeof (sep->__se_u3.__se_s3) <=
	    sizeof (sep->__se_u3.__se_pad));

	bzero(sep, sizeof (sfxge_evq_t));

	sep->se_sp = sp;

	dma_attr.sdba_dip	 = sp->s_dip;
	dma_attr.sdba_dattrp	 = &sfxge_evq_dma_attr;
	dma_attr.sdba_callback	 = (kmflags == KM_SLEEP) ?
	    DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
	dma_attr.sdba_length	 = EFX_EVQ_SIZE(evq_size);
	dma_attr.sdba_memflags	 = DDI_DMA_CONSISTENT;
	dma_attr.sdba_devaccp	 = &sfxge_evq_devacc;
	dma_attr.sdba_bindflags	 = DDI_DMA_READ | DDI_DMA_CONSISTENT;
	dma_attr.sdba_maxcookies = 1;
	dma_attr.sdba_zeroinit	 = B_FALSE;

	if ((rc = sfxge_dma_buffer_create(esmp, &dma_attr)) != 0)
		goto fail1;

	/* Allocate some buffer table entries */
	if ((rc = sfxge_sram_buf_tbl_alloc(sp, EFX_EVQ_NBUFS(evq_size),
	    &(sep->se_id))) != 0)
		goto fail2;

	sep->se_stpp = &(sep->se_stp);

	return (0);

fail2:
	DTRACE_PROBE(fail2);

	/* Tear down DMA setup */
	esmp->esm_addr = 0;
	sfxge_dma_buffer_destroy(esmp);

fail1:
	DTRACE_PROBE1(fail1, int, rc);

	sep->se_sp = NULL;

	SFXGE_OBJ_CHECK(sep, sfxge_evq_t);

	return (-1);
}
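
The EFX_STATIC_ASSERT checks above turn structure-layout regressions into build failures rather than runtime corruption: each live sub-struct must fit inside the padding reserved for it. A minimal standalone illustration of the same idiom, with a hypothetical macro and struct (not from the driver):

/* Fails to compile (negative array size) when the condition is false. */
#define MY_STATIC_ASSERT(cond)	typedef char my_static_assert[(cond) ? 1 : -1]

struct padded {
	union {
		struct { int a; int b; } s;	/* live fields */
		char pad[64];			/* reserved layout space */
	} u;
};

/* The build breaks if the live fields ever outgrow the reserved padding. */
MY_STATIC_ASSERT(sizeof (((struct padded *)0)->u.s) <=
    sizeof (((struct padded *)0)->u.pad));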
Example #4
static int
sfxge_ev_qinit(struct sfxge_softc *sc, unsigned int index)
{
	struct sfxge_evq *evq;
	efsys_mem_t *esmp;
	int rc;

	KASSERT(index < SFXGE_RX_SCALE_MAX, ("index >= SFXGE_RX_SCALE_MAX"));

	evq = malloc(sizeof(struct sfxge_evq), M_SFXGE, M_ZERO | M_WAITOK);
	evq->sc = sc;
	evq->index = index;
	sc->evq[index] = evq;
	esmp = &evq->mem;

	/* Build an event queue with room for one event per tx and rx buffer,
	 * plus some extra for link state events and MCDI completions.
	 * There are three tx queues in the first event queue and one in
	 * each of the others.
	 */
	if (index == 0)
		evq->entries =
			ROUNDUP_POW_OF_TWO(sc->rxq_entries +
					   3 * sc->txq_entries +
					   128);
	else
		evq->entries =
			ROUNDUP_POW_OF_TWO(sc->rxq_entries +
					   sc->txq_entries +
					   128);

	/* Initialise TX completion list */
	evq->txqs = &evq->txq;

	/* Allocate DMA space. */
	if ((rc = sfxge_dma_alloc(sc, EFX_EVQ_SIZE(evq->entries), esmp)) != 0)
		return (rc);

	/* Allocate buffer table entries. */
	sfxge_sram_buf_tbl_alloc(sc, EFX_EVQ_NBUFS(evq->entries),
				 &evq->buf_base_id);

	SFXGE_EVQ_LOCK_INIT(evq, device_get_nameunit(sc->dev), index);

	evq->init_state = SFXGE_EVQ_INITIALIZED;

	return (0);
}
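
A worked example of the sizing rule: with hypothetical ring sizes rxq_entries = 1024 and txq_entries = 1024, event queue 0 needs 1024 + 3 * 1024 + 128 = 4224 entries, which rounds up to 8192, while every other queue needs 1024 + 1024 + 128 = 2176, rounding up to 4096. A standalone sketch of the arithmetic, with a stand-in for the kernel's ROUNDUP_POW_OF_TWO:

#include <stdio.h>

/* Stand-in for the kernel's ROUNDUP_POW_OF_TWO macro (assumption). */
static unsigned int
roundup_pow_of_two(unsigned int n)
{
	unsigned int p = 1;

	while (p < n)
		p <<= 1;
	return (p);
}

int
main(void)
{
	unsigned int rxq_entries = 1024, txq_entries = 1024;

	/* Event queue 0 serves three TX queues: 4224 -> 8192. */
	printf("evq0: %u\n",
	    roundup_pow_of_two(rxq_entries + 3 * txq_entries + 128));

	/* The other event queues serve one TX queue each: 2176 -> 4096. */
	printf("evqN: %u\n",
	    roundup_pow_of_two(rxq_entries + txq_entries + 128));

	return (0);
}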
Example #5
static void
_sfxge_ev_qdtor(sfxge_t *sp, sfxge_evq_t *sep, uint16_t evq_size)
{
	efsys_mem_t *esmp = &(sep->se_mem);
	ASSERT3P(sep->se_sp, ==, sp);
	ASSERT3P(sep->se_stpp, ==, &(sep->se_stp));
	sep->se_stpp = NULL;

	/* Free the buffer table entries */
	sfxge_sram_buf_tbl_free(sp, sep->se_id, EFX_EVQ_NBUFS(evq_size));
	sep->se_id = 0;

	/* Tear down DMA setup */
	sfxge_dma_buffer_destroy(esmp);

	sep->se_sp = NULL;

	SFXGE_OBJ_CHECK(sep, sfxge_evq_t);
}
Example #6
static void
sfxge_ev_qstop(struct sfxge_softc *sc, unsigned int index)
{
	struct sfxge_evq *evq;

	evq = sc->evq[index];

	KASSERT(evq->init_state == SFXGE_EVQ_STARTED,
	    ("evq->init_state != SFXGE_EVQ_STARTED"));

	mtx_lock(&evq->lock);
	evq->init_state = SFXGE_EVQ_INITIALIZED;
	evq->read_ptr = 0;
	evq->exception = B_FALSE;

	/* Add event counts before discarding the common evq state */
	efx_ev_qstats_update(evq->common, sc->ev_stats);

	efx_ev_qdestroy(evq->common);
	efx_sram_buf_tbl_clear(sc->enp, evq->buf_base_id,
	    EFX_EVQ_NBUFS(SFXGE_NEVS));
	mtx_unlock(&evq->lock);
}
Example #7
static	__checkReturn	efx_rc_t
efx_mcdi_init_evq(
	__in		efx_nic_t *enp,
	__in		unsigned int instance,
	__in		efsys_mem_t *esmp,
	__in		size_t nevs,
	__in		uint32_t irq,
	__out_opt	uint32_t *irqp)
{
	efx_mcdi_req_t req;
	uint8_t payload[
	    MAX(MC_CMD_INIT_EVQ_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
		MC_CMD_INIT_EVQ_OUT_LEN)];
	efx_qword_t *dma_addr;
	uint64_t addr;
	int npages;
	int i;
	int supports_rx_batching;
	efx_rc_t rc;

	npages = EFX_EVQ_NBUFS(nevs);
	if (MC_CMD_INIT_EVQ_IN_LEN(npages) > MC_CMD_INIT_EVQ_IN_LENMAX) {
		rc = EINVAL;
		goto fail1;
	}

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_INIT_EVQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_INIT_EVQ_IN_LEN(npages);
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_INIT_EVQ_OUT_LEN;

	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_SIZE, nevs);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_INSTANCE, instance);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_IRQ_NUM, irq);

	/*
	 * On Huntington RX and TX event batching can only be requested
	 * together (even if the datapath firmware doesn't actually support RX
	 * batching).
	 * Cut through is incompatible with RX batching and so enabling cut
	 * through disables RX batching (but it does not affect TX batching).
	 *
	 * So always enable RX and TX event batching, and enable cut through
	 * if RX event batching isn't supported (i.e. on low latency firmware).
	 */
	supports_rx_batching = enp->en_nic_cfg.enc_rx_batching_enabled ? 1 : 0;
	MCDI_IN_POPULATE_DWORD_6(req, INIT_EVQ_IN_FLAGS,
	    INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
	    INIT_EVQ_IN_FLAG_RPTR_DOS, 0,
	    INIT_EVQ_IN_FLAG_INT_ARMD, 0,
	    INIT_EVQ_IN_FLAG_CUT_THRU, !supports_rx_batching,
	    INIT_EVQ_IN_FLAG_RX_MERGE, 1,
	    INIT_EVQ_IN_FLAG_TX_MERGE, 1);

	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE,
	    MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, 0);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, 0);

	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_MODE,
	    MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_THRSHLD, 0);

	dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_IN_DMA_ADDR);
	addr = EFSYS_MEM_ADDR(esmp);

	for (i = 0; i < npages; i++) {
		EFX_POPULATE_QWORD_2(*dma_addr,
		    EFX_DWORD_1, (uint32_t)(addr >> 32),
		    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));

		dma_addr++;
		addr += EFX_BUF_SIZE;
	}

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail2;
	}

	if (req.emr_out_length_used < MC_CMD_INIT_EVQ_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail3;
	}

	if (irqp != NULL)
		*irqp = MCDI_OUT_DWORD(req, INIT_EVQ_OUT_IRQ);

	return (0);

fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
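
The address-table loop above splits each buffer-table page address into 32-bit halves for the MCDI payload, stepping by EFX_BUF_SIZE (the 4 KiB buffer-table page size) between pages. A standalone sketch of that split, using a made-up base address:

#include <stdint.h>
#include <stdio.h>

#define BUF_SIZE	4096ULL		/* buffer-table page size */

int
main(void)
{
	uint64_t addr = 0x0000001234560000ULL;	/* hypothetical DMA base */
	int npages = 2;
	int i;

	for (i = 0; i < npages; i++) {
		/* High and low dwords, as written into the MCDI request. */
		printf("page %d: hi=0x%08x lo=0x%08x\n", i,
		    (unsigned int)(addr >> 32),
		    (unsigned int)(addr & 0xffffffff));
		addr += BUF_SIZE;
	}

	return (0);
}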
Example #8
static	__checkReturn	efx_rc_t
efx_mcdi_init_evq_v2(
	__in		efx_nic_t *enp,
	__in		unsigned int instance,
	__in		efsys_mem_t *esmp,
	__in		size_t nevs,
	__in		uint32_t irq,
	__in		uint32_t us,
	__in		uint32_t flags)
{
	efx_mcdi_req_t req;
	uint8_t payload[
		MAX(MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
		    MC_CMD_INIT_EVQ_V2_OUT_LEN)];
	boolean_t interrupting;
	unsigned int evq_type;
	efx_qword_t *dma_addr;
	uint64_t addr;
	int npages;
	int i;
	efx_rc_t rc;

	npages = EFX_EVQ_NBUFS(nevs);
	if (MC_CMD_INIT_EVQ_V2_IN_LEN(npages) > MC_CMD_INIT_EVQ_V2_IN_LENMAX) {
		rc = EINVAL;
		goto fail1;
	}

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_INIT_EVQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_INIT_EVQ_V2_IN_LEN(npages);
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_INIT_EVQ_V2_OUT_LEN;

	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_SIZE, nevs);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_INSTANCE, instance);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_IRQ_NUM, irq);

	interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
	    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);

	switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) {
	case EFX_EVQ_FLAGS_TYPE_AUTO:
		evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO;
		break;
	case EFX_EVQ_FLAGS_TYPE_THROUGHPUT:
		evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_THROUGHPUT;
		break;
	case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY:
		evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LOW_LATENCY;
		break;
	default:
		rc = EINVAL;
		goto fail2;
	}
	MCDI_IN_POPULATE_DWORD_4(req, INIT_EVQ_V2_IN_FLAGS,
	    INIT_EVQ_V2_IN_FLAG_INTERRUPTING, interrupting,
	    INIT_EVQ_V2_IN_FLAG_RPTR_DOS, 0,
	    INIT_EVQ_V2_IN_FLAG_INT_ARMD, 0,
	    INIT_EVQ_V2_IN_FLAG_TYPE, evq_type);

	/* If the value is zero then disable the timer */
	if (us == 0) {
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, 0);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, 0);
	} else {
		unsigned int ticks;

		if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
			goto fail3;

		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, ticks);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, ticks);
	}

	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_MODE,
	    MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_THRSHLD, 0);

	dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_V2_IN_DMA_ADDR);
	addr = EFSYS_MEM_ADDR(esmp);

	for (i = 0; i < npages; i++) {
		EFX_POPULATE_QWORD_2(*dma_addr,
		    EFX_DWORD_1, (uint32_t)(addr >> 32),
		    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));

		dma_addr++;
		addr += EFX_BUF_SIZE;
	}

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail4;
	}

	if (req.emr_out_length_used < MC_CMD_INIT_EVQ_V2_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail5;
	}

	/* NOTE: ignore the returned IRQ param as firmware does not set it. */

	EFSYS_PROBE1(mcdi_evq_flags, uint32_t,
		    MCDI_OUT_DWORD(req, INIT_EVQ_V2_OUT_FLAGS));

	return (0);

fail5:
	EFSYS_PROBE(fail5);
fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
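
A hypothetical caller sketch may clarify the timer branch: a non-zero us value takes the TMR_INT_HLDOFF path above, while us == 0 disables the timer entirely. The wrapper name below is made up; the flag constants come from the efx headers.

static efx_rc_t
create_low_latency_evq(efx_nic_t *enp, unsigned int instance,
    efsys_mem_t *esmp, size_t nevs, uint32_t irq)
{
	/* 30 us interrupt hold-off; 0 would disable the timer. */
	return (efx_mcdi_init_evq_v2(enp, instance, esmp, nevs, irq, 30,
	    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT | EFX_EVQ_FLAGS_TYPE_LOW_LATENCY));
}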
Example #9
static	__checkReturn	efx_rc_t
efx_mcdi_init_evq(
	__in		efx_nic_t *enp,
	__in		unsigned int instance,
	__in		efsys_mem_t *esmp,
	__in		size_t nevs,
	__in		uint32_t irq,
	__in		uint32_t us,
	__in		uint32_t flags,
	__in		boolean_t low_latency)
{
	efx_mcdi_req_t req;
	uint8_t payload[
	    MAX(MC_CMD_INIT_EVQ_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
		MC_CMD_INIT_EVQ_OUT_LEN)];
	efx_qword_t *dma_addr;
	uint64_t addr;
	int npages;
	int i;
	boolean_t interrupting;
	int ev_cut_through;
	efx_rc_t rc;

	npages = EFX_EVQ_NBUFS(nevs);
	if (MC_CMD_INIT_EVQ_IN_LEN(npages) > MC_CMD_INIT_EVQ_IN_LENMAX) {
		rc = EINVAL;
		goto fail1;
	}

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_INIT_EVQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_INIT_EVQ_IN_LEN(npages);
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_INIT_EVQ_OUT_LEN;

	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_SIZE, nevs);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_INSTANCE, instance);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_IRQ_NUM, irq);

	interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
	    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);

	/*
	 * On Huntington RX and TX event batching can only be requested together
	 * (even if the datapath firmware doesn't actually support RX
	 * batching). If event cut through is enabled no RX batching will occur.
	 *
	 * So always enable RX and TX event batching, and enable event cut
	 * through if we want low latency operation.
	 */
	switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) {
	case EFX_EVQ_FLAGS_TYPE_AUTO:
		ev_cut_through = low_latency ? 1 : 0;
		break;
	case EFX_EVQ_FLAGS_TYPE_THROUGHPUT:
		ev_cut_through = 0;
		break;
	case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY:
		ev_cut_through = 1;
		break;
	default:
		rc = EINVAL;
		goto fail2;
	}
	MCDI_IN_POPULATE_DWORD_6(req, INIT_EVQ_IN_FLAGS,
	    INIT_EVQ_IN_FLAG_INTERRUPTING, interrupting,
	    INIT_EVQ_IN_FLAG_RPTR_DOS, 0,
	    INIT_EVQ_IN_FLAG_INT_ARMD, 0,
	    INIT_EVQ_IN_FLAG_CUT_THRU, ev_cut_through,
	    INIT_EVQ_IN_FLAG_RX_MERGE, 1,
	    INIT_EVQ_IN_FLAG_TX_MERGE, 1);

	/* If the value is zero then disable the timer */
	if (us == 0) {
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, 0);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, 0);
	} else {
		unsigned int ticks;

		if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
			goto fail3;

		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, ticks);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, ticks);
	}

	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_MODE,
	    MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_THRSHLD, 0);

	dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_IN_DMA_ADDR);
	addr = EFSYS_MEM_ADDR(esmp);

	for (i = 0; i < npages; i++) {
		EFX_POPULATE_QWORD_2(*dma_addr,
		    EFX_DWORD_1, (uint32_t)(addr >> 32),
		    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));

		dma_addr++;
		addr += EFX_BUF_SIZE;
	}

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail4;
	}

	if (req.emr_out_length_used < MC_CMD_INIT_EVQ_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail5;
	}

	/* NOTE: ignore the returned IRQ param as firmware does not set it. */

	return (0);

fail5:
	EFSYS_PROBE(fail5);
fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
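
The switch above reduces to a small decision table: THROUGHPUT queues never use event cut-through (it would defeat RX batching), LOW_LATENCY queues always do, and AUTO follows the low_latency firmware hint. A condensed restatement as a sketch (assumes the type flags were already validated; not driver code):

static int
want_cut_through(uint32_t type_flags, boolean_t low_latency)
{
	if (type_flags == EFX_EVQ_FLAGS_TYPE_LOW_LATENCY)
		return (1);		/* always cut through */
	if (type_flags == EFX_EVQ_FLAGS_TYPE_THROUGHPUT)
		return (0);		/* preserve RX batching */
	return (low_latency ? 1 : 0);	/* AUTO: follow the firmware hint */
}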
Example #10
static int
sfxge_ev_qstart(struct sfxge_softc *sc, unsigned int index)
{
	struct sfxge_evq *evq;
	efsys_mem_t *esmp;
	int count;
	int rc;

	evq = sc->evq[index];
	esmp = &evq->mem;

	KASSERT(evq->init_state == SFXGE_EVQ_INITIALIZED,
	    ("evq->init_state != SFXGE_EVQ_INITIALIZED"));

	/* Clear all events. */
	(void)memset(esmp->esm_base, 0xff, EFX_EVQ_SIZE(SFXGE_NEVS));

	/* Program the buffer table. */
	if ((rc = efx_sram_buf_tbl_set(sc->enp, evq->buf_base_id, esmp,
	    EFX_EVQ_NBUFS(SFXGE_NEVS))) != 0)
		return (rc);

	/* Create the common code event queue. */
	if ((rc = efx_ev_qcreate(sc->enp, index, esmp, SFXGE_NEVS,
	    evq->buf_base_id, &evq->common)) != 0)
		goto fail;

	mtx_lock(&evq->lock);

	/* Set the default moderation */
	(void)efx_ev_qmoderate(evq->common, sc->ev_moderation);

	/* Prime the event queue for interrupts */
	if ((rc = efx_ev_qprime(evq->common, evq->read_ptr)) != 0)
		goto fail2;

	evq->init_state = SFXGE_EVQ_STARTING;

	mtx_unlock(&evq->lock);

	/* Wait for the initialization event */
	count = 0;
	do {
		/* Pause for 100 ms */
		pause("sfxge evq init", hz / 10);

		/* Check to see if the test event has been processed */
		if (evq->init_state == SFXGE_EVQ_STARTED)
			goto done;

	} while (++count < 20);

	rc = ETIMEDOUT;
	goto fail3;

done:
	return (0);

fail3:
	mtx_lock(&evq->lock);
	evq->init_state = SFXGE_EVQ_INITIALIZED;
fail2:
	mtx_unlock(&evq->lock);
	efx_ev_qdestroy(evq->common);
fail:
	efx_sram_buf_tbl_clear(sc->enp, evq->buf_base_id,
	    EFX_EVQ_NBUFS(SFXGE_NEVS));

	return (rc);
}
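
The wait loop above polls up to 20 times with a 100 ms pause (hz / 10 ticks), so the startup event has roughly 2 seconds to arrive before the function gives up with ETIMEDOUT. A userspace sketch of the same bounded-poll shape, where usleep() stands in for the kernel's pause() and the predicate is hypothetical:

#include <errno.h>
#include <unistd.h>

static int
wait_for_started(int (*started)(void *), void *arg)
{
	int count;

	/* 20 polls x 100 ms gives the queue roughly 2 seconds to start. */
	for (count = 0; count < 20; count++) {
		usleep(100 * 1000);	/* 100 ms, like pause(..., hz / 10) */
		if (started(arg))
			return (0);
	}

	return (ETIMEDOUT);
}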