Example #1
static int
sfxge_ev_qinit(struct sfxge_softc *sc, unsigned int index)
{
	struct sfxge_evq *evq;
	efsys_mem_t *esmp;
	int rc;

	KASSERT(index < SFXGE_RX_SCALE_MAX, ("index >= SFXGE_RX_SCALE_MAX"));

	evq = malloc(sizeof(struct sfxge_evq), M_SFXGE, M_ZERO | M_WAITOK);
	evq->sc = sc;
	evq->index = index;
	sc->evq[index] = evq;
	esmp = &evq->mem;

	/* Initialise TX completion list */
	evq->txqs = &evq->txq;

	/* Allocate DMA space. */
	if ((rc = sfxge_dma_alloc(sc, EFX_EVQ_SIZE(SFXGE_NEVS), esmp)) != 0)
		return (rc);

	/* Allocate buffer table entries. */
	sfxge_sram_buf_tbl_alloc(sc, EFX_EVQ_NBUFS(SFXGE_NEVS),
				 &evq->buf_base_id);

	mtx_init(&evq->lock, "evq", NULL, MTX_DEF);

	evq->init_state = SFXGE_EVQ_INITIALIZED;

	return (0);
}
Example #2
static int
_sfxge_ev_qctor(sfxge_t *sp, sfxge_evq_t *sep, int kmflags, uint16_t evq_size)
{
	efsys_mem_t *esmp = &(sep->se_mem);
	sfxge_dma_buffer_attr_t dma_attr;
	int rc;

	/* Compile-time structure layout checks */
	EFX_STATIC_ASSERT(sizeof (sep->__se_u1.__se_s1) <=
	    sizeof (sep->__se_u1.__se_pad));
	EFX_STATIC_ASSERT(sizeof (sep->__se_u2.__se_s2) <=
	    sizeof (sep->__se_u2.__se_pad));
	EFX_STATIC_ASSERT(sizeof (sep->__se_u3.__se_s3) <=
	    sizeof (sep->__se_u3.__se_pad));

	bzero(sep, sizeof (sfxge_evq_t));

	sep->se_sp = sp;

	dma_attr.sdba_dip	 = sp->s_dip;
	dma_attr.sdba_dattrp	 = &sfxge_evq_dma_attr;
	dma_attr.sdba_callback	 = (kmflags == KM_SLEEP) ?
	    DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
	dma_attr.sdba_length	 = EFX_EVQ_SIZE(evq_size);
	dma_attr.sdba_memflags	 = DDI_DMA_CONSISTENT;
	dma_attr.sdba_devaccp	 = &sfxge_evq_devacc;
	dma_attr.sdba_bindflags	 = DDI_DMA_READ | DDI_DMA_CONSISTENT;
	dma_attr.sdba_maxcookies = 1;
	dma_attr.sdba_zeroinit	 = B_FALSE;

	if ((rc = sfxge_dma_buffer_create(esmp, &dma_attr)) != 0)
		goto fail1;

	/* Allocate some buffer table entries */
	if ((rc = sfxge_sram_buf_tbl_alloc(sp, EFX_EVQ_NBUFS(evq_size),
	    &(sep->se_id))) != 0)
		goto fail2;

	sep->se_stpp = &(sep->se_stp);

	return (0);

fail2:
	DTRACE_PROBE(fail2);

	/* Tear down DMA setup */
	esmp->esm_addr = 0;
	sfxge_dma_buffer_destroy(esmp);

fail1:
	DTRACE_PROBE1(fail1, int, rc);

	sep->se_sp = NULL;

	SFXGE_OBJ_CHECK(sep, sfxge_evq_t);

	return (-1);
}
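
Example #2 relies on the cascading fail-label idiom: each failure jumps to a label that undoes only the steps completed so far, in reverse order, before reporting the error. Below is a minimal, self-contained sketch of the same idiom; the setup() function and its malloc'd resources are made up for illustration and are not part of the driver.

#include <errno.h>
#include <stdlib.h>

/*
 * Acquire two resources in order; on failure, unwind only what was
 * already acquired, in reverse order, and report the error.
 */
static int
setup(void **ap, void **bp)
{
	void *a, *b;
	int rc;

	if ((a = malloc(64)) == NULL) {
		rc = ENOMEM;
		goto fail1;
	}

	if ((b = malloc(64)) == NULL) {
		rc = ENOMEM;
		goto fail2;
	}

	*ap = a;
	*bp = b;
	return (0);

fail2:
	free(a);		/* undo step 1 only */
fail1:
	*ap = *bp = NULL;
	return (rc);
}

int
main(void)
{
	void *a, *b;

	if (setup(&a, &b) == 0) {
		free(b);
		free(a);
	}
	return (0);
}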
Example #3
static int
sfxge_ev_qinit(struct sfxge_softc *sc, unsigned int index)
{
	struct sfxge_evq *evq;
	efsys_mem_t *esmp;
	int rc;

	KASSERT(index < SFXGE_RX_SCALE_MAX, ("index >= SFXGE_RX_SCALE_MAX"));

	evq = malloc(sizeof(struct sfxge_evq), M_SFXGE, M_ZERO | M_WAITOK);
	evq->sc = sc;
	evq->index = index;
	sc->evq[index] = evq;
	esmp = &evq->mem;

	/*
	 * Build an event queue with room for one event per tx and rx buffer,
	 * plus some extra for link state events and MCDI completions.
	 * There are three tx queues in the first event queue and one in
	 * each of the others.
	 */
	if (index == 0)
		evq->entries =
			ROUNDUP_POW_OF_TWO(sc->rxq_entries +
					   3 * sc->txq_entries +
					   128);
	else
		evq->entries =
			ROUNDUP_POW_OF_TWO(sc->rxq_entries +
					   sc->txq_entries +
					   128);

	/* Initialise TX completion list */
	evq->txqs = &evq->txq;

	/* Allocate DMA space. */
	if ((rc = sfxge_dma_alloc(sc, EFX_EVQ_SIZE(evq->entries), esmp)) != 0)
		return (rc);

	/* Allocate buffer table entries. */
	sfxge_sram_buf_tbl_alloc(sc, EFX_EVQ_NBUFS(evq->entries),
				 &evq->buf_base_id);

	SFXGE_EVQ_LOCK_INIT(evq, device_get_nameunit(sc->dev), index);

	evq->init_state = SFXGE_EVQ_INITIALIZED;

	return (0);
}
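
The entries calculation in Example #3 depends on ROUNDUP_POW_OF_TWO, presumably because the hardware expects a power-of-two event queue size (Example #4 asserts exactly that with rte_is_power_of_2). A minimal sketch of what that rounding is assumed to do follows; the helper below is illustrative, and the driver's actual macro may be defined differently.

#include <assert.h>

/*
 * Round v up to the next power of two (v > 0). This mirrors what the
 * ROUNDUP_POW_OF_TWO() helper above is assumed to do; the driver's
 * actual definition may differ.
 */
static unsigned int
roundup_pow_of_two(unsigned int v)
{
	unsigned int r = 1;

	while (r < v)
		r <<= 1;
	return (r);
}

int
main(void)
{
	/* e.g. 1024 RX + 3 * 1024 TX + 128 extra entries = 4224 -> 8192 */
	assert(roundup_pow_of_two(4224) == 8192);
	assert(roundup_pow_of_two(4096) == 4096);
	return (0);
}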
Example #4
int
sfc_ev_qinit(struct sfc_adapter *sa,
	     enum sfc_evq_type type, unsigned int type_index,
	     unsigned int entries, int socket_id, struct sfc_evq **evqp)
{
	struct sfc_evq *evq;
	int rc;

	sfc_log_init(sa, "type=%s type_index=%u",
		     sfc_evq_type2str(type), type_index);

	SFC_ASSERT(rte_is_power_of_2(entries));

	rc = ENOMEM;
	evq = rte_zmalloc_socket("sfc-evq", sizeof(*evq), RTE_CACHE_LINE_SIZE,
				 socket_id);
	if (evq == NULL)
		goto fail_evq_alloc;

	evq->sa = sa;
	evq->type = type;
	evq->entries = entries;

	/* Allocate DMA space */
	rc = sfc_dma_alloc(sa, sfc_evq_type2str(type), type_index,
			   EFX_EVQ_SIZE(evq->entries), socket_id, &evq->mem);
	if (rc != 0)
		goto fail_dma_alloc;

	evq->init_state = SFC_EVQ_INITIALIZED;

	sa->evq_count++;

	*evqp = evq;

	return 0;

fail_dma_alloc:
	rte_free(evq);

fail_evq_alloc:

	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
Example #5
static int
sfxge_ev_qstart(struct sfxge_softc *sc, unsigned int index)
{
	struct sfxge_evq *evq;
	efsys_mem_t *esmp;
	int count;
	int rc;

	evq = sc->evq[index];
	esmp = &evq->mem;

	KASSERT(evq->init_state == SFXGE_EVQ_INITIALIZED,
	    ("evq->init_state != SFXGE_EVQ_INITIALIZED"));

	/* Clear all events. */
	(void)memset(esmp->esm_base, 0xff, EFX_EVQ_SIZE(SFXGE_NEVS));

	/* Program the buffer table. */
	if ((rc = efx_sram_buf_tbl_set(sc->enp, evq->buf_base_id, esmp,
	    EFX_EVQ_NBUFS(SFXGE_NEVS))) != 0)
		return (rc);

	/* Create the common code event queue. */
	if ((rc = efx_ev_qcreate(sc->enp, index, esmp, SFXGE_NEVS,
	    evq->buf_base_id, &evq->common)) != 0)
		goto fail;

	mtx_lock(&evq->lock);

	/* Set the default moderation */
	(void)efx_ev_qmoderate(evq->common, sc->ev_moderation);

	/* Prime the event queue for interrupts */
	if ((rc = efx_ev_qprime(evq->common, evq->read_ptr)) != 0)
		goto fail2;

	evq->init_state = SFXGE_EVQ_STARTING;

	mtx_unlock(&evq->lock);

	/* Wait for the initialization event */
	count = 0;
	do {
		/* Pause for 100 ms */
		pause("sfxge evq init", hz / 10);

		/* Check to see if the test event has been processed */
		if (evq->init_state == SFXGE_EVQ_STARTED)
			goto done;

	} while (++count < 20);

	rc = ETIMEDOUT;
	goto fail3;

done:
	return (0);

fail3:
	mtx_lock(&evq->lock);
	evq->init_state = SFXGE_EVQ_INITIALIZED;
fail2:
	mtx_unlock(&evq->lock);
	efx_ev_qdestroy(evq->common);
fail:
	efx_sram_buf_tbl_clear(sc->enp, evq->buf_base_id,
	    EFX_EVQ_NBUFS(SFXGE_NEVS));

	return (rc);
}
Example #6
/* Event queue HW index allocation scheme is described in sfc_ev.h. */
int
sfc_ev_qstart(struct sfc_evq *evq, unsigned int hw_index)
{
	struct sfc_adapter *sa = evq->sa;
	efsys_mem_t *esmp;
	uint32_t evq_flags = sa->evq_flags;
	unsigned int total_delay_us;
	unsigned int delay_us;
	int rc;

	sfc_log_init(sa, "hw_index=%u", hw_index);

	esmp = &evq->mem;

	evq->evq_index = hw_index;

	/* Clear all events */
	(void)memset((void *)esmp->esm_base, 0xff, EFX_EVQ_SIZE(evq->entries));

	if (sa->intr.lsc_intr && hw_index == sa->mgmt_evq_index)
		evq_flags |= EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
	else
		evq_flags |= EFX_EVQ_FLAGS_NOTIFY_DISABLED;

	/* Create the common code event queue */
	rc = efx_ev_qcreate(sa->nic, hw_index, esmp, evq->entries,
			    0 /* unused on EF10 */, 0, evq_flags,
			    &evq->common);
	if (rc != 0)
		goto fail_ev_qcreate;

	SFC_ASSERT(evq->dp_rxq == NULL || evq->dp_txq == NULL);
	if (evq->dp_rxq != NULL) {
		if (strcmp(sa->dp_rx->dp.name, SFC_KVARG_DATAPATH_EFX) == 0)
			evq->callbacks = &sfc_ev_callbacks_efx_rx;
		else
			evq->callbacks = &sfc_ev_callbacks_dp_rx;
	} else if (evq->dp_txq != NULL) {
		if (strcmp(sa->dp_tx->dp.name, SFC_KVARG_DATAPATH_EFX) == 0)
			evq->callbacks = &sfc_ev_callbacks_efx_tx;
		else
			evq->callbacks = &sfc_ev_callbacks_dp_tx;
	} else {
		evq->callbacks = &sfc_ev_callbacks;
	}

	evq->init_state = SFC_EVQ_STARTING;

	/* Wait for the initialization event */
	total_delay_us = 0;
	delay_us = SFC_EVQ_INIT_BACKOFF_START_US;
	do {
		(void)sfc_ev_qpoll(evq);

		/* Check to see if the initialization complete indication
		 * has been posted by the hardware.
		 */
		if (evq->init_state == SFC_EVQ_STARTED)
			goto done;

		/* Give event queue some time to init */
		rte_delay_us(delay_us);

		total_delay_us += delay_us;

		/* Exponential backoff */
		delay_us *= 2;
		if (delay_us > SFC_EVQ_INIT_BACKOFF_MAX_US)
			delay_us = SFC_EVQ_INIT_BACKOFF_MAX_US;

	} while (total_delay_us < SFC_EVQ_INIT_TIMEOUT_US);

	rc = ETIMEDOUT;
	goto fail_timedout;

done:
	return 0;

fail_timedout:
	evq->init_state = SFC_EVQ_INITIALIZED;
	efx_ev_qdestroy(evq->common);

fail_ev_qcreate:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
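
Example #6 waits for the initialization event with exponential backoff instead of the fixed 100 ms polling used in Example #5. Below is a standalone sketch of that wait pattern; the constants, usleep() and the ready() callback are illustrative stand-ins for SFC_EVQ_INIT_BACKOFF_START_US, SFC_EVQ_INIT_BACKOFF_MAX_US, SFC_EVQ_INIT_TIMEOUT_US and sfc_ev_qpoll(), whose actual values live in the driver.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define BACKOFF_START_US	10
#define BACKOFF_MAX_US		10000
#define TIMEOUT_US		(2 * 1000 * 1000)

/*
 * Poll ready() with exponentially increasing delays until it reports
 * completion or the overall timeout is exceeded.
 */
static int
wait_for_init(bool (*ready)(void *), void *arg)
{
	unsigned int total_delay_us = 0;
	unsigned int delay_us = BACKOFF_START_US;

	do {
		if (ready(arg))
			return (0);

		usleep(delay_us);
		total_delay_us += delay_us;

		/* Double the delay, capped at the maximum step */
		delay_us *= 2;
		if (delay_us > BACKOFF_MAX_US)
			delay_us = BACKOFF_MAX_US;
	} while (total_delay_us < TIMEOUT_US);

	return (ETIMEDOUT);
}

static bool
always_ready(void *arg)
{
	(void)arg;
	return (true);
}

int
main(void)
{
	printf("rc=%d\n", wait_for_init(always_ready, NULL));
	return (0);
}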