Example #1
/*! Helper function to allocate the iobuffer needed by an eventq
 *   - it ensures the eventq has the correct alignment for the NIC
 *
 * \param nic          NIC to allocate the eventq buffer for
 * \param h            Event-queue hardware state to populate
 * \param evq_instance Event-queue instance (index)
 * \param dma_addrs    Array to populate with addrs of allocated pages
 * \param page_order   Requested size of eventq, as an order of OS pages
 * \return             0 on success, < 0 if iobuffer allocation fails
 */
static int
efhw_nic_event_queue_alloc_iobuffer(struct efhw_nic *nic,
				    struct eventq_resource_hardware *h,
				    int evq_instance,
				    dma_addr_t *dma_addrs,
				    unsigned int page_order)
{
	int i, j, rc;

	/* Allocate an iobuffer. */
	EFHW_TRACE("allocating eventq size %x",
		   1u << (page_order + PAGE_SHIFT));
	rc = efhw_iopages_alloc(nic->pci_dev, &h->iobuff, page_order, NULL,
				0UL);
	if (rc < 0) {
		EFHW_WARN("%s: failed to allocate %u pages",
			  __FUNCTION__, 1u << page_order);
		return rc;
	}

	/* Set the eventq pages to match EFHW_CLEAR_EVENT() */
	if (EFHW_CLEAR_EVENT_VALUE)
		memset(efhw_iopages_ptr(&h->iobuff),
		       EFHW_CLEAR_EVENT_VALUE, (1u << page_order) * PAGE_SIZE);

	EFHW_TRACE("%s: allocated %u pages", __FUNCTION__, 1u << (page_order));

	/* For Falcon the NIC is programmed with the base buffer address of a
	 * contiguous region of buffer space.  This means that event queues
	 * larger than a PAGE can be expected to allocate even when the
	 * host's physical memory is fragmented. */
	EFHW_ASSERT(efhw_nic_have_hw(nic));
	EFHW_ASSERT(1 << EFHW_GFP_ORDER_TO_NIC_ORDER(page_order) <=
		    EFHW_BUFFER_TABLE_BLOCK_SIZE);

	/* Initialise the buffer table entries. */
	rc = efhw_nic_buffer_table_alloc(nic, 0, 0, &h->bt_block);
	if (rc < 0) {
		EFHW_WARN("%s: failed to allocate buffer table block",
			  __FUNCTION__);
		efhw_iopages_free(nic->pci_dev, &h->iobuff, NULL);
		return rc;
	}
	/* Record one DMA address per NIC-sized page: each OS page covers
	 * EFHW_NIC_PAGES_IN_OS_PAGE NIC pages. */
	for (i = 0; i < (1 << page_order); ++i) {
		for (j = 0; j < EFHW_NIC_PAGES_IN_OS_PAGE; ++j) {
			dma_addrs[i * EFHW_NIC_PAGES_IN_OS_PAGE + j] =
				efhw_iopages_dma_addr(&h->iobuff, i) +
				j * EFHW_NIC_PAGE_SIZE;
		}
	}
	efhw_nic_buffer_table_set(nic, h->bt_block, 0,
				  1 << EFHW_GFP_ORDER_TO_NIC_ORDER(page_order),
				  dma_addrs);
	falcon_nic_buffer_table_confirm(nic);
	return 0;
}
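
A minimal caller sketch for the allocator above, assuming the surrounding efhw definitions are in scope. The dma_addrs array needs one slot per NIC page, i.e. (1 << page_order) * EFHW_NIC_PAGES_IN_OS_PAGE entries, which is how the fill loop indexes it; the page order and every name other than the allocator itself are illustrative.

#define EXAMPLE_EVQ_PAGE_ORDER 1	/* 2 OS pages; hypothetical choice */

/* Hypothetical caller: allocate the eventq iobuffer and collect the
 * per-NIC-page DMA addresses programmed into the buffer table. */
static int example_evq_setup(struct efhw_nic *nic,
			     struct eventq_resource_hardware *h,
			     int evq_instance)
{
	dma_addr_t dma_addrs[(1 << EXAMPLE_EVQ_PAGE_ORDER) *
			     EFHW_NIC_PAGES_IN_OS_PAGE];
	int rc;

	rc = efhw_nic_event_queue_alloc_iobuffer(nic, h, evq_instance,
						 dma_addrs,
						 EXAMPLE_EVQ_PAGE_ORDER);
	if (rc < 0)
		return rc;	/* iobuffer or buffer-table alloc failed */

	/* On success h->iobuff is mapped, h->bt_block is programmed and
	 * dma_addrs[] holds one DMA address per NIC page. */
	return 0;
}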
Example #2
void
efhw_handle_timeout_event(struct efhw_nic *nic, struct efhw_ev_handler *h,
			  unsigned instance)
{
	if (!h->timeout_fn) {
		EFHW_WARN("%s: no handler registered", __FUNCTION__);
		return;
	}

	h->timeout_fn(nic, instance);
}
Example #3
int
efhw_handle_wakeup_event(struct efhw_nic *nic, struct efhw_ev_handler *h,
			 unsigned instance, int budget)
{
	if (!h->wakeup_fn) {
		EFHW_WARN("%s: no handler registered", __FUNCTION__);
		return 0;
	}

	return h->wakeup_fn(nic, instance, budget);
}
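
The dispatch helpers above, and the timeout/flush variants below, all follow the same pattern: look up a callback in struct efhw_ev_handler and invoke it only if one is registered. A hedged sketch of populating such a handler table follows; the callback prototypes are inferred from the calls in these examples, so treat the exact signatures as assumptions rather than the driver's real header.

/* Illustrative callbacks matching how the dispatchers above invoke them. */
static int my_wakeup(struct efhw_nic *nic, unsigned instance, int budget)
{
	/* e.g. process completions for this VI, bounded by budget */
	return 0;
}

static void my_timeout(struct efhw_nic *nic, unsigned instance)
{
	/* e.g. rearm or tear down the timed-out event queue */
}

/* Hypothetical registration: fill in the table before polling events.
 * dmaq_flushed_fn is left NULL here, so the flush dispatchers would only
 * warn and return. */
static struct efhw_ev_handler my_ev_handlers = {
	.wakeup_fn  = my_wakeup,
	.timeout_fn = my_timeout,
};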
Example #4
void
efhw_handle_timeout_event(struct efhw_nic *nic, struct efhw_ev_handler *h,
			  efhw_event_t *evp)
{
	unsigned instance = (unsigned)FALCON_EVENT_WAKE_EVQ_ID(evp);

	if (!h->timeout_fn) {
		EFHW_WARN("%s: no handler registered", __FUNCTION__);
		return;
	}

	h->timeout_fn(nic, instance);
}
Example #5
int
efhw_handle_rxdmaq_flushed(struct efhw_nic *nic, struct efhw_ev_handler *h,
			   unsigned instance, int failed)
{
	EFHW_TRACE("%s: instance=%d", __FUNCTION__, instance);

	if (!h->dmaq_flushed_fn) {
		EFHW_WARN("%s: no handler registered", __FUNCTION__);
		return 0;
	}

	return h->dmaq_flushed_fn(nic, instance, true, failed);
}
Example #6
void
efhw_handle_txdmaq_flushed(struct efhw_nic *nic, struct efhw_ev_handler *h,
			   efhw_event_t *evp)
{
	int instance = (int)FALCON_EVENT_TX_FLUSH_Q_ID(evp);
	EFHW_TRACE("%s: instance=%d", __FUNCTION__, instance);

	if (!h->dmaq_flushed_fn) {
		EFHW_WARN("%s: no handler registered", __FUNCTION__);
		return;
	}

	h->dmaq_flushed_fn(nic, instance, false, false);
}
Example #7
void
efhw_handle_rxdmaq_flushed(struct efhw_nic *nic, struct efhw_ev_handler *h,
			   efhw_event_t *evp)
{
	unsigned instance = (unsigned)FALCON_EVENT_RX_FLUSH_Q_ID(evp);
	unsigned failed = (unsigned)FALCON_EVENT_RX_FLUSH_FAIL(evp);
	EFHW_TRACE("%s: instance=%d", __FUNCTION__, instance);

	if (!h->dmaq_flushed_fn) {
		EFHW_WARN("%s: no handler registered", __FUNCTION__);
		return;
	}

	h->dmaq_flushed_fn(nic, instance, true, failed);
}
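
Both flush paths funnel into the single dmaq_flushed_fn callback: the third argument distinguishes RX (true) from TX (false) flushes, and the failed flag is only meaningful on the RX path. A sketch of a callback written against that convention, with hypothetical names and an inferred signature:

/* Hypothetical flush callback; see efhw_handle_rxdmaq_flushed() and
 * efhw_handle_txdmaq_flushed() above for how the arguments are passed. */
static int my_dmaq_flushed(struct efhw_nic *nic, unsigned instance,
			   int rx_flush, int failed)
{
	if (rx_flush && failed)
		EFHW_WARN("%s: rxq %u flush failed", __FUNCTION__, instance);

	/* e.g. mark the DMA queue as flushed so its resources can be freed */
	return 0;
}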
Example #8
/*! Helper function to allocate the iobuffer needed by an eventq
 *   - it ensures the eventq has the correct alignment for the NIC
 *
 * \param nic          NIC to allocate the eventq buffer for
 * \param h            Event-queue hardware state to populate
 * \param evq_instance Event-queue instance (index)
 * \param buf_bytes    Requested size of eventq, in bytes
 * \return             0 on success, < 0 if iobuffer allocation fails
 */
int
efhw_nic_event_queue_alloc_iobuffer(struct efhw_nic *nic,
				    struct eventq_resource_hardware *h,
				    int evq_instance, unsigned buf_bytes)
{
	unsigned int page_order;
	int rc;

	/* Allocate an iobuffer. */
	page_order = get_order(buf_bytes);

	EFHW_TRACE("allocating eventq size %x",
		   1u << (page_order + PAGE_SHIFT));
	rc = efhw_iopages_alloc(nic, &h->iobuff, page_order);
	if (rc < 0) {
		EFHW_WARN("%s: failed to allocate %u pages",
			  __FUNCTION__, 1u << page_order);
		return rc;
	}

	/* Set the eventq pages to match EFHW_CLEAR_EVENT() */
	if (EFHW_CLEAR_EVENT_VALUE)
		memset(efhw_iopages_ptr(&h->iobuff),
		       EFHW_CLEAR_EVENT_VALUE, (1u << page_order) * PAGE_SIZE);

	EFHW_TRACE("%s: allocated %u pages", __FUNCTION__, 1u << (page_order));

	/* For Falcon the NIC is programmed with the base buffer address of a
	 * contiguous region of buffer space.  This means that event queues
	 * larger than a PAGE can be expected to allocate even when the
	 * host's physical memory is fragmented. */
	EFHW_ASSERT(efhw_nic_have_hw(nic));
	EFHW_ASSERT(page_order <= h->buf_tbl_alloc.order);

	/* Initialise the buffer table entries. */
	falcon_nic_buffer_table_set_n(nic, h->buf_tbl_alloc.base,
				      efhw_iopages_dma_addr(&h->iobuff),
				      EFHW_NIC_PAGE_SIZE, 0,
				      1 << page_order, 0);
	falcon_nic_buffer_table_confirm(nic);
	return 0;
}
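
This older variant sizes the queue in bytes and derives the page order itself via get_order(). A hedged caller sketch, assuming the capacity is expressed as a number of events (sizeof(efhw_event_t) is used the same way in the poll loop of the last example); the wrapper name and parameters are illustrative.

/* Hypothetical caller for the buf_bytes variant: size the iobuffer from a
 * desired number of event-queue entries. */
static int example_evq_setup_bytes(struct efhw_nic *nic,
				   struct eventq_resource_hardware *h,
				   int evq_instance, unsigned n_events)
{
	unsigned buf_bytes = n_events * sizeof(efhw_event_t);

	/* h->buf_tbl_alloc must already cover get_order(buf_bytes) pages,
	 * as the allocator asserts. */
	return efhw_nic_event_queue_alloc_iobuffer(nic, h, evq_instance,
						   buf_bytes);
}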
Example #9
int efhw_keventq_poll(struct efhw_nic *nic, struct efhw_keventq *q)
{
	efhw_event_t *ev;
	int l, count = 0;

	EFHW_ASSERT(nic);
	EFHW_ASSERT(q);
	EFHW_ASSERT(q->ev_handlers);

	/* Acquire the lock, or mark the queue as needing re-checking. */
	for (;;) {
		l = q->lock;
		if (l == KEVQ_UNLOCKED) {
			if ((int)cmpxchg(&q->lock, l, KEVQ_LOCKED) == l)
				break;
		} else if (l == KEVQ_LOCKED) {
			if ((int)cmpxchg(&q->lock, l, KEVQ_RECHECK) == l)
				return 0;
		} else {	/* already marked for re-checking */
			EFHW_ASSERT(l == KEVQ_RECHECK);
			return 0;
		}
	}

	if (unlikely(EFHW_EVENT_OVERFLOW(q, q)))
		goto overflow;

	ev = EFHW_EVENT_PTR(q, q, 0);

#ifndef NDEBUG
	if (!EFHW_IS_EVENT(ev))
		EFHW_TRACE("%s: %d NO EVENTS!", __FUNCTION__, q->instance);
#endif

	for (;;) {
		/* Convention for return codes for handlers is:
		 **   0   - no error, event consumed
		 **   1   - no error, event not consumed
		 **   -ve - error,    event not consumed
		 */
		if (likely(EFHW_IS_EVENT(ev))) {
			count++;

			switch (FALCON_EVENT_CODE(ev)) {
			case FALCON_EVENT_CODE_CHAR:
				falcon_handle_char_event(nic, q->ev_handlers,
							 ev);
				break;
			default:
				EFHW_ERR("efhw_keventq_poll: [%d] UNEXPECTED "
					 "EVENT:"FALCON_EVENT_FMT,
					 q->instance,
					 FALCON_EVENT_PRI_ARG(*ev));
			}

			EFHW_CLEAR_EVENT(ev);
			EFHW_EVENTQ_NEXT(q);

			ev = EFHW_EVENT_PTR(q, q, 0);
		} else {
			/* No events left.  Release the lock (checking if we
			 * need to re-poll to avoid race). */
			l = q->lock;
			if (l == KEVQ_LOCKED) {
				if ((int)cmpxchg(&q->lock, l, KEVQ_UNLOCKED)
				    == l) {
					EFHW_TRACE
					    ("efhw_keventq_poll: %d clean exit",
					     q->instance);
					goto clean_exit;
				}
			}

			/* Potentially more work to do. */
			l = q->lock;
			EFHW_ASSERT(l == KEVQ_RECHECK);
			EFHW_TEST((int)cmpxchg(&q->lock, l, KEVQ_LOCKED) == l);
			EFHW_TRACE("efhw_keventq_poll: %d re-poll required",
				   q->instance);
		}
	}

	/* shouldn't get here */
	EFHW_ASSERT(0);

overflow:
	/* ?? Oh dear.  Should we poll everything that could have possibly
	 ** happened?  Or merely cry out in anguish...
	 */
	EFHW_WARN("efhw_keventq_poll: %d ***** OVERFLOW nic %d *****",
		  q->instance, nic->index);

	q->lock = KEVQ_UNLOCKED;
	return count;

clean_exit:
	/* Ack the processed events so that this event queue can potentially
	   raise interrupts again */
	if (q->instance == nic->interrupting_evq.instance)
		falcon_nic_evq_ack(nic, q->instance,
				   (EFHW_EVENT_OFFSET(q, q, 0)
				    / sizeof(efhw_event_t)));
	return count;
}
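
efhw_keventq_poll() can be invoked from concurrent contexts because of the KEVQ_UNLOCKED / KEVQ_LOCKED / KEVQ_RECHECK handshake: a racing caller flips the lock to KEVQ_RECHECK and returns immediately, and the current holder re-polls before releasing. A minimal, hypothetical caller, e.g. from an interrupt bottom half; the names other than efhw_keventq_poll() are placeholders.

/* Hypothetical bottom half: drain the kernel event queue and report how
 * many events were handled. */
static void example_evq_bottom_half(struct efhw_nic *my_nic,
				    struct efhw_keventq *my_keventq)
{
	int n = efhw_keventq_poll(my_nic, my_keventq);

	if (n)
		EFHW_TRACE("%s: handled %d events", __FUNCTION__, n);
}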