Example #1
void efhw_nic_dtor(struct efhw_nic *nic)
{
	EFHW_ASSERT(nic);

	/* Check that we have functional units, because the software-only
	 * driver no longer initialises anything hardware-related. */

	/* Interrupts are closed first because deregistering the driver could
	   cause it to change from master to slave, and the implicit interrupt
	   mappings would then be wrong. */

	EFHW_TRACE("%s: functional units ... ", __func__);

	if (efhw_nic_have_functional_units(nic)) {
		efhw_nic_close_interrupts(nic);
		efhw_nic_close_hardware(nic);
	}
	EFHW_TRACE("%s: functional units ... done", __func__);

	/* destroy event queues */
	EFHW_TRACE("%s: event queues ... ", __func__);

	if (nic->interrupting_evq.evq_mask)
		efhw_keventq_dtor(nic, &nic->interrupting_evq);
	if (nic->non_interrupting_evq.evq_mask)
		efhw_keventq_dtor(nic, &nic->non_interrupting_evq);

	EFHW_TRACE("%s: event queues ... done", __func__);

	spin_lock_destroy(&nic->the_reg_lock);

	EFHW_TRACE("%s: DONE", __func__);
}
Example #2
/*! Helper function to allocate the iobuffer needed by an eventq
 *   - it ensures the eventq has the correct alignment for the NIC
 *
 * \param nic          NIC to allocate the iobuffer for
 * \param h            Event-queue hardware resource to fill in
 * \param evq_instance Event-queue instance (index)
 * \param dma_addrs    Array to populate with the DMA addresses of the
 *                     allocated pages
 * \param page_order   Requested size of the eventq as an order of OS pages
 * \return             0 on success, < 0 if iobuffer allocation fails
 */
static int
efhw_nic_event_queue_alloc_iobuffer(struct efhw_nic *nic,
				    struct eventq_resource_hardware *h,
				    int evq_instance, 
				    dma_addr_t *dma_addrs,
				    unsigned int page_order)
{
	int i, j, rc;

	/* Allocate an iobuffer. */
	EFHW_TRACE("allocating eventq size %x",
		   1u << (page_order + PAGE_SHIFT));
	rc = efhw_iopages_alloc(nic->pci_dev, &h->iobuff, page_order, NULL,
				0UL);
	if (rc < 0) {
		EFHW_WARN("%s: failed to allocate %u pages",
			  __FUNCTION__, 1u << page_order);
		return rc;
	}

	/* Set the eventq pages to match EFHW_CLEAR_EVENT() */
	if (EFHW_CLEAR_EVENT_VALUE)
		memset(efhw_iopages_ptr(&h->iobuff),
		       EFHW_CLEAR_EVENT_VALUE, (1u << page_order) * PAGE_SIZE);

	EFHW_TRACE("%s: allocated %u pages", __FUNCTION__, 1u << (page_order));

	/* For Falcon the NIC is programmed with the base buffer address of a
	 * contiguous region of buffer space.  This means that event queues
	 * larger than a PAGE can be expected to allocate successfully even
	 * when the host's physical memory is fragmented. */
	EFHW_ASSERT(efhw_nic_have_hw(nic));
	EFHW_ASSERT(1 << EFHW_GFP_ORDER_TO_NIC_ORDER(page_order) <=
		    EFHW_BUFFER_TABLE_BLOCK_SIZE);

	/* Initialise the buffer table entries. */
	rc = efhw_nic_buffer_table_alloc(nic, 0, 0, &h->bt_block);
	if (rc < 0) {
		EFHW_WARN("%s: failed to allocate buffer table block",
			  __FUNCTION__);
		efhw_iopages_free(nic->pci_dev, &h->iobuff, NULL);
		return rc;
	}
	for (i = 0; i < (1 << page_order); ++i) {
		for (j = 0; j < EFHW_NIC_PAGES_IN_OS_PAGE; ++j) {
			dma_addrs[i * EFHW_NIC_PAGES_IN_OS_PAGE + j] =
				efhw_iopages_dma_addr(&h->iobuff, i);
		}
	}
	efhw_nic_buffer_table_set(nic, h->bt_block, 0,
				  1 << EFHW_GFP_ORDER_TO_NIC_ORDER(page_order),
				  dma_addrs);
	falcon_nic_buffer_table_confirm(nic);
	return 0;
}
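The dma_addrs array handed to this allocator must provide one slot per NIC-sized page, i.e. (1 << page_order) * EFHW_NIC_PAGES_IN_OS_PAGE entries, which is why the caller in Example #8 sizes it to EFHW_BUFFER_TABLE_BLOCK_SIZE and why the assertion above checks the NIC-order page count against that block size. A minimal standalone sketch of the arithmetic, using assumed 4 KiB values for both page sizes (the real constants come from the driver headers):

#include <assert.h>
#include <stdio.h>

/* Assumed example values; the real constants come from the driver headers. */
#define PAGE_SHIFT                 12   /* 4 KiB OS pages  */
#define EFHW_NIC_PAGE_SHIFT        12   /* 4 KiB NIC pages */
#define EFHW_NIC_PAGES_IN_OS_PAGE  (1u << (PAGE_SHIFT - EFHW_NIC_PAGE_SHIFT))

int main(void)
{
	unsigned int page_order = 2;                  /* 4 OS pages requested */
	unsigned int os_pages = 1u << page_order;
	unsigned int entries = os_pages * EFHW_NIC_PAGES_IN_OS_PAGE;

	/* The nested loops above write exactly one DMA address per NIC page. */
	printf("%u OS pages -> %u dma_addrs entries\n", os_pages, entries);
	assert(entries == (1u << (page_order + PAGE_SHIFT - EFHW_NIC_PAGE_SHIFT)));
	return 0;
}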
Example #3
int
efhw_keventq_ctor(struct efhw_nic *nic, int instance,
		  struct efhw_keventq *evq,
		  struct efhw_ev_handler *ev_handlers)
{
	int rc;
	unsigned buf_bytes = evq->hw.capacity * sizeof(efhw_event_t);

	evq->instance = instance;
	evq->ev_handlers = ev_handlers;

	/* allocate an IObuffer for the eventq */
	rc = efhw_nic_event_queue_alloc_iobuffer(nic, &evq->hw, evq->instance,
						 buf_bytes);
	if (rc < 0)
		return rc;

	/* Zero the timer-value for this queue,
	   and tell the NIC about the event queue. */
	efhw_nic_event_queue_enable(nic, evq->instance, evq->hw.capacity,
				    evq->hw.buf_tbl_alloc.base,
				    ev_handlers != NULL /* interrupting */,
				    1 /* dos protection enable*/);

	evq->lock = KEVQ_UNLOCKED;
	evq->evq_base = efhw_iopages_ptr(&evq->hw.iobuff);
	evq->evq_ptr = 0;
	evq->evq_mask = (evq->hw.capacity * sizeof(efhw_event_t)) - 1u;

	EFHW_TRACE("%s: [%d] base=%p end=%p", __FUNCTION__, evq->instance,
		   evq->evq_base, evq->evq_base + buf_bytes);

	return 0;
}
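The state set up at the end of this constructor treats the event queue as a byte ring: evq_mask is capacity * sizeof(efhw_event_t) - 1, so it can only serve as a wrap mask if that byte size is a power of two. A small standalone sketch of the mask arithmetic with illustrative sizes (8-byte events, 512 entries; not the driver's values):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative sizes: 8-byte events, 512-entry queue. */
	const unsigned ev_size  = 8;
	const unsigned capacity = 512;
	const unsigned evq_mask = capacity * ev_size - 1u;    /* 0x0fff */

	/* Masking only wraps correctly when capacity * ev_size is a power of 2. */
	assert(((capacity * ev_size) & evq_mask) == 0);

	unsigned evq_ptr = capacity * ev_size - 2 * ev_size;  /* near the end */
	for (int i = 0; i < 4; ++i) {
		unsigned off = evq_ptr & evq_mask;             /* byte offset in ring */
		printf("evq_ptr=%#x -> offset %#x\n", evq_ptr, off);
		evq_ptr += ev_size;                            /* advances without wrapping */
	}
	return 0;
}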
Example #4
/*! Helper function to allocate the iobuffer needed by an eventq
 *   - it ensures the eventq has the correct alignment for the NIC
 *
 * \param nic          NIC to allocate the iobuffer for
 * \param h            Event-queue hardware resource to fill in
 * \param evq_instance Event-queue instance (index)
 * \param buf_bytes    Requested size of the eventq in bytes
 * \return             0 on success, < 0 if iobuffer allocation fails
 */
int
efhw_nic_event_queue_alloc_iobuffer(struct efhw_nic *nic,
				    struct eventq_resource_hardware *h,
				    int evq_instance, unsigned buf_bytes)
{
	unsigned int page_order;
	int rc;

	/* Allocate an iobuffer. */
	page_order = get_order(buf_bytes);

	EFHW_TRACE("allocating eventq size %x",
		   1u << (page_order + PAGE_SHIFT));
	rc = efhw_iopages_alloc(nic, &h->iobuff, page_order);
	if (rc < 0) {
		EFHW_WARN("%s: failed to allocate %u pages",
			  __FUNCTION__, 1u << page_order);
		return rc;
	}

	/* Set the eventq pages to match EFHW_CLEAR_EVENT() */
	if (EFHW_CLEAR_EVENT_VALUE)
		memset(efhw_iopages_ptr(&h->iobuff),
		       EFHW_CLEAR_EVENT_VALUE, (1u << page_order) * PAGE_SIZE);

	EFHW_TRACE("%s: allocated %u pages", __FUNCTION__, 1u << (page_order));

	/* For Falcon the NIC is programmed with the base buffer address of a
	 * contiguous region of buffer space.  This means that event queues
	 * larger than a PAGE can be expected to allocate successfully even
	 * when the host's physical memory is fragmented. */
	EFHW_ASSERT(efhw_nic_have_hw(nic));
	EFHW_ASSERT(page_order <= h->buf_tbl_alloc.order);

	/* Initialise the buffer table entries. */
	falcon_nic_buffer_table_set_n(nic, h->buf_tbl_alloc.base,
				      efhw_iopages_dma_addr(&h->iobuff),
				      EFHW_NIC_PAGE_SIZE, 0,
				      1 << page_order, 0);
	falcon_nic_buffer_table_confirm(nic);
	return 0;
}
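get_order() here is the Linux helper that converts a byte count into the smallest page-allocation order that covers it; the allocator relies on that rounding when it sizes the iobuffer. A userspace stand-in under an assumed 4 KiB page size, just to show the rounding behaviour:

#include <stdio.h>

#define PAGE_SHIFT 12                     /* assumed 4 KiB pages */
#define PAGE_SIZE  (1ul << PAGE_SHIFT)

/* Stand-in for the kernel's get_order(): the smallest n such that
 * (PAGE_SIZE << n) >= size (sizes of zero are not expected here). */
static unsigned int order_for(unsigned long size)
{
	unsigned int order = 0;

	while ((PAGE_SIZE << order) < size)
		++order;
	return order;
}

int main(void)
{
	printf("%u\n", order_for(4096));      /* 0: fits in one page       */
	printf("%u\n", order_for(4097));      /* 1: rounds up to two pages */
	printf("%u\n", order_for(8 * 4096));  /* 3: eight pages            */
	return 0;
}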
Example #5
void efhw_keventq_dtor(struct efhw_nic *nic, struct efhw_keventq *evq)
{
	EFHW_ASSERT(evq);

	EFHW_TRACE("%s: [%d]", __FUNCTION__, evq->instance);

	/* Zero the timer-value for this queue,
	   and tell the NIC to stop using this event queue. */
	efhw_nic_event_queue_disable(nic, evq->instance, 0);

	/* free the pages used by the eventq itself */
	efhw_iopages_free(nic, &evq->hw.iobuff);
}
Example #6
int
efhw_handle_rxdmaq_flushed(struct efhw_nic *nic, struct efhw_ev_handler *h,
			   unsigned instance, int failed)
{
	EFHW_TRACE("%s: instance=%u", __FUNCTION__, instance);

	if (!h->dmaq_flushed_fn) {
		EFHW_WARN("%s: no handler registered", __FUNCTION__);
		return 0;
	}

	return h->dmaq_flushed_fn(nic, instance, true, failed);
}
Example #7
void
efhw_handle_txdmaq_flushed(struct efhw_nic *nic, struct efhw_ev_handler *h,
			   efhw_event_t *evp)
{
	int instance = (int)FALCON_EVENT_TX_FLUSH_Q_ID(evp);
	EFHW_TRACE("%s: instance=%d", __FUNCTION__, instance);

	if (!h->dmaq_flushed_fn) {
		EFHW_WARN("%s: no handler registered", __FUNCTION__);
		return;
	}

	h->dmaq_flushed_fn(nic, instance, false, false);
}
Example #8
int
efhw_keventq_ctor(struct efhw_nic *nic, int instance,
		  struct efhw_keventq *evq,
		  struct efhw_ev_handler *ev_handlers)
{
	unsigned int page_order;
	int rc;
	dma_addr_t dma_addrs[EFHW_BUFFER_TABLE_BLOCK_SIZE];
	unsigned buf_bytes = evq->hw.capacity * sizeof(efhw_event_t);

	EFHW_ASSERT(nic->devtype.arch == EFHW_ARCH_FALCON);

	EFHW_ASSERT(buf_bytes);
	page_order = get_order(buf_bytes);

	evq->instance = instance;
	evq->ev_handlers = ev_handlers;

	/* allocate an IObuffer for the eventq */
	rc = efhw_nic_event_queue_alloc_iobuffer(nic, &evq->hw, evq->instance,
						 dma_addrs, page_order);
	if (rc < 0)
		return rc;

	/* Zero the timer-value for this queue,
	   and tell the NIC about the event queue. */
	efhw_nic_event_queue_enable(nic, evq->instance, evq->hw.capacity,
				    evq->hw.bt_block->btb_vaddr >>
					EFHW_NIC_PAGE_SHIFT,
				    dma_addrs, 
				    1 << page_order,
				    ev_handlers != NULL /* interrupting */,
				    1 /* dos protection enable*/,
				    0 /* not used on falcon */,
				    0 /* not used on falcon */,
				    0 /* not used on falcon */,
				    NULL /* not used on falcon */,
				    NULL /* not used on falcon */);

	evq->lock = KEVQ_UNLOCKED;
	evq->evq_base = efhw_iopages_ptr(&evq->hw.iobuff);
	evq->evq_ptr = 0;
	evq->evq_mask = (evq->hw.capacity * sizeof(efhw_event_t)) - 1u;

	EFHW_TRACE("%s: [%d] base=%p end=%p", __FUNCTION__, evq->instance,
		   evq->evq_base, evq->evq_base + buf_bytes);

	return 0;
}
Example #9
void
efhw_handle_rxdmaq_flushed(struct efhw_nic *nic, struct efhw_ev_handler *h,
			   efhw_event_t *evp)
{
	unsigned instance = (unsigned)FALCON_EVENT_RX_FLUSH_Q_ID(evp);
	unsigned failed = (unsigned)FALCON_EVENT_RX_FLUSH_FAIL(evp);
	EFHW_TRACE("%s: instance=%u", __FUNCTION__, instance);

	if (!h->dmaq_flushed_fn) {
		EFHW_WARN("%s: no handler registered", __FUNCTION__);
		return;
	}

	h->dmaq_flushed_fn(nic, instance, true, failed);
}
Example #10
void efhw_keventq_dtor(struct efhw_nic *nic, struct efhw_keventq *evq)
{
	int order = EFHW_GFP_ORDER_TO_NIC_ORDER(get_order(evq->hw.capacity *
							  sizeof(efhw_event_t)));
	EFHW_ASSERT(evq);

	EFHW_TRACE("%s: [%d]", __FUNCTION__, evq->instance);

	/* Zero the timer-value for this queue,
	   and tell the NIC to stop using this event queue. */
	efhw_nic_event_queue_disable(nic, evq->instance,
				     0 /* not used on falcon */);

	/* Free buftable entries */
	efhw_nic_buffer_table_clear(nic, evq->hw.bt_block, 0, 1 << order);
	efhw_nic_buffer_table_free(nic, evq->hw.bt_block);

	/* free the pages used by the eventq itself */
	efhw_iopages_free(nic->pci_dev, &evq->hw.iobuff, NULL);
}
Example #11
int efhw_keventq_poll(struct efhw_nic *nic, struct efhw_keventq *q)
{
	efhw_event_t *ev;
	int l, count = 0;

	EFHW_ASSERT(nic);
	EFHW_ASSERT(q);
	EFHW_ASSERT(q->ev_handlers);

	/* Acquire the lock, or mark the queue as needing re-checking. */
	for (;;) {
		l = q->lock;
		if (l == KEVQ_UNLOCKED) {
			if ((int)cmpxchg(&q->lock, l, KEVQ_LOCKED) == l)
				break;
		} else if (l == KEVQ_LOCKED) {
			if ((int)cmpxchg(&q->lock, l, KEVQ_RECHECK) == l)
				return 0;
		} else {	/* already marked for re-checking */
			EFHW_ASSERT(l == KEVQ_RECHECK);
			return 0;
		}
	}

	if (unlikely(EFHW_EVENT_OVERFLOW(q, q)))
		goto overflow;

	ev = EFHW_EVENT_PTR(q, q, 0);

#ifndef NDEBUG
	if (!EFHW_IS_EVENT(ev))
		EFHW_TRACE("%s: %d NO EVENTS!", __FUNCTION__, q->instance);
#endif

	for (;;) {
		/* Convention for return codes for handlers is:
		 **   0   - no error, event consumed
		 **   1   - no error, event not consumed
		 **   -ve - error,    event not consumed
		 */
		if (likely(EFHW_IS_EVENT(ev))) {
			count++;

			switch (FALCON_EVENT_CODE(ev)) {
			case FALCON_EVENT_CODE_CHAR:
				falcon_handle_char_event(nic, q->ev_handlers,
							 ev);
				break;
			default:
				EFHW_ERR("efhw_keventq_poll: [%d] UNEXPECTED "
					 "EVENT:"FALCON_EVENT_FMT,
					 q->instance,
					 FALCON_EVENT_PRI_ARG(*ev));
			}

			EFHW_CLEAR_EVENT(ev);
			EFHW_EVENTQ_NEXT(q);

			ev = EFHW_EVENT_PTR(q, q, 0);
		} else {
			/* No events left.  Release the lock (checking if we
			 * need to re-poll to avoid race). */
			l = q->lock;
			if (l == KEVQ_LOCKED) {
				if ((int)cmpxchg(&q->lock, l, KEVQ_UNLOCKED)
				    == l) {
					EFHW_TRACE("efhw_keventq_poll: %d "
						   "clean exit", q->instance);
					goto clean_exit;
				}
			}

			/* Potentially more work to do. */
			l = q->lock;
			EFHW_ASSERT(l == KEVQ_RECHECK);
			EFHW_TEST((int)cmpxchg(&q->lock, l, KEVQ_LOCKED) == l);
			EFHW_TRACE("efhw_keventq_poll: %d re-poll required",
				   q->instance);
		}
	}

	/* shouldn't get here */
	EFHW_ASSERT(0);

overflow:
	/* ?? Oh dear.  Should we poll everything that could have possibly
	 ** happened?  Or merely cry out in anguish...
	 */
	EFHW_WARN("efhw_keventq_poll: %d ***** OVERFLOW nic %d *****",
		  q->instance, nic->index);

	q->lock = KEVQ_UNLOCKED;
	return count;

clean_exit:
	/* Ack the processed events so that this event queue can potentially
	   raise interrupts again */
	if (q->instance == nic->interrupting_evq.instance)
		falcon_nic_evq_ack(nic, q->instance,
				   (EFHW_EVENT_OFFSET(q, q, 0)
				    / sizeof(efhw_event_t)));
	return count;
}
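The lock in efhw_keventq_poll() is not a blocking lock: a second caller that finds the queue LOCKED marks it RECHECK and leaves, and the current poller then re-polls instead of unlocking, so no events are missed. A minimal userspace sketch of that hand-off using C11 atomics in place of the kernel's cmpxchg(); the state names mirror the KEVQ_* constants but the values and helper names are illustrative:

#include <stdatomic.h>

/* Illustrative state values; the driver defines its own KEVQ_* constants. */
enum { KEVQ_UNLOCKED = 0, KEVQ_LOCKED = 1, KEVQ_RECHECK = 2 };

static _Atomic int kevq_lock = KEVQ_UNLOCKED;

/* Try to become the poller: returns 1 if the caller now owns the queue,
 * 0 if another poller owns it and has been asked to re-check. */
static int kevq_try_acquire(void)
{
	for (;;) {
		int l = atomic_load(&kevq_lock);
		if (l == KEVQ_UNLOCKED) {
			if (atomic_compare_exchange_strong(&kevq_lock, &l,
							   KEVQ_LOCKED))
				return 1;     /* we are the poller */
		} else if (l == KEVQ_LOCKED) {
			if (atomic_compare_exchange_strong(&kevq_lock, &l,
							   KEVQ_RECHECK))
				return 0;     /* poller will re-poll for us */
		} else {
			return 0;             /* already marked for re-check */
		}
	}
}

/* Drop the lock; if somebody asked for a re-check while we were polling,
 * keep ownership and report that another pass is needed. */
static int kevq_release_or_repoll(void)
{
	int l = KEVQ_LOCKED;
	if (atomic_compare_exchange_strong(&kevq_lock, &l, KEVQ_UNLOCKED))
		return 0;                     /* clean exit */
	l = KEVQ_RECHECK;
	atomic_compare_exchange_strong(&kevq_lock, &l, KEVQ_LOCKED);
	return 1;                             /* events arrived meanwhile */
}

int main(void)
{
	if (kevq_try_acquire()) {
		do {
			/* ... handle whatever events are in the ring ... */
		} while (kevq_release_or_repoll());
	}
	return 0;
}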