Example 1
/*! Helper function to allocate the iobuffer needed by an eventq
 *   - it ensures the eventq has the correct alignment for the NIC
 *
 * \param nic        NIC to allocate the iobuffer for
 * \param h          Event-queue hardware state to populate
 * \param evq_instance  Event-queue instance (index)
 * \param dma_addrs  Array to populate with addrs of allocated pages
 * \param page_order Requested size of eventq, as an OS-page order
 * \return           0 on success, < 0 if iobuffer allocation fails
 */
static int
efhw_nic_event_queue_alloc_iobuffer(struct efhw_nic *nic,
				    struct eventq_resource_hardware *h,
				    int evq_instance,
				    dma_addr_t *dma_addrs,
				    unsigned int page_order)
{
	int i, j, rc;

	/* Allocate an iobuffer. */
	EFHW_TRACE("allocating eventq size %x",
		   1u << (page_order + PAGE_SHIFT));
	rc = efhw_iopages_alloc(nic->pci_dev, &h->iobuff, page_order, NULL,
				0UL);
	if (rc < 0) {
		EFHW_WARN("%s: failed to allocate %u pages",
			  __FUNCTION__, 1u << page_order);
		return rc;
	}

	/* Set the eventq pages to match EFHW_CLEAR_EVENT() */
	if (EFHW_CLEAR_EVENT_VALUE)
		memset(efhw_iopages_ptr(&h->iobuff),
		       EFHW_CLEAR_EVENT_VALUE, (1u << page_order) * PAGE_SIZE);

	EFHW_TRACE("%s: allocated %u pages", __FUNCTION__, 1u << (page_order));

	/* For Falcon the NIC is programmed with the base buffer address of a
	 * contiguous region of buffer space. This means that larger than a
	 * PAGE event queues can be expected to allocate even when the host's
	 * physical memory is fragmented */
	EFHW_ASSERT(efhw_nic_have_hw(nic));
	EFHW_ASSERT(1 << EFHW_GFP_ORDER_TO_NIC_ORDER(page_order) <=
		    EFHW_BUFFER_TABLE_BLOCK_SIZE);

	/* Initialise the buffer table entries. */
	rc = efhw_nic_buffer_table_alloc(nic, 0, 0, &h->bt_block);
	if (rc < 0) {
		EFHW_WARN("%s: failed to allocate buffer table block",
			  __FUNCTION__);
		efhw_iopages_free(nic->pci_dev, &h->iobuff, NULL);
		return rc;
	}
	for (i = 0; i < (1 << page_order); ++i) {
		for (j = 0; j < EFHW_NIC_PAGES_IN_OS_PAGE; ++j) {
			/* Each NIC page within the OS page needs its own
			 * address, offset from the OS-page base. */
			dma_addrs[i * EFHW_NIC_PAGES_IN_OS_PAGE + j] =
				efhw_iopages_dma_addr(&h->iobuff, i) +
				j * EFHW_NIC_PAGE_SIZE;
		}
	}
	efhw_nic_buffer_table_set(nic, h->bt_block, 0,
				  1 << EFHW_GFP_ORDER_TO_NIC_ORDER(page_order),
				  dma_addrs);
	falcon_nic_buffer_table_confirm(nic);
	return 0;
}
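A hedged usage sketch for the helper above, mirroring how the constructor in Example 3 calls it: the caller must size dma_addrs to cover every NIC page in the allocation, i.e. (1 << page_order) * EFHW_NIC_PAGES_IN_OS_PAGE entries, and the assertion in the helper guarantees that EFHW_BUFFER_TABLE_BLOCK_SIZE is large enough for that. The capacity variable and surrounding context here are illustrative only.

	dma_addr_t dma_addrs[EFHW_BUFFER_TABLE_BLOCK_SIZE];
	unsigned int page_order = get_order(capacity * sizeof(efhw_event_t));
	int rc;

	/* Allocate the eventq iobuffer and fill dma_addrs with one DMA
	 * address per NIC page covered by the allocation. */
	rc = efhw_nic_event_queue_alloc_iobuffer(nic, &evq->hw, evq->instance,
						 dma_addrs, page_order);
	if (rc < 0)
		return rc;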
Example 2
void efhw_nic_close_interrupts(struct efhw_nic *nic)
{
	EFHW_ASSERT(nic);
	/* The software-only driver has no hardware to touch. */
	if (!efhw_nic_have_hw(nic))
		return;

	if (nic->irq_unit != EFHW_IRQ_UNIT_UNUSED)
		efhw_nic_interrupt_disable(nic);
}
Example 3
int
efhw_keventq_ctor(struct efhw_nic *nic, int instance,
		  struct efhw_keventq *evq,
		  struct efhw_ev_handler *ev_handlers)
{
	unsigned int page_order;
	int rc;
	dma_addr_t dma_addrs[EFHW_BUFFER_TABLE_BLOCK_SIZE];
	unsigned buf_bytes = evq->hw.capacity * sizeof(efhw_event_t);

	EFHW_ASSERT(nic->devtype.arch == EFHW_ARCH_FALCON);

	EFHW_ASSERT(buf_bytes);
	page_order = get_order(buf_bytes);

	evq->instance = instance;
	evq->ev_handlers = ev_handlers;

	/* allocate an IObuffer for the eventq */
	rc = efhw_nic_event_queue_alloc_iobuffer(nic, &evq->hw, evq->instance,
						 dma_addrs, page_order);
	if (rc < 0)
		return rc;

	/* Zero the timer-value for this queue and tell the NIC about the
	   event queue. */
	efhw_nic_event_queue_enable(nic, evq->instance, evq->hw.capacity,
				    evq->hw.bt_block->btb_vaddr >>
					EFHW_NIC_PAGE_SHIFT,
				    dma_addrs,
				    1 << page_order,
				    ev_handlers != NULL /* interrupting */,
				    1 /* DoS protection enable */,
				    0 /* not used on falcon */,
				    0 /* not used on falcon */,
				    0 /* not used on falcon */,
				    NULL /* not used on falcon */,
				    NULL /* not used on falcon */);

	evq->lock = KEVQ_UNLOCKED;
	evq->evq_base = efhw_iopages_ptr(&evq->hw.iobuff);
	evq->evq_ptr = 0;
	evq->evq_mask = (evq->hw.capacity * sizeof(efhw_event_t)) - 1u;

	EFHW_TRACE("%s: [%d] base=%p end=%p", __FUNCTION__, evq->instance,
		   evq->evq_base, evq->evq_base + buf_bytes);

	return 0;
}
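The evq_ptr/evq_mask pair set up above implies the event buffer is a power-of-two number of bytes: the pointer advances monotonically and the mask reduces it to a byte offset within evq_base. A minimal sketch of that arithmetic, assuming the EFHW_EVENT_PTR() and EFHW_EVENTQ_NEXT() macros used in Example 12 do something equivalent (the helper names below are hypothetical):

static inline efhw_event_t *kevq_current_event(struct efhw_keventq *evq)
{
	/* evq_mask is (capacity * sizeof(efhw_event_t)) - 1, so the AND
	 * wraps the byte offset back into the buffer. */
	return (efhw_event_t *)((char *)evq->evq_base +
				(evq->evq_ptr & evq->evq_mask));
}

static inline void kevq_next_event(struct efhw_keventq *evq)
{
	evq->evq_ptr += sizeof(efhw_event_t);
}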
Example 4
int
efhw_iopages_alloc(struct efhw_nic *nic, struct efhw_iopages *p,
		   unsigned order)
{
	unsigned bytes = 1u << (order + PAGE_SHIFT);
	struct linux_efhw_nic *lnic = linux_efhw_nic(nic);
	dma_addr_t handle;
	caddr_t addr;
	int gfp_flag;

	/* Set __GFP_COMP if available to make reference counting work.
	 * This is recommended here:
	 *   http://www.forbiddenweb.org/viewtopic.php?id=83167&page=4#348331
	 */
	gfp_flag = ((in_atomic() ? GFP_ATOMIC : GFP_KERNEL) | __GFP_COMP);
	addr = efrm_dma_alloc_coherent(&lnic->pci_dev->dev, bytes, &handle,
				       gfp_flag);
	if (addr == NULL)
		return -ENOMEM;

	EFHW_ASSERT((handle & ~PAGE_MASK) == 0);

	p->order = order;
	p->dma_addr = handle;
	p->kva = addr;

	return 0;
}
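The matching free path is not shown in this example. Below is a hedged sketch of what it might look like, assuming the struct efhw_iopages fields populated above (order, dma_addr, kva), the usual <linux/dma-mapping.h> coherent DMA API, and that efrm_dma_alloc_coherent() is a thin wrapper over dma_alloc_coherent(); the function name is hypothetical, not the driver's real one.

void efhw_iopages_free_sketch(struct efhw_nic *nic, struct efhw_iopages *p)
{
	struct linux_efhw_nic *lnic = linux_efhw_nic(nic);
	unsigned bytes = 1u << (p->order + PAGE_SHIFT);

	/* Release the coherent buffer allocated in efhw_iopages_alloc(). */
	dma_free_coherent(&lnic->pci_dev->dev, bytes, p->kva, p->dma_addr);
}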
Example 5
void efhw_nic_dtor(struct efhw_nic *nic)
{
	EFHW_ASSERT(nic);

	/* Check that we have functional units, because the software-only
	 * driver no longer initialises anything hardware-related. */

	/* Interrupts are closed first because the act of deregistering
	   the driver could cause this driver to change from master to slave,
	   and hence the implicit interrupt mappings would be wrong. */

	EFHW_TRACE("%s: functional units ... ", __func__);

	if (efhw_nic_have_functional_units(nic)) {
		efhw_nic_close_interrupts(nic);
		efhw_nic_close_hardware(nic);
	}
	EFHW_TRACE("%s: functional units ... done", __func__);

	/* destroy event queues */
	EFHW_TRACE("%s: event queues ... ", __func__);

	if (nic->interrupting_evq.evq_mask)
		efhw_keventq_dtor(nic, &nic->interrupting_evq);
	if (nic->non_interrupting_evq.evq_mask)
		efhw_keventq_dtor(nic, &nic->non_interrupting_evq);

	EFHW_TRACE("%s: event queues ... done", __func__);

	spin_lock_destroy(&nic->the_reg_lock);

	EFHW_TRACE("%s: DONE", __func__);
}
Example 6
void efhw_iopage_free(struct efhw_nic *nic, struct efhw_iopage *p)
{
	struct linux_efhw_nic *lnic = linux_efhw_nic(nic);
	EFHW_ASSERT(efhw_page_is_valid(&p->p));

	efrm_pci_free_consistent(lnic->pci_dev, PAGE_SIZE,
				 efhw_iopage_ptr(p), p->dma_addr);
}
Example 7
/*! Helper function to allocate the iobuffer needed by an eventq
 *   - it ensures the eventq has the correct alignment for the NIC
 *
 * \param nic       NIC to allocate the iobuffer for
 * \param h         Event-queue hardware state to populate
 * \param evq_instance  Event-queue instance (index)
 * \param buf_bytes Requested size of eventq in bytes
 * \return          0 on success, < 0 if iobuffer allocation fails
 */
int
efhw_nic_event_queue_alloc_iobuffer(struct efhw_nic *nic,
				    struct eventq_resource_hardware *h,
				    int evq_instance, unsigned buf_bytes)
{
	unsigned int page_order;
	int rc;

	/* Allocate an iobuffer. */
	page_order = get_order(buf_bytes);

	EFHW_TRACE("allocating eventq size %x",
		   1u << (page_order + PAGE_SHIFT));
	rc = efhw_iopages_alloc(nic, &h->iobuff, page_order);
	if (rc < 0) {
		EFHW_WARN("%s: failed to allocate %u pages",
			  __FUNCTION__, 1u << page_order);
		return rc;
	}

	/* Set the eventq pages to match EFHW_CLEAR_EVENT() */
	if (EFHW_CLEAR_EVENT_VALUE)
		memset(efhw_iopages_ptr(&h->iobuff),
		       EFHW_CLEAR_EVENT_VALUE, (1u << page_order) * PAGE_SIZE);

	EFHW_TRACE("%s: allocated %u pages", __FUNCTION__, 1u << (page_order));

	/* For Falcon the NIC is programmed with the base buffer address of a
	 * contiguous region of buffer space. This means that larger than a
	 * PAGE event queues can be expected to allocate even when the host's
	 * physical memory is fragmented */
	EFHW_ASSERT(efhw_nic_have_hw(nic));
	EFHW_ASSERT(page_order <= h->buf_tbl_alloc.order);

	/* Initialise the buffer table entries. */
	falcon_nic_buffer_table_set_n(nic, h->buf_tbl_alloc.base,
				      efhw_iopages_dma_addr(&h->iobuff),
				      EFHW_NIC_PAGE_SIZE, 0,
				      1 << page_order, 0);
	falcon_nic_buffer_table_confirm(nic);
	return 0;
}
Example 8
void efhw_keventq_dtor(struct efhw_nic *nic, struct efhw_keventq *evq)
{
	EFHW_ASSERT(evq);

	EFHW_TRACE("%s: [%d]", __FUNCTION__, evq->instance);

	/* Zero the timer-value for this queue and tell the NIC to stop
	   using this event queue. */
	efhw_nic_event_queue_disable(nic, evq->instance, 0);

	/* free the pages used by the eventq itself */
	efhw_iopages_free(nic, &evq->hw.iobuff);
}
Example 9
int efhw_iopage_alloc(struct efhw_nic *nic, struct efhw_iopage *p)
{
	struct linux_efhw_nic *lnic = linux_efhw_nic(nic);
	dma_addr_t handle;
	void *kva;

	kva = efrm_pci_alloc_consistent(lnic->pci_dev, PAGE_SIZE,
					&handle);
	if (kva == NULL)
		return -ENOMEM;

	EFHW_ASSERT((handle & ~PAGE_MASK) == 0);

	memset(kva, 0, PAGE_SIZE);
	efhw_page_init_from_va(&p->p, kva);

	p->dma_addr = handle;

	return 0;
}
Example 10
void efhw_keventq_dtor(struct efhw_nic *nic, struct efhw_keventq *evq)
{
	int order = EFHW_GFP_ORDER_TO_NIC_ORDER(get_order(evq->hw.capacity *
							  sizeof(efhw_event_t)));
	EFHW_ASSERT(evq);

	EFHW_TRACE("%s: [%d]", __FUNCTION__, evq->instance);

	/* Zero the timer-value for this queue and tell the NIC to stop
	   using this event queue. */
	efhw_nic_event_queue_disable(nic, evq->instance,
				     0 /* not used on falcon */);

	/* Free buffer table entries */
	efhw_nic_buffer_table_clear(nic, evq->hw.bt_block, 0, 1 << order);
	efhw_nic_buffer_table_free(nic, evq->hw.bt_block);

	/* free the pages used by the eventq itself */
	efhw_iopages_free(nic->pci_dev, &evq->hw.iobuff, NULL);
}
Example 11
/* This is kept separate from initialising the data structure so that it
** can be called later, once we can access PCI config space to find out
** what hardware we have.
*/
void efhw_nic_init(struct efhw_nic *nic, unsigned flags, unsigned options,
		   struct efhw_device_type dev_type)
{
	nic->devtype = dev_type;
	nic->flags = flags;
	nic->options = options;
	nic->bar_ioaddr = 0;
	spin_lock_init(&nic->the_reg_lock);
	nic->reg_lock = &nic->the_reg_lock;
	nic->mtu = 1500 + ETH_HLEN;

	nic->irq_unit = EFHW_IRQ_UNIT_UNUSED;

	nic->evq_sizes = 512 | 1024 | 2048 | 4096 | 8192 |
		16384 | 32768;
	nic->txq_sizes = 512 | 1024 | 2048 | 4096;
	nic->rxq_sizes = 512 | 1024 | 2048 | 4096;
	nic->efhw_func = &falcon_char_functional_units;
	nic->ctr_ap_bytes = EFHW_64M;
	switch (nic->devtype.variant) {
	case 'A':
		nic->ctr_ap_bar = FALCON_S_CTR_AP_BAR;
		nic->num_evqs   = 4096;
		nic->num_dmaqs  = 4096;
		nic->num_timers = 4096;
		break;
	case 'B':
		nic->flags |= NIC_FLAG_NO_INTERRUPT;
		nic->ctr_ap_bar = FALCON_P_CTR_AP_BAR;
		nic->num_evqs   = 4096;
		nic->num_dmaqs  = 4096;
		nic->num_timers = 4096;
		break;
	default:
		EFHW_ASSERT(0);
		break;
	}
}
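nic->evq_sizes, txq_sizes and rxq_sizes above are bitmasks: each supported queue capacity is a power of two, so OR-ing them together sets one bit per permitted size. A hypothetical helper (not part of the driver above) showing how a requested event-queue capacity could be validated against that mask:

static inline int efhw_nic_evq_size_supported(const struct efhw_nic *nic,
					      unsigned capacity)
{
	/* Valid only if the capacity is a non-zero power of two and its
	 * bit is set in the supported-sizes mask. */
	return capacity != 0 &&
	       (capacity & (capacity - 1)) == 0 &&
	       (nic->evq_sizes & capacity) != 0;
}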
Example 12
int efhw_keventq_poll(struct efhw_nic *nic, struct efhw_keventq *q)
{
	efhw_event_t *ev;
	int l, count = 0;

	EFHW_ASSERT(nic);
	EFHW_ASSERT(q);
	EFHW_ASSERT(q->ev_handlers);

	/* Acquire the lock, or mark the queue as needing re-checking. */
	for (;;) {
		l = q->lock;
		if (l == KEVQ_UNLOCKED) {
			if ((int)cmpxchg(&q->lock, l, KEVQ_LOCKED) == l)
				break;
		} else if (l == KEVQ_LOCKED) {
			if ((int)cmpxchg(&q->lock, l, KEVQ_RECHECK) == l)
				return 0;
		} else {	/* already marked for re-checking */
			EFHW_ASSERT(l == KEVQ_RECHECK);
			return 0;
		}
	}

	if (unlikely(EFHW_EVENT_OVERFLOW(q, q)))
		goto overflow;

	ev = EFHW_EVENT_PTR(q, q, 0);

#ifndef NDEBUG
	if (!EFHW_IS_EVENT(ev))
		EFHW_TRACE("%s: %d NO EVENTS!", __FUNCTION__, q->instance);
#endif

	for (;;) {
		/* Convention for return codes for handlers is:
		 **   0   - no error, event consumed
		 **   1   - no error, event not consumed
		 **   -ve - error,    event not consumed
		 */
		if (likely(EFHW_IS_EVENT(ev))) {
			count++;

			switch (FALCON_EVENT_CODE(ev)) {
			case FALCON_EVENT_CODE_CHAR:
				falcon_handle_char_event(nic, q->ev_handlers,
							 ev);
				break;
			default:
				EFHW_ERR("efhw_keventq_poll: [%d] UNEXPECTED "
					 "EVENT:"FALCON_EVENT_FMT,
					 q->instance,
					 FALCON_EVENT_PRI_ARG(*ev));
			}

			EFHW_CLEAR_EVENT(ev);
			EFHW_EVENTQ_NEXT(q);

			ev = EFHW_EVENT_PTR(q, q, 0);
		} else {
			/* No events left.  Release the lock (checking if we
			 * need to re-poll to avoid race). */
			l = q->lock;
			if (l == KEVQ_LOCKED) {
				if ((int)cmpxchg(&q->lock, l, KEVQ_UNLOCKED)
				    == l) {
					EFHW_TRACE("efhw_keventq_poll: "
						   "%d clean exit",
						   q->instance);
					goto clean_exit;
				}
			}

			/* Potentially more work to do. */
			l = q->lock;
			EFHW_ASSERT(l == KEVQ_RECHECK);
			EFHW_TEST((int)cmpxchg(&q->lock, l, KEVQ_LOCKED) == l);
			EFHW_TRACE("efhw_keventq_poll: %d re-poll required",
				   q->instance);
		}
	}

	/* shouldn't get here */
	EFHW_ASSERT(0);

overflow:
	/* ?? Oh dear.  Should we poll everything that could have possibly
	 ** happened?  Or merely cry out in anguish...
	 */
	EFHW_WARN("efhw_keventq_poll: %d ***** OVERFLOW nic %d *****",
		  q->instance, nic->index);

	q->lock = KEVQ_UNLOCKED;
	return count;

clean_exit:
	/* Ack the processed events so that this event queue can potentially
	   raise interrupts again */
	if (q->instance == nic->interrupting_evq.instance)
		falcon_nic_evq_ack(nic, q->instance,
				   (EFHW_EVENT_OFFSET(q, q, 0)
				    / sizeof(efhw_event_t)));
	return count;
}
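The KEVQ_UNLOCKED / KEVQ_LOCKED / KEVQ_RECHECK protocol above lets a concurrent caller avoid blocking: if it finds the queue locked it simply marks it for re-checking and returns, and the current poller loops once more before releasing. A minimal standalone sketch of the acquisition step, using C11 atomics in place of the kernel's cmpxchg() purely for illustration (the enum values and helper name are illustrative, not the driver's):

#include <stdatomic.h>

enum { KEVQ_UNLOCKED, KEVQ_LOCKED, KEVQ_RECHECK };

/* Returns 1 if the caller now owns the poll, 0 if another thread is
 * polling and has been asked to re-check the queue before it exits. */
static int kevq_try_begin_poll(atomic_int *lock)
{
	for (;;) {
		int l = atomic_load(lock);
		if (l == KEVQ_UNLOCKED) {
			if (atomic_compare_exchange_strong(lock, &l,
							   KEVQ_LOCKED))
				return 1;
		} else if (l == KEVQ_LOCKED) {
			if (atomic_compare_exchange_strong(lock, &l,
							   KEVQ_RECHECK))
				return 0;
		} else {
			return 0;	/* already marked for re-checking */
		}
	}
}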