Example #1
int efhw_keventq_poll(struct efhw_nic *nic, struct efhw_keventq *q)
{
	efhw_event_t *ev;
	int l, count = 0;

	EFHW_ASSERT(nic);
	EFHW_ASSERT(q);
	EFHW_ASSERT(q->ev_handlers);

	/* Acquire the lock, or mark the queue as needing re-checking. */
	for (;;) {
		l = q->lock;
		if (l == KEVQ_UNLOCKED) {
			if ((int)cmpxchg(&q->lock, l, KEVQ_LOCKED) == l)
				break;
		} else if (l == KEVQ_LOCKED) {
			if ((int)cmpxchg(&q->lock, l, KEVQ_RECHECK) == l)
				return 0;
		} else {	/* already marked for re-checking */
			EFHW_ASSERT(l == KEVQ_RECHECK);
			return 0;
		}
	}

	if (unlikely(EFHW_EVENT_OVERFLOW(q, q)))
		goto overflow;

	ev = EFHW_EVENT_PTR(q, q, 0);

#ifndef NDEBUG
	if (!EFHW_IS_EVENT(ev))
		EFHW_TRACE("%s: %d NO EVENTS!", __FUNCTION__, q->instance);
#endif

	for (;;) {
		/* Convention for return codes for handlers is:
		 **   0   - no error, event consumed
		 **   1   - no error, event not consumed
		 **   -ve - error,    event not consumed
		 */
		if (likely(EFHW_IS_EVENT(ev))) {
			count++;

			switch (FALCON_EVENT_CODE(ev)) {
			case FALCON_EVENT_CODE_CHAR:
				falcon_handle_char_event(nic, q->ev_handlers,
							 ev);
				break;
			default:
				EFHW_ERR("efhw_keventq_poll: [%d] UNEXPECTED "
					 "EVENT:"FALCON_EVENT_FMT,
					 q->instance,
					 FALCON_EVENT_PRI_ARG(*ev));
			}

			EFHW_CLEAR_EVENT(ev);
			EFHW_EVENTQ_NEXT(q);

			ev = EFHW_EVENT_PTR(q, q, 0);
		} else {
			/* No events left.  Release the lock (checking if we
			 * need to re-poll to avoid race). */
			l = q->lock;
			if (l == KEVQ_LOCKED) {
				if ((int)cmpxchg(&q->lock, l, KEVQ_UNLOCKED)
				    == l) {
					EFHW_TRACE
					    ("efhw_keventq_poll: %d clean exit",
					     q->instance);
					goto clean_exit;
				}
			}

			/* Potentially more work to do. */
			l = q->lock;
			EFHW_ASSERT(l == KEVQ_RECHECK);
			EFHW_TEST((int)cmpxchg(&q->lock, l, KEVQ_LOCKED) == l);
			EFHW_TRACE("efhw_keventq_poll: %d re-poll required",
				   q->instance);
		}
	}

	/* shouldn't get here */
	EFHW_ASSERT(0);

overflow:
	/* ?? Oh dear.  Should we poll everything that could have possibly
	 ** happened?  Or merely cry out in anguish...
	 */
	EFHW_WARN("efhw_keventq_poll: %d ***** OVERFLOW nic %d *****",
		  q->instance, nic->index);

	q->lock = KEVQ_UNLOCKED;
	return count;

clean_exit:
	/* Ack the processed events so that this event queue can potentially
	   raise interrupts again */
	if (q->instance == nic->interrupting_evq.instance)
		falcon_nic_evq_ack(nic, q->instance,
				   (EFHW_EVENT_OFFSET(q, q, 0)
				    / sizeof(efhw_event_t)));
	return count;
}
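
The polling loop above only exits once it has both drained the queue and released the lock without losing a concurrent re-check request. As a minimal, self-contained sketch of that three-state protocol (not driver code), the following re-expresses it with C11 atomics in place of the kernel's cmpxchg(); the kevq_try_enter()/kevq_try_exit() names and the main() driver are invented for this illustration.

/* Userspace sketch of the UNLOCKED/LOCKED/RECHECK protocol used by
 * efhw_keventq_poll(), written with C11 atomics.  The state names mirror
 * the driver's; the function names are invented for this example. */
#include <stdatomic.h>
#include <stdio.h>

enum { KEVQ_UNLOCKED, KEVQ_LOCKED, KEVQ_RECHECK };

struct kevq {
	_Atomic int lock;
};

/* Returns 1 if this caller won the lock, 0 if the current holder was
 * asked (via KEVQ_RECHECK) to re-poll on our behalf. */
static int kevq_try_enter(struct kevq *q)
{
	for (;;) {
		int l = atomic_load(&q->lock);

		if (l == KEVQ_UNLOCKED) {
			if (atomic_compare_exchange_strong(&q->lock, &l,
							   KEVQ_LOCKED))
				return 1;
		} else if (l == KEVQ_LOCKED) {
			if (atomic_compare_exchange_strong(&q->lock, &l,
							   KEVQ_RECHECK))
				return 0;
		} else {
			/* Already marked for re-checking: nothing to do. */
			return 0;
		}
	}
}

/* Returns 1 on a clean release, 0 if someone requested a re-check while
 * we held the lock (in which case we keep the lock and must poll again). */
static int kevq_try_exit(struct kevq *q)
{
	int l = KEVQ_LOCKED;

	if (atomic_compare_exchange_strong(&q->lock, &l, KEVQ_UNLOCKED))
		return 1;
	/* State must be KEVQ_RECHECK: take the lock back and re-poll. */
	l = KEVQ_RECHECK;
	atomic_compare_exchange_strong(&q->lock, &l, KEVQ_LOCKED);
	return 0;
}

int main(void)
{
	struct kevq q = { .lock = KEVQ_UNLOCKED };

	if (kevq_try_enter(&q)) {
		do {
			/* ... drain events here ... */
		} while (!kevq_try_exit(&q));
		puts("clean exit");
	}
	return 0;
}
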
Example #2
int
efhw_iopages_alloc(struct pci_dev *pci_dev, struct efhw_iopages *p,
		   unsigned order, efhw_iommu_domain *vf_domain,
		   unsigned long iova_base)
{
	/* dma_alloc_coherent() is really the right interface to use here.
	 * However, it allocates memory "close" to the device, but we want
	 * memory on the current numa node.  Also we need the memory to be
	 * contiguous in the kernel, but not necessarily in physical
	 * memory.
	 *
	 * vf_domain is the IOMMU protection domain - it implies that pci_dev
	 * is a VF that should not use the normal DMA mapping APIs.
	 */
	struct device *dev = &pci_dev->dev;
	int i = 0;

	p->n_pages = 1 << order;
	p->dma_addrs = kmalloc(p->n_pages * sizeof(p->dma_addrs[0]),
			       GFP_KERNEL);
	if (p->dma_addrs == NULL)
		goto fail1;
	p->ptr = vmalloc_node(p->n_pages << PAGE_SHIFT, NUMA_NO_NODE);
	if (p->ptr == NULL)
		goto fail2;
	for (i = 0; i < p->n_pages; ++i) {
		struct page *page;
		page = vmalloc_to_page(p->ptr + (i << PAGE_SHIFT));

		if (!vf_domain) {
			p->dma_addrs[i] = dma_map_page(dev, page, 0, PAGE_SIZE,
						       DMA_BIDIRECTIONAL);
			
			if (dma_mapping_error(dev, p->dma_addrs[i])) {
				EFHW_ERR("%s: ERROR dma_map_page failed",
					 __FUNCTION__);
				goto fail3;
			}
		} else
#ifdef CONFIG_SFC_RESOURCE_VF_IOMMU
		{
			int rc;

			p->dma_addrs[i] = iova_base;
			rc = iommu_map(vf_domain, p->dma_addrs[i],
				       page_to_phys(page), PAGE_SIZE,
				       IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
			if (rc) {
				EFHW_ERR("%s: ERROR iommu_map failed (%d)",
					 __FUNCTION__, rc);
				goto fail3;
			}
			iova_base += PAGE_SIZE;
		}
#else
		EFRM_ASSERT(0);
#endif
	}
	return 0;

fail3:
	while (i-- > 0)
		if (!vf_domain) {
			dma_unmap_page(dev, p->dma_addrs[i],
				       PAGE_SIZE, DMA_BIDIRECTIONAL);
		} else {
#ifdef CONFIG_SFC_RESOURCE_VF_IOMMU
			mutex_lock(&efrm_iommu_mutex);
			/* dma_addrs[i] is the iova used for page i. */
			iommu_unmap(vf_domain, p->dma_addrs[i], PAGE_SIZE);
			mutex_unlock(&efrm_iommu_mutex);
#endif
		}
fail2:
	kfree(p->dma_addrs);
fail1:
	return -ENOMEM;
}
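
For completeness, below is a sketch of what the matching teardown might look like, assuming a hypothetical efhw_iopages_free_sketch() that takes the same arguments as the allocator; the driver's actual free routine may differ. Each page is unmapped the way it was mapped (dma_unmap_page() in the normal case, iommu_unmap() under the VF IOMMU domain), then the vmalloc()ed buffer and the DMA-address array are released.

/* Sketch only: a hypothetical free path mirroring efhw_iopages_alloc()
 * above.  Not taken from the driver; shown to illustrate how the two
 * mapping styles are undone symmetrically. */
static void efhw_iopages_free_sketch(struct pci_dev *pci_dev,
				     struct efhw_iopages *p,
				     efhw_iommu_domain *vf_domain)
{
	struct device *dev = &pci_dev->dev;
	int i;

	for (i = 0; i < p->n_pages; ++i) {
		if (!vf_domain) {
			dma_unmap_page(dev, p->dma_addrs[i], PAGE_SIZE,
				       DMA_BIDIRECTIONAL);
		} else {
#ifdef CONFIG_SFC_RESOURCE_VF_IOMMU
			mutex_lock(&efrm_iommu_mutex);
			iommu_unmap(vf_domain, p->dma_addrs[i], PAGE_SIZE);
			mutex_unlock(&efrm_iommu_mutex);
#endif
		}
	}
	vfree(p->ptr);
	kfree(p->dma_addrs);
}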