Example #1
/*! Helper function to allocate the iobuffer needed by an eventq
 *   - it ensures the eventq has the correct alignment for the NIC
 *
 * \param nic        NIC that the event queue belongs to
 * \param h          Event-queue hardware state; receives the iobuffer and
 *                   the buffer-table block
 * \param evq_instance  Event-queue instance (index)
 * \param dma_addrs  Array to populate with the DMA address of each NIC page
 *                   (one entry per NIC page in the eventq)
 * \param page_order Requested size of the eventq, as a power-of-two count
 *                   of OS pages
 * \return           0 on success; < 0 if the iobuffer or buffer-table
 *                   allocation fails
 */
static int
efhw_nic_event_queue_alloc_iobuffer(struct efhw_nic *nic,
				    struct eventq_resource_hardware *h,
				    int evq_instance,
				    dma_addr_t *dma_addrs,
				    unsigned int page_order)
{
	int i, j, rc;

	/* Allocate an iobuffer. */
	EFHW_TRACE("allocating eventq size %x",
		   1u << (page_order + PAGE_SHIFT));
	rc = efhw_iopages_alloc(nic->pci_dev, &h->iobuff, page_order, NULL,
				0UL);
	if (rc < 0) {
		EFHW_WARN("%s: failed to allocate %u pages",
			  __FUNCTION__, 1u << page_order);
		return rc;
	}

	/* Set the eventq pages to match EFHW_CLEAR_EVENT() */
	if (EFHW_CLEAR_EVENT_VALUE)
		memset(efhw_iopages_ptr(&h->iobuff),
		       EFHW_CLEAR_EVENT_VALUE, (1u << page_order) * PAGE_SIZE);

	EFHW_TRACE("%s: allocated %u pages", __FUNCTION__, 1u << (page_order));

	/* For Falcon the NIC is programmed with the base buffer address of a
	 * contiguous region of buffer space.  Because the buffer table maps
	 * that buffer space onto individual host pages, event queues larger
	 * than a page can be expected to allocate even when the host's
	 * physical memory is fragmented. */
	EFHW_ASSERT(efhw_nic_have_hw(nic));
	EFHW_ASSERT(1 << EFHW_GFP_ORDER_TO_NIC_ORDER(page_order) <=
		    EFHW_BUFFER_TABLE_BLOCK_SIZE);

	/* Initialise the buffer table entries. */
	rc = efhw_nic_buffer_table_alloc(nic, 0, 0, &h->bt_block);
	if (rc < 0) {
		EFHW_WARN("%s: failed to allocate buffer table block",
			  __FUNCTION__);
		efhw_iopages_free(nic->pci_dev, &h->iobuff, NULL);
		return rc;
	}
	/* Record the DMA address of each NIC-sized page; an OS page may span
	 * several NIC pages, so offset within the OS page as well. */
	for (i = 0; i < (1 << page_order); ++i) {
		for (j = 0; j < EFHW_NIC_PAGES_IN_OS_PAGE; ++j) {
			dma_addrs[i * EFHW_NIC_PAGES_IN_OS_PAGE + j] =
				efhw_iopages_dma_addr(&h->iobuff, i) +
				j * EFHW_NIC_PAGE_SIZE;
		}
	}
	efhw_nic_buffer_table_set(nic, h->bt_block, 0,
				  1 << EFHW_GFP_ORDER_TO_NIC_ORDER(page_order),
				  dma_addrs);
	falcon_nic_buffer_table_confirm(nic);
	return 0;
}
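
The caller must size dma_addrs with one entry per NIC page, i.e. 1 << EFHW_GFP_ORDER_TO_NIC_ORDER(page_order) entries. Below is a minimal caller sketch under that assumption; the wrapper function name and the fixed page order are hypothetical, everything else is taken from the listing above.

/* Hypothetical caller: allocate the iobuffer for one event queue using a
 * two-OS-page buffer (page_order == 1).  The dma_addrs array holds one
 * entry per NIC page that gets programmed into the buffer table. */
static int example_setup_eventq(struct efhw_nic *nic,
				struct eventq_resource_hardware *h,
				int evq_instance)
{
	dma_addr_t dma_addrs[1 << EFHW_GFP_ORDER_TO_NIC_ORDER(1)];

	return efhw_nic_event_queue_alloc_iobuffer(nic, h, evq_instance,
						    dma_addrs, 1);
}
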
Example #2
void efhw_keventq_dtor(struct efhw_nic *nic, struct efhw_keventq *evq)
{
	EFHW_ASSERT(evq);

	EFHW_TRACE("%s: [%d]", __FUNCTION__, evq->instance);

	/* Zero the timer value for this queue and tell the NIC to stop
	 * using this event queue. */
	efhw_nic_event_queue_disable(nic, evq->instance, 0);

	/* free the pages used by the eventq itself */
	efhw_iopages_free(nic->pci_dev, &evq->hw.iobuff, NULL);
}
Example #3
void efhw_keventq_dtor(struct efhw_nic *nic, struct efhw_keventq *evq)
{
	int order;

	EFHW_ASSERT(evq);

	order = EFHW_GFP_ORDER_TO_NIC_ORDER(get_order(evq->hw.capacity *
						      sizeof(efhw_event_t)));

	EFHW_TRACE("%s: [%d]", __FUNCTION__, evq->instance);

	/* Zero the timer value for this queue and tell the NIC to stop
	 * using this event queue. */
	efhw_nic_event_queue_disable(nic, evq->instance,
				     0 /* not used on falcon */);

	/* Free the buffer-table entries backing the eventq. */
	efhw_nic_buffer_table_clear(nic, evq->hw.bt_block, 0, 1 << order);
	efhw_nic_buffer_table_free(nic, evq->hw.bt_block);

	/* free the pages used by the eventq itself */
	efhw_iopages_free(nic->pci_dev, &evq->hw.iobuff, NULL);
}
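
The count passed to efhw_nic_buffer_table_clear() must match the number of entries programmed by efhw_nic_event_queue_alloc_iobuffer() in Example #1; the dtor recovers it from the queue capacity rather than storing the order. A small sketch of that relationship follows, with a hypothetical helper name and using only the macros already shown above.

/* Hypothetical helper: given an event queue of `capacity' entries, compute
 * the number of buffer-table entries (NIC pages) its iobuffer occupies.
 * get_order() converts the byte size to an OS-page order, and
 * EFHW_GFP_ORDER_TO_NIC_ORDER() converts that to a NIC-page order. */
static unsigned example_evq_buffer_table_entries(unsigned capacity)
{
	int order = EFHW_GFP_ORDER_TO_NIC_ORDER(get_order(capacity *
							  sizeof(efhw_event_t)));
	return 1u << order;
}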