Example #1
/*! Helper function to allocate the iobuffer needed by an eventq
 *   - it ensures the eventq has the correct alignment for the NIC
 *
 * \param nic          NIC the event queue belongs to
 * \param h            Event-queue hardware resource to fill in
 * \param evq_instance Event-queue instance (index)
 * \param dma_addrs    Array to populate with addrs of allocated pages
 * \param page_order   Requested size of the eventq, as a page order
 * \return             0 on success, < 0 if iobuffer allocation fails
 */
static int
efhw_nic_event_queue_alloc_iobuffer(struct efhw_nic *nic,
				    struct eventq_resource_hardware *h,
				    int evq_instance,
				    dma_addr_t *dma_addrs,
				    unsigned int page_order)
{
	int i, j, rc;

	/* Allocate an iobuffer. */
	EFHW_TRACE("allocating eventq size %x",
		   1u << (page_order + PAGE_SHIFT));
	rc = efhw_iopages_alloc(nic->pci_dev, &h->iobuff, page_order, NULL,
				0UL);
	if (rc < 0) {
		EFHW_WARN("%s: failed to allocate %u pages",
			  __FUNCTION__, 1u << page_order);
		return rc;
	}

	/* Set the eventq pages to match EFHW_CLEAR_EVENT() */
	if (EFHW_CLEAR_EVENT_VALUE)
		memset(efhw_iopages_ptr(&h->iobuff),
		       EFHW_CLEAR_EVENT_VALUE, (1u << page_order) * PAGE_SIZE);

	EFHW_TRACE("%s: allocated %u pages", __FUNCTION__, 1u << (page_order));

	/* For Falcon the NIC is programmed with the base buffer address of a
	 * contiguous region of buffer space. This means that event queues
	 * larger than a PAGE can be expected to allocate even when the host's
	 * physical memory is fragmented. */
	EFHW_ASSERT(efhw_nic_have_hw(nic));
	EFHW_ASSERT(1 << EFHW_GFP_ORDER_TO_NIC_ORDER(page_order) <=
		    EFHW_BUFFER_TABLE_BLOCK_SIZE);

	/* Initialise the buffer table entries. */
	rc = efhw_nic_buffer_table_alloc(nic, 0, 0, &h->bt_block);
	if (rc < 0) {
		EFHW_WARN("%s: failed to allocate buffer table block",
			  __FUNCTION__);
		efhw_iopages_free(nic->pci_dev, &h->iobuff, NULL);
		return rc;
	}
	for (i = 0; i < (1 << page_order); ++i) {
		for (j = 0; j < EFHW_NIC_PAGES_IN_OS_PAGE; ++j) {
			dma_addrs[i * EFHW_NIC_PAGES_IN_OS_PAGE + j] =
				efhw_iopages_dma_addr(&h->iobuff, i);
		}
	}
	efhw_nic_buffer_table_set(nic, h->bt_block, 0,
				  1 << EFHW_GFP_ORDER_TO_NIC_ORDER(page_order),
				  dma_addrs);
	falcon_nic_buffer_table_confirm(nic);
	return 0;
}
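
The following caller sketch is not part of the original source; it is a minimal, hedged illustration of how the helper above might be invoked. It assumes the surrounding efhw driver context (struct efhw_nic, struct eventq_resource_hardware, dma_addr_t, EFHW_NIC_PAGES_IN_OS_PAGE, <linux/slab.h>) and uses a hypothetical function name.

/* Hypothetical caller (illustrative only, not from the original source).
 * Allocates the dma_addrs array the helper fills in (one entry per
 * NIC-sized page), then passes it to the allocator above. Error handling
 * beyond the allocation itself is omitted for brevity. */
static int example_evq_setup(struct efhw_nic *nic,
			     struct eventq_resource_hardware *h,
			     int evq_instance, unsigned int page_order)
{
	unsigned int n_nic_pages =
		(1u << page_order) * EFHW_NIC_PAGES_IN_OS_PAGE;
	dma_addr_t *dma_addrs;
	int rc;

	dma_addrs = kmalloc_array(n_nic_pages, sizeof(*dma_addrs), GFP_KERNEL);
	if (dma_addrs == NULL)
		return -ENOMEM;

	rc = efhw_nic_event_queue_alloc_iobuffer(nic, h, evq_instance,
						 dma_addrs, page_order);
	kfree(dma_addrs);
	return rc;
}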
Example #2
/*! Helper function to allocate the iobuffer needed by an eventq
 *   - it ensures the eventq has the correct alignment for the NIC
 *
 * \param nic          NIC the event queue belongs to
 * \param h            Event-queue hardware resource to fill in
 * \param evq_instance Event-queue instance (index)
 * \param buf_bytes    Requested size of the eventq in bytes
 * \return             0 on success, < 0 if iobuffer allocation fails
 */
int
efhw_nic_event_queue_alloc_iobuffer(struct efhw_nic *nic,
				    struct eventq_resource_hardware *h,
				    int evq_instance, unsigned buf_bytes)
{
	unsigned int page_order;
	int rc;

	/* Allocate an iobuffer. */
	page_order = get_order(buf_bytes);

	EFHW_TRACE("allocating eventq size %x",
		   1u << (page_order + PAGE_SHIFT));
	rc = efhw_iopages_alloc(nic, &h->iobuff, page_order);
	if (rc < 0) {
		EFHW_WARN("%s: failed to allocate %u pages",
			  __FUNCTION__, 1u << page_order);
		return rc;
	}

	/* Set the eventq pages to match EFHW_CLEAR_EVENT() */
	if (EFHW_CLEAR_EVENT_VALUE)
		memset(efhw_iopages_ptr(&h->iobuff),
		       EFHW_CLEAR_EVENT_VALUE, (1u << page_order) * PAGE_SIZE);

	EFHW_TRACE("%s: allocated %u pages", __FUNCTION__, 1u << (page_order));

	/* For Falcon the NIC is programmed with the base buffer address of a
	 * contiguous region of buffer space. This means that event queues
	 * larger than a PAGE can be expected to allocate even when the host's
	 * physical memory is fragmented. */
	EFHW_ASSERT(efhw_nic_have_hw(nic));
	EFHW_ASSERT(page_order <= h->buf_tbl_alloc.order);

	/* Initialise the buffer table entries. */
	falcon_nic_buffer_table_set_n(nic, h->buf_tbl_alloc.base,
				      efhw_iopages_dma_addr(&h->iobuff),
				      EFHW_NIC_PAGE_SIZE, 0,
				      1 << page_order, 0);
	falcon_nic_buffer_table_confirm(nic);
	return 0;
}
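
Again a hedged sketch, not part of the original source: this variant takes the requested eventq size in bytes and expects h->buf_tbl_alloc to have been reserved by the resource manager beforehand (see the assert above). The event-size constant and the function name below are assumptions made for illustration only.

/* Hypothetical caller (illustrative only, not from the original source).
 * Converts a requested event count into bytes and lets the helper round
 * up to whole pages via get_order(). The 8-byte event size is an
 * assumption for this sketch, not taken from the source above. */
#define EXAMPLE_EVENT_BYTES 8

static int example_evq_setup_bytes(struct efhw_nic *nic,
				   struct eventq_resource_hardware *h,
				   int evq_instance, unsigned n_events)
{
	unsigned buf_bytes = n_events * EXAMPLE_EVENT_BYTES;

	/* h->buf_tbl_alloc must already describe a buffer-table region of
	 * at least get_order(buf_bytes), per the assert in the helper. */
	return efhw_nic_event_queue_alloc_iobuffer(nic, h, evq_instance,
						   buf_bytes);
}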