Пример #1
0
/* Map an array of pages for DMA and program them into the NIC buffer table.
 *
 * @vih:      opaque VI state handle (really a struct efx_vi_state *).
 * @pages:    array of n_pages page pointers to map.
 * @n_pages:  number of pages; must be a positive power of two.
 * @dmh_out:  on success, receives the newly allocated mapping state
 *            (ownership transfers to the caller).
 *
 * Returns 0 on success, -EINVAL for a bad page count, -ENOMEM on
 * allocation failure, or the error from efrm_buffer_table_alloc().
 */
int
efx_vi_dma_map_pages(struct efx_vi_state *vih, struct page **pages,
		     int n_pages, struct efx_vi_dma_map_state **dmh_out)
{
	struct efx_vi_state *efx_state = vih;
	int order, rc, i, evq_id;
	dma_addr_t dma_addr;
	struct efx_vi_dma_map_state *dm_state;

	/* Validate before computing the order: n_pages <= 0 would make
	 * fls(n_pages - 1) operate on -1 (order 32) and the subsequent
	 * "1 << order" shift undefined. */
	if (n_pages <= 0)
		return -EINVAL;

	order = fls(n_pages - 1);
	if (n_pages != (1 << order)) {
		EFRM_WARN("%s: Can only allocate buffers in power of 2 "
			  "sizes (not %d)", __func__, n_pages);
		return -EINVAL;
	}

	dm_state = kmalloc(sizeof(struct efx_vi_dma_map_state), GFP_KERNEL);
	if (!dm_state)
		return -ENOMEM;

	dm_state->dma_addrs = kmalloc(sizeof(dma_addr_t) * n_pages,
				      GFP_KERNEL);
	if (!dm_state->dma_addrs) {
		kfree(dm_state);
		return -ENOMEM;
	}

	rc = efrm_buffer_table_alloc(order, &dm_state->bt_handle);
	if (rc < 0) {
		kfree(dm_state->dma_addrs);
		kfree(dm_state);
		return rc;
	}

	/* Buffer-table entries are tagged with the owning event queue. */
	evq_id = EFRM_RESOURCE_INSTANCE(efx_state->vi_res->rs.rs_handle);
	for (i = 0; i < n_pages; i++) {
		/* TODO do we need to get_page() here ? */

		/* NOTE(review): the pci_map_page() result is not checked
		 * with pci_dma_mapping_error(); confirm against the target
		 * kernel's DMA API before adding error handling here. */
		dma_addr = pci_map_page(linux_efhw_nic(efx_state->nic)->
					  pci_dev, pages[i], 0, PAGE_SIZE,
					PCI_DMA_TODEVICE);

		efrm_buffer_table_set(&dm_state->bt_handle, efx_state->nic,
				      i, dma_addr, evq_id);

		dm_state->dma_addrs[i] = dma_addr;

		/* Would be nice to not have to call commit each time, but
		 * comment says there are hardware restrictions on how often
		 * you can go without it, so do this to be safe */
		efrm_buffer_table_commit();
	}

	dm_state->n_pages = n_pages;

	*dmh_out = dm_state;

	return 0;
}
Пример #2
0
/* Allocate and initialise a filter resource attached to a parent VI.
 *
 * Called in atomic context by the onload driver, hence GFP_ATOMIC.
 *
 * @vi_parent: parent VI resource; a reference is taken on it.
 * @frs_out:   on success, receives the new filter resource
 *             (ownership transfers to the caller).
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or -EBUSY when
 * no free filter instance IDs remain.
 */
int
efrm_filter_resource_alloc(struct vi_resource *vi_parent,
			   struct filter_resource **frs_out)
{
	struct filter_resource *frs;
	int rc, instance;

	EFRM_ASSERT(frs_out);
	EFRM_ASSERT(efrm_filter_manager);
	EFRM_RESOURCE_MANAGER_ASSERT_VALID(&efrm_filter_manager->rm);
	EFRM_ASSERT(vi_parent != NULL);
	EFRM_ASSERT(EFRM_RESOURCE_TYPE(vi_parent->rs.rs_handle) ==
		    EFRM_RESOURCE_VI);

	/* Allocate resource data structure.  This is called in atomic
	 * context by the onload driver.
	 */
	frs = kmalloc(sizeof(struct filter_resource), GFP_ATOMIC);
	if (!frs)
		return -ENOMEM;

	/* Allocate an instance ID from the free pool. */
	rc = kfifo_get(efrm_filter_manager->free_ids,
		       (unsigned char *)&instance, sizeof(instance));
	if (rc != sizeof(instance)) {
		/* Use the standard __func__ (C99) rather than the
		 * GCC-specific __FUNCTION__, consistent with the rest of
		 * this file. */
		EFRM_TRACE("%s: out of instances", __func__);
		EFRM_ASSERT(rc == 0);
		rc = -EBUSY;
		goto fail1;
	}

	/* Initialise the resource DS; filter_idx == -1 means no hardware
	 * filter is programmed yet. */
	efrm_resource_init(&frs->rs, EFRM_RESOURCE_FILTER, instance);
	frs->pt = vi_parent;
	efrm_resource_ref(&frs->pt->rs);
	frs->filter_idx = -1;

	EFRM_TRACE("%s: " EFRM_RESOURCE_FMT " VI %d", __func__,
		   EFRM_RESOURCE_PRI_ARG(frs->rs.rs_handle),
		   EFRM_RESOURCE_INSTANCE(vi_parent->rs.rs_handle));

	efrm_client_add_resource(vi_parent->rs.rs_client, &frs->rs);
	*frs_out = frs;
	return 0;

fail1:
	memset(frs, 0, sizeof(*frs));
	kfree(frs);
	return rc;
}
Пример #3
0
/* Re-point an existing filter at a different VI.
 *
 * If a hardware filter is programmed (filter_idx >= 0) it is redirected
 * on the NIC; the reference on the old parent VI is dropped and one is
 * taken on the new VI.
 *
 * @frs: filter resource to redirect (must not be NULL).
 * @vi:  new parent VI (must not be NULL).
 */
void
efrm_filter_resource_redirect(struct filter_resource *frs,
			      struct vi_resource *vi)
{
	struct efhw_nic *nic;
	int vi_instance;

	EFRM_ASSERT(frs != NULL);
	EFRM_ASSERT(vi != NULL);

	/* Dereference frs only after the NULL assertions above; the
	 * original code read frs->rs.rs_client->nic before checking. */
	nic = frs->rs.rs_client->nic;

	vi_instance = EFRM_RESOURCE_INSTANCE(vi->rs.rs_handle);
	if (frs->filter_idx >= 0)
		efhw_nic_ipfilter_redirect(nic, frs->filter_idx, vi_instance);
	efrm_vi_resource_release(frs->pt);
	frs->pt = vi;
	efrm_resource_ref(&frs->pt->rs);
}
Пример #4
0
/* Program an IP filter on the NIC for the given 4-tuple, directing
 * matching traffic to the filter's parent VI.  The resulting hardware
 * filter index is stored in frs->filter_idx by efhw_nic_ipfilter_set().
 * Returns the result of efhw_nic_ipfilter_set().
 */
int
__efrm_filter_resource_set(struct filter_resource *frs, int type,
			   unsigned saddr, uint16_t sport,
			   unsigned daddr, uint16_t dport)
{
	struct efhw_nic *nic = frs->rs.rs_client->nic;
	int instance;

	EFRM_ASSERT(frs);

	/* On B-series (and later) variants, set the NOSCAT flag unless the
	 * parent VI has jumbo enabled — presumably this disables RX
	 * scatter; confirm against the efhw filter documentation. */
	if (efrm_nic_tablep->a_nic->devtype.variant >= 'B') {
		if (!(frs->pt->flags & EFHW_VI_JUMBO_EN))
			type |= EFHW_IP_FILTER_TYPE_NOSCAT_B0_MASK;
	}

	instance = EFRM_RESOURCE_INSTANCE(frs->pt->rs.rs_handle);

	return efhw_nic_ipfilter_set(nic, type, &frs->filter_idx, instance,
				     saddr, sport, daddr, dport);
}
Пример #5
0
/* Release a filter resource: clear the hardware filter (if programmed),
 * drop the reference on the parent VI, return the instance ID to the
 * free pool, and free the structure.
 *
 * @frs: filter resource to free; must be a valid resource.
 */
void efrm_filter_resource_free(struct filter_resource *frs)
{
	struct efhw_nic *nic = frs->rs.rs_client->nic;
	int id;

	EFRM_RESOURCE_ASSERT_VALID(&frs->rs, 1);

	EFRM_TRACE("%s: " EFRM_RESOURCE_FMT, __func__,
		   EFRM_RESOURCE_PRI_ARG(frs->rs.rs_handle));

	/* Only clear a filter that is actually programmed; filter_idx is
	 * -1 when none is.  This matches the guard used by
	 * efrm_filter_resource_redirect(). */
	if (frs->filter_idx >= 0)
		efhw_nic_ipfilter_clear(nic, frs->filter_idx);
	frs->filter_idx = -1;
	efrm_vi_resource_release(frs->pt);

	/* Return this filter's instance ID to the free pool. */
	id = EFRM_RESOURCE_INSTANCE(frs->rs.rs_handle);
	EFRM_VERIFY_EQ(kfifo_put(efrm_filter_manager->free_ids,
				 (unsigned char *)&id, sizeof(id)),
		       sizeof(id));

	efrm_client_put(frs->rs.rs_client);
	EFRM_DO_DEBUG(memset(frs, 0, sizeof(*frs)));
	kfree(frs);
}
Пример #6
0
/* Return the instance number encoded in the filter's resource handle. */
int efrm_filter_resource_instance(struct filter_resource *frs)
{
	int instance = EFRM_RESOURCE_INSTANCE(frs->rs.rs_handle);
	return instance;
}
Пример #7
0
/* Fill hw_res_array with the physical hardware resources (DMA queues,
 * event queue, doorbell pages) backing this VI, plus metadata about the
 * NIC and queue geometry in *mdata.
 *
 * @vih:          opaque VI state handle (really a struct efx_vi_state *).
 * @mdata:        out: NIC type/revision and queue capacities/offsets.
 * @hw_res_array: out: array of resource descriptors; must have room for
 *                at least EFX_VI_HW_RESOURCE_MAXSIZE entries.
 * @length:       in: capacity of hw_res_array; out: entries written.
 *
 * Returns 0 on success or -EINVAL if the caller's array is too small.
 */
int
efx_vi_hw_resource_get_phys(struct efx_vi_state *vih,
			    struct efx_vi_hw_resource_metadata *mdata,
			    struct efx_vi_hw_resource *hw_res_array,
			    int *length)
{
	struct efx_vi_state *efx_state = vih;
	struct linux_efhw_nic *lnic = linux_efhw_nic(efx_state->nic);
	/* Base physical address of the NIC's char-driver aperture. */
	unsigned long phys = lnic->ctr_ap_pci_addr;
	struct efrm_resource *ep_res = &efx_state->vi_res->rs;
	unsigned ep_mmap_bytes;
	int i;

	if (*length < EFX_VI_HW_RESOURCE_MAXSIZE)
		return -EINVAL;

	/* Describe the NIC so userland can pick the right register layout. */
	mdata->nic_arch = efx_state->nic->devtype.arch;
	mdata->nic_variant = efx_state->nic->devtype.variant;
	mdata->nic_revision = efx_state->nic->devtype.revision;

	mdata->evq_order =
	    efx_state->vi_res->nic_info.evq_pages.iobuff.order;
	mdata->evq_offs = efx_state->vi_res->nic_info.evq_pages.iobuff_off;
	mdata->evq_capacity = efx_vi_eventq_size;
	mdata->instance = EFRM_RESOURCE_INSTANCE(ep_res->rs_handle);
	mdata->rx_capacity = FALCON_DMA_Q_DEFAULT_RX_SIZE;
	mdata->tx_capacity = FALCON_DMA_Q_DEFAULT_TX_SIZE;

	ep_mmap_bytes = FALCON_DMA_Q_DEFAULT_MMAP;
	EFRM_ASSERT(ep_mmap_bytes == PAGE_SIZE * 2);

#ifndef NDEBUG
	{
		/* Sanity about doorbells */
		unsigned long tx_dma_page_addr, rx_dma_page_addr;

		/* get rx doorbell address */
		rx_dma_page_addr =
		    phys + falcon_rx_dma_page_addr(mdata->instance);
		/* get tx doorbell address */
		tx_dma_page_addr =
		    phys + falcon_tx_dma_page_addr(mdata->instance);

		/* Check the lower bits of the TX doorbell will be
		 * consistent. */
		EFRM_ASSERT((TX_DESC_UPD_REG_PAGE4_OFST &
			     FALCON_DMA_PAGE_MASK) ==
			    (TX_DESC_UPD_REG_PAGE123K_OFST &
			     FALCON_DMA_PAGE_MASK));

		/* Check the lower bits of the RX doorbell will be
		 * consistent. */
		EFRM_ASSERT((RX_DESC_UPD_REG_PAGE4_OFST &
			     FALCON_DMA_PAGE_MASK) ==
			    (RX_DESC_UPD_REG_PAGE123K_OFST &
			     FALCON_DMA_PAGE_MASK));

		/* Check that the doorbells will be in the same page. */
		EFRM_ASSERT((TX_DESC_UPD_REG_PAGE4_OFST & PAGE_MASK) ==
			    (RX_DESC_UPD_REG_PAGE4_OFST & PAGE_MASK));

		/* Check that the doorbells are in the same page. */
		EFRM_ASSERT((tx_dma_page_addr & PAGE_MASK) ==
			    (rx_dma_page_addr & PAGE_MASK));

		/* Check that the TX doorbell offset is correct. */
		EFRM_ASSERT((TX_DESC_UPD_REG_PAGE4_OFST & ~PAGE_MASK) ==
			    (tx_dma_page_addr & ~PAGE_MASK));

		/* Check that the RX doorbell offset is correct. */
		EFRM_ASSERT((RX_DESC_UPD_REG_PAGE4_OFST & ~PAGE_MASK) ==
			    (rx_dma_page_addr & ~PAGE_MASK));
	}
#endif

	/* Populate the resource array in a fixed order; i counts entries. */
	i = 0;
	/* Entry 0: kernel virtual address of the TX DMA queue descriptors. */
	hw_res_array[i].type = EFX_VI_HW_RESOURCE_TXDMAQ;
	hw_res_array[i].mem_type = EFX_VI_HW_RESOURCE_PERIPHERAL;
	hw_res_array[i].more_to_follow = 0;
	hw_res_array[i].length = PAGE_SIZE;
	hw_res_array[i].address =
		(unsigned long)efx_state->vi_res->nic_info.
			dmaq_pages[EFRM_VI_RM_DMA_QUEUE_TX].kva;

	i++;
	/* Entry 1: kernel virtual address of the RX DMA queue descriptors. */
	hw_res_array[i].type = EFX_VI_HW_RESOURCE_RXDMAQ;
	hw_res_array[i].mem_type = EFX_VI_HW_RESOURCE_PERIPHERAL;
	hw_res_array[i].more_to_follow = 0;
	hw_res_array[i].length = PAGE_SIZE;
	hw_res_array[i].address =
		(unsigned long)efx_state->vi_res->nic_info.
			dmaq_pages[EFRM_VI_RM_DMA_QUEUE_RX].kva;

	i++;
	/* Entry 2: physical address of this instance's event-queue timer
	 * page. */
	hw_res_array[i].type = EFX_VI_HW_RESOURCE_EVQTIMER;
	hw_res_array[i].mem_type = EFX_VI_HW_RESOURCE_PERIPHERAL;
	hw_res_array[i].more_to_follow = 0;
	hw_res_array[i].length = PAGE_SIZE;
	hw_res_array[i].address =
		(unsigned long)phys + falcon_timer_page_addr(mdata->instance);

	/* NB EFX_VI_HW_RESOURCE_EVQPTR not used on Falcon */

	i++;
	/* Entry 3: event-queue read pointer; its form depends on the NIC
	 * variant (A: per-instance physical register, B: fixed offset). */
	switch (efx_state->nic->devtype.variant) {
	case 'A':
		hw_res_array[i].type = EFX_VI_HW_RESOURCE_EVQRPTR;
		hw_res_array[i].mem_type = EFX_VI_HW_RESOURCE_PERIPHERAL;
		hw_res_array[i].more_to_follow = 0;
		hw_res_array[i].length = PAGE_SIZE;
		hw_res_array[i].address = (unsigned long)phys +
			EVQ_RPTR_REG_OFST +
			(FALCON_REGISTER128 * mdata->instance);
		break;
	case 'B':
		hw_res_array[i].type = EFX_VI_HW_RESOURCE_EVQRPTR_OFFSET;
		hw_res_array[i].mem_type = EFX_VI_HW_RESOURCE_PERIPHERAL;
		hw_res_array[i].more_to_follow = 0;
		hw_res_array[i].length = PAGE_SIZE;
		hw_res_array[i].address =
			(unsigned long)FALCON_EVQ_RPTR_REG_P0;
		break;
	default:
		EFRM_ASSERT(0);
		break;
	}

	i++;
	/* Entry 4: kernel virtual address of the event-queue memory. */
	hw_res_array[i].type = EFX_VI_HW_RESOURCE_EVQMEMKVA;
	hw_res_array[i].mem_type = EFX_VI_HW_RESOURCE_IOBUFFER;
	hw_res_array[i].more_to_follow = 0;
	hw_res_array[i].length = PAGE_SIZE;
	hw_res_array[i].address = (unsigned long)efx_state->vi_res->
		nic_info.evq_pages.iobuff.kva;

	i++;
	/* Entry 5: doorbell page, expressed as a page frame number
	 * (physical address >> PAGE_SHIFT). */
	hw_res_array[i].type = EFX_VI_HW_RESOURCE_BELLPAGE;
	hw_res_array[i].mem_type = EFX_VI_HW_RESOURCE_PERIPHERAL;
	hw_res_array[i].more_to_follow = 0;
	hw_res_array[i].length = PAGE_SIZE;
	hw_res_array[i].address =
		(unsigned long)(phys +
				falcon_tx_dma_page_addr(mdata->instance))
		>> PAGE_SHIFT;

	i++;

	EFRM_ASSERT(i <= *length);

	/* Report how many entries were actually written. */
	*length = i;

	return 0;
}
Пример #8
0
/* Function needed as Xen can't get pages for grants in dom0, but can
   get dma address */
/* Program pre-existing bus/DMA addresses into the NIC buffer table.
 *
 * Like efx_vi_dma_map_pages(), but the caller supplies already-mapped
 * bus addresses instead of struct page pointers, so no pci_map_page()
 * is performed here.
 *
 * @vih:           opaque VI state handle (really a struct efx_vi_state *).
 * @bus_dev_addrs: array of n_pages bus addresses.
 * @n_pages:       number of pages; must be a positive power of two.
 * @dmh_out:       on success, receives the newly allocated mapping state
 *                 (ownership transfers to the caller).
 *
 * Returns 0 on success, -EINVAL for a bad page count, -ENOMEM on
 * allocation failure, or the error from efrm_buffer_table_alloc().
 */
int
efx_vi_dma_map_addrs(struct efx_vi_state *vih,
		     unsigned long long *bus_dev_addrs,
		     int n_pages, struct efx_vi_dma_map_state **dmh_out)
{
	struct efx_vi_state *efx_state = vih;
	int order, rc, i, evq_id;
	dma_addr_t dma_addr;
	struct efx_vi_dma_map_state *dm_state;

	/* Validate before computing the order: n_pages <= 0 would make
	 * fls(n_pages - 1) operate on -1 (order 32) and the subsequent
	 * "1 << order" shift undefined. */
	if (n_pages <= 0)
		return -EINVAL;

	order = fls(n_pages - 1);
	if (n_pages != (1 << order)) {
		EFRM_WARN("%s: Can only allocate buffers in power of 2 "
			  "sizes (not %d)", __func__, n_pages);
		return -EINVAL;
	}

	dm_state = kmalloc(sizeof(struct efx_vi_dma_map_state), GFP_KERNEL);
	if (!dm_state)
		return -ENOMEM;

	dm_state->dma_addrs = kmalloc(sizeof(dma_addr_t) * n_pages,
				      GFP_KERNEL);
	if (!dm_state->dma_addrs) {
		kfree(dm_state);
		return -ENOMEM;
	}

	rc = efrm_buffer_table_alloc(order, &dm_state->bt_handle);
	if (rc < 0) {
		kfree(dm_state->dma_addrs);
		kfree(dm_state);
		return rc;
	}

	/* Buffer-table entries are tagged with the owning event queue. */
	evq_id = EFRM_RESOURCE_INSTANCE(efx_state->vi_res->rs.rs_handle);
	for (i = 0; i < n_pages; i++) {

		dma_addr = (dma_addr_t)bus_dev_addrs[i];

		efrm_buffer_table_set(&dm_state->bt_handle, efx_state->nic,
				      i, dma_addr, evq_id);

		dm_state->dma_addrs[i] = dma_addr;

		/* Would be nice to not have to call commit each time, but
		 * comment says there are hardware restrictions on how often
		 * you can go without it, so do this to be safe */
		efrm_buffer_table_commit();
	}

	dm_state->n_pages = n_pages;

	*dmh_out = dm_state;

	return 0;
}