Example #1
int
efrm_filter_resource_alloc(struct vi_resource *vi_parent,
			   struct filter_resource **frs_out)
{
	struct filter_resource *frs;
	int rc, instance;

	EFRM_ASSERT(frs_out);
	EFRM_ASSERT(efrm_filter_manager);
	EFRM_RESOURCE_MANAGER_ASSERT_VALID(&efrm_filter_manager->rm);
	EFRM_ASSERT(vi_parent != NULL);
	EFRM_ASSERT(EFRM_RESOURCE_TYPE(vi_parent->rs.rs_handle) ==
		    EFRM_RESOURCE_VI);

	/* Allocate resource data structure.  This is called in atomic
	 * context by the onload driver.
	 */
	frs = kmalloc(sizeof(struct filter_resource), GFP_ATOMIC);
	if (!frs)
		return -ENOMEM;

	/* Allocate an instance. */
	rc = kfifo_get(efrm_filter_manager->free_ids,
		       (unsigned char *)&instance, sizeof(instance));
	if (rc != sizeof(instance)) {
		EFRM_TRACE("%s: out of instances", __FUNCTION__);
		EFRM_ASSERT(rc == 0);
		rc = -EBUSY;
		goto fail1;
	}

	/* Initialise the resource DS. */
	efrm_resource_init(&frs->rs, EFRM_RESOURCE_FILTER, instance);
	frs->pt = vi_parent;
	efrm_resource_ref(&frs->pt->rs);
	frs->filter_idx = -1;

	EFRM_TRACE("%s: " EFRM_RESOURCE_FMT " VI %d", __FUNCTION__,
		   EFRM_RESOURCE_PRI_ARG(frs->rs.rs_handle),
		   EFRM_RESOURCE_INSTANCE(vi_parent->rs.rs_handle));

	efrm_client_add_resource(vi_parent->rs.rs_client, &frs->rs);
	*frs_out = frs;
	return 0;

fail1:
	memset(frs, 0, sizeof(*frs));
	kfree(frs);
	return rc;
}
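A minimal calling sketch for the allocator above (not part of the original listing): it assumes the caller already holds a referenced struct vi_resource and that the driver's internal headers are in scope. The only return values to handle are 0, -ENOMEM and -EBUSY, as seen in the function body.

static int example_attach_filter(struct vi_resource *vi,
				 struct filter_resource **frs_out)
{
	struct filter_resource *frs;
	int rc;

	/* Safe from atomic context: the allocator uses GFP_ATOMIC. */
	rc = efrm_filter_resource_alloc(vi, &frs);
	if (rc < 0)
		return rc;	/* -ENOMEM, or -EBUSY when out of instances */

	*frs_out = frs;
	return 0;
}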
Example #2
void
efrm_filter_resource_redirect(struct filter_resource *frs,
			      struct vi_resource *vi)
{
	struct efhw_nic *nic = frs->rs.rs_client->nic;
	int vi_instance;

	EFRM_ASSERT(frs != NULL);
	EFRM_ASSERT(vi != NULL);

	vi_instance = EFRM_RESOURCE_INSTANCE(vi->rs.rs_handle);
	/* Re-point the hardware IP filter (if one is installed) at the new
	 * VI, then move the filter's reference from the old VI to it. */
	if( frs->filter_idx >= 0 )
		efhw_nic_ipfilter_redirect(nic, frs->filter_idx, vi_instance);
	efrm_vi_resource_release(frs->pt);
	frs->pt = vi;
	efrm_resource_ref(&frs->pt->rs);
}
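Usage sketch, assuming the filter was obtained as in Example #1: the redirect call drops the filter's reference on its old VI and takes one on the new VI, so the caller only has to keep its own reference on new_vi valid across the call.

static void example_move_filter(struct filter_resource *frs,
				struct vi_resource *new_vi)
{
	/* Re-points the hardware filter (if installed) and swaps the
	 * filter's VI reference from the old VI to new_vi. */
	efrm_filter_resource_redirect(frs, new_vi);
}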
Example #3
int efrm_pd_alloc(struct efrm_pd **pd_out, struct efrm_client *client_opt,
		  struct efrm_vf *vf_opt, int flags)
{
	struct efrm_pd *pd;
	int rc, instance;
	struct efrm_pd_owner_ids *owner_ids;
	int orders_num = 0;


	EFRM_ASSERT((client_opt != NULL) || (vf_opt != NULL));
	if ((flags &
	    ~(EFRM_PD_ALLOC_FLAG_PHYS_ADDR_MODE |
	    EFRM_PD_ALLOC_FLAG_HW_LOOPBACK)) != 0) {
		rc = -EINVAL;
		goto fail1;
	}

	if (!(flags & EFRM_PD_ALLOC_FLAG_PHYS_ADDR_MODE)) {
		orders_num = efhw_nic_buffer_table_orders_num(
						client_opt->nic);
		EFRM_ASSERT(orders_num);
		EFRM_ASSERT(efhw_nic_buffer_table_orders(
						client_opt->nic)[0] == 0);
	}
	pd = kmalloc(sizeof(*pd) + orders_num * sizeof(pd->bt_managers[0]),
		     GFP_KERNEL);
	if (pd == NULL) {
		rc = -ENOMEM;
		goto fail1;
	}
	pd->stack_id = 0;

	spin_lock_bh(&pd_manager->rm.rm_lock);
	instance = pd_manager->next_instance++;
	if (flags & EFRM_PD_ALLOC_FLAG_PHYS_ADDR_MODE) {
		pd->owner_id = OWNER_ID_PHYS_MODE;
	} else {
#ifdef CONFIG_SFC_RESOURCE_VF
		if (vf_opt != NULL)
			owner_ids = vf_opt->owner_ids;
		else
#endif
		owner_ids = efrm_nic_from_client(client_opt)->owner_ids;
		EFRM_ASSERT(owner_ids != NULL);
		pd->owner_id = efrm_pd_owner_id_alloc(owner_ids);
	}
	spin_unlock_bh(&pd_manager->rm.rm_lock);
	if (pd->owner_id == OWNER_ID_ALLOC_FAIL) {
		rc = -EBUSY;
		goto fail2;
	}
#ifdef CONFIG_SFC_RESOURCE_VF
	pd->vf = vf_opt;
	if (pd->vf != NULL) {
		struct efrm_resource *vfrs = efrm_vf_to_resource(pd->vf);
		efrm_resource_ref(vfrs);
		client_opt = vfrs->rs_client;
	}
#endif
	if (!(flags & EFRM_PD_ALLOC_FLAG_PHYS_ADDR_MODE)) {
		int ord;
		for (ord = 0; ord < orders_num; ord++) {
			efrm_bt_manager_ctor(
				&pd->bt_managers[ord], pd->owner_id,
				efhw_nic_buffer_table_orders(
						client_opt->nic)[ord]
				);
		}
	}
	efrm_resource_init(&pd->rs, EFRM_RESOURCE_PD, instance);
	efrm_client_add_resource(client_opt, &pd->rs);

	pd->os_data = efrm_pd_os_stats_ctor(pd);
	pd->min_nic_order = 0;

#if EFX_DRIVERLINK_API_VERSION < 25
	pd->vport_id = EFRM_PD_VPORT_ID_NONE;
#else
	pd->vport_handle = EFRM_PD_VPORT_ID_NONE;
#endif
	mutex_init(&pd->remap_lock);
	if (flags & EFRM_PD_ALLOC_FLAG_HW_LOOPBACK) {
		if ((rc = efrm_pd_stack_id_alloc(pd)) != 0) {
			efrm_pd_release(pd);
			return rc;
		}
	}

	*pd_out = pd;
	return 0;


fail2:
	kfree(pd);
fail1:
	return rc;
}
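A sketch of the two allocation modes, assuming a valid struct efrm_client; efrm_pd_release(), which appears on the loopback failure path above, is assumed to be the normal release call as well.

static int example_alloc_pd(struct efrm_client *client, struct efrm_pd **pd_out)
{
	/* Default mode: an owner id is taken from the client's NIC and a
	 * buffer-table manager is constructed per supported page order. */
	return efrm_pd_alloc(pd_out, client, NULL, 0);
}

static int example_alloc_pd_phys(struct efrm_client *client,
				 struct efrm_pd **pd_out)
{
	/* Physical-address mode: no owner id, no buffer-table managers. */
	return efrm_pd_alloc(pd_out, client, NULL,
			     EFRM_PD_ALLOC_FLAG_PHYS_ADDR_MODE);
}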
Example #4
int
oo_iobufset_resource_alloc(struct oo_buffer_pages * pages, struct efrm_pd *pd,
                           struct oo_iobufset **iobrs_out, uint64_t *hw_addrs,
                           int reset_pending)
{
  struct oo_iobufset *iobrs;
  int rc;
  int gfp_flag = (in_atomic() || in_interrupt()) ? GFP_ATOMIC : GFP_KERNEL;
  int size = sizeof(struct oo_iobufset) + pages->n_bufs * sizeof(dma_addr_t);
  int nic_order;
  void **addrs;
  unsigned int i;

  ci_assert(iobrs_out);
  ci_assert(pd);

  if( size <= PAGE_SIZE ) {
    iobrs = kmalloc(size, gfp_flag);
    if( iobrs == NULL )
      return -ENOMEM;
    iobrs->dma_addrs = (void *)(iobrs + 1);
  }
  else {
    /* Avoid multi-page allocations */
    iobrs = kmalloc(sizeof(struct oo_iobufset), gfp_flag);
    if( iobrs == NULL )
      return -ENOMEM;
    ci_assert_le(pages->n_bufs * sizeof(dma_addr_t), PAGE_SIZE);
    iobrs->dma_addrs = kmalloc(pages->n_bufs * sizeof(dma_addr_t), gfp_flag);
    if( iobrs->dma_addrs == NULL ) {
      kfree(iobrs);
      return -ENOMEM;
    }
  }

  oo_atomic_set(&iobrs->ref_count, 1);
  iobrs->pd = pd;
  iobrs->pages = pages;

  nic_order = EFHW_GFP_ORDER_TO_NIC_ORDER(compound_order(pages->pages[0]));

  ci_assert_le(sizeof(void *) * pages->n_bufs, PAGE_SIZE);
  addrs = kmalloc(sizeof(void *) * pages->n_bufs, gfp_flag);
  if( addrs == NULL ) {
    rc = -ENOMEM;
    goto fail;
  }

  for (i = 0; i < pages->n_bufs; i++) {
    addrs[i] = page_address(pages->pages[i]);
  }

  rc = efrm_pd_dma_map(iobrs->pd, pages->n_bufs,
		       nic_order,
		       addrs, sizeof(addrs[0]),
		       &iobrs->dma_addrs[0], sizeof(iobrs->dma_addrs[0]),
		       hw_addrs, sizeof(hw_addrs[0]),
		       put_user_fake, &iobrs->buf_tbl_alloc, reset_pending);
  kfree(addrs);

  if( rc < 0 )
    goto fail;

  OO_DEBUG_VERB(ci_log("%s: [%p] %d pages", __FUNCTION__,
                       iobrs, iobrs->pages->n_bufs));

  efrm_resource_ref(efrm_pd_to_resource(pd));
  oo_atomic_inc(&pages->ref_count);
  *iobrs_out = iobrs;
  return 0;

fail:
  oo_iobufset_free_memory(iobrs);
  return rc;
}
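A calling sketch in the same 2-space style, assuming pages is already populated and that hw_addrs provides at least pages->n_bufs entries (inferred from the per-buffer stride passed to efrm_pd_dma_map() above).

static int example_map_bufs(struct oo_buffer_pages *pages, struct efrm_pd *pd,
                            struct oo_iobufset **iobrs_out, uint64_t *hw_addrs)
{
  /* reset_pending == 0: no NIC reset is in progress at map time. */
  return oo_iobufset_resource_alloc(pages, pd, iobrs_out, hw_addrs, 0);
}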