Example #1
int
efrm_eventq_register_callback(struct efrm_vi *virs,
			      efrm_evq_callback_fn handler, void *arg)
{
	struct efrm_nic_per_vi *cb_info;
	int instance;
	int bit;
	int rc = 0;

	EFRM_RESOURCE_ASSERT_VALID(&virs->rs, 0);
	EFRM_ASSERT(virs->q[EFHW_EVQ].capacity != 0);
	EFRM_ASSERT(handler != NULL);

	mutex_lock(&register_evq_cb_mutex);
	if (virs->evq_callback_fn != NULL) {
		rc = -EBUSY;
		goto unlock_and_out;
	}

	virs->evq_callback_arg = arg;
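	/* Publish the callback argument before the function pointer; this
	 * pairs with the rmb() in efrm_eventq_do_callback(). */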
	wmb();
	virs->evq_callback_fn = handler;

	instance = virs->rs.rs_instance;
	cb_info = &efrm_nic(virs->rs.rs_client->nic)->vis[instance];
	cb_info->vi = virs;
	bit = test_and_set_bit(VI_RESOURCE_EVQ_STATE_CALLBACK_REGISTERED,
			       &cb_info->state);
	EFRM_ASSERT(bit == 0);
unlock_and_out:
	mutex_unlock(&register_evq_cb_mutex);
	return rc;
}
Example #2
int
efrm_filter_resource_alloc(struct vi_resource *vi_parent,
			   struct filter_resource **frs_out)
{
	struct filter_resource *frs;
	int rc, instance;

	EFRM_ASSERT(frs_out);
	EFRM_ASSERT(efrm_filter_manager);
	EFRM_RESOURCE_MANAGER_ASSERT_VALID(&efrm_filter_manager->rm);
	EFRM_ASSERT(vi_parent != NULL);
	EFRM_ASSERT(EFRM_RESOURCE_TYPE(vi_parent->rs.rs_handle) ==
		    EFRM_RESOURCE_VI);

	/* Allocate resource data structure.  This is called in atomic
	 * context by the onload driver.
	 */
	frs = kmalloc(sizeof(struct filter_resource), GFP_ATOMIC);
	if (!frs)
		return -ENOMEM;

	/* Allocate an instance. */
	rc = kfifo_get(efrm_filter_manager->free_ids,
		       (unsigned char *)&instance, sizeof(instance));
	if (rc != sizeof(instance)) {
		EFRM_TRACE("%s: out of instances", __FUNCTION__);
		EFRM_ASSERT(rc == 0);
		rc = -EBUSY;
		goto fail1;
	}

	/* Initialise the resource DS. */
	efrm_resource_init(&frs->rs, EFRM_RESOURCE_FILTER, instance);
	frs->pt = vi_parent;
	efrm_resource_ref(&frs->pt->rs);
	frs->filter_idx = -1;

	EFRM_TRACE("%s: " EFRM_RESOURCE_FMT " VI %d", __FUNCTION__,
		   EFRM_RESOURCE_PRI_ARG(frs->rs.rs_handle),
		   EFRM_RESOURCE_INSTANCE(vi_parent->rs.rs_handle));

	efrm_client_add_resource(vi_parent->rs.rs_client, &frs->rs);
	*frs_out = frs;
	return 0;

fail1:
	memset(frs, 0, sizeof(*frs));
	kfree(frs);
	return rc;
}
Example #3
int efrm_buddy_range_ctor(struct efrm_buddy_allocator *b, int low, int high)
{
	int i, rc, log2_n;
	log2_n = fls(high - 1);
	if ((rc = efrm_buddy_ctor(b, log2_n)) < 0)
		return rc;
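	/* Build a buddy covering the power of two above 'high', claim every
	 * order-0 block, then free back just [low, high) so that only the
	 * requested range is ever handed out. */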
	for (i = 0; i < (1 << log2_n); ++i) {
		rc = efrm_buddy_alloc(b, 0);
		EFRM_ASSERT(rc >= 0);
		EFRM_ASSERT(rc < (1 << log2_n));
	}
	for (i = low; i < high; ++i)
		efrm_buddy_free(b, i, 0);
	return 0;
}
Example #4
void efhw_iopages_free(struct pci_dev *pci_dev, struct efhw_iopages *p,
		       efhw_iommu_domain *vf_domain)
{
	struct device *dev = &pci_dev->dev;
	int i;

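	/* Undo each page's mapping via whichever mechanism created it: the
	 * DMA API normally, or the IOMMU domain when pci_dev is a VF. */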
	for (i = 0; i < p->n_pages; ++i) {
		if (!vf_domain) {
			dma_unmap_page(dev, p->dma_addrs[i],
				       PAGE_SIZE, DMA_BIDIRECTIONAL);
		} else {
#ifdef CONFIG_SFC_RESOURCE_VF_IOMMU
			mutex_lock(&efrm_iommu_mutex);
			iommu_unmap(vf_domain, p->dma_addrs[i], PAGE_SIZE);
			mutex_unlock(&efrm_iommu_mutex);
#else
			EFRM_ASSERT(0);
#endif
		}
	}
#ifdef CONFIG_SUSE_KERNEL
	/* bug 56168 */
	schedule();
#endif
	vfree(p->ptr);
	kfree(p->dma_addrs);
}
Example #5
void
efrm_vf_init_nic_params(struct efhw_nic* nic,
			const struct vi_resource_dimensions *res_dim)
{
	struct efrm_vf_nic_params *nic_params =
		&efrm_vf_manager->nic[nic->index];

#ifndef NDEBUG
	int nic_index;

	/* Sanity check the nic index to ensure it's been initialised before
	 * we got here.
	 */
	for (nic_index = 0; nic_index < EFHW_MAX_NR_DEVS; ++nic_index)
		if (efrm_nic_tablep->nic[nic_index] != NULL &&
		    memcmp(efrm_nic_tablep->nic[nic_index]->mac_addr,
			   nic->mac_addr, ETH_ALEN) == 0)
			break;

	EFRM_ASSERT(nic_index < EFHW_MAX_NR_DEVS);
#endif

	EFRM_TRACE("vf_vi_base=%u vf_vi_scale=%u vf_count=%u",
		   res_dim->vf_vi_base, res_dim->vf_vi_scale,
		   res_dim->vf_count);

	nic_params->vi_base = res_dim->vf_vi_base;
	nic_params->vi_scale = res_dim->vf_vi_scale;
	nic_params->vf_count = res_dim->vf_count;
}
Example #6
int
efrm_create_vf_resource_manager(struct efrm_resource_manager **rm_out)
{
	int rc;
	int nic_index;

	EFRM_ASSERT(rm_out);

	efrm_vf_manager = kzalloc(sizeof(*efrm_vf_manager), GFP_KERNEL);
	if (efrm_vf_manager == NULL)
		return -ENOMEM;

	for (nic_index = 0; nic_index < EFHW_MAX_NR_DEVS; ++nic_index)
		INIT_LIST_HEAD(&efrm_vf_manager->nic[nic_index].free_list);

	rc = efrm_resource_manager_ctor(&efrm_vf_manager->rm,
					efrm_vf_rm_dtor, "VF",
					EFRM_RESOURCE_VF);
	if (rc < 0)
		goto fail1;

	*rm_out = &efrm_vf_manager->rm;
	return 0;

fail1:
	EFRM_DO_DEBUG(memset(efrm_vf_manager, 0, sizeof(*efrm_vf_manager)));
	kfree(efrm_vf_manager);
	return rc;
}
Example #7
static void efrm_pd_owner_id_free(struct efrm_pd_owner_ids* owner_ids,
				  int owner_id)
{
	/* Must hold pd_manager lock. */
	EFRM_ASSERT(test_bit(owner_id - owner_ids->base, owner_ids->used_ids));
	__clear_bit(owner_id - owner_ids->base, owner_ids->used_ids);
}
Example #8
void
efrm_filter_resource_redirect(struct filter_resource *frs,
			      struct vi_resource *vi)
{
	struct efhw_nic *nic;
	int vi_instance;

	/* Validate the arguments before dereferencing them. */
	EFRM_ASSERT(frs != NULL);
	EFRM_ASSERT(vi != NULL);

	nic = frs->rs.rs_client->nic;
	vi_instance = EFRM_RESOURCE_INSTANCE(vi->rs.rs_handle);
	if (frs->filter_idx >= 0)
		efhw_nic_ipfilter_redirect(nic, frs->filter_idx, vi_instance);
	efrm_vi_resource_release(frs->pt);
	frs->pt = vi;
	efrm_resource_ref(&frs->pt->rs);
}
Example #9
void efrm_buddy_dtor(struct efrm_buddy_allocator *b)
{
	EFRM_ASSERT(b);

	kfree(b->free_lists);
	vfree(b->links);
	vfree(b->orders);
}
Example #10
void compat_pat_wc_shutdown(void)
{
  EFRM_ASSERT(compat_pat_wc.inited);
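  /* 'inited' is a reference count; only the final caller tears down,
   * restoring the PAT if we modified it. */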
  if( --compat_pat_wc.inited )
    return;
  if( compat_pat_wc.pat_modified )
    restore_pat();
}
Example #11
void efrm_eventq_kill_callback(struct efrm_vi *virs)
{
	struct efrm_nic_per_vi *cb_info;
	int32_t evq_state;
	int instance;
	int bit;

	EFRM_RESOURCE_ASSERT_VALID(&virs->rs, 0);
	EFRM_ASSERT(virs->q[EFHW_EVQ].capacity != 0);
	EFRM_ASSERT(virs->rs.rs_client != NULL);

	mutex_lock(&register_evq_cb_mutex);

	instance = virs->rs.rs_instance;
	cb_info = &efrm_nic(virs->rs.rs_client->nic)->vis[instance];
	cb_info->vi = NULL;

	/* Disable the callback. */
#ifdef CONFIG_SFC_RESOURCE_VF
	if (virs->allocation.vf)
		spin_lock(&virs->allocation.vf->vf_evq_cb_lock);
#endif
	bit = test_and_clear_bit(VI_RESOURCE_EVQ_STATE_CALLBACK_REGISTERED,
				 &cb_info->state);
	EFRM_ASSERT(bit);	/* do not call me twice! */
#ifdef CONFIG_SFC_RESOURCE_VF
	if (virs->allocation.vf)
		spin_unlock(&virs->allocation.vf->vf_evq_cb_lock);
#endif

	/* If the vi had been primed, unset it. */
	test_and_clear_bit(VI_RESOURCE_EVQ_STATE_WAKEUP_PENDING,
			   &cb_info->state);

	/* Spin until the callback is complete. */
	do {
		rmb();
		udelay(1);
		evq_state = cb_info->state;
	} while (evq_state & VI_RESOURCE_EVQ_STATE(BUSY));

	virs->evq_callback_fn = NULL;
	mutex_unlock(&register_evq_cb_mutex);
}
Example #12
static void
efx_vi_eventq_callback(void *context, int is_timeout, struct efhw_nic *nic)
{
	struct efx_vi_state *efx_state = (struct efx_vi_state *)context;

	EFRM_ASSERT(efx_state->callback_fn);

	efx_state->callback_fn(efx_state->callback_arg, is_timeout);
}
Example #13
static void filter_rm_dtor(struct efrm_resource_manager *rm)
{
	EFRM_TRACE("%s:", __func__);

	EFRM_RESOURCE_MANAGER_ASSERT_VALID(&efrm_filter_manager->rm);
	EFRM_ASSERT(&efrm_filter_manager->rm == rm);

	kfifo_vfree(efrm_filter_manager->free_ids);
	EFRM_TRACE("%s: done", __func__);
}
Example #14
int efrm_buddy_ctor(struct efrm_buddy_allocator *b, unsigned order)
{
	unsigned o;
	unsigned size = 1 << order;

	DEBUG_ALLOC(EFRM_NOTICE("%s(%u)", __FUNCTION__, order));
	EFRM_ASSERT(b);
	EFRM_ASSERT(order <= sizeof(unsigned) * 8 - 1);

	b->order = order;
	b->free_lists = kmalloc((order + 1) * sizeof(b->free_lists[0]),
				GFP_KERNEL);
	if (b->free_lists == NULL)
		goto fail1;

	b->links = vmalloc(size * sizeof(b->links[0]));
	if (b->links == NULL)
		goto fail2;

	b->orders = vmalloc(size * sizeof(b->orders[0]));
	if (b->orders == NULL)
		goto fail3;

	memset(b->links, 0, size * sizeof(b->links[0]));

	for (o = 0; o <= b->order; ++o)
		INIT_LIST_HEAD(b->free_lists + o);

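	/* The whole space starts out as one free block of maximal order. */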
	efrm_buddy_free_list_add(b, b->order, 0);

	return 0;

fail3:
	vfree(b->links);
fail2:
	kfree(b->free_lists);
fail1:
	return -ENOMEM;
}
Example #15
void
efrm_vf_resource_free(struct efrm_vf *vf)
{
	EFRM_TRACE("NIC %d VF %d free",
		   vf->rs.rs_client->nic->index, vf->pci_dev_fn);
	EFRM_ASSERT(vf->rs.rs_ref_count == 0);
	efrm_buddy_dtor(&vf->vi_instances);
	efrm_vf_free_reset(vf);

	spin_lock_bh(&efrm_vf_manager->rm.rm_lock);
	list_add(&vf->link, &efrm_vf_manager->nic[vf->nic_index].free_list);
	spin_unlock_bh(&efrm_vf_manager->rm.rm_lock);
	efrm_client_put(vf->rs.rs_client);
	efrm_pd_owner_ids_dtor(vf->owner_ids);
}
Example #16
/*********************************************************************
 *
 *  VF creation:
 *  OS-independent parts to be called after VF is really probed by OS.
 *
 *********************************************************************/
static void efrm_vf_enumerate(int nic_index)
{
	int first_fn = 0xffff, second_fn = 0xffff;
	struct efrm_vf_nic_params *nic = &efrm_vf_manager->nic[nic_index];
	struct list_head *link;

	EFRM_ASSERT(nic->vfs_probed == nic->vf_count);

	EFRM_NOTICE("All %d VFs for NIC %d are discovered",
		    nic->vf_count, nic_index);

	if (nic->vfs_probed == 1) {
		list_entry(nic->free_list.next, struct efrm_vf,
			   link)->vi_base = nic->vi_base;
		return;
	}
	/* NB: excerpt truncated - handling for more than one probed VF is
	 * omitted here. */
}
Example #17
int
__efrm_filter_resource_set(struct filter_resource *frs, int type,
			   unsigned saddr, uint16_t sport,
			   unsigned daddr, uint16_t dport)
{
	struct efhw_nic *nic;
	int vi_instance;

	/* Validate frs before dereferencing it. */
	EFRM_ASSERT(frs);
	nic = frs->rs.rs_client->nic;

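	/* On B-series and later NICs, request the no-scatter filter variant
	 * unless the parent VI has jumbo frames enabled. */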
	if (efrm_nic_tablep->a_nic->devtype.variant >= 'B' &&
	    (frs->pt->flags & EFHW_VI_JUMBO_EN) == 0)
		type |= EFHW_IP_FILTER_TYPE_NOSCAT_B0_MASK;
	vi_instance = EFRM_RESOURCE_INSTANCE(frs->pt->rs.rs_handle);

	return efhw_nic_ipfilter_set(nic, type, &frs->filter_idx,
				     vi_instance, saddr, sport, daddr, dport);
}
Example #18
void efrm_pd_free(struct efrm_pd *pd)
{
	struct efrm_pd_owner_ids *owner_ids;

	mutex_destroy(&pd->remap_lock);

	efrm_pd_os_stats_dtor(pd, pd->os_data);

	if (efrm_pd_has_vport(pd))
#if EFX_DRIVERLINK_API_VERSION < 25
		ef10_vport_free(pd->rs.rs_client->nic, pd->vport_id);
#else
		efrm_vport_free(pd->rs.rs_client, pd->vport_handle);
#endif

	efrm_pd_stack_id_free(pd);

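	/* PDs in physical-address mode never allocated an owner id, so there
	 * is nothing to return to the bitmap. */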
	spin_lock_bh(&pd_manager->rm.rm_lock);
	if (pd->owner_id != OWNER_ID_PHYS_MODE) {
#ifdef CONFIG_SFC_RESOURCE_VF
		if (pd->vf)
			owner_ids = pd->vf->owner_ids;
		else
#endif
		owner_ids = efrm_nic_from_rs(&pd->rs)->owner_ids;
		EFRM_ASSERT(owner_ids != NULL);
		efrm_pd_owner_id_free(owner_ids, pd->owner_id);
	}
	spin_unlock_bh(&pd_manager->rm.rm_lock);
#ifdef CONFIG_SFC_RESOURCE_VF
	if (pd->vf != NULL)
		efrm_vf_resource_release(pd->vf);
#endif
	if (pd->owner_id != OWNER_ID_PHYS_MODE) {
		int ord;
		for (ord = 0;
		     ord < efhw_nic_buffer_table_orders_num(
					pd->rs.rs_client->nic);
		     ord++)
			efrm_bt_manager_dtor(&pd->bt_managers[ord]);
	}
	efrm_client_put(pd->rs.rs_client);
	kfree(pd);
}
Example #19
int
efrm_resources_init(void)
{
	int i, rc;

	/* Create resources in the correct order */
	for (i = 0; i < EFRM_RESOURCE_NUM; ++i) {
		struct efrm_resource_manager **rmp = &efrm_rm_table[i];

		EFRM_ASSERT(*rmp == NULL);
		switch (i) {
		case EFRM_RESOURCE_VI:
			rc = efrm_create_vi_resource_manager(rmp);
			break;
		case EFRM_RESOURCE_VI_SET:
			rc = efrm_create_vi_set_resource_manager(rmp);
			break;
#ifdef CONFIG_SFC_RESOURCE_VF
		case EFRM_RESOURCE_VF:
			rc = efrm_create_vf_resource_manager(rmp);
			break;
#endif
		case EFRM_RESOURCE_PD:
			rc = efrm_create_pd_resource_manager(rmp);
			break;
		case EFRM_RESOURCE_PIO:
			rc = efrm_create_pio_resource_manager(rmp);
			break;
		default:
			rc = 0;
			break;
		}

		if (rc < 0) {
			EFRM_ERR("%s: failed type=%d (%d)",
				 __FUNCTION__, i, rc);
			return rc;
		}
	}

	return 0;
}
Example #20
void efrm_vi_get_mappings(struct efrm_vi* vi, struct efrm_vi_mappings* vm)
{
  struct efhw_nic *nic = vi->rs.rs_client->nic;

  EFRM_RESOURCE_ASSERT_VALID(&vi->rs, 0);

  common_vi_get_mappings(vi, nic, vm);

  switch( nic->devtype.arch ) {
  case EFHW_ARCH_FALCON:
    falcon_vi_get_mappings(vi, nic, vm);
    break;
  case EFHW_ARCH_EF10:
    ef10_vi_get_mappings(vi, nic, vm);
    break;
  default:
    EFRM_ASSERT(0);
    break;
  }
}
Example #21
int efrm_buddy_alloc(struct efrm_buddy_allocator *b, unsigned order)
{
	unsigned smallest;
	unsigned addr;

	DEBUG_ALLOC(EFRM_NOTICE("%s(%u)", __FUNCTION__, order));
	EFRM_ASSERT(b);

	/* Find smallest chunk that is big enough.  ?? Can optimise this by
	 ** keeping array of pointers to smallest chunk for each order.
	 */
	smallest = order;
	while (smallest <= b->order &&
	       efrm_buddy_free_list_empty(b, smallest))
		++smallest;

	if (smallest > b->order) {
		DEBUG_ALLOC(EFRM_NOTICE
			    ("buddy - alloc order %d failed - max order %d",
			     order, b->order););
		return -ENOMEM;
	}

	/* NB: excerpt truncated - the logic that splits the chosen chunk
	 * down to the requested order is omitted here. */
}
Example #22
int
efrm_resources_init(const struct vi_resource_dimensions *vi_res_dim,
		    int buffer_table_min, int buffer_table_lim)
{
	int i, rc;

	rc = efrm_buffer_table_ctor(buffer_table_min, buffer_table_lim);
	if (rc != 0)
		return rc;

	/* Create resources in the correct order */
	for (i = 0; i < EFRM_RESOURCE_NUM; ++i) {
		struct efrm_resource_manager **rmp = &efrm_rm_table[i];

		EFRM_ASSERT(*rmp == NULL);
		switch (i) {
		case EFRM_RESOURCE_VI:
			rc = efrm_create_vi_resource_manager(rmp,
							     vi_res_dim);
			break;
		case EFRM_RESOURCE_FILTER:
			rc = efrm_create_filter_resource_manager(rmp);
			break;
		case EFRM_RESOURCE_IOBUFSET:
			rc = efrm_create_iobufset_resource_manager(rmp);
			break;
		default:
			rc = 0;
			break;
		}

		if (rc < 0) {
			EFRM_ERR("%s: failed type=%d (%d)",
				 __FUNCTION__, i, rc);
			efrm_buffer_table_dtor();
			return rc;
		}
	}

	return 0;
}
Example #23
void efrm_eventq_reset(struct efrm_vi *virs)
{
	struct efhw_nic *nic = virs->rs.rs_client->nic;
	struct efrm_nic *efrm_nic = container_of(nic, struct efrm_nic,
						 efhw_nic);
	int instance = virs->rs.rs_instance;
	int wakeup_evq;

	EFRM_ASSERT(virs->q[EFHW_EVQ].capacity != 0);

	/* FIXME: Protect against concurrent resets. */

	efhw_nic_event_queue_disable(nic, instance,
				  (virs->flags & EFHW_VI_RX_TIMESTAMPS) != 0);

	/* Use the net driver's wakeup channel if one is assigned; otherwise
	 * spread VIs across the RSS channels. */
	if (virs->net_drv_wakeup_channel >= 0)
		wakeup_evq = virs->net_drv_wakeup_channel;
	else if (efrm_nic->rss_channel_count == 0)
		wakeup_evq = 0;
	else
		wakeup_evq = instance % efrm_nic->rss_channel_count;
	memset(efrm_eventq_base(virs), EFHW_CLEAR_EVENT_VALUE,
	       efrm_vi_rm_evq_bytes(virs, -1));
	virs->out_flags = 0;
	/* NB. We do not enable DOS protection because of bug12916. */
	efhw_nic_event_queue_enable(nic, instance, virs->q[EFHW_EVQ].capacity,
			efrm_bt_allocation_base(&virs->q[EFHW_EVQ].bt_alloc),
			virs->q[EFHW_EVQ].dma_addrs, 
			1 << virs->q[EFHW_EVQ].page_order,
				    /* make siena look like falcon for now */
				    instance < 64, 
				    0, wakeup_evq,
				    (virs->flags &
				     (EFHW_VI_RX_TIMESTAMPS |
				      EFHW_VI_TX_TIMESTAMPS)) != 0,
				    (virs->flags &
				     EFHW_VI_NO_CUT_THROUGH) == 0,
				    &virs->rx_ts_correction,
				    &virs->out_flags);
}
Example #24
int efrm_create_filter_resource_manager(struct efrm_resource_manager **rm_out)
{
	int rc;

	EFRM_ASSERT(rm_out);

	efrm_filter_manager =
	    kzalloc(sizeof(struct filter_resource_manager), GFP_KERNEL);
	if (efrm_filter_manager == NULL)
		return -ENOMEM;

	rc = efrm_resource_manager_ctor(&efrm_filter_manager->rm,
					filter_rm_dtor, "FILTER",
					EFRM_RESOURCE_FILTER);
	if (rc < 0)
		goto fail1;

	/* Create a pool of free instances */
	rc = efrm_kfifo_id_ctor(&efrm_filter_manager->free_ids,
				0, EFHW_IP_FILTER_NUM,
				&efrm_filter_manager->rm.rm_lock);
	if (rc != 0)
		goto fail2;

	*rm_out = &efrm_filter_manager->rm;
	EFRM_TRACE("%s: filter resources created - %d IDs",
		   __func__, kfifo_len(efrm_filter_manager->free_ids));
	return 0;

fail2:
	efrm_resource_manager_dtor(&efrm_filter_manager->rm);
fail1:
	memset(efrm_filter_manager, 0, sizeof(*efrm_filter_manager));
	kfree(efrm_filter_manager);
	return rc;
}
Example #25
int efrm_pd_alloc(struct efrm_pd **pd_out, struct efrm_client *client_opt,
		  struct efrm_vf *vf_opt, int flags)
{
	struct efrm_pd *pd;
	int rc, instance;
	struct efrm_pd_owner_ids *owner_ids;
	int orders_num = 0;


	EFRM_ASSERT((client_opt != NULL) || (vf_opt != NULL));
	if ((flags &
	    ~(EFRM_PD_ALLOC_FLAG_PHYS_ADDR_MODE |
	    EFRM_PD_ALLOC_FLAG_HW_LOOPBACK)) != 0) {
		rc = -EINVAL;
		goto fail1;
	}

	if (!(flags & EFRM_PD_ALLOC_FLAG_PHYS_ADDR_MODE)) {
		orders_num = efhw_nic_buffer_table_orders_num(
						client_opt->nic);
		EFRM_ASSERT(orders_num);
		EFRM_ASSERT(efhw_nic_buffer_table_orders(
						client_opt->nic)[0] == 0);
	}
	pd = kmalloc(sizeof(*pd) + orders_num * sizeof(pd->bt_managers[0]),
		     GFP_KERNEL);
	if (pd == NULL) {
		rc = -ENOMEM;
		goto fail1;
	}
	pd->stack_id = 0;

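	/* The manager lock serialises instance numbering and owner-id
	 * allocation. */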
	spin_lock_bh(&pd_manager->rm.rm_lock);
	instance = pd_manager->next_instance++;
	if (flags & EFRM_PD_ALLOC_FLAG_PHYS_ADDR_MODE) {
		pd->owner_id = OWNER_ID_PHYS_MODE;
	} else {
#ifdef CONFIG_SFC_RESOURCE_VF
		if (vf_opt != NULL)
			owner_ids = vf_opt->owner_ids;
		else
#endif
		owner_ids = efrm_nic_from_client(client_opt)->owner_ids;
		EFRM_ASSERT(owner_ids != NULL);
		pd->owner_id = efrm_pd_owner_id_alloc(owner_ids);
	}
	spin_unlock_bh(&pd_manager->rm.rm_lock);
	if (pd->owner_id == OWNER_ID_ALLOC_FAIL) {
		rc = -EBUSY;
		goto fail2;
	}
#ifdef CONFIG_SFC_RESOURCE_VF
	pd->vf = vf_opt;
	if (pd->vf != NULL) {
		struct efrm_resource *vfrs = efrm_vf_to_resource(pd->vf);
		efrm_resource_ref(vfrs);
		client_opt = vfrs->rs_client;
	}
#endif
	if (!(flags & EFRM_PD_ALLOC_FLAG_PHYS_ADDR_MODE)) {
		int ord;
		for (ord = 0; ord < orders_num; ord++) {
			efrm_bt_manager_ctor(
				&pd->bt_managers[ord], pd->owner_id,
				efhw_nic_buffer_table_orders(
						client_opt->nic)[ord]
				);
		}
	}
	efrm_resource_init(&pd->rs, EFRM_RESOURCE_PD, instance);
	efrm_client_add_resource(client_opt, &pd->rs);

	pd->os_data = efrm_pd_os_stats_ctor(pd);
	pd->min_nic_order = 0;

#if EFX_DRIVERLINK_API_VERSION < 25
	pd->vport_id = EFRM_PD_VPORT_ID_NONE;
#else
	pd->vport_handle = EFRM_PD_VPORT_ID_NONE;
#endif
	mutex_init(&pd->remap_lock);
	if (flags & EFRM_PD_ALLOC_FLAG_HW_LOOPBACK) {
		if ((rc = efrm_pd_stack_id_alloc(pd)) != 0) {
			efrm_pd_release(pd);
			return rc;
		}
	}

	*pd_out = pd;
	return 0;


fail2:
	kfree(pd);
fail1:
	return rc;
}
Example #26
static int
efrm_eventq_do_callback(struct efhw_nic *nic, unsigned instance,
			bool is_timeout, int budget)
{
	struct efrm_nic *rnic = efrm_nic(nic);
	efrm_evq_callback_fn handler;
	void *arg;
	struct efrm_nic_per_vi *cb_info;
	int32_t evq_state;
	int32_t new_evq_state;
	struct efrm_vi *virs;
	int bit;
	int rc = 0;

	EFRM_ASSERT(efrm_vi_manager);

	cb_info = &rnic->vis[instance];

	/* Set the BUSY bit and clear WAKEUP_PENDING.  Do this
	 * before waking up the sleeper to avoid races. */
	while (1) {
		evq_state = cb_info->state;
		new_evq_state = evq_state;

		if ((evq_state & VI_RESOURCE_EVQ_STATE(BUSY)) != 0) {
			EFRM_ERR("%s:%d: evq_state[%d] corrupted!",
				 __FUNCTION__, __LINE__, instance);
			return 0;
		}

		if (!is_timeout)
			new_evq_state &= ~VI_RESOURCE_EVQ_STATE(WAKEUP_PENDING);

		if (evq_state & VI_RESOURCE_EVQ_STATE(CALLBACK_REGISTERED)) {
			new_evq_state |= VI_RESOURCE_EVQ_STATE(BUSY);
			virs = cb_info->vi;
			if (cmpxchg(&cb_info->state, evq_state,
				    new_evq_state) == evq_state)
				break;
		} else {
			/* Just update the state if necessary. */
			if (new_evq_state == evq_state ||
			    cmpxchg(&cb_info->state, evq_state,
				    new_evq_state) == evq_state)
				return 0;
		}
	}

	if (virs) {
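		/* Pairs with the wmb() in efrm_eventq_register_callback():
		 * because the function pointer is read before the argument,
		 * a non-NULL handler implies the argument is valid. */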
		handler = virs->evq_callback_fn;
		rmb();
		arg = virs->evq_callback_arg;
		EFRM_ASSERT(handler != NULL);
		rc = handler(arg, is_timeout, nic, budget);
	}

	/* Clear the BUSY bit. */
	bit = test_and_clear_bit(VI_RESOURCE_EVQ_STATE_BUSY,
				 &cb_info->state);
	if (!bit) {
		EFRM_ERR("%s:%d: evq_state corrupted!",
			 __FUNCTION__, __LINE__);
	}

	return rc;
}
Example #27
int
efhw_iopages_alloc(struct pci_dev *pci_dev, struct efhw_iopages *p,
		   unsigned order, efhw_iommu_domain *vf_domain,
		   unsigned long iova_base)
{
	/* dma_alloc_coherent() is really the right interface to use here.
	 * However, it allocates memory "close" to the device, but we want
	 * memory on the current numa node.  Also we need the memory to be
	 * contiguous in the kernel, but not necessarily in physical
	 * memory.
	 *
	 * vf_domain is the IOMMU protection domain - it implies that pci_dev
	 * is a VF that should not use the normal DMA mapping APIs.
	 */
	struct device *dev = &pci_dev->dev;
	int i = 0;

	p->n_pages = 1 << order;
	/* NB: the excerpt passed 0 as the gfp argument; GFP_KERNEL matches
	 * the sleeping vmalloc_node() call below. */
	p->dma_addrs = kmalloc(p->n_pages * sizeof(p->dma_addrs[0]),
			       GFP_KERNEL);
	if (p->dma_addrs == NULL)
		goto fail1;
	p->ptr = vmalloc_node(p->n_pages << PAGE_SHIFT, -1);
	if (p->ptr == NULL)
		goto fail2;
	for (i = 0; i < p->n_pages; ++i) {
		struct page *page;
		page = vmalloc_to_page(p->ptr + (i << PAGE_SHIFT));

		if (!vf_domain) {
			p->dma_addrs[i] = dma_map_page(dev, page, 0, PAGE_SIZE,
						       DMA_BIDIRECTIONAL);
			
			if (dma_mapping_error(dev, p->dma_addrs[i])) {
				EFHW_ERR("%s: ERROR dma_map_page failed",
					 __FUNCTION__);
				goto fail3;
			}
		} else
#ifdef CONFIG_SFC_RESOURCE_VF_IOMMU
		{
			int rc;

			p->dma_addrs[i] = iova_base;
			rc = iommu_map(vf_domain, p->dma_addrs[i],
				       page_to_phys(page), PAGE_SIZE,
				       IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
			if (rc) {
				EFHW_ERR("%s: ERROR iommu_map failed (%d)",
					 __FUNCTION__, rc);
				goto fail3;
			}
			iova_base += PAGE_SIZE;
		}
#else
		EFRM_ASSERT(0);
#endif
	}
	return 0;

fail3:
	while (i-- > 0)
		if (!vf_domain) {
			dma_unmap_page(dev, p->dma_addrs[i],
				       PAGE_SIZE, DMA_BIDIRECTIONAL);
		} else {
#ifdef CONFIG_SFC_RESOURCE_VF_IOMMU
			mutex_lock(&efrm_iommu_mutex);
			/* Unmap the iova recorded for page i; iova_base has
			 * already been advanced past every mapped page. */
			iommu_unmap(vf_domain, p->dma_addrs[i], PAGE_SIZE);
			mutex_unlock(&efrm_iommu_mutex);
#endif
		}
fail2:
	kfree(p->dma_addrs);
fail1:
	return -ENOMEM;
}
Example #28
int
efx_vi_hw_resource_get_phys(struct efx_vi_state *vih,
			    struct efx_vi_hw_resource_metadata *mdata,
			    struct efx_vi_hw_resource *hw_res_array,
			    int *length)
{
	struct efx_vi_state *efx_state = vih;
	struct linux_efhw_nic *lnic = linux_efhw_nic(efx_state->nic);
	unsigned long phys = lnic->ctr_ap_pci_addr;
	struct efrm_resource *ep_res = &efx_state->vi_res->rs;
	unsigned ep_mmap_bytes;
	int i;

	if (*length < EFX_VI_HW_RESOURCE_MAXSIZE)
		return -EINVAL;

	mdata->nic_arch = efx_state->nic->devtype.arch;
	mdata->nic_variant = efx_state->nic->devtype.variant;
	mdata->nic_revision = efx_state->nic->devtype.revision;

	mdata->evq_order =
	    efx_state->vi_res->nic_info.evq_pages.iobuff.order;
	mdata->evq_offs = efx_state->vi_res->nic_info.evq_pages.iobuff_off;
	mdata->evq_capacity = efx_vi_eventq_size;
	mdata->instance = EFRM_RESOURCE_INSTANCE(ep_res->rs_handle);
	mdata->rx_capacity = FALCON_DMA_Q_DEFAULT_RX_SIZE;
	mdata->tx_capacity = FALCON_DMA_Q_DEFAULT_TX_SIZE;

	ep_mmap_bytes = FALCON_DMA_Q_DEFAULT_MMAP;
	EFRM_ASSERT(ep_mmap_bytes == PAGE_SIZE * 2);

#ifndef NDEBUG
	{
		/* Sanity about doorbells */
		unsigned long tx_dma_page_addr, rx_dma_page_addr;

		/* get rx doorbell address */
		rx_dma_page_addr =
		    phys + falcon_rx_dma_page_addr(mdata->instance);
		/* get tx doorbell address */
		tx_dma_page_addr =
		    phys + falcon_tx_dma_page_addr(mdata->instance);

		/* Check the lower bits of the TX doorbell will be
		 * consistent. */
		EFRM_ASSERT((TX_DESC_UPD_REG_PAGE4_OFST &
			     FALCON_DMA_PAGE_MASK) ==
			    (TX_DESC_UPD_REG_PAGE123K_OFST &
			     FALCON_DMA_PAGE_MASK));

		/* Check the lower bits of the RX doorbell will be
		 * consistent. */
		EFRM_ASSERT((RX_DESC_UPD_REG_PAGE4_OFST &
			     FALCON_DMA_PAGE_MASK) ==
			    (RX_DESC_UPD_REG_PAGE123K_OFST &
			     FALCON_DMA_PAGE_MASK));

		/* Check that the doorbells will be in the same page. */
		EFRM_ASSERT((TX_DESC_UPD_REG_PAGE4_OFST & PAGE_MASK) ==
			    (RX_DESC_UPD_REG_PAGE4_OFST & PAGE_MASK));

		/* Check that the doorbells are in the same page. */
		EFRM_ASSERT((tx_dma_page_addr & PAGE_MASK) ==
			    (rx_dma_page_addr & PAGE_MASK));

		/* Check that the TX doorbell offset is correct. */
		EFRM_ASSERT((TX_DESC_UPD_REG_PAGE4_OFST & ~PAGE_MASK) ==
			    (tx_dma_page_addr & ~PAGE_MASK));

		/* Check that the RX doorbell offset is correct. */
		EFRM_ASSERT((RX_DESC_UPD_REG_PAGE4_OFST & ~PAGE_MASK) ==
			    (rx_dma_page_addr & ~PAGE_MASK));
	}
#endif

	i = 0;
	hw_res_array[i].type = EFX_VI_HW_RESOURCE_TXDMAQ;
	hw_res_array[i].mem_type = EFX_VI_HW_RESOURCE_PERIPHERAL;
	hw_res_array[i].more_to_follow = 0;
	hw_res_array[i].length = PAGE_SIZE;
	hw_res_array[i].address =
		(unsigned long)efx_state->vi_res->nic_info.
			dmaq_pages[EFRM_VI_RM_DMA_QUEUE_TX].kva;

	i++;
	hw_res_array[i].type = EFX_VI_HW_RESOURCE_RXDMAQ;
	hw_res_array[i].mem_type = EFX_VI_HW_RESOURCE_PERIPHERAL;
	hw_res_array[i].more_to_follow = 0;
	hw_res_array[i].length = PAGE_SIZE;
	hw_res_array[i].address =
		(unsigned long)efx_state->vi_res->nic_info.
			dmaq_pages[EFRM_VI_RM_DMA_QUEUE_RX].kva;

	i++;
	hw_res_array[i].type = EFX_VI_HW_RESOURCE_EVQTIMER;
	hw_res_array[i].mem_type = EFX_VI_HW_RESOURCE_PERIPHERAL;
	hw_res_array[i].more_to_follow = 0;
	hw_res_array[i].length = PAGE_SIZE;
	hw_res_array[i].address =
		(unsigned long)phys + falcon_timer_page_addr(mdata->instance);

	/* NB EFX_VI_HW_RESOURCE_EVQPTR not used on Falcon */

	i++;
	switch (efx_state->nic->devtype.variant) {
	case 'A':
		hw_res_array[i].type = EFX_VI_HW_RESOURCE_EVQRPTR;
		hw_res_array[i].mem_type = EFX_VI_HW_RESOURCE_PERIPHERAL;
		hw_res_array[i].more_to_follow = 0;
		hw_res_array[i].length = PAGE_SIZE;
		hw_res_array[i].address = (unsigned long)phys +
			EVQ_RPTR_REG_OFST +
			(FALCON_REGISTER128 * mdata->instance);
		break;
	case 'B':
		hw_res_array[i].type = EFX_VI_HW_RESOURCE_EVQRPTR_OFFSET;
		hw_res_array[i].mem_type = EFX_VI_HW_RESOURCE_PERIPHERAL;
		hw_res_array[i].more_to_follow = 0;
		hw_res_array[i].length = PAGE_SIZE;
		hw_res_array[i].address =
			(unsigned long)FALCON_EVQ_RPTR_REG_P0;
		break;
	default:
		EFRM_ASSERT(0);
		break;
	}

	i++;
	hw_res_array[i].type = EFX_VI_HW_RESOURCE_EVQMEMKVA;
	hw_res_array[i].mem_type = EFX_VI_HW_RESOURCE_IOBUFFER;
	hw_res_array[i].more_to_follow = 0;
	hw_res_array[i].length = PAGE_SIZE;
	hw_res_array[i].address = (unsigned long)efx_state->vi_res->
		nic_info.evq_pages.iobuff.kva;

	i++;
	hw_res_array[i].type = EFX_VI_HW_RESOURCE_BELLPAGE;
	hw_res_array[i].mem_type = EFX_VI_HW_RESOURCE_PERIPHERAL;
	hw_res_array[i].more_to_follow = 0;
	hw_res_array[i].length = PAGE_SIZE;
	hw_res_array[i].address =
		(unsigned long)(phys +
				falcon_tx_dma_page_addr(mdata->instance))
		>> PAGE_SHIFT;

	i++;

	EFRM_ASSERT(i <= *length);

	*length = i;

	return 0;
}
Example #29
int
efrm_vf_resource_alloc(struct efrm_client *client, 
		       struct efrm_vf *linked, int use_iommu,
		       struct efrm_vf **vf_out)
{
	struct efrm_vf_nic_params *nic =
		&efrm_vf_manager->nic[client->nic->index];
	struct efrm_vf *vf;
	int rc = 0;
	struct efrm_pd_owner_ids *owner_ids;

	if (nic->vf_count != nic->vfs_probed) {
		EFRM_ERR("%s: not all VFs for NIC %d are discovered yet: "
			 "%d out of %d", __func__, client->nic->index, 
			 nic->vfs_probed, nic->vf_count);
		return -EBUSY;
	}

	spin_lock_bh(&efrm_vf_manager->rm.rm_lock);
	if (list_empty(&nic->free_list)) {
		spin_unlock_bh(&efrm_vf_manager->rm.rm_lock);
		return -ENOBUFS;
	}
	vf = list_entry(nic->free_list.next, struct efrm_vf, link);
	list_del(&vf->link);
	spin_unlock_bh(&efrm_vf_manager->rm.rm_lock);

	rc = efrm_vf_alloc_init(vf, linked, use_iommu);
	if (rc != 0) {
		/* Scary warnings have already been printed; just return.
		 * Re-add the VF to the tail of the list in the hope that
		 * another PCI function fares better. */
		list_add_tail(&vf->link,
			      &efrm_vf_manager->nic[vf->nic_index].free_list);
		return rc;
	}

	EFRM_ASSERT(vf);
	EFRM_ASSERT(vf->irq_count);
	EFRM_ASSERT(vf->vi_count);

	rc = efrm_buddy_range_ctor(&vf->vi_instances, vf->vi_base,
				   vf->vi_base + vf->vi_count);
	if (rc < 0) {
		EFRM_ERR("NIC %d VF %d: efrm_buddy_range_ctor(%d, %d) failed",
			 client->nic->index, vf->pci_dev_fn,
			 vf->vi_base, vf->vi_base + vf->vi_count);
		spin_lock_bh(&efrm_vf_manager->rm.rm_lock);
		list_add(&vf->link,
			 &efrm_vf_manager->nic[vf->nic_index].free_list);
		spin_unlock_bh(&efrm_vf_manager->rm.rm_lock);
		return -ENOMEM;
	}

	EFRM_ASSERT(vf->rs.rs_ref_count == 0);
	efrm_resource_init(&vf->rs, EFRM_RESOURCE_VF, vf->pci_dev_fn);

	efrm_client_add_resource(client, &vf->rs);

	owner_ids = efrm_pd_owner_ids_ctor(
		/* On falcon owner_ids are global, so base this block on the
		 * base vi id to avoid overlap.
		 */
		client->nic->devtype.arch == EFHW_ARCH_EF10 ? 1 : vf->vi_base,
		(1 << vf->vi_scale));
	if (!owner_ids)
		return -ENOMEM;
	vf->owner_ids = owner_ids;

	EFRM_TRACE("NIC %d VF %d allocated",
		   client->nic->index, vf->pci_dev_fn);
	*vf_out = vf;
	return 0;
}
Example #30
/*********************************************************************
 *
 *  Create/destroy RM
 *
 *********************************************************************/
static void efrm_vf_rm_dtor(struct efrm_resource_manager *rm)
{
	EFRM_ASSERT(&efrm_vf_manager->rm == rm);
}