Example #1
int efx_vi_alloc(struct efx_vi_state **vih_out, int ifindex)
{
	struct efx_vi_state *efx_state;
	int rc;

	efx_state = kmalloc(sizeof(struct efx_vi_state), GFP_KERNEL);

	if (!efx_state) {
		EFRM_ERR("%s: failed to allocate memory for efx_vi_state",
			 __func__);
		rc = -ENOMEM;
		goto fail;
	}

	efx_state->ifindex = ifindex;
	rc = efrm_client_get(ifindex, NULL, NULL, &efx_state->efrm_client);
	if (rc < 0) {
		EFRM_ERR("%s: efrm_client_get(%d) failed: %d", __func__,
			 ifindex, rc);
		rc = -ENODEV;
		goto fail_no_ifindex;
	}
	efx_state->nic = efrm_client_get_nic(efx_state->efrm_client);

	init_completion(&efx_state->flush_completion);

	/* basically allocate_pt_endpoint() */
	rc = alloc_ep(efx_state);
	if (rc) {
		EFRM_ERR("%s: alloc_ep failed: %d", __func__, rc);
		goto fail_no_pt;
	}
#if EFX_VI_STATIC_FILTERS
	/* Statically allocate a set of filter resources so that
	   efx_vi_filter() can be used from in_atomic() context */
	rc = efx_vi_alloc_static_filters(efx_state);
	if (rc)
		goto fail_no_filters;
#endif

	*vih_out = efx_state;

	return 0;
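
	/* Error paths: release resources in reverse order of allocation. */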
#if EFX_VI_STATIC_FILTERS
fail_no_filters:
	free_ep(efx_state);
#endif
fail_no_pt:
	efrm_client_put(efx_state->efrm_client);
fail_no_ifindex:
	kfree(efx_state);
fail:
	return rc;
}
Example #2
static int
efab_vi_rm_mmap_ctpio(struct efrm_vi *virs, unsigned long *bytes, void *opaque,
                      int *map_num, unsigned long *offset)
{
  int rc;
  int len;
  int instance;
  struct efhw_nic *nic;
  int bar_off;

  /* The CTPIO region is 12K from the start of the VI's aperture. */
  const int CTPIO_OFFSET = 12 * 1024;

  instance = virs->rs.rs_instance;

  if( ! (virs->flags & EFHW_VI_TX_CTPIO) ) {
    EFRM_ERR("%s: CTPIO is not enabled on VI instance %d\n", __FUNCTION__,
	     instance);
    return -EINVAL;
  }

  /* Map the CTPIO region. */
  len = CI_MIN(*bytes, CI_PAGE_SIZE);
  *bytes -= len;
  nic = efrm_client_get_nic(virs->rs.rs_client);
  ci_assert_ge(nic->vi_stride, CTPIO_OFFSET + len);
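  /* Round the BAR offset down to a page boundary before mapping. */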
  bar_off = (ef10_tx_dma_page_base(nic->vi_stride, instance) + CTPIO_OFFSET) &
            PAGE_MASK;
  rc = ci_mmap_bar(nic, bar_off, len, opaque, map_num, offset, 1);
  if( rc < 0 )
    EFCH_ERR("%s: ERROR: ci_mmap_bar failed rc=%d", __FUNCTION__, rc);
  return rc;
}
Example #3
static int
efab_vi_rm_mmap_pio(struct efrm_vi *virs,
		    unsigned long *bytes, void *opaque,
		    int *map_num, unsigned long *offset)
{
  int rc;
  int len;
  int instance;
  struct efhw_nic *nic;
  int bar_off;

  nic = efrm_client_get_nic(virs->rs.rs_client);

  if( nic->devtype.arch != EFHW_ARCH_EF10 ) {
    EFRM_ERR("%s: Only ef10 supports PIO."
	     "  Expected arch=%d but got %d\n", __FUNCTION__,
	     EFHW_ARCH_EF10, nic->devtype.arch);
    return -EINVAL;
  }

  instance = virs->rs.rs_instance;

  /* Map the control page. */
  len = CI_MIN(*bytes, CI_PAGE_SIZE);
  *bytes -= len;
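  /* The PIO region is 4K from the start of the VI's aperture. */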
  bar_off = (ef10_tx_dma_page_base(nic->vi_stride, instance) + 4096) &
            PAGE_MASK;
  rc = ci_mmap_bar(nic, bar_off, len, opaque, map_num, offset, 1);
  if( rc < 0 )
    EFCH_ERR("%s: ERROR: ci_mmap_bar failed rc=%d", __FUNCTION__, rc);
  return rc;
}
Example #4
static int efx_vi_alloc_static_filters(struct efx_vi_state *efx_state)
{
	int i;
	int rc;

	efx_state->free_fres = efx_state->used_fres = NULL;

	for (i = 0; i < EFX_VI_STATIC_FILTERS; i++) {
		rc = efrm_filter_resource_alloc(efx_state->vi_res,
						&efx_state->fres[i].fres);
		if (rc < 0) {
			EFRM_ERR("%s: efrm_filter_resource_alloc failed: %d",
			     __func__, rc);
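			/* Roll back the filters allocated so far. */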
			while (i > 0) {
				i--;
				efrm_filter_resource_release(
					efx_state->fres[i].fres);
			}
			efx_state->free_fres = NULL;
			return rc;
		}
		efx_state->fres[i].next = efx_state->free_fres;
		efx_state->free_fres = &efx_state->fres[i];
	}

	return 0;
}
Example #5
int compat_pat_wc_init(void)
{
  int rc;
  struct cpuinfo_x86* cpu_info = &boot_cpu_data;
  if( compat_pat_wc.inited ) {
    ++compat_pat_wc.inited;
    return 0;
  }

  if( !cpu_has(cpu_info, X86_FEATURE_MSR) || !cpu_has(cpu_info, X86_FEATURE_PAT) ) {
    EFRM_ERR("%s: ERROR: PAT not available on this processor", __func__);
    return -ENOSYS;
  }

  rc = setup_pat();
  switch (rc) {
    case -EIO:
      EFRM_ERR("%s: ERROR: failed accessing PAT register", __func__);
      return rc;
    case -EFAULT:
      EFRM_ERR("%s: ERROR: PAT registers inconsistent across CPUs", __func__);
      return rc;
    case -ENOSPC:
      EFRM_ERR("%s: ERROR: incompatible PAT modification detected %llx",
          __func__, compat_pat_wc.original_pat.u64);
      return rc;
    case -EALREADY:
      EFRM_WARN("%s: WARNING: compatible PAT modification detected %llx",
          __func__, compat_pat_wc.original_pat.u64);
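      /* fall through */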
    case 0:
      EFRM_WARN("%s: PAT modified for WC", __func__);
      break;
    default:
      EFRM_ERR("%s: unknown return code %d", __func__, rc);
  }

  compat_pat_wc.inited = 1;
  return 0;
}
Example #6
int
efrm_resources_init(void)
{
	int i, rc;

	/* Create resources in the correct order */
	for (i = 0; i < EFRM_RESOURCE_NUM; ++i) {
		struct efrm_resource_manager **rmp = &efrm_rm_table[i];

		EFRM_ASSERT(*rmp == NULL);
		switch (i) {
		case EFRM_RESOURCE_VI:
			rc = efrm_create_vi_resource_manager(rmp);
			break;
		case EFRM_RESOURCE_VI_SET:
			rc = efrm_create_vi_set_resource_manager(rmp);
			break;
#ifdef CONFIG_SFC_RESOURCE_VF
		case EFRM_RESOURCE_VF:
			rc = efrm_create_vf_resource_manager(rmp);
			break;
#endif
		case EFRM_RESOURCE_PD:
			rc = efrm_create_pd_resource_manager(rmp);
			break;
		case EFRM_RESOURCE_PIO:
			rc = efrm_create_pio_resource_manager(rmp);
			break;
		default:
			rc = 0;
			break;
		}

		if (rc < 0) {
			EFRM_ERR("%s: failed type=%d (%d)",
				 __FUNCTION__, i, rc);
			return rc;
		}
	}

	return 0;
}
Example #7
int
efrm_resources_init(const struct vi_resource_dimensions *vi_res_dim,
		    int buffer_table_min, int buffer_table_lim)
{
	int i, rc;

	rc = efrm_buffer_table_ctor(buffer_table_min, buffer_table_lim);
	if (rc != 0)
		return rc;

	/* Create resources in the correct order */
	for (i = 0; i < EFRM_RESOURCE_NUM; ++i) {
		struct efrm_resource_manager **rmp = &efrm_rm_table[i];

		EFRM_ASSERT(*rmp == NULL);
		switch (i) {
		case EFRM_RESOURCE_VI:
			rc = efrm_create_vi_resource_manager(rmp,
							     vi_res_dim);
			break;
		case EFRM_RESOURCE_FILTER:
			rc = efrm_create_filter_resource_manager(rmp);
			break;
		case EFRM_RESOURCE_IOBUFSET:
			rc = efrm_create_iobufset_resource_manager(rmp);
			break;
		default:
			rc = 0;
			break;
		}

		if (rc < 0) {
			EFRM_ERR("%s: failed type=%d (%d)",
				 __FUNCTION__, i, rc);
			efrm_buffer_table_dtor();
			return rc;
		}
	}

	return 0;
}
Example #8
static inline int alloc_ep(struct efx_vi_state *state)
{
	int rc;

	rc = efrm_vi_resource_alloc(state->efrm_client, NULL, EFHW_VI_JUMBO_EN,
				    efx_vi_eventq_size,
				    FALCON_DMA_Q_DEFAULT_TX_SIZE,
				    FALCON_DMA_Q_DEFAULT_RX_SIZE,
				    0, 0, &state->vi_res, NULL, NULL, NULL,
				    NULL);
	if (rc < 0) {
		EFRM_ERR("%s: ERROR efrm_vi_resource_alloc error %d",
			 __func__, rc);
		return rc;
	}

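	/* Arrange for efx_vi_flush_complete() to be called once the VI's
	 * queues have been flushed. */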
	efrm_vi_register_flush_callback(state->vi_res, &efx_vi_flush_complete,
					(void *)state);

	return 0;
}
Example #9
static void
release_filter(struct efx_vi_state *efx_state, struct filter_resource *fres)
{
#if EFX_VI_STATIC_FILTERS
	struct filter_list_t *flist = efx_state->used_fres, *prev = NULL;
	while (flist) {
		if (flist->fres == fres) {
			if (prev)
				prev->next = flist->next;
			else
				efx_state->used_fres = flist->next;
			flist->next = efx_state->free_fres;
			efx_state->free_fres = flist;
			return;
		}
		prev = flist;
		flist = flist->next;
	}
	EFRM_ERR("%s: couldn't find filter", __func__);
#else
	return efrm_filter_resource_release(fres);
#endif
}
Example #10
int
efrm_vf_resource_alloc(struct efrm_client *client, 
		       struct efrm_vf *linked, int use_iommu,
		       struct efrm_vf **vf_out)
{
	struct efrm_vf_nic_params *nic =
		&efrm_vf_manager->nic[client->nic->index];
	struct efrm_vf *vf;
	int rc = 0;
	struct efrm_pd_owner_ids *owner_ids;

	if (nic->vf_count != nic->vfs_probed) {
		EFRM_ERR("%s: not all VFs for NIC %d are discovered yet: "
			 "%d out of %d", __func__, client->nic->index, 
			 nic->vfs_probed, nic->vf_count);
		return -EBUSY;
	}

	spin_lock_bh(&efrm_vf_manager->rm.rm_lock);
	if (list_empty(&nic->free_list)) {
		spin_unlock_bh(&efrm_vf_manager->rm.rm_lock);
		return rc == 0 ? -ENOBUFS : rc;
	}
	vf = list_entry(nic->free_list.next, struct efrm_vf, link);
	list_del(&vf->link);
	spin_unlock_bh(&efrm_vf_manager->rm.rm_lock);

	rc = efrm_vf_alloc_init(vf, linked, use_iommu);
	if (rc != 0) {
		/* Scary warnings are already printed, just return.
		 * Add the VF back to the tail of the free list in the hope
		 * that a different one works better. */
		list_add_tail(&vf->link,
			      &efrm_vf_manager->nic[vf->nic_index].free_list);
		return rc;
	}

	EFRM_ASSERT(vf);
	EFRM_ASSERT(vf->irq_count);
	EFRM_ASSERT(vf->vi_count);

	rc = efrm_buddy_range_ctor(&vf->vi_instances, vf->vi_base,
				   vf->vi_base + vf->vi_count);
	if (rc < 0) {
		EFRM_ERR("NIC %d VF %d: efrm_buddy_range_ctor(%d, %d) failed",
			 client->nic->index, vf->pci_dev_fn,
			 vf->vi_base, vf->vi_base + vf->vi_count);
		spin_lock_bh(&efrm_vf_manager->rm.rm_lock);
		list_add(&vf->link,
			 &efrm_vf_manager->nic[vf->nic_index].free_list);
		spin_unlock_bh(&efrm_vf_manager->rm.rm_lock);
		return -ENOMEM;
	}

	EFRM_ASSERT(vf->rs.rs_ref_count == 0);
	efrm_resource_init(&vf->rs, EFRM_RESOURCE_VF, vf->pci_dev_fn);

	efrm_client_add_resource(client, &vf->rs);

	owner_ids = efrm_pd_owner_ids_ctor(
		/* On falcon owner_ids are global, so base this block on the
		 * base vi id to avoid overlap.
		 */
		client->nic->devtype.arch == EFHW_ARCH_EF10 ? 1 : vf->vi_base,
		(1 << vf->vi_scale));
	if (!owner_ids)
		return -ENOMEM;
	vf->owner_ids = owner_ids;

	EFRM_TRACE("NIC %d VF %d allocated",
		   client->nic->index, vf->pci_dev_fn);
	*vf_out = vf;
	return 0;
}
Example #11
static int
efrm_eventq_do_callback(struct efhw_nic *nic, unsigned instance,
			bool is_timeout, int budget)
{
	struct efrm_nic *rnic = efrm_nic(nic);
	efrm_evq_callback_fn handler;
	void *arg;
	struct efrm_nic_per_vi *cb_info;
	int32_t evq_state;
	int32_t new_evq_state;
	struct efrm_vi *virs;
	int bit;
	int rc = 0;

	EFRM_ASSERT(efrm_vi_manager);

	cb_info = &rnic->vis[instance];

	/* Set the BUSY bit and clear WAKEUP_PENDING.  Do this
	 * before waking up the sleeper to avoid races. */
	while (1) {
		evq_state = cb_info->state;
		new_evq_state = evq_state;

		if ((evq_state & VI_RESOURCE_EVQ_STATE(BUSY)) != 0) {
			EFRM_ERR("%s:%d: evq_state[%d] corrupted!",
				 __FUNCTION__, __LINE__, instance);
			return 0;
		}

		if (!is_timeout)
			new_evq_state &= ~VI_RESOURCE_EVQ_STATE(WAKEUP_PENDING);

		if (evq_state & VI_RESOURCE_EVQ_STATE(CALLBACK_REGISTERED)) {
			new_evq_state |= VI_RESOURCE_EVQ_STATE(BUSY);
			virs = cb_info->vi;
			if (cmpxchg(&cb_info->state, evq_state,
				    new_evq_state) == evq_state)
				break;
		} else {
			/* Just update the state if necessary. */
			if (new_evq_state == evq_state ||
			    cmpxchg(&cb_info->state, evq_state,
				    new_evq_state) == evq_state)
				return 0;
		}
	}

	if (virs) {
		handler = virs->evq_callback_fn;
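		/* Read barrier: the callback fn must be read before its
		 * argument. */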
		rmb();
		arg = virs->evq_callback_arg;
		EFRM_ASSERT(handler != NULL);
		rc = handler(arg, is_timeout, nic, budget);
	}

	/* Clear the BUSY bit. */
	bit = test_and_clear_bit(VI_RESOURCE_EVQ_STATE_BUSY,
				 &cb_info->state);
	if (!bit) {
		EFRM_ERR("%s:%d: evq_state corrupted!",
			 __FUNCTION__, __LINE__);
	}

	return rc;
}