Example no. 1
0
/* Register @handler (with opaque context @arg) as the event-queue callback
 * for @virs.
 *
 * Returns 0 on success, or -EBUSY if a callback is already registered.
 * Registration is serialised against efrm_eventq_kill_callback() by
 * register_evq_cb_mutex.
 */
int
efrm_eventq_register_callback(struct efrm_vi *virs,
			      efrm_evq_callback_fn handler, void *arg)
{
	struct efrm_nic_per_vi *cb_info;
	int instance;
	int bit;
	int rc = 0;

	EFRM_RESOURCE_ASSERT_VALID(&virs->rs, 0);
	EFRM_ASSERT(virs->q[EFHW_EVQ].capacity != 0);
	EFRM_ASSERT(handler != NULL);

	mutex_lock(&register_evq_cb_mutex);
	/* Only one callback may be registered per VI at a time. */
	if (virs->evq_callback_fn != NULL) {
		rc = -EBUSY;
		goto unlock_and_out;
	}

	/* Publish arg before fn: the event path reads fn first (see
	 * efrm_eventq_do_callback), so the barrier guarantees it never
	 * observes a non-NULL fn with a stale arg. */
	virs->evq_callback_arg = arg;
	wmb();
	virs->evq_callback_fn = handler;

	/* Expose this VI to the per-NIC event dispatch table and mark the
	 * callback as registered. */
	instance = virs->rs.rs_instance;
	cb_info = &efrm_nic(virs->rs.rs_client->nic)->vis[instance];
	cb_info->vi = virs;
	bit = test_and_set_bit(VI_RESOURCE_EVQ_STATE_CALLBACK_REGISTERED,
			       &cb_info->state);
	/* The bit must not already be set: fn was NULL above. */
	EFRM_ASSERT(bit == 0);
unlock_and_out:
	mutex_unlock(&register_evq_cb_mutex);
	return rc;
}
Example no. 2
0
/* Allocate a stack id for @pd from the per-NIC usage bitmap.
 *
 * Scans nic->stack_id_usage for the first word with a clear bit, claims
 * that bit under nic->lock, and stores the resulting 1-based id in
 * pd->stack_id.  Returns 0 on success, or -ENOMEM (with pd->stack_id set
 * to 0) if all EFRM_MAX_STACK_ID ids are in use.
 */
static int efrm_pd_stack_id_alloc(struct efrm_pd *pd)
{
	struct efrm_nic *nic = efrm_nic(pd->rs.rs_client->nic);
	const int word_bitcount = sizeof(*nic->stack_id_usage) * 8;
	int i, v, bitno, id;

	spin_lock(&nic->lock);
	/* Find the first usage word that is not completely full. */
	for (i = 0; i < sizeof(nic->stack_id_usage) /
		     sizeof(*nic->stack_id_usage) &&
		     ((v = nic->stack_id_usage[i]) == ~0u); ++i)
		;
	/* ci_ffs64 is 1-based, so subtract 1 for the bit index.  If every
	 * word was full, v == ~0u here and the id computed below exceeds
	 * EFRM_MAX_STACK_ID, so the failure path is taken. */
	bitno = v ? ci_ffs64(~v) - 1 : 0;
	id = i * word_bitcount + bitno + 1;
	if (id <= EFRM_MAX_STACK_ID)
		/* Use an unsigned constant: bitno can reach
		 * word_bitcount - 1, and 1 << 31 on a signed int is
		 * undefined behaviour. */
		nic->stack_id_usage[i] |= 1u << bitno;
	spin_unlock(&nic->lock);

	if (id > EFRM_MAX_STACK_ID) {
		/* We ran out of stack ids; suppression of self traffic
		 * is not possible. */
		EFRM_TRACE("%s: WARNING: no free stack ids", __FUNCTION__);
		pd->stack_id = 0;
		return -ENOMEM;
	}
	pd->stack_id = id;
	return 0;
}
Example no. 3
0
/* Enable or disable port sniffing on behalf of resource @rs.
 *
 * The rx_sniff_rxq field acts as an ownership word: it holds the instance
 * of the VI that currently owns sniffing, EFRM_PORT_SNIFF_NO_OWNER when
 * free, or EFRM_PORT_SNIFF_OP_IN_PROGRESS while an operation runs.
 * Returns 0 on success, -EPERM if enabling without CAP_NET_ADMIN, -EBUSY
 * if another VI owns (or is changing) the sniff state, or the error from
 * the hardware call.
 */
int efrm_port_sniff(struct efrm_resource *rs, int enable, int promiscuous,
		    int rss_context)
{
	struct efhw_nic *nic = rs->rs_client->nic;
	ci_int32 expected;
	ci_int32 final_owner;
	int rc;

	if( enable && !capable(CAP_NET_ADMIN) )
		return -EPERM;

	/* Claim the ownership word.  Enabling requires it to be free;
	 * disabling requires that we are the current owner.  Either way we
	 * park it at OP_IN_PROGRESS while we talk to the hardware. */
	expected = enable ? EFRM_PORT_SNIFF_NO_OWNER : rs->rs_instance;
	if( ci_cas32_fail(&efrm_nic(nic)->rx_sniff_rxq, expected,
			  EFRM_PORT_SNIFF_OP_IN_PROGRESS) )
		return -EBUSY;

	EFRM_RESOURCE_ASSERT_VALID(rs, 0);
	rc = efhw_nic_set_port_sniff(nic, rs->rs_instance, enable,
				     promiscuous, rss_context);

	/* We remain owner after a successful enable, or after a FAILED
	 * disable (the hardware is still sniffing to us). */
	final_owner = ((enable && rc == 0) || (!enable && rc != 0)) ?
		rs->rs_instance : EFRM_PORT_SNIFF_NO_OWNER;

	/* Nobody else may touch the word while it is OP_IN_PROGRESS, so
	 * this CAS must succeed. */
	EFRM_VERIFY_EQ(ci_cas32_fail(&efrm_nic(nic)->rx_sniff_rxq,
				     EFRM_PORT_SNIFF_OP_IN_PROGRESS,
				     final_owner), 0);

	return rc;
}
Example no. 4
0
/* Release the stack id held by @pd, if any, back to the per-NIC bitmap.
 *
 * Inverse of efrm_pd_stack_id_alloc(): the stored id is 1-based, so it is
 * decremented before being split into a word index and bit number.  A
 * stack_id of 0 means "none allocated" and is a no-op.
 */
static void efrm_pd_stack_id_free(struct efrm_pd *pd)
{
	if (pd->stack_id != 0) {
		struct efrm_nic *nic = efrm_nic(pd->rs.rs_client->nic);
		const int word_bitcount = sizeof(*nic->stack_id_usage) * 8;
		int id = pd->stack_id - 1;
		int i = id / word_bitcount;
		int bitno = id % word_bitcount;
		spin_lock(&nic->lock);
		/* Unsigned constant: bitno can reach word_bitcount - 1,
		 * and 1 << 31 on a signed int is undefined behaviour. */
		nic->stack_id_usage[i] &= ~(1u << bitno);
		spin_unlock(&nic->lock);
	}
}
Example no. 5
0
/* Unregister the event-queue callback for @virs and wait for any in-flight
 * invocation to finish.
 *
 * After this returns, efrm_eventq_do_callback() can no longer see or run
 * the callback, so the caller may free the callback's context.  Must not
 * be called unless a callback is currently registered.  Serialised against
 * registration by register_evq_cb_mutex.
 */
void efrm_eventq_kill_callback(struct efrm_vi *virs)
{
	struct efrm_nic_per_vi *cb_info;
	int32_t evq_state;
	int instance;
	int bit;

	EFRM_RESOURCE_ASSERT_VALID(&virs->rs, 0);
	EFRM_ASSERT(virs->q[EFHW_EVQ].capacity != 0);
	EFRM_ASSERT(virs->rs.rs_client != NULL);

	mutex_lock(&register_evq_cb_mutex);

	/* Remove this VI from the per-NIC dispatch table. */
	instance = virs->rs.rs_instance;
	cb_info = &efrm_nic(virs->rs.rs_client->nic)->vis[instance];
	cb_info->vi = NULL;

	/* Disable the callback. */
#ifdef CONFIG_SFC_RESOURCE_VF
	/* For VF-backed VIs, the clear must be atomic with respect to the
	 * VF event dispatch path, which holds the same lock. */
	if (virs->allocation.vf)
		spin_lock(&virs->allocation.vf->vf_evq_cb_lock);
#endif
	bit = test_and_clear_bit(VI_RESOURCE_EVQ_STATE_CALLBACK_REGISTERED,
				 &cb_info->state);
	EFRM_ASSERT(bit);	/* do not call me twice! */
#ifdef CONFIG_SFC_RESOURCE_VF
	if (virs->allocation.vf)
		spin_unlock(&virs->allocation.vf->vf_evq_cb_lock);
#endif

	/* If the vi had been primed, unset it. */
	test_and_clear_bit(VI_RESOURCE_EVQ_STATE_WAKEUP_PENDING,
			   &cb_info->state);

	/* Spin until the callback is complete.  BUSY is set by
	 * efrm_eventq_do_callback() for the duration of a callback
	 * invocation; once it is clear (and CALLBACK_REGISTERED is already
	 * clear, above) no new invocation can start. */
	do {
		rmb();

		udelay(1);
		evq_state = cb_info->state;
	} while ((evq_state & VI_RESOURCE_EVQ_STATE(BUSY)));

	/* Safe to drop the function pointer now that no caller can race. */
	virs->evq_callback_fn = NULL;
	mutex_unlock(&register_evq_cb_mutex);
}
Example no. 6
0
/* Dispatch an event-queue notification for @instance on @nic.
 *
 * If a callback is registered for the VI, invoke it (with BUSY set in the
 * per-VI state word for the duration, so efrm_eventq_kill_callback() can
 * wait for us) and return the handler's result.  Otherwise just update the
 * state word and return 0.
 *
 * @is_timeout distinguishes a timer expiry from a wakeup event; on a
 * wakeup the WAKEUP_PENDING flag is consumed.  @budget is passed through
 * to the handler (NAPI-style work limit — TODO confirm semantics at the
 * handler implementations).
 */
static int
efrm_eventq_do_callback(struct efhw_nic *nic, unsigned instance,
			bool is_timeout, int budget)
{
	struct efrm_nic *rnic = efrm_nic(nic);
	efrm_evq_callback_fn handler;
	void *arg;
	struct efrm_nic_per_vi *cb_info;
	int32_t evq_state;
	int32_t new_evq_state;
	struct efrm_vi *virs;
	int bit;
	int rc = 0;

	EFRM_ASSERT(efrm_vi_manager);

	cb_info = &rnic->vis[instance];

	/* Set the BUSY bit and clear WAKEUP_PENDING.  Do this
	 * before waking up the sleeper to avoid races. */
	while (1) {
		evq_state = cb_info->state;
		new_evq_state = evq_state;

		/* BUSY set here means another dispatch is already running
		 * for this VI, which should be impossible. */
		if ((evq_state & VI_RESOURCE_EVQ_STATE(BUSY)) != 0) {
			EFRM_ERR("%s:%d: evq_state[%d] corrupted!",
				 __FUNCTION__, __LINE__, instance);
			return 0;
		}

		/* A genuine wakeup consumes the pending-wakeup flag;
		 * timer expiries leave it alone. */
		if (!is_timeout)
			new_evq_state &= ~VI_RESOURCE_EVQ_STATE(WAKEUP_PENDING);

		if (evq_state & VI_RESOURCE_EVQ_STATE(CALLBACK_REGISTERED)) {
			new_evq_state |= VI_RESOURCE_EVQ_STATE(BUSY);
			/* Snapshot the VI pointer in the same pass as the
			 * state we are committing with cmpxchg. */
			virs = cb_info->vi;
			if (cmpxchg(&cb_info->state, evq_state,
				    new_evq_state) == evq_state)
				break;
		} else {
			/* Just update the state if necessary. */
			if (new_evq_state == evq_state ||
			    cmpxchg(&cb_info->state, evq_state,
				    new_evq_state) == evq_state)
				return 0;
		}
	}

	if (virs) {
		/* Read fn before arg: pairs with the wmb() in
		 * efrm_eventq_register_callback(), so a non-NULL fn
		 * guarantees arg is valid. */
		handler = virs->evq_callback_fn;
		rmb();
		arg = virs->evq_callback_arg;
		EFRM_ASSERT(handler != NULL);
		rc = handler(arg, is_timeout, nic, budget);
	}

	/* Clear the BUSY bit.  It must still be set: only this function
	 * sets it, and kill waits for it rather than clearing it. */
	bit =
	    test_and_clear_bit(VI_RESOURCE_EVQ_STATE_BUSY,
			       &cb_info->state);
	if (!bit) {
		EFRM_ERR("%s:%d: evq_state corrupted!",
			 __FUNCTION__, __LINE__);
	}

	return rc;
}