Example #1
static void poll_catas(struct timer_list *t)
{
	struct mlx4_priv *priv = from_timer(priv, t, catas_err.timer);
	struct mlx4_dev *dev = &priv->dev;
	u32 slave_read;

	if (mlx4_is_slave(dev)) {
		slave_read = swab32(readl(&priv->mfunc.comm->slave_read));
		if (mlx4_comm_internal_err(slave_read)) {
			mlx4_warn(dev, "Internal error detected on the communication channel\n");
			goto internal_err;
		}
	} else if (readl(priv->catas_err.map)) {
		dump_err_buf(dev);
		goto internal_err;
	}

	if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
		mlx4_warn(dev, "Internal error mark was detected on device\n");
		goto internal_err;
	}

	mod_timer(&priv->catas_err.timer,
		  round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL));
	return;

internal_err:
	if (mlx4_internal_err_reset)
		queue_work(dev->persist->catas_wq, &dev->persist->catas_work);
}
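For symmetry, a teardown counterpart is worth sketching: nothing on this page stops the timer that poll_catas() re-arms. The following is a minimal sketch only, assuming the struct mlx4_priv layout used above and the mapping done in mlx4_start_catas_poll (Example #6); the function name and cleanup steps are assumptions, not quoted from a particular tree.

/* Hedged sketch: stop the catastrophic-error polling started in Example #6. */
void mlx4_stop_catas_poll(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	/* Deactivate the timer and wait for a running poll_catas() to finish. */
	del_timer_sync(&priv->catas_err.timer);

	/* Only the master mapped the internal error buffer (Example #6). */
	if (priv->catas_err.map) {
		iounmap(priv->catas_err.map);
		priv->catas_err.map = NULL;
	}
}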
Example #2
static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
{
	struct mlx4_en_profile *params = &mdev->profile;
	int i;

	params->tcp_rss = tcp_rss;
	params->udp_rss = udp_rss;
	if (params->udp_rss && !(mdev->dev->caps.flags
					& MLX4_DEV_CAP_FLAG_UDP_RSS)) {
		mlx4_warn(mdev, "UDP RSS is not supported on this device.\n");
		params->udp_rss = 0;
	}
	for (i = 1; i <= MLX4_MAX_PORTS; i++) {
		params->prof[i].rx_pause = 1;
		params->prof[i].rx_ppp = pfcrx;
		params->prof[i].tx_pause = 1;
		params->prof[i].tx_ppp = pfctx;
		params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
		params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
		params->prof[i].tx_ring_num = MLX4_EN_NUM_TX_RINGS +
			(!!pfcrx) * MLX4_EN_NUM_PPP_RINGS;
	}

	if (num_lro != ~0 || rss_mask != ~0 || rss_xor != ~0)
		mlx4_warn(mdev, "Obsolete parameter passed, ignoring.\n");
	return 0;
}
Example #3
void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
{
	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;

	if (index < MLX4_VLAN_REGULAR) {
		mlx4_warn(dev, "Trying to free special vlan index %d\n", index);
		return;
	}

	mutex_lock(&table->mutex);
	if (!table->refs[index]) {
		mlx4_warn(dev, "No vlan entry for index %d\n", index);
		goto out;
	}
	if (--table->refs[index]) {
		mlx4_dbg(dev, "Have more references for index %d,"
			 "no need to modify vlan table\n", index);
		goto out;
	}
	table->entries[index] = 0;
	mlx4_set_port_vlan_table(dev, port, table->entries);
	--table->total;
out:
	mutex_unlock(&table->mutex);
}
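Registration and release are reference counted against the same table (the matching mlx4_register_vlan appears in Example #16). A hypothetical caller, with error handling trimmed, pairs them like this; vid and port are assumed to be validated upstream.

/* Hedged usage sketch: indices below MLX4_VLAN_REGULAR are reserved
 * for special entries and are never handed out by the allocator. */
int idx, err;

err = mlx4_register_vlan(dev, port, vid, &idx);
if (err)
	return err;
/* ... use the VLAN table entry at idx ... */
mlx4_unregister_vlan(dev, port, idx);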
Example #4
static int mlx4_en_fill_rx_buf(struct net_device *dev,
			       struct mlx4_en_rx_ring *ring)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int num = 0;
	int err;

	while ((u32) (ring->prod - ring->cons) < ring->actual_size) {
		err = mlx4_en_prepare_rx_desc(priv, ring, ring->prod &
					      ring->size_mask);
		if (err) {
			if (netif_msg_rx_err(priv))
				mlx4_warn(priv->mdev,
					  "Failed preparing rx descriptor\n");
			priv->port_stats.rx_alloc_failed++;
			break;
		}
		++num;
		++ring->prod;
	}
	if ((u32) (ring->prod - ring->cons) == ring->size)
		ring->full = 1;

	return num;
}
Example #5
static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
{
	struct mlx4_en_profile *params = &mdev->profile;
	int i;

	params->tcp_rss = tcp_rss;
	params->udp_rss = udp_rss;
	if (params->udp_rss && !(mdev->dev->caps.flags
					& MLX4_DEV_CAP_FLAG_UDP_RSS)) {
		mlx4_warn(mdev, "UDP RSS is not supported on this device.\n");
		params->udp_rss = 0;
	}
	params->num_lro = min_t(int, num_lro, MLX4_EN_MAX_LRO_DESCRIPTORS);
	params->ip_reasm = ip_reasm;
	for (i = 1; i <= MLX4_MAX_PORTS; i++) {
		params->prof[i].rx_pause = 1;
		params->prof[i].rx_ppp = pfcrx;
		params->prof[i].tx_pause = 1;
		params->prof[i].tx_ppp = pfctx;
		params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
		params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
		params->prof[i].tx_ring_num = MLX4_EN_NUM_HASH_RINGS + 1 +
			(!!pfcrx) * MLX4_EN_NUM_PPP_RINGS;
	}

	return 0;
}
Example #6
void mlx4_start_catas_poll(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	phys_addr_t addr;

	INIT_LIST_HEAD(&priv->catas_err.list);
	timer_setup(&priv->catas_err.timer, poll_catas, 0);
	priv->catas_err.map = NULL;

	if (!mlx4_is_slave(dev)) {
		addr = pci_resource_start(dev->persist->pdev,
					  priv->fw.catas_bar) +
					  priv->fw.catas_offset;

		priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4);
		if (!priv->catas_err.map) {
			mlx4_warn(dev, "Failed to map internal error buffer at 0x%llx\n",
				  (unsigned long long)addr);
			return;
		}
	}

	priv->catas_err.timer.expires  =
		round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL);
	add_timer(&priv->catas_err.timer);
}
Example #7
static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rx_ring *ring;
	int ring_ind;
	int buf_ind;

	for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
		for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
			ring = &priv->rx_ring[ring_ind];

			if (mlx4_en_prepare_rx_desc(priv, ring,
						    ring->actual_size)) {
				if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
					mlx4_err(mdev, "Failed to allocate "
						       "enough rx buffers\n");
					return -ENOMEM;
				} else {
					if (netif_msg_rx_err(priv))
						mlx4_warn(mdev,
							  "Only %d buffers allocated\n",
							  ring->actual_size);
					goto out;
				}
			}
			ring->actual_size++;
			ring->prod++;
		}
	}
out:
	return 0;
}
Example #8
static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
			  enum mlx4_dev_event event, int port)
{
	struct mlx4_en_dev *mdev = (struct mlx4_en_dev *) endev_ptr;
	struct mlx4_en_priv *priv;

	if (!mdev->pndev[port])
		return;

	priv = netdev_priv(mdev->pndev[port]);
	switch (event) {
	case MLX4_DEV_EVENT_PORT_UP:
	case MLX4_DEV_EVENT_PORT_DOWN:
		/* To prevent races, we poll the link state in a separate
		 * task rather than changing it here */
		priv->link_state = event;
		queue_work(mdev->workqueue, &priv->linkstate_task);
		break;

	case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
		mlx4_err(mdev, "Internal error detected, restarting device\n");
		break;

	default:
		mlx4_warn(mdev, "Unhandled event: %d\n", event);
	}
}
Example #9
static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
{
	struct mlx4_en_profile *params = &mdev->profile;
	int i;

	params->udp_rss = udp_rss;
	params->num_tx_rings_p_up = min_t(int, mp_ncpus,
			MLX4_EN_MAX_TX_RING_P_UP);
	if (params->udp_rss && !(mdev->dev->caps.flags
					& MLX4_DEV_CAP_FLAG_UDP_RSS)) {
		mlx4_warn(mdev, "UDP RSS is not supported on this device.\n");
		params->udp_rss = 0;
	}
	for (i = 1; i <= MLX4_MAX_PORTS; i++) {
		params->prof[i].rx_pause = 1;
		params->prof[i].rx_ppp = pfcrx;
		params->prof[i].tx_pause = 1;
		params->prof[i].tx_ppp = pfctx;
		params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
		params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
		params->prof[i].tx_ring_num = params->num_tx_rings_p_up *
			MLX4_EN_NUM_UP;
		params->prof[i].rss_rings = 0;
		params->prof[i].inline_thold = inline_thold;
	}

	return 0;
}
Example #10
static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
			  enum mlx4_dev_event event, unsigned long port)
{
	struct mlx4_en_dev *mdev = (struct mlx4_en_dev *) endev_ptr;
	struct mlx4_en_priv *priv;

	switch (event) {
	case MLX4_DEV_EVENT_PORT_UP:
	case MLX4_DEV_EVENT_PORT_DOWN:
		if (!mdev->pndev[port])
			return;
		priv = netdev_priv(mdev->pndev[port]);
		/* To prevent races, we poll the link state in a separate
		 * task rather than changing it here */
		priv->link_state = event;
		queue_work(mdev->workqueue, &priv->linkstate_task);
		break;

	case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
		mlx4_err(mdev, "Internal error detected, restarting device\n");
		break;

	case MLX4_DEV_EVENT_SLAVE_INIT:
	case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
		break;
	default:
		if (port < 1 || port > dev->caps.num_ports ||
		    !mdev->pndev[port])
			return;
		mlx4_warn(mdev, "Unhandled event %d for port %d\n", event,
			  (int) port);
	}
}
Example #11
void mlx4_srq_invalidate(struct mlx4_dev *dev, struct mlx4_srq *srq)
{
	int err;

	err = mlx4_HW2SW_SRQ(dev, NULL, srq->srqn);
	if (err)
		mlx4_warn(dev, "HW2SW_SRQ failed (%d) for SRQN %06x\n", err, srq->srqn);
}
Example #12
int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
{
    struct mlx4_en_dev *mdev = priv->mdev;
    int err = 0;
    char name[25];

    cq->dev = mdev->pndev[priv->port];
    cq->mcq.set_ci_db  = cq->wqres.db.db;
    cq->mcq.arm_db     = cq->wqres.db.db + 1;
    *cq->mcq.set_ci_db = 0;
    *cq->mcq.arm_db    = 0;
    memset(cq->buf, 0, cq->buf_size);

    if (cq->is_tx == RX) {
        if (mdev->dev->caps.comp_pool) {
            if (!cq->vector) {
                sprintf(name, "%s-rx-%d", priv->dev->name, cq->ring);
                if (mlx4_assign_eq(mdev->dev, name, &cq->vector)) {
                    cq->vector = (cq->ring + 1 + priv->port) %
                                 mdev->dev->caps.num_comp_vectors;
                    mlx4_warn(mdev, "Failed Assigning an EQ to "
                              "%s_rx-%d ,Falling back to legacy EQ's\n",
                              priv->dev->name, cq->ring);
                }
            }
        } else {
            cq->vector = (cq->ring + 1 + priv->port) %
                         mdev->dev->caps.num_comp_vectors;
        }
    } else {
        if (!cq->vector || !mdev->dev->caps.comp_pool) {
            /* Fallback to legacy pool in case of error */
            cq->vector   = 0;
        }
    }

    if (!cq->is_tx)
        cq->size = priv->rx_ring[cq->ring].actual_size;

    err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, &mdev->priv_uar,
                        cq->wqres.db.dma, &cq->mcq, cq->vector, cq->is_tx);
    if (err)
        return err;

    cq->mcq.comp  = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
    cq->mcq.event = mlx4_en_cq_event;

    if (cq->is_tx) {
        init_timer(&cq->timer);
        cq->timer.function = mlx4_en_poll_tx_cq;
        cq->timer.data = (unsigned long) cq;
    } else {
        netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
        napi_enable(&cq->napi);
    }

    return 0;
}
Example #13
static int validate_index(struct mlx4_dev *dev,
			  struct mlx4_mac_table *table, int index)
{
	int err = 0;

	if (index < 0 || index >= table->max || !table->entries[index]) {
		mlx4_warn(dev, "No valid Mac entry for the given index\n");
		err = -EINVAL;
	}
	return err;
}
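validate_index() assumes the caller already holds table->mutex. A hedged sketch of such a caller follows; mlx4_replace_mac_sketch is a hypothetical name, and marking the entry with an MLX4_MAC_VALID bit mirrors the MLX4_VLAN_VALID usage in Example #16 rather than quoting the driver.

/* Hedged sketch: overwrite an existing MAC entry in place under the lock. */
static int mlx4_replace_mac_sketch(struct mlx4_dev *dev, u8 port,
				   int index, u64 new_mac)
{
	struct mlx4_mac_table *table = &mlx4_priv(dev)->port[port].mac_table;
	int err;

	mutex_lock(&table->mutex);
	err = validate_index(dev, table, index);
	if (!err) {
		table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID);
		err = mlx4_set_port_mac_table(dev, port, table->entries);
	}
	mutex_unlock(&table->mutex);
	return err;
}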
Example #14
void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int index)
{
	struct mlx4_mac_table *table = &mlx4_priv(dev)->port[port].mac_table;

	mutex_lock(&table->mutex);
	if (!table->refs[index]) {
		mlx4_warn(dev, "No MAC entry for index %d\n", index);
		goto out;
	}
	if (--table->refs[index]) {
		mlx4_warn(dev, "Have more references for index %d,"
			  "no need to modify MAC table\n", index);
		goto out;
	}
	table->entries[index] = 0;
	mlx4_set_port_mac_table(dev, port, table->entries);
	--table->total;
out:
	mutex_unlock(&table->mutex);
}
Example #15
static u32 mlx4_en_calc_rings_per_rss(struct mlx4_en_dev *mdev,
					u32 total_rx_ring,
					u32 num_rss_queue,
					u32 requested)
{
	u32 granted = requested;

	if (!requested)
		goto out;

	if (!num_rss_queue) {
		granted = 0;
		goto out;
	}

	/* 1 default ring + 1 regular ring + requested RSS rings */
	if (total_rx_ring < (2 + num_rss_queue * requested)) {
		mlx4_warn(mdev,
			  "not enough free EQs to open netq RSS with %u rings per RSS\n",
			  requested);

		/* best effort to open with as many RSS rings as possible */
		while (requested > 2) {
			requested = rounddown_pow_of_two(requested - 1);

			/* 1 default ring + 1 regular ring + requested RSS rings */
			if (total_rx_ring >= (2 + num_rss_queue * requested)) {
				mlx4_warn(mdev, "Setting netq_num_rings_per_rss to %u\n",
					  requested);
				granted = requested;
				goto out;
			}
		}

		mlx4_warn(mdev, "disabling netq RSS\n");
		granted = 0;
	}

out:
	return granted;
}
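As a worked example of the fallback: with total_rx_ring = 16, num_rss_queue = 4 and requested = 4, the check needs 2 + 4 * 4 = 18 rings and fails; the loop then tries rounddown_pow_of_two(3) = 2, which needs only 2 + 4 * 2 = 10 rings, so 2 rings per RSS are granted.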
Example #16
int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
{
	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
	int i, err = 0;
	int free = -1;

	mutex_lock(&table->mutex);

	if (table->total == table->max) {
		/* No free vlan entries */
		err = -ENOSPC;
		goto out;
	}

	for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) {
		if (free < 0 && (table->refs[i] == 0)) {
			free = i;
			continue;
		}

		if (table->refs[i] &&
		    (vlan == (MLX4_VLAN_MASK &
			      be32_to_cpu(table->entries[i])))) {
			/* Vlan already registered, increase references count */
			*index = i;
			++table->refs[i];
			goto out;
		}
	}

	if (free < 0) {
		err = -ENOMEM;
		goto out;
	}

	/* Register new VLAN */
	table->refs[free] = 1;
	table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);

	err = mlx4_set_port_vlan_table(dev, port, table->entries);
	if (unlikely(err)) {
		mlx4_warn(dev, "Failed adding vlan: %u\n", vlan);
		table->refs[free] = 0;
		table->entries[free] = 0;
		goto out;
	}

	*index = free;
	++table->total;
out:
	mutex_unlock(&table->mutex);
	return err;
}
Example #17
static void mlx4_en_validate_params(struct mlx4_en_dev *mdev)
{
#if defined(__VMKLNX__) && defined(__VMKNETDDI_QUEUEOPS__)
#ifdef __VMKERNEL_RSS_NETQ_SUPPORT__
	if (!netq) {
		/* not using netq so no reason for netq RSS */
		mlx4_warn(mdev, "netq is disabled, setting netq_num_rings_per_rss to 0\n");
		netq_num_rings_per_rss = 0;
	} else {
		if (netq_num_rings_per_rss > MAX_NETQ_NUM_RINGS_PER_RSS) {
			mlx4_warn(mdev, "Unable to set netq_num_rings_per_rss to = %u "
				  "since it is too high, Using %u instead\n",
				  netq_num_rings_per_rss, MAX_NETQ_NUM_RINGS_PER_RSS);
			netq_num_rings_per_rss = MAX_NETQ_NUM_RINGS_PER_RSS;
		} else if (netq_num_rings_per_rss < MIN_NETQ_NUM_RINGS_PER_RSS) {
			mlx4_warn(mdev, "Unable to set netq_num_rings_per_rss to = %u "
				  "since it is too low, Using %u instead\n",
				  netq_num_rings_per_rss, MIN_NETQ_NUM_RINGS_PER_RSS);
			netq_num_rings_per_rss = MIN_NETQ_NUM_RINGS_PER_RSS;
		}

		/* netq_num_rings_per_rss must be even */
		if ((netq_num_rings_per_rss % 2) != 0) {
			--netq_num_rings_per_rss;
			mlx4_warn(mdev, "netq_num_rings_per_rss must be of even value, "
				  "setting it to %u\n", netq_num_rings_per_rss);
		}

		/* netq_num_rings_per_rss must be power of 2 */
		if ((netq_num_rings_per_rss != 0) && (!is_power_of_2(netq_num_rings_per_rss))) {
			mlx4_warn(mdev, "netq_num_rings_per_rss must be power of 2 "
				"rounding down to %lu\n", rounddown_pow_of_two(netq_num_rings_per_rss));
			netq_num_rings_per_rss = rounddown_pow_of_two(netq_num_rings_per_rss);
		}
	}
#endif  /* __VMKERNEL_RSS_NETQ_SUPPORT__ */
#endif	/* NET QUEUE */
}
Example #18
static void mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
{
	u64 in_param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, srqn);
		if (mlx4_cmd(dev, in_param, RES_SRQ, RES_OP_RESERVE_AND_MAP,
			     MLX4_CMD_FREE_RES,
			     MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
			mlx4_warn(dev, "Failed freeing srq:%d\n", srqn);
		return;
	}
	__mlx4_srq_free_icm(dev, srqn);
}
Example #19
File: cq.c Project: Cai900205/test
static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
{
	u64 in_param = 0;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, cqn);
		err = mlx4_cmd(dev, in_param, RES_CQ, RES_OP_RESERVE_AND_MAP,
			       MLX4_CMD_FREE_RES,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (err)
			mlx4_warn(dev, "Failed freeing cq:%d\n", cqn);
	} else
		__mlx4_cq_free_icm(dev, cqn);
}
Example #20
void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn)
{
	u64 in_param = 0;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, xrcdn);
		err = mlx4_cmd(dev, in_param, RES_XRCD,
			       RES_OP_RESERVE, MLX4_CMD_FREE_RES,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (err)
			mlx4_warn(dev, "Failed to release xrcdn %d\n", xrcdn);
	} else
		__mlx4_xrcd_free(dev, xrcdn);
}
Example #21
File: cq.c Project: 274914765/C
void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
{
    struct mlx4_cq *cq;

    cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
                   cqn & (dev->caps.num_cqs - 1));
    if (!cq) {
        mlx4_warn(dev, "Completion event for bogus CQ %08x\n", cqn);
        return;
    }

    ++cq->arm_sn;

    cq->comp(cq);
}
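The mask in the lookup works because the CQ number space is a power of two (which cqn & (dev->caps.num_cqs - 1) implicitly assumes): the AND is then equivalent to cqn % dev->caps.num_cqs. For example, with num_cqs = 0x10000, cqn 0x12345 lands in slot 0x2345.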
Example #22
static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
			  enum mlx4_dev_event event, unsigned long port)
{
	struct mlx4_en_dev *mdev = (struct mlx4_en_dev *) endev_ptr;
	struct mlx4_en_priv *priv;

	/* check that port param is not a pointer */
	if (port != (port & (unsigned long)0x0FFFF))
		return;

	switch (event) {
	case MLX4_DEV_EVENT_PORT_UP:
		/* To prevent races, we poll the link state in a separate
		 * task rather than changing it here */
		if (!mdev->pndev[port])
			return;

		priv = netdev_priv(mdev->pndev[port]);
		priv->link_state = 1;
		queue_work(mdev->workqueue, &priv->linkstate_task);
		break;

	case MLX4_DEV_EVENT_PORT_DOWN:
		/* To prevent races, we poll the link state in a separate
		 * task rather than changing it here */
		if (!mdev->pndev[port])
			return;

		priv = netdev_priv(mdev->pndev[port]);
		priv->link_state = 0;
		queue_work(mdev->workqueue, &priv->linkstate_task);
		break;

	case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
#ifndef __VMKERNEL_MODULE__
		mlx4_err(mdev, "Internal error detected, restarting device\n");
#else /* __VMKERNEL_MODULE__ */
		mlx4_err(mdev, "Internal error detected, please reload the driver manually\n");
#endif /* __VMKERNEL_MODULE__ */
		break;

	default:
		mlx4_warn(mdev, "Unhandled event: %d\n", event);
	}
}
Example #23
File: cq.c Project: Cai900205/test
void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
{
	struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
	struct mlx4_cq *cq;

	rcu_read_lock();

	cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));

	if (!cq) {
		rcu_read_unlock();
		mlx4_warn(dev, "Async event for bogus CQ %08x\n", cqn);
		return;
	}

	cq->event(cq, event_type);
	rcu_read_unlock();
}
Example #24
void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq)
{
	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
	int err;

	err = mlx4_HW2SW_SRQ(dev, NULL, srq->srqn);
	if (err)
		mlx4_warn(dev, "HW2SW_SRQ failed (%d) for SRQN %06x\n", err, srq->srqn);

	spin_lock_irq(&srq_table->lock);
	radix_tree_delete(&srq_table->tree, srq->srqn);
	spin_unlock_irq(&srq_table->lock);

	if (atomic_dec_and_test(&srq->refcount))
		complete(&srq->free);
	wait_for_completion(&srq->free);

	mlx4_srq_free_icm(dev, srq->srqn);
}
Example #25
File: srq.c Project: mdamt/linux
void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type)
{
	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
	struct mlx4_srq *srq;

	rcu_read_lock();
	srq = radix_tree_lookup(&srq_table->tree, srqn & (dev->caps.num_srqs - 1));
	rcu_read_unlock();
	if (srq)
		atomic_inc(&srq->refcount);
	else {
		mlx4_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
		return;
	}

	srq->event(srq, event_type);

	if (atomic_dec_and_test(&srq->refcount))
		complete(&srq->free);
}
Example #26
void mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
{
	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
	u64 in_param;
	int err;

	if (mlx4_is_slave(dev)) {
		*((u32 *) &in_param) = srqn;
		*(((u32 *) &in_param) + 1) = 0;
		err = mlx4_cmd(dev, in_param, RES_SRQ, ICM_RESERVE_AND_ALLOC,
						       MLX4_CMD_FREE_RES,
						       MLX4_CMD_TIME_CLASS_A);
		if (err)
			mlx4_warn(dev, "Failed freeing cq:%d\n", srqn);
	} else {
		mlx4_table_put(dev, &srq_table->cmpt_table, srqn);
		mlx4_table_put(dev, &srq_table->table, srqn);
		mlx4_bitmap_free(&srq_table->bitmap, srqn);
	}
}
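The newer snippets (Examples #18-#20) hide the dword stores seen here behind set_param_l(). A plausible definition, consistent with both styles of call site on this page but not quoted from a particular tree, is:

/* Hedged sketch: place val in the low 32 bits of the 64-bit mailbox
 * argument, preserving the high dword (callers above zero it explicitly). */
static inline void set_param_l(u64 *arg, u32 val)
{
	*arg = (*arg & 0xffffffff00000000ULL) | val;
}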
Example #27
static void mlx4_en_cq_eq_cb(unsigned vector, u32 uuid, void *data)
{
	int err;
	struct mlx4_en_cq **pcq = data;

	if (MLX4_EQ_UUID_TO_ID(uuid) == MLX4_EQ_ID_EN) {
		struct mlx4_en_cq *cq = *pcq;
		struct mlx4_en_priv *priv = netdev_priv(cq->dev);
		struct mlx4_en_dev *mdev = priv->mdev;

		if (uuid == MLX4_EQ_ID_TO_UUID(MLX4_EQ_ID_EN, priv->port,
					       pcq - priv->rx_cq)) {
			err = mlx4_rename_eq(mdev->dev, priv->port, vector,
					     MLX4_EN_EQ_NAME_PRIORITY, "%s-%d",
					     priv->dev->name, cq->ring);
			if (err)
				mlx4_warn(mdev, "Failed to rename EQ, continuing with default name\n");
		}
	}
}
Example #28
File: cq.c Project: Cai900205/test
void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq, int flags)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;
	int err;

	err = mlx4_HW2SW_CQ(dev, NULL, cq->cqn);
	if (err)
		mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);

	synchronize_irq(priv->eq_table.eq[cq->vector].irq);
	spin_lock_irq(&cq_table->lock);
	radix_tree_delete(&cq_table->tree, cq->cqn);
	spin_unlock_irq(&cq_table->lock);
	if (flags & MLX4_RCU_USE_EXPEDITED)
		synchronize_rcu_expedited();
	else
		synchronize_rcu();

	mlx4_cq_free_icm(dev, cq->cqn);
}
Example #29
void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
{
    struct mlx4_priv *priv = mlx4_priv(dev);
    struct mlx4_cq_table *cq_table = &priv->cq_table;
    int err;

    err = mlx4_HW2SW_CQ(dev, NULL, cq->cqn);
    if (err)
        mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);

    synchronize_irq(priv->eq_table.eq[cq->vector].irq);

    spin_lock_irq(&cq_table->lock);
    radix_tree_delete(&cq_table->tree, cq->cqn);
    spin_unlock_irq(&cq_table->lock);

    if (atomic_dec_and_test(&cq->refcount))
        complete(&cq->free);
    wait_for_completion(&cq->free);

    mlx4_cq_free_icm(dev, cq->cqn);
}
Example #30
File: cq.c Project: 274914765/C
void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
{
    struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
    struct mlx4_cq *cq;

    spin_lock(&cq_table->lock);

    cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));
    if (cq)
        atomic_inc(&cq->refcount);

    spin_unlock(&cq_table->lock);

    if (!cq) {
        mlx4_warn(dev, "Async event for bogus CQ %08x\n", cqn);
        return;
    }

    cq->event(cq, event_type);

    if (atomic_dec_and_test(&cq->refcount))
        complete(&cq->free);
}
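Note how Examples #29 and #30 cooperate: mlx4_cq_event() takes a reference under cq_table->lock before dispatching, while mlx4_cq_free() removes the CQ from the radix tree, drops the initial reference, and blocks in wait_for_completion() until any in-flight event handler has called complete(), so the CQ is never freed under a running handler.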