Example #1
static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct skb_frag_struct *skb_frags;
	struct mlx4_en_rx_desc *rx_desc;
	dma_addr_t dma;
	int index;
	int nr;

	mlx4_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
			ring->cons, ring->prod);

	/* Unmap and free Rx buffers */
	BUG_ON((u32) (ring->prod - ring->cons) > ring->size);
	while (ring->cons != ring->prod) {
		index = ring->cons & ring->size_mask;
		rx_desc = ring->buf + (index << ring->log_stride);
		skb_frags = ring->rx_info + (index << priv->log_rx_info);
		mlx4_dbg(DRV, priv, "Processing descriptor:%d\n", index);

		for (nr = 0; nr < priv->num_frags; nr++) {
			mlx4_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
			dma = be64_to_cpu(rx_desc->data[nr].addr);

			mlx4_dbg(DRV, priv, "Unmaping buffer at dma:0x%llx\n", (u64) dma);
			pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size,
					 PCI_DMA_FROMDEVICE);
			put_page(skb_frags[nr].page);
		}
		++ring->cons;
	}
}
Example #2
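/* Register 'mac' in the per-port MAC table: if it is already present, bump
 * its reference count; otherwise take the first free slot, program the
 * updated table into the device and return the chosen index via 'index'. */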
int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index)
{
	struct mlx4_mac_table *table = &mlx4_priv(dev)->port[port].mac_table;
	int i, err = 0;
	int free = -1;

	mlx4_dbg(dev, "Registering MAC: 0x%llx\n", (unsigned long long) mac);
	mutex_lock(&table->mutex);
	for (i = 0; i < MLX4_MAX_MAC_NUM - 1; i++) {
		if (free < 0 && !table->refs[i]) {
			free = i;
			continue;
		}

		if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
			/* MAC already registered, increase reference count */
			*index = i;
			++table->refs[i];
			goto out;
		}
	}

	if (free < 0) {
		err = -ENOMEM;
		goto out;
	}

	mlx4_dbg(dev, "Free MAC index is %d\n", free);

	if (table->total == table->max) {
		/* No free mac entries */
		err = -ENOSPC;
		goto out;
	}

	/* Register new MAC */
	table->refs[free] = 1;
	table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);

	err = mlx4_set_port_mac_table(dev, port, table->entries);
	if (unlikely(err)) {
		mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) mac);
		table->refs[free] = 0;
		table->entries[free] = 0;
		goto out;
	}

	*index = free;
	++table->total;
out:
	mutex_unlock(&table->mutex);
	return err;
}
Example #3
/* Calculate rss size and map each entry in rss table to rx ring */
void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
				 struct mlx4_en_rss_map *rss_map,
				 int num_entries, int num_rings)
{
	int i;

	rss_map->size = roundup_pow_of_two(num_entries);
	mlx4_dbg(DRV, priv, "Setting default RSS map of %d entires\n",
		 rss_map->size);

	for (i = 0; i < rss_map->size; i++) {
		rss_map->map[i] = i % num_rings;
		mlx4_dbg(DRV, priv, "Entry %d ---> ring %d\n", i, rss_map->map[i]);
	}
}
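A minimal illustration of the resulting map, assuming hypothetical arguments num_entries = 10 and num_rings = 4: roundup_pow_of_two(10) gives a map size of 16, and the modulo assignment spreads entries 0..15 over the rings round-robin as 0, 1, 2, 3, 0, 1, 2, 3, and so on.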
Example #4
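/* Drop one reference on vlan table entry 'index'; reserved entries below
 * MLX4_VLAN_REGULAR are never freed. When the last reference goes away, the
 * entry is cleared and the updated table is written to the device. */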
void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
{
	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;

	if (index < MLX4_VLAN_REGULAR) {
		mlx4_warn(dev, "Trying to free special vlan index %d\n", index);
		return;
	}

	mutex_lock(&table->mutex);
	if (!table->refs[index]) {
		mlx4_warn(dev, "No vlan entry for index %d\n", index);
		goto out;
	}
	if (--table->refs[index]) {
		mlx4_dbg(dev, "Have more references for index %d,"
			 "no need to modify vlan table\n", index);
		goto out;
	}
	table->entries[index] = 0;
	mlx4_set_port_vlan_table(dev, port, table->entries);
	--table->total;
out:
	mutex_unlock(&table->mutex);
}
Example #5
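/* Pre-allocate one page of order MLX4_EN_ALLOC_ORDER per fragment for the
 * ring's page allocator, starting each at its fragment's alignment offset.
 * On failure, release the pages already taken and return -ENOMEM. */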
static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
				  struct mlx4_en_rx_ring *ring)
{
	struct mlx4_en_rx_alloc *page_alloc;
	int i;

	for (i = 0; i < priv->num_frags; i++) {
		page_alloc = &ring->page_alloc[i];
		page_alloc->page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
					       MLX4_EN_ALLOC_ORDER);
		if (!page_alloc->page)
			goto out;

		page_alloc->offset = priv->frag_info[i].frag_align;
		mlx4_dbg(DRV, priv, "Initialized allocator:%d with page:%p\n",
			 i, page_alloc->page);
	}
	return 0;

out:
	while (i--) {
		page_alloc = &ring->page_alloc[i];
		put_page(page_alloc->page);
		page_alloc->page = NULL;
	}
	return -ENOMEM;
}
Example #6
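/* Set up an Rx ring: validate the requested size against the SRQ limit,
 * initialize the ring geometry, allocate the per-descriptor fragment
 * bookkeeping (rx_info), allocate and map the hardware queue buffer and
 * create the LRO sessions, unwinding everything on failure. */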
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
	int tmp;

	/* Sanity check SRQ size before proceeding */
	if (size >= mdev->dev->caps.max_srq_wqes)
		return -EINVAL;

	ring->prod = 0;
	ring->cons = 0;
	ring->size = size;
	ring->size_mask = size - 1;
	ring->stride = stride;
	ring->log_stride = ffs(ring->stride) - 1;
	ring->buf_size = ring->size * ring->stride;

	tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
					sizeof(struct skb_frag_struct));
	ring->rx_info = vmalloc(tmp);
	if (!ring->rx_info) {
		mlx4_err(mdev, "Failed allocating rx_info ring\n");
		return -ENOMEM;
	}
	mlx4_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
		 ring->rx_info, tmp);

	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
				 ring->buf_size, 2 * PAGE_SIZE);
	if (err)
		goto err_ring;

	err = mlx4_en_map_buffer(&ring->wqres.buf);
	if (err) {
		mlx4_err(mdev, "Failed to map RX buffer\n");
		goto err_hwq;
	}
	ring->buf = ring->wqres.buf.direct.buf;

	/* Allocate LRO sessions */
	if (mlx4_en_lro_init(ring, mdev->profile.num_lro)) {
		mlx4_err(mdev, "Failed allocating lro sessions\n");
		goto err_map;
	}

	return 0;

err_map:
	mlx4_en_unmap_buffer(&ring->wqres.buf);
err_hwq:
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_ring:
	vfree(ring->rx_info);
	ring->rx_info = NULL;
	return err;
}
Example #7
/* Calculate the last offset position that accommodates a full fragment
 * (assuming fragment size = stride - align) */
static int mlx4_en_last_alloc_offset(struct mlx4_en_priv *priv, u16 stride, u16 align)
{
	u16 res = MLX4_EN_ALLOC_SIZE % stride;
	u16 offset = MLX4_EN_ALLOC_SIZE - stride - res + align;

	mlx4_dbg(DRV, priv, "Calculated last offset for stride:%d align:%d "
			    "res:%d offset:%d\n", stride, align, res, offset);
	return offset;
}
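As a worked example with assumed values MLX4_EN_ALLOC_SIZE = 16384, stride = 1536 and align = 2: res = 16384 % 1536 = 1024, so offset = 16384 - 1536 - 1024 + 2 = 13826, the aligned start of the last complete stride that still fits inside the allocation.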
Example #8
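/* Build an skb for a completed Rx descriptor: frames up to SMALL_PACKET_SIZE
 * are copied whole into the linear part (with a temporary DMA sync for the
 * copy), while larger frames keep their page fragments and only
 * HEADER_COPY_SIZE bytes of headers are copied into the linear area. */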
struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
			       struct mlx4_en_rx_desc *rx_desc,
			       struct skb_frag_struct *skb_frags,
			       struct mlx4_en_rx_alloc *page_alloc,
			       unsigned int length)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct sk_buff *skb;
	void *va;
	int used_frags;
	dma_addr_t dma;

	skb = dev_alloc_skb(SMALL_PACKET_SIZE + NET_IP_ALIGN);
	if (!skb) {
		mlx4_dbg(RX_ERR, priv, "Failed allocating skb\n");
		return NULL;
	}
	skb->dev = priv->dev;
	skb_reserve(skb, NET_IP_ALIGN);
	skb->len = length;
	skb->truesize = length + sizeof(struct sk_buff);

	/* Get pointer to first fragment so we could copy the headers into the
	 * (linear part of the) skb */
	va = page_address(skb_frags[0].page) + skb_frags[0].page_offset;

	if (length <= SMALL_PACKET_SIZE) {
		/* We are copying all relevant data to the skb - temporarily
		 * synch buffers for the copy */
		dma = be64_to_cpu(rx_desc->data[0].addr);
		dma_sync_single_range_for_cpu(&mdev->pdev->dev, dma, 0,
					      length, DMA_FROM_DEVICE);
		skb_copy_to_linear_data(skb, va, length);
		dma_sync_single_range_for_device(&mdev->pdev->dev, dma, 0,
						 length, DMA_FROM_DEVICE);
		skb->tail += length;
	} else {

		/* Move relevant fragments to skb */
		used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags,
						      skb_shinfo(skb)->frags,
						      page_alloc, length);
		skb_shinfo(skb)->nr_frags = used_frags;

		/* Copy headers into the skb linear buffer */
		memcpy(skb->data, va, HEADER_COPY_SIZE);
		skb->tail += HEADER_COPY_SIZE;

		/* Skip headers in first fragment */
		skb_shinfo(skb)->frags[0].page_offset += HEADER_COPY_SIZE;

		/* Adjust size of first fragment */
		skb_shinfo(skb)->frags[0].size -= HEADER_COPY_SIZE;
		skb->data_len = length - HEADER_COPY_SIZE;
	}
	return skb;
}
Example #9
void mlx4_en_calc_rx_buf(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int eff_mtu = dev->mtu + ETH_HLEN + VLAN_HLEN + ETH_LLC_SNAP_SIZE;
	int buf_size = 0;
	int i = 0;

	while (buf_size < eff_mtu) {
		priv->frag_info[i].frag_size =
			(eff_mtu > buf_size + frag_sizes[i]) ?
				frag_sizes[i] : eff_mtu - buf_size;
		priv->frag_info[i].frag_prefix_size = buf_size;
		if (!i)	{
			priv->frag_info[i].frag_align = NET_IP_ALIGN;
			priv->frag_info[i].frag_stride =
				ALIGN(frag_sizes[i] + NET_IP_ALIGN, SMP_CACHE_BYTES);
		} else {
			priv->frag_info[i].frag_align = 0;
			priv->frag_info[i].frag_stride =
				ALIGN(frag_sizes[i], SMP_CACHE_BYTES);
		}
		priv->frag_info[i].last_offset = mlx4_en_last_alloc_offset(
						priv, priv->frag_info[i].frag_stride,
						priv->frag_info[i].frag_align);
		buf_size += priv->frag_info[i].frag_size;
		i++;
	}

	priv->num_frags = i;
	priv->rx_skb_size = eff_mtu;
	priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct skb_frag_struct));

	mlx4_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d "
		  "num_frags:%d):\n", eff_mtu, priv->num_frags);
	for (i = 0; i < priv->num_frags; i++) {
		mlx4_dbg(DRV, priv, "  frag:%d - size:%d prefix:%d align:%d "
				"stride:%d last_offset:%d\n", i,
				priv->frag_info[i].frag_size,
				priv->frag_info[i].frag_prefix_size,
				priv->frag_info[i].frag_align,
				priv->frag_info[i].frag_stride,
				priv->frag_info[i].last_offset);
	}
}
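For illustration, assuming a 1500-byte MTU, ETH_LLC_SNAP_SIZE of 8 and a hypothetical frag_sizes[] starting {512, 1024, ...}: eff_mtu = 1500 + 14 + 4 + 8 = 1526, so the first fragment carries 512 bytes (prefix 0), the second carries the remaining 1014 bytes (prefix 512), and the loop ends with num_frags = 2 and rx_skb_size = 1526.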
Example #10
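/* Dispatch a completion event: look the CQ up by number in the cq_table
 * radix tree and, if found, advance its arm sequence number and invoke its
 * completion handler. */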
void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
{
	struct mlx4_cq *cq;

	cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
			       cqn & (dev->caps.num_cqs - 1));
	if (!cq) {
		mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn);
		return;
	}

	++cq->arm_sn;

	cq->comp(cq);
}
Example #11
static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
				      struct mlx4_en_rx_ring *ring)
{
	struct mlx4_en_rx_alloc *page_alloc;
	int i;

	for (i = 0; i < priv->num_frags; i++) {
		page_alloc = &ring->page_alloc[i];
		mlx4_dbg(DRV, priv, "Freeing allocator:%d count:%d\n",
			 i, page_count(page_alloc->page));

		put_page(page_alloc->page);
		page_alloc->page = NULL;
	}
}
Example #12
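/* Same dispatch as in Example #10, but here the radix tree lookup is done
 * under rcu_read_lock() and the CQ is reference-counted across the callback
 * so it cannot be freed while its completion handler runs. */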
void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
{
	struct mlx4_cq *cq;

	rcu_read_lock();
	cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
			       cqn & (dev->caps.num_cqs - 1));
	if (cq)
		atomic_inc(&cq->refcount);
	rcu_read_unlock();

	if (!cq) {
		mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn);
		return;
	}

	++cq->arm_sn;

	cq->comp(cq);

	if (atomic_dec_and_test(&cq->refcount))
		complete(&cq->free);
}
Example #13
int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	struct mlx4_mac_table *table = &info->mac_table;
	struct mlx4_mac_entry *entry;
	int i, err = 0;
	int free = -1;

	if (dev->caps.vep_uc_steering) {
		err = mlx4_uc_steer_add(dev, port, mac, qpn, 1);
		if (!err) {
			entry = kmalloc(sizeof *entry, GFP_KERNEL);
			if (!entry) {
				mlx4_uc_steer_release(dev, port, mac, *qpn, 1);
				return -ENOMEM;
			}
			entry->mac = mac;
			err = radix_tree_insert(&info->mac_tree, *qpn, entry);
			if (err) {
				mlx4_uc_steer_release(dev, port, mac, *qpn, 1);
				return err;
			}
		} else
			return err;
	}

	mlx4_dbg(dev, "Registering MAC: 0x%llx\n", (unsigned long long) mac);
	mutex_lock(&table->mutex);
	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
		if (free < 0 && !table->entries[i]) {
			free = i;
			continue;
		}

		if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
			/* MAC already registered, must not have duplicates */
			err = -EEXIST;
			goto out;
		}
	}

	mlx4_dbg(dev, "Free MAC index is %d\n", free);

	if (table->total == table->max) {
		/* No free mac entries */
		err = -ENOSPC;
		goto out;
	}

	/* Register new MAC */
	table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);

	err = mlx4_set_port_mac_table(dev, port, table->entries);
	if (unlikely(err)) {
		mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) mac);
		table->entries[free] = 0;
		goto out;
	}

	if (!dev->caps.vep_uc_steering)
		*qpn = info->base_qpn + free;
	++table->total;
out:
	mutex_unlock(&table->mutex);
	return err;
}
Example #14
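/* Bring up the HCA software resources: the UAR, PD, MR, EQ, CQ, SRQ, QP and
 * multicast group tables. Switch firmware commands to event mode, verify
 * interrupt delivery with a NOP command, then configure each port; on any
 * failure the steps already taken are unwound in reverse order. */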
static int mlx4_setup_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int port;
	__be32 ib_port_default_caps;

	err = mlx4_init_uar_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "user access region table, aborting.\n");
		return err;
	}

	err = mlx4_uar_alloc(dev, &priv->driver_uar);
	if (err) {
		mlx4_err(dev, "Failed to allocate driver access region, "
			 "aborting.\n");
		goto err_uar_table_free;
	}

	priv->kar = ioremap(priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!priv->kar) {
		mlx4_err(dev, "Couldn't map kernel access region, "
			 "aborting.\n");
		err = -ENOMEM;
		goto err_uar_free;
	}

	err = mlx4_init_pd_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "protection domain table, aborting.\n");
		goto err_kar_unmap;
	}

	err = mlx4_init_mr_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "memory region table, aborting.\n");
		goto err_pd_table_free;
	}

	err = mlx4_init_eq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "event queue table, aborting.\n");
		goto err_mr_table_free;
	}

	err = mlx4_cmd_use_events(dev);
	if (err) {
		mlx4_err(dev, "Failed to switch to event-driven "
			 "firmware commands, aborting.\n");
		goto err_eq_table_free;
	}

	err = mlx4_NOP(dev);
	if (err) {
		if (dev->flags & MLX4_FLAG_MSI_X) {
			mlx4_warn(dev, "NOP command failed to generate MSI-X "
				  "interrupt IRQ %d).\n",
				  priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
			mlx4_warn(dev, "Trying again without MSI-X.\n");
		} else {
			mlx4_err(dev, "NOP command failed to generate interrupt "
				 "(IRQ %d), aborting.\n",
				 priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
			mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
		}

		goto err_cmd_poll;
	}

	mlx4_dbg(dev, "NOP command IRQ test passed\n");

	err = mlx4_init_cq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "completion queue table, aborting.\n");
		goto err_cmd_poll;
	}

	err = mlx4_init_srq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "shared receive queue table, aborting.\n");
		goto err_cq_table_free;
	}

	err = mlx4_init_qp_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "queue pair table, aborting.\n");
		goto err_srq_table_free;
	}

	err = mlx4_init_mcg_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "multicast group table, aborting.\n");
		goto err_qp_table_free;
	}

	for (port = 1; port <= dev->caps.num_ports; port++) {
		ib_port_default_caps = 0;
		err = mlx4_get_port_ib_caps(dev, port, &ib_port_default_caps);
		if (err)
			mlx4_warn(dev, "failed to get port %d default "
				  "ib capabilities (%d). Continuing with "
				  "caps = 0\n", port, err);
		dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
		err = mlx4_SET_PORT(dev, port);
		if (err) {
			mlx4_err(dev, "Failed to set port %d, aborting\n",
				port);
			goto err_mcg_table_free;
		}
	}

	return 0;

err_mcg_table_free:
	mlx4_cleanup_mcg_table(dev);

err_qp_table_free:
	mlx4_cleanup_qp_table(dev);

err_srq_table_free:
	mlx4_cleanup_srq_table(dev);

err_cq_table_free:
	mlx4_cleanup_cq_table(dev);

err_cmd_poll:
	mlx4_cmd_use_polling(dev);

err_eq_table_free:
	mlx4_cleanup_eq_table(dev);

err_mr_table_free:
	mlx4_cleanup_mr_table(dev);

err_pd_table_free:
	mlx4_cleanup_pd_table(dev);

err_kar_unmap:
	iounmap(priv->kar);

err_uar_free:
	mlx4_uar_free(dev, &priv->driver_uar);

err_uar_table_free:
	mlx4_cleanup_uar_table(dev);
	return err;
}
Example #15
static int __devinit mlx4_setup_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	err = mlx4_init_uar_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "user access region table, aborting.\n");
		return err;
	}

	err = mlx4_uar_alloc(dev, &priv->driver_uar);
	if (err) {
		mlx4_err(dev, "Failed to allocate driver access region, "
			 "aborting.\n");
		goto err_uar_table_free;
	}

	priv->kar = ioremap(priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!priv->kar) {
		mlx4_err(dev, "Couldn't map kernel access region, "
			 "aborting.\n");
		err = -ENOMEM;
		goto err_uar_free;
	}

	err = mlx4_init_pd_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "protection domain table, aborting.\n");
		goto err_kar_unmap;
	}

	err = mlx4_init_mr_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "memory region table, aborting.\n");
		goto err_pd_table_free;
	}

	mlx4_map_catas_buf(dev);

	err = mlx4_init_eq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "event queue table, aborting.\n");
		goto err_catas_buf;
	}

	err = mlx4_cmd_use_events(dev);
	if (err) {
		mlx4_err(dev, "Failed to switch to event-driven "
			 "firmware commands, aborting.\n");
		goto err_eq_table_free;
	}

	err = mlx4_NOP(dev);
	if (err) {
		mlx4_err(dev, "NOP command failed to generate interrupt "
			 "(IRQ %d), aborting.\n",
			 priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
		if (dev->flags & MLX4_FLAG_MSI_X)
			mlx4_err(dev, "Try again with MSI-X disabled.\n");
		else
			mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");

		goto err_cmd_poll;
	}

	mlx4_dbg(dev, "NOP command IRQ test passed\n");

	err = mlx4_init_cq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "completion queue table, aborting.\n");
		goto err_cmd_poll;
	}

	err = mlx4_init_srq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "shared receive queue table, aborting.\n");
		goto err_cq_table_free;
	}

	err = mlx4_init_qp_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "queue pair table, aborting.\n");
		goto err_srq_table_free;
	}

	err = mlx4_init_mcg_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "multicast group table, aborting.\n");
		goto err_qp_table_free;
	}

	return 0;

err_qp_table_free:
	mlx4_cleanup_qp_table(dev);

err_srq_table_free:
	mlx4_cleanup_srq_table(dev);

err_cq_table_free:
	mlx4_cleanup_cq_table(dev);

err_cmd_poll:
	mlx4_cmd_use_polling(dev);

err_eq_table_free:
	mlx4_cleanup_eq_table(dev);

err_catas_buf:
	mlx4_unmap_catas_buf(dev);
	mlx4_cleanup_mr_table(dev);

err_pd_table_free:
	mlx4_cleanup_pd_table(dev);

err_kar_unmap:
	iounmap(priv->kar);

err_uar_free:
	mlx4_uar_free(dev, &priv->driver_uar);

err_uar_table_free:
	mlx4_cleanup_uar_table(dev);
	return err;
}
Example #16
/* Allocate Rx QPs and configure them according to the RSS map */
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	struct mlx4_qp_context context;
	struct mlx4_en_rss_context *rss_context;
	void *ptr;
	int rss_xor = mdev->profile.rss_xor;
	u8 rss_mask = mdev->profile.rss_mask;
	int i, srqn, qpn, cqn;
	int err = 0;
	int good_qps = 0;

	mlx4_dbg(DRV, priv, "Configuring rss steering for port %u\n", priv->port);
	err = mlx4_qp_reserve_range(mdev->dev, rss_map->size,
				    rss_map->size, &rss_map->base_qpn);
	if (err) {
		mlx4_err(mdev, "Failed reserving %d qps for port %u\n",
			 rss_map->size, priv->port);
		return err;
	}

	for (i = 0; i < rss_map->size; i++) {
		cqn = priv->rx_ring[rss_map->map[i]].cqn;
		srqn = priv->rx_ring[rss_map->map[i]].srq.srqn;
		qpn = rss_map->base_qpn + i;
		err = mlx4_en_config_rss_qp(priv, qpn, srqn, cqn,
					    &rss_map->state[i],
					    &rss_map->qps[i]);
		if (err)
			goto rss_err;

		++good_qps;
	}

	/* Configure RSS indirection qp */
	err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &priv->base_qpn);
	if (err) {
		mlx4_err(mdev, "Failed to reserve range for RSS "
			       "indirection qp\n");
		goto rss_err;
	}
	err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp);
	if (err) {
		mlx4_err(mdev, "Failed to allocate RSS indirection QP\n");
		goto reserve_err;
	}
	rss_map->indir_qp.event = mlx4_en_sqp_event;
	mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
				priv->rx_ring[0].cqn, 0, &context);

	ptr = ((void *) &context) + 0x3c;
	rss_context = (struct mlx4_en_rss_context *) ptr;
	rss_context->base_qpn = cpu_to_be32(ilog2(rss_map->size) << 24 |
					    (rss_map->base_qpn));
	rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
	rss_context->hash_fn = rss_xor & 0x3;
	rss_context->flags = rss_mask << 2;

	err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
			       &rss_map->indir_qp, &rss_map->indir_state);
	if (err)
		goto indir_err;

	return 0;

indir_err:
	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
reserve_err:
	mlx4_qp_release_range(mdev->dev, priv->base_qpn, 1);
rss_err:
	for (i = 0; i < good_qps; i++) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
	}
	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, rss_map->size);
	return err;
}
Example #17
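/* Process up to 'budget' completions on an Rx CQ: drop errored and bad-FCS
 * frames, build skbs for good ones (handing them to LRO when the checksum is
 * valid and LRO is enabled) and pass them up the stack, then update the
 * consumer index, refill the ring and ring the Rx producer doorbell. */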
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_cqe *cqe;
	struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
	struct skb_frag_struct *skb_frags;
	struct mlx4_en_rx_desc *rx_desc;
	struct sk_buff *skb;
	int index;
	unsigned int length;
	int polled = 0;
	int ip_summed;

	if (!priv->port_up)
		return 0;

	/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
	 * descriptor offset can be deduced from the CQE index instead of
	 * reading 'cqe->index' */
	index = cq->mcq.cons_index & ring->size_mask;
	cqe = &cq->buf[index];

	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
		    cq->mcq.cons_index & cq->size)) {

		skb_frags = ring->rx_info + (index << priv->log_rx_info);
		rx_desc = ring->buf + (index << ring->log_stride);

		/*
		 * make sure we read the CQE after we read the ownership bit
		 */
		rmb();

		/* Drop packet on bad receive or bad checksum */
		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
						MLX4_CQE_OPCODE_ERROR)) {
			mlx4_err(mdev, "CQE completed in error - vendor "
				  "syndrom:%d syndrom:%d\n",
				  ((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome,
				  ((struct mlx4_err_cqe *) cqe)->syndrome);
			goto next;
		}
		if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
			mlx4_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
			goto next;
		}

		/*
		 * Packet is OK - process it.
		 */
		length = be32_to_cpu(cqe->byte_cnt);
		ring->bytes += length;
		ring->packets++;

		if (likely(priv->rx_csum)) {
			if ((cqe->status & MLX4_CQE_STATUS_IPOK) &&
			    (cqe->checksum == 0xffff)) {
				priv->port_stats.rx_chksum_good++;
				if (mdev->profile.num_lro &&
				    !mlx4_en_lro_rx(priv, ring, rx_desc,
						    skb_frags, length, cqe))
					goto next;

				/* LRO not possible, complete processing here */
				ip_summed = CHECKSUM_UNNECESSARY;
				INC_PERF_COUNTER(priv->pstats.lro_misses);
			} else {
				ip_summed = CHECKSUM_NONE;
				priv->port_stats.rx_chksum_none++;
			}
		} else {
			ip_summed = CHECKSUM_NONE;
			priv->port_stats.rx_chksum_none++;
		}

		skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags,
				     ring->page_alloc, length);
		if (!skb) {
			priv->stats.rx_dropped++;
			goto next;
		}

		skb->ip_summed = ip_summed;
		skb->protocol = eth_type_trans(skb, dev);

		/* Push it up the stack */
		if (priv->vlgrp && (be32_to_cpu(cqe->vlan_my_qpn) &
				    MLX4_CQE_VLAN_PRESENT_MASK)) {
			vlan_hwaccel_receive_skb(skb, priv->vlgrp,
						be16_to_cpu(cqe->sl_vid));
		} else
			netif_receive_skb(skb);

		dev->last_rx = jiffies;

next:
		++cq->mcq.cons_index;
		index = (cq->mcq.cons_index) & ring->size_mask;
		cqe = &cq->buf[index];
		if (++polled == budget) {
			/* We are here because we reached the NAPI budget -
			 * flush only pending LRO sessions */
			if (mdev->profile.num_lro)
				mlx4_en_lro_flush(priv, ring, 0);
			goto out;
		}
	}

	/* If CQ is empty flush all LRO sessions unconditionally */
	if (mdev->profile.num_lro)
		mlx4_en_lro_flush(priv, ring, 1);

out:
	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
	mlx4_cq_set_ci(&cq->mcq);
	wmb(); /* ensure HW sees CQ consumer before we post new buffers */
	ring->cons = cq->mcq.cons_index;
	ring->prod += polled; /* Polled descriptors were reallocated in place */
	if (unlikely(!ring->full)) {
		mlx4_en_copy_desc(priv, ring, ring->cons - polled,
				  ring->prod - polled, polled);
		mlx4_en_fill_rx_buf(dev, ring);
	}
	mlx4_en_update_rx_prod_db(ring);
	return polled;
}