Example #1
static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans,
				      struct sk_buff *skb,
				      struct iwl_tfh_tfd *tfd,
				      struct iwl_cmd_meta *out_meta)
{
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		dma_addr_t tb_phys;
		int tb_idx;

		if (!skb_frag_size(frag))
			continue;

		tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
			return -ENOMEM;
		tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys,
					      skb_frag_size(frag));
		trace_iwlwifi_dev_tx_tb(trans->dev, skb,
					skb_frag_address(frag),
					skb_frag_size(frag));
		if (tb_idx < 0)
			return tb_idx;

		out_meta->tbs |= BIT(tb_idx);
	}

	return 0;
}
Example #2
static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
			    skb_frag_t *frag)
{
	st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0,
					  skb_frag_size(frag), DMA_TO_DEVICE);
	if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
		st->dma_flags = 0;
		st->unmap_len = skb_frag_size(frag);
		st->in_len = skb_frag_size(frag);
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
}
Example #3
File: en_tx.c Project: avagin/linux
static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc,
			     const struct sk_buff *skb,
			     const struct skb_shared_info *shinfo,
			     void *fragptr)
{
	struct mlx4_wqe_inline_seg *inl = &tx_desc->inl;
	int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof(*inl);
	unsigned int hlen = skb_headlen(skb);

	if (skb->len <= spc) {
		if (likely(skb->len >= MIN_PKT_LEN)) {
			inl->byte_count = cpu_to_be32(1 << 31 | skb->len);
		} else {
			inl->byte_count = cpu_to_be32(1 << 31 | MIN_PKT_LEN);
			memset(((void *)(inl + 1)) + skb->len, 0,
			       MIN_PKT_LEN - skb->len);
		}
		skb_copy_from_linear_data(skb, inl + 1, hlen);
		if (shinfo->nr_frags)
			memcpy(((void *)(inl + 1)) + hlen, fragptr,
			       skb_frag_size(&shinfo->frags[0]));

	} else {
		inl->byte_count = cpu_to_be32(1 << 31 | spc);
		if (hlen <= spc) {
			skb_copy_from_linear_data(skb, inl + 1, hlen);
			if (hlen < spc) {
				memcpy(((void *)(inl + 1)) + hlen,
				       fragptr, spc - hlen);
				fragptr +=  spc - hlen;
			}
			inl = (void *) (inl + 1) + spc;
			memcpy(((void *)(inl + 1)), fragptr, skb->len - spc);
		} else {
			skb_copy_from_linear_data(skb, inl + 1, spc);
			inl = (void *) (inl + 1) + spc;
			skb_copy_from_linear_data_offset(skb, spc, inl + 1,
							 hlen - spc);
			if (shinfo->nr_frags)
				memcpy(((void *)(inl + 1)) + hlen - spc,
				       fragptr,
				       skb_frag_size(&shinfo->frags[0]));
		}

		dma_wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc));
	}
}
Example #4
/*
 * Figure out how many ring slots we're going to need to send @skb to
 * the guest. This function is essentially a dry run of
 * netbk_gop_frag_copy.
 */
unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
{
	unsigned int count;
	int i, copy_off;

	count = DIV_ROUND_UP(
			offset_in_page(skb->data)+skb_headlen(skb), PAGE_SIZE);

	copy_off = skb_headlen(skb) % PAGE_SIZE;

	if (skb_shinfo(skb)->gso_size)
		count++;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
		unsigned long bytes;
		while (size > 0) {
			BUG_ON(copy_off > MAX_BUFFER_OFFSET);

			if (start_new_rx_buffer(copy_off, size, 0)) {
				count++;
				copy_off = 0;
			}

			bytes = size;
			if (copy_off + bytes > MAX_BUFFER_OFFSET)
				bytes = MAX_BUFFER_OFFSET - copy_off;

			copy_off += bytes;
			size -= bytes;
		}
	}
	return count;
}
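
For a concrete feel of the arithmetic in the dry-run above, here is a minimal standalone C sketch, not the Xen netback code itself: PAGE_SIZE and MAX_BUFFER_OFFSET are assumed constants, and the start_new_rx_buffer() rule (not shown in the listing) is approximated by the condition inside the loop.

#include <stdio.h>

#define PAGE_SIZE          4096UL
#define MAX_BUFFER_OFFSET  PAGE_SIZE		/* assumption for this sketch */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Count buffers for a linear head that starts @head_off bytes into a page,
 * followed by a single paged fragment of @frag_size bytes. */
static unsigned int count_slots(unsigned long head_off, unsigned long head_len,
				unsigned long frag_size)
{
	unsigned int count = DIV_ROUND_UP(head_off + head_len, PAGE_SIZE);
	unsigned long copy_off = head_len % PAGE_SIZE;

	while (frag_size > 0) {
		unsigned long bytes = frag_size;

		/* Assumed behaviour of start_new_rx_buffer(): open a new
		 * buffer when the current one is full, or when a partially
		 * used buffer cannot take the whole remaining chunk. */
		if (copy_off == MAX_BUFFER_OFFSET ||
		    (copy_off && copy_off + frag_size > MAX_BUFFER_OFFSET)) {
			count++;
			copy_off = 0;
		}
		if (copy_off + bytes > MAX_BUFFER_OFFSET)
			bytes = MAX_BUFFER_OFFSET - copy_off;

		copy_off += bytes;
		frag_size -= bytes;
	}
	return count;
}

int main(void)
{
	/* 3000-byte head at page offset 100 plus a 5000-byte fragment. */
	printf("%u slots\n", count_slots(100, 3000, 5000));	/* prints 3 */
	return 0;
}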
Example #5
static int prep_msg(struct vector_private *vp,
	struct sk_buff *skb,
	struct iovec *iov)
{
	int iov_index = 0;
	int nr_frags, frag;
	skb_frag_t *skb_frag;

	nr_frags = skb_shinfo(skb)->nr_frags;
	if (nr_frags > MAX_IOV_SIZE) {
		if (skb_linearize(skb) != 0)
			goto drop;
	}
	if (vp->header_size > 0) {
		iov[iov_index].iov_len = vp->header_size;
		vp->form_header(iov[iov_index].iov_base, skb, vp);
		iov_index++;
	}
	iov[iov_index].iov_base = skb->data;
	if (nr_frags > 0) {
		iov[iov_index].iov_len = skb->len - skb->data_len;
		vp->estats.sg_ok++;
	} else
		iov[iov_index].iov_len = skb->len;
	iov_index++;
	for (frag = 0; frag < nr_frags; frag++) {
		skb_frag = &skb_shinfo(skb)->frags[frag];
		iov[iov_index].iov_base = skb_frag_address_safe(skb_frag);
		iov[iov_index].iov_len = skb_frag_size(skb_frag);
		iov_index++;
	}
	return iov_index;
drop:
	return -1;
}
Example #6
static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
			   struct hv_page_buffer *pb)
{
	u32 slots_used = 0;
	char *data = skb->data;
	int frags = skb_shinfo(skb)->nr_frags;
	int i;

	/* The packet is laid out thus:
	 * 1. hdr
	 * 2. skb linear data
	 * 3. skb fragment data
	 */
	if (hdr != NULL)
		slots_used += fill_pg_buf(virt_to_page(hdr),
					offset_in_page(hdr),
					len, &pb[slots_used]);

	slots_used += fill_pg_buf(virt_to_page(data),
				offset_in_page(data),
				skb_headlen(skb), &pb[slots_used]);

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;

		slots_used += fill_pg_buf(skb_frag_page(frag),
					frag->page_offset,
					skb_frag_size(frag), &pb[slots_used]);
	}
	return slots_used;
}
Example #7
/**
 * Somewhat like skb_shift().
 *
 * Beware: @from can be equal to MAX_SKB_FRAGS if we need to insert a new
 * fragment after the last one.
 */
static int
__extend_pgfrags(struct sk_buff *skb, struct sk_buff *pskb, int from, int n)
{
	int i, n_frag = 0;
	struct skb_shared_info *psi, *si = skb_shinfo(skb);

	if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS - n) {
		skb_frag_t *f;
		struct sk_buff *skb_frag;

		psi = pskb ? skb_shinfo(pskb) : si;
		skb_frag = psi->frag_list;
		n_frag = skb_shinfo(skb)->nr_frags + n - MAX_SKB_FRAGS;

		if (skb_frag && !skb_headlen(skb_frag)
		    && skb_shinfo(skb_frag)->nr_frags <= MAX_SKB_FRAGS - n_frag)
		{
			int r = __extend_pgfrags(skb_frag, NULL, 0, n_frag);
			if (r)
				return r;
		} else {
			skb_frag = alloc_skb(0, GFP_ATOMIC);
			if (!skb_frag)
				return -ENOMEM;
			skb_frag->next = psi->frag_list;
			psi->frag_list = skb_frag;
		}

		for (i = n_frag - 1;
		     i >= 0 && MAX_SKB_FRAGS - n + i >= from; --i)
		{
			f = &si->frags[MAX_SKB_FRAGS - n + i];
			skb_shinfo(skb_frag)->frags[i] = *f;
			ss_skb_adjust_data_len(skb, -skb_frag_size(f));
			ss_skb_adjust_data_len(skb_frag, skb_frag_size(f));
		}
		skb_shinfo(skb_frag)->nr_frags += n_frag;
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb_frag->ip_summed = CHECKSUM_PARTIAL;
	}

	memmove(&si->frags[from + n], &si->frags[from],
		(si->nr_frags - from - n_frag) * sizeof(skb_frag_t));
	si->nr_frags += n - n_frag;

	return 0;
}
Example #8
/**
 * Somewhat like skb_shift().
 * Make room for @n fragments starting with slot @from.
 *
 * Beware: @from can be equal to MAX_SKB_FRAGS when a new fragment
 * is inserted after the last one.
 *
 * @return 0 on success, -errno on failure.
 * @return New SKB in @it->skb if new SKB is allocated.
 */
static int
__extend_pgfrags(struct sk_buff *skb, int from, int n, TfwStr *it)
{
	int i, n_shift, n_excess = 0;
	struct skb_shared_info *si = skb_shinfo(skb);

	BUG_ON(from > si->nr_frags);

	/* No room for @n extra page fragments in the SKB. */
	if (si->nr_frags + n > MAX_SKB_FRAGS) {
		skb_frag_t *f;
		struct sk_buff *nskb;

		/* Allocate a new SKB to hold @n_excess page fragments. */
		nskb = alloc_skb(0, GFP_ATOMIC);
		if (nskb == NULL)
			return -ENOMEM;

		/*
		 * The number of page fragments that don't fit in the SKB
		 * after the room is prepared for @n page fragments.
		 */
		n_excess = si->nr_frags + n - MAX_SKB_FRAGS;

		/* Shift @n_excess number of page fragments to new SKB. */
		if (from < si->nr_frags) {
			for (i = n_excess - 1; i >= 0; --i) {
				f = &si->frags[MAX_SKB_FRAGS - n + i];
				skb_shinfo(nskb)->frags[i] = *f;
				ss_skb_adjust_data_len(skb, -skb_frag_size(f));
				ss_skb_adjust_data_len(nskb, skb_frag_size(f));
			}
		}
		skb_shinfo(nskb)->nr_frags += n_excess;
		it->skb = nskb;
	}

	/* Make room for @n page fragments in the SKB. */
	n_shift = si->nr_frags - from - n_excess;
	BUG_ON(n_shift < 0);
	if (n_shift)
		memmove(&si->frags[from + n],
			&si->frags[from], n_shift * sizeof(skb_frag_t));
	si->nr_frags += n - n_excess;

	return 0;
}
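
The shift/spill bookkeeping above can be illustrated with plain integers standing in for skb_frag_t entries. A standalone sketch, assuming a tiny MAX_FRAGS and a second array in place of the newly allocated SKB:

#include <stdio.h>
#include <string.h>

#define MAX_FRAGS 4	/* stand-in for MAX_SKB_FRAGS in this sketch */

/* Make room for @n new entries at slot @from of @frags (which currently
 * holds @nr_frags entries).  Entries that no longer fit are moved to
 * @overflow, mirroring how the code above shifts excess page fragments
 * into a freshly allocated SKB.  Returns the new number of entries. */
static int extend_frags(int *frags, int nr_frags, int from, int n, int *overflow)
{
	int n_excess = 0, n_shift, i;

	if (nr_frags + n > MAX_FRAGS) {
		n_excess = nr_frags + n - MAX_FRAGS;
		if (from < nr_frags)
			for (i = n_excess - 1; i >= 0; --i)
				overflow[i] = frags[MAX_FRAGS - n + i];
	}

	n_shift = nr_frags - from - n_excess;
	if (n_shift > 0)
		memmove(&frags[from + n], &frags[from], n_shift * sizeof(*frags));

	return nr_frags + n - n_excess;
}

int main(void)
{
	int frags[MAX_FRAGS] = { 10, 20, 30, 40 };
	int overflow[MAX_FRAGS] = { 0 };
	int nr = extend_frags(frags, 4, 1, 2, overflow);

	/* Slots 1..2 are now free for new data; 30 and 40 spilled over. */
	printf("nr=%d frags[3]=%d overflow={%d,%d}\n",
	       nr, frags[3], overflow[0], overflow[1]);
	return 0;
}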
Example #9
/*
 * Prepare an SKB to be transmitted to the frontend.
 *
 * This function is responsible for allocating grant operations, meta
 * structures, etc.
 *
 * It returns the number of meta structures consumed. The number of
 * ring slots used is always equal to the number of meta slots used
 * plus the number of GSO descriptors used. Currently, we use either
 * zero GSO descriptors (for non-GSO packets) or one descriptor (for
 * frontend-side LRO).
 */
static int netbk_gop_skb(struct sk_buff *skb,
			 struct netrx_pending_operations *npo)
{
	struct xenvif *vif = netdev_priv(skb->dev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int i;
	struct xen_netif_rx_request *req;
	struct netbk_rx_meta *meta;
	unsigned char *data;
	int head = 1;
	int old_meta_prod;

	old_meta_prod = npo->meta_prod;

	/* Set up a GSO prefix descriptor, if necessary */
	if (skb_shinfo(skb)->gso_size && vif->gso_prefix) {
		req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
		meta = npo->meta + npo->meta_prod++;
		meta->gso_size = skb_shinfo(skb)->gso_size;
		meta->size = 0;
		meta->id = req->id;
	}

	req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
	meta = npo->meta + npo->meta_prod++;

	if (!vif->gso_prefix)
		meta->gso_size = skb_shinfo(skb)->gso_size;
	else
		meta->gso_size = 0;

	meta->size = 0;
	meta->id = req->id;
	npo->copy_off = 0;
	npo->copy_gref = req->gref;

	data = skb->data;
	while (data < skb_tail_pointer(skb)) {
		unsigned int offset = offset_in_page(data);
		unsigned int len = PAGE_SIZE - offset;

		if (data + len > skb_tail_pointer(skb))
			len = skb_tail_pointer(skb) - data;

		netbk_gop_frag_copy(vif, skb, npo,
				    virt_to_page(data), len, offset, &head);
		data += len;
	}

	for (i = 0; i < nr_frags; i++) {
		netbk_gop_frag_copy(vif, skb, npo,
				    skb_frag_page(&skb_shinfo(skb)->frags[i]),
				    skb_frag_size(&skb_shinfo(skb)->frags[i]),
				    skb_shinfo(skb)->frags[i].page_offset,
				    &head);
	}

	return npo->meta_prod - old_meta_prod;
}
Example #10
static void greth_clean_tx_gbit(struct net_device *dev)
{
	struct greth_private *greth;
	struct greth_bd *bdp, *bdp_last_frag;
	struct sk_buff *skb;
	u32 stat;
	int nr_frags, i;

	greth = netdev_priv(dev);

	while (greth->tx_free < GRETH_TXBD_NUM) {

		skb = greth->tx_skbuff[greth->tx_last];

		nr_frags = skb_shinfo(skb)->nr_frags;

		/* We only clean fully completed SKBs */
		bdp_last_frag = greth->tx_bd_base + SKIP_TX(greth->tx_last, nr_frags);

		GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
		mb();
		stat = greth_read_bd(&bdp_last_frag->stat);

		if (stat & GRETH_BD_EN)
			break;

		greth->tx_skbuff[greth->tx_last] = NULL;

		greth_update_tx_stats(dev, stat);
		dev->stats.tx_bytes += skb->len;

		bdp = greth->tx_bd_base + greth->tx_last;

		greth->tx_last = NEXT_TX(greth->tx_last);

		dma_unmap_single(greth->dev,
				 greth_read_bd(&bdp->addr),
				 skb_headlen(skb),
				 DMA_TO_DEVICE);

		for (i = 0; i < nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			bdp = greth->tx_bd_base + greth->tx_last;

			dma_unmap_page(greth->dev,
				       greth_read_bd(&bdp->addr),
				       skb_frag_size(frag),
				       DMA_TO_DEVICE);

			greth->tx_last = NEXT_TX(greth->tx_last);
		}
		greth->tx_free += nr_frags+1;
		dev_kfree_skb(skb);
	}

	if (netif_queue_stopped(dev) && (greth->tx_free > (MAX_SKB_FRAGS+1)))
		netif_wake_queue(dev);
}
Example #11
File: packet.c Project: NICMx/Jool
void snapshot_record(struct pkt_snapshot *shot, struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	unsigned int limit;
	unsigned int i;

	shot->len = skb->len;
	shot->data_len = skb->data_len;
	shot->nr_frags = shinfo->nr_frags;

	limit = SIMPLE_MIN(SNAPSHOT_FRAGS_SIZE, shot->nr_frags);
	for (i = 0; i < limit; i++)
		shot->frags[i] = skb_frag_size(&shinfo->frags[i]);

	/*
	 * Ok so I only have room for SNAPSHOT_FRAGS_SIZE page sizes, unless I
	 * allocate. I don't want to allocate because that's an additional fail
	 * opportunity and I want this to be as unintrusive as possible.
	 *
	 * First of all, since PAGE_SIZE is 4k in my VM, and the typical
	 * Internet MTU is 1500 max, I don't think the packet is going
	 * to have more than one page.
	 *
	 * (Unless IP fragments are being treated as pages, but I don't think
	 * that's the case here because the crashing packet was an ICMP error,
	 * and defrag discards fragmented ICMP errors on reception because they
	 * are BS.)
	 *
	 * Second, even if we get multiple pages, I don't see why would they
	 * have different sizes. Except for the last one, that is.
	 *
	 * (Unless the crashing pages were IP fragments. Again, I don't think
	 * this is the case.)
	 *
	 * Therefore, if the packet has more than SNAPSHOT_FRAGS_SIZE pages,
	 * I'm going to risk it and override the last slottable page size with
	 * the most interesting one. (The last one.)
	 *
	 * Consider that when you're reading the output.
	 */
	if (shot->nr_frags > SNAPSHOT_FRAGS_SIZE) {
		shot->frags[SNAPSHOT_FRAGS_SIZE - 1]
			    = skb_frag_size(&shinfo->frags[shot->nr_frags - 1]);
	}
}
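
The "override the last slottable page size" idea from the comment boils down to very little code. A standalone sketch, with an assumed SNAPSHOT_FRAGS_SIZE of 4 and plain arrays in place of skb_frag_t:

#include <stdio.h>

#define SNAPSHOT_FRAGS_SIZE 4	/* assumed capacity for this sketch */
#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Record up to SNAPSHOT_FRAGS_SIZE fragment sizes; if there are more,
 * overwrite the last slot with the final (most interesting) fragment,
 * as the comment above describes. */
static void record_frags(const unsigned int *sizes, unsigned int nr,
			 unsigned int *out)
{
	unsigned int limit = MIN(SNAPSHOT_FRAGS_SIZE, nr);
	unsigned int i;

	for (i = 0; i < limit; i++)
		out[i] = sizes[i];

	if (nr > SNAPSHOT_FRAGS_SIZE)
		out[SNAPSHOT_FRAGS_SIZE - 1] = sizes[nr - 1];
}

int main(void)
{
	unsigned int sizes[6] = { 4096, 4096, 4096, 4096, 4096, 1500 };
	unsigned int shot[SNAPSHOT_FRAGS_SIZE];

	record_frags(sizes, 6, shot);
	/* Slots 0..2 keep the first sizes, slot 3 shows the last one. */
	printf("%u %u %u %u\n", shot[0], shot[1], shot[2], shot[3]);
	return 0;
}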
Example #12
static int map_skb(struct device *dev, const struct sk_buff *skb,
		   struct mpodp_tx *tx)
{
	const skb_frag_t *fp, *end;
	const struct skb_shared_info *si;
	int count = 1;
	dma_addr_t handler;

	sg_init_table(tx->sg, MAX_SKB_FRAGS + 1);
	handler = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handler))
		goto out_err;
	sg_dma_address(&tx->sg[0]) = handler;
	sg_dma_len(&tx->sg[0]) = skb_headlen(skb);

	si = skb_shinfo(skb);
	end = &si->frags[si->nr_frags];
	for (fp = si->frags; fp < end; fp++, count++) {
		handler = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
					 DMA_TO_DEVICE);
		if (dma_mapping_error(dev, handler))
			goto unwind;

		sg_dma_address(&tx->sg[count]) = handler;
		sg_dma_len(&tx->sg[count]) = skb_frag_size(fp);

	}
	sg_mark_end(&tx->sg[count - 1]);
	tx->sg_len = count;

	return 0;

unwind:
	while (fp-- > si->frags)
		dma_unmap_page(dev, sg_dma_address(&tx->sg[--count]),
			       skb_frag_size(fp), DMA_TO_DEVICE);
	dma_unmap_single(dev, sg_dma_address(&tx->sg[0]),
			 skb_headlen(skb), DMA_TO_DEVICE);

out_err:
	return -ENOMEM;
}
Example #13
static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *skb,
			     int real_size, u16 *vlan_tag, int tx_ind, void *fragptr)
{
	struct mlx4_wqe_inline_seg *inl = &tx_desc->inl;
	int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl;

	if (skb->len <= spc) {
		inl->byte_count = cpu_to_be32(1 << 31 | skb->len);
		skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
		if (skb_shinfo(skb)->nr_frags)
			memcpy(((void *)(inl + 1)) + skb_headlen(skb), fragptr,
			       skb_frag_size(&skb_shinfo(skb)->frags[0]));

	} else {
		inl->byte_count = cpu_to_be32(1 << 31 | spc);
		if (skb_headlen(skb) <= spc) {
			skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
			if (skb_headlen(skb) < spc) {
				memcpy(((void *)(inl + 1)) + skb_headlen(skb),
					fragptr, spc - skb_headlen(skb));
				fragptr +=  spc - skb_headlen(skb);
			}
			inl = (void *) (inl + 1) + spc;
			memcpy(((void *)(inl + 1)), fragptr, skb->len - spc);
		} else {
			skb_copy_from_linear_data(skb, inl + 1, spc);
			inl = (void *) (inl + 1) + spc;
			skb_copy_from_linear_data_offset(skb, spc, inl + 1,
					skb_headlen(skb) - spc);
			if (skb_shinfo(skb)->nr_frags)
				memcpy(((void *)(inl + 1)) + skb_headlen(skb) - spc,
					fragptr, skb_frag_size(&skb_shinfo(skb)->frags[0]));
		}

		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc));
	}
	tx_desc->ctrl.vlan_tag = cpu_to_be16(*vlan_tag);
	tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN *
		(!!vlan_tx_tag_present(skb));
	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
}
Example #14
/**
 * nfp_net_tx_ring_reset() - Free any untransmitted buffers and reset pointers
 * @nn:		NFP Net device
 * @tx_ring:	TX ring structure
 *
 * Assumes that the device is stopped
 */
static void
nfp_net_tx_ring_reset(struct nfp_net *nn, struct nfp_net_tx_ring *tx_ring)
{
	const struct skb_frag_struct *frag;
	struct netdev_queue *nd_q;
	struct pci_dev *pdev = nn->pdev;

	while (tx_ring->rd_p != tx_ring->wr_p) {
		int nr_frags, fidx, idx;
		struct sk_buff *skb;

		idx = tx_ring->rd_p % tx_ring->cnt;
		skb = tx_ring->txbufs[idx].skb;
		nr_frags = skb_shinfo(skb)->nr_frags;
		fidx = tx_ring->txbufs[idx].fidx;

		if (fidx == -1) {
			/* unmap head */
			dma_unmap_single(&pdev->dev,
					 tx_ring->txbufs[idx].dma_addr,
					 skb_headlen(skb), DMA_TO_DEVICE);
		} else {
			/* unmap fragment */
			frag = &skb_shinfo(skb)->frags[fidx];
			dma_unmap_page(&pdev->dev,
				       tx_ring->txbufs[idx].dma_addr,
				       skb_frag_size(frag), DMA_TO_DEVICE);
		}

		/* check for last gather fragment */
		if (fidx == nr_frags - 1)
			dev_kfree_skb_any(skb);

		tx_ring->txbufs[idx].dma_addr = 0;
		tx_ring->txbufs[idx].skb = NULL;
		tx_ring->txbufs[idx].fidx = -2;

		tx_ring->qcp_rd_p++;
		tx_ring->rd_p++;
	}

	memset(tx_ring->txds, 0, sizeof(*tx_ring->txds) * tx_ring->cnt);
	tx_ring->wr_p = 0;
	tx_ring->rd_p = 0;
	tx_ring->qcp_rd_p = 0;
	tx_ring->wr_ptr_add = 0;

	nd_q = netdev_get_tx_queue(nn->netdev, tx_ring->idx);
	netdev_tx_reset_queue(nd_q);
}
Example #15
static void unmap_skb(struct device *dev, const struct sk_buff *skb,
		      const struct mpodp_tx *tx)
{
	const skb_frag_t *fp, *end;
	const struct skb_shared_info *si;
	int count = 1;

	dma_unmap_single(dev, sg_dma_address(&tx->sg[0]), skb_headlen(skb), DMA_TO_DEVICE);

	si = skb_shinfo(skb);
	end = &si->frags[si->nr_frags];
	for (fp = si->frags; fp < end; fp++, count++) {
		dma_unmap_page(dev, sg_dma_address(&tx->sg[count]), skb_frag_size(fp), DMA_TO_DEVICE);
	}
}
Example #16
static int count_skb_frag_slots(struct sk_buff *skb)
{
	int i, frags = skb_shinfo(skb)->nr_frags;
	int pages = 0;

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		unsigned long size = skb_frag_size(frag);
		unsigned long offset = frag->page_offset;

		/* Skip unused frames from start of page */
		offset &= ~PAGE_MASK;
		pages += PFN_UP(offset + size);
	}
	return pages;
}
Example #17
/*
 * Figure out how many ring slots we're going to need to send @skb to
 * the guest. This function is essentially a dry run of
 * netbk_gop_frag_copy.
 */
unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
{
	unsigned int count;
	int i, copy_off;

	count = DIV_ROUND_UP(skb_headlen(skb), PAGE_SIZE);

	copy_off = skb_headlen(skb) % PAGE_SIZE;

	if (skb_shinfo(skb)->gso_size)
		count++;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
		unsigned long offset = skb_shinfo(skb)->frags[i].page_offset;
		unsigned long bytes;

		offset &= ~PAGE_MASK;

		while (size > 0) {
			BUG_ON(offset >= PAGE_SIZE);
			BUG_ON(copy_off > MAX_BUFFER_OFFSET);

			bytes = PAGE_SIZE - offset;

			if (bytes > size)
				bytes = size;

			if (start_new_rx_buffer(copy_off, bytes, 0)) {
				count++;
				copy_off = 0;
			}

			if (copy_off + bytes > MAX_BUFFER_OFFSET)
				bytes = MAX_BUFFER_OFFSET - copy_off;

			copy_off += bytes;

			offset += bytes;
			size -= bytes;

			if (offset == PAGE_SIZE)
				offset = 0;
		}
	}
	return count;
}
Example #18
static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
				    struct xgene_enet_raw_desc *raw_desc)
{
	struct sk_buff *skb;
	struct device *dev;
	skb_frag_t *frag;
	dma_addr_t *frag_dma_addr;
	u16 skb_index;
	u8 status;
	int i, ret = 0;

	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
	skb = cp_ring->cp_skb[skb_index];
	frag_dma_addr = &cp_ring->frag_dma_addr[skb_index * MAX_SKB_FRAGS];

	dev = ndev_to_dev(cp_ring->ndev);
	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
			 skb_headlen(skb),
			 DMA_TO_DEVICE);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		dma_unmap_page(dev, frag_dma_addr[i], skb_frag_size(frag),
			       DMA_TO_DEVICE);
	}

	/* Checking for error */
	status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
	if (unlikely(status > 2)) {
		xgene_enet_parse_error(cp_ring, netdev_priv(cp_ring->ndev),
				       status);
		ret = -EIO;
	}

	if (likely(skb)) {
		dev_kfree_skb_any(skb);
	} else {
		netdev_err(cp_ring->ndev, "completion skb is NULL\n");
		ret = -EIO;
	}

	return ret;
}
Example #19
static struct sk_buff *lro_gen_skb(struct net_lro_mgr *lro_mgr,
				   struct skb_frag_struct *frags,
				   int len, int true_size,
				   void *mac_hdr,
				   int hlen, __wsum sum,
				   u32 ip_summed)
{
	struct sk_buff *skb;
	struct skb_frag_struct *skb_frags;
	int data_len = len;
	int hdr_len = min(len, hlen);

	skb = netdev_alloc_skb(lro_mgr->dev, hlen + lro_mgr->frag_align_pad);
	if (!skb)
		return NULL;

	skb_reserve(skb, lro_mgr->frag_align_pad);
	skb->len = len;
	skb->data_len = len - hdr_len;
	skb->truesize += true_size;
	skb->tail += hdr_len;

	memcpy(skb->data, mac_hdr, hdr_len);

	skb_frags = skb_shinfo(skb)->frags;
	while (data_len > 0) {
		*skb_frags = *frags;
		data_len -= skb_frag_size(frags);
		skb_frags++;
		frags++;
		skb_shinfo(skb)->nr_frags++;
	}

	skb_shinfo(skb)->frags[0].page_offset += hdr_len;
	skb_frag_size_sub(&skb_shinfo(skb)->frags[0], hdr_len);

	skb->ip_summed = ip_summed;
	skb->csum = sum;
	skb->protocol = eth_type_trans(skb, lro_mgr->dev);
	return skb;
}
Example #20
/*
 * Figure out how many ring slots we're going to need to send @skb to
 * the guest. This function is essentially a dry run of
 * netbk_gop_frag_copy.
 */
unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
{
	struct xenvif_count_slot_state state;
	unsigned int count;
	unsigned char *data;
	unsigned i;

	state.head = true;
	state.copy_off = 0;

	/* Slot for the first (partial) page of data. */
	count = 1;

	/* Need a slot for the GSO prefix for GSO extra data? */
	if (skb_shinfo(skb)->gso_size)
		count++;

	data = skb->data;
	while (data < skb_tail_pointer(skb)) {
		unsigned long offset = offset_in_page(data);
		unsigned long size = PAGE_SIZE - offset;

		if (data + size > skb_tail_pointer(skb))
			size = skb_tail_pointer(skb) - data;

		count += xenvif_count_frag_slots(vif, offset, size, &state);

		data += size;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
		unsigned long offset = skb_shinfo(skb)->frags[i].page_offset;

		count += xenvif_count_frag_slots(vif, offset, size, &state);
	}
	return count;
}
Example #21
static void wil_seq_print_skb(struct seq_file *s, struct sk_buff *skb)
{
	int i = 0;
	int len = skb_headlen(skb);
	void *p = skb->data;
	int nr_frags = skb_shinfo(skb)->nr_frags;

	seq_printf(s, "    len = %d\n", len);
	wil_seq_hexdump(s, p, len, "      : ");

	if (nr_frags) {
		seq_printf(s, "    nr_frags = %d\n", nr_frags);
		for (i = 0; i < nr_frags; i++) {
			const struct skb_frag_struct *frag =
					&skb_shinfo(skb)->frags[i];

			len = skb_frag_size(frag);
			p = skb_frag_address_safe(frag);
			seq_printf(s, "    [%2d] : len = %d\n", i, len);
			wil_seq_hexdump(s, p, len, "      : ");
		}
	}
}
Example #22
static void lro_add_frags(struct net_lro_desc *lro_desc,
			  int len, int hlen, int truesize,
			  struct skb_frag_struct *skb_frags,
			  struct iphdr *iph, struct tcphdr *tcph)
{
	struct sk_buff *skb = lro_desc->parent;
	int tcp_data_len = TCP_PAYLOAD_LENGTH(iph, tcph);

	lro_add_common(lro_desc, iph, tcph, tcp_data_len);

	skb->truesize += truesize;

	skb_frags[0].page_offset += hlen;
	skb_frag_size_sub(&skb_frags[0], hlen);

	while (tcp_data_len > 0) {
		*(lro_desc->next_frag) = *skb_frags;
		tcp_data_len -= skb_frag_size(skb_frags);
		lro_desc->next_frag++;
		skb_frags++;
		skb_shinfo(skb)->nr_frags++;
	}
}
Example #23
/**
 *	skb_copy_datagram_iovec - Copy a datagram to an iovec.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying from
 *	@to: io vector to copy to
 *	@len: amount of data to copy from buffer to iovec
 *
 *	Note: the iovec is modified during the copy.
 */
int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
			    struct iovec *to, int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;

	trace_skb_copy_datagram_iovec(skb, len);

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		if (memcpy_toiovec(to, skb->data + offset, copy))
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
	}

	/* Copy paged appendix. Hmm... why does this look so complicated? */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			int err;
			u8  *vaddr;
			struct page *page = skb_frag_page(frag);

			if (copy > len)
				copy = len;
			vaddr = kmap(page);
			err = memcpy_toiovec(to, vaddr + frag->page_offset +
					     offset - start, copy);
			kunmap(page);
			if (err)
				goto fault;
			if (!(len -= copy))
				return 0;
			offset += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_datagram_iovec(frag_iter,
						    offset - start,
						    to, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
		}
		start = end;
	}

	if (!len)
		return 0;

fault:
	return -EFAULT;
}
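
The note that the iovec is modified during the copy is easiest to see in a userspace analogue. The sketch below is not the kernel helper (no paged fragments or frag_list walk); it just drains a flat buffer into a struct iovec and consumes the vector entry by entry, which is the same caller-visible behaviour:

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

/* Copy @len bytes of @src into @iov, advancing iov_base and shrinking
 * iov_len as data is consumed.  Returns 0 on success, -1 if the vector
 * runs out of room. */
static int copy_to_iovec(const char *src, size_t len,
			 struct iovec *iov, int iovcnt)
{
	int i = 0;

	while (len > 0) {
		size_t chunk;

		if (i == iovcnt)
			return -1;
		chunk = len < iov[i].iov_len ? len : iov[i].iov_len;
		memcpy(iov[i].iov_base, src, chunk);
		iov[i].iov_base = (char *)iov[i].iov_base + chunk;
		iov[i].iov_len -= chunk;
		src += chunk;
		len -= chunk;
		if (iov[i].iov_len == 0)
			i++;
	}
	return 0;
}

int main(void)
{
	char a[4] = { 0 }, b[8] = { 0 };
	struct iovec iov[2] = {
		{ .iov_base = a, .iov_len = sizeof(a) },
		{ .iov_base = b, .iov_len = sizeof(b) },
	};

	copy_to_iovec("hello world", 11, iov, 2);
	/* "hell" landed in a, "o world" in b; iov[1] has 1 byte left. */
	printf("%.4s|%.7s, %zu bytes left\n", a, b, iov[1].iov_len);
	return 0;
}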
Example #24
/**
 * Fragment @skb to add some room if @len > 0 or delete data otherwise.
 */
static int
__skb_fragment(struct sk_buff *skb, struct sk_buff *pskb,
	       char *pspt, int len, TfwStr *it)
{
	int i, ret;
	long offset;
	unsigned int d_size;
	struct sk_buff *f_skb, **next_fdp;

	SS_DBG("[%d]: %s: in: len [%d] pspt [%p], skb [%p]: head [%p]"
		" data [%p] tail [%p] end [%p] len [%u] data_len [%u]"
		" truesize [%u] nr_frags [%u]\n",
		smp_processor_id(), __func__, len, pspt, skb, skb->head,
		skb->data, skb_tail_pointer(skb), skb_end_pointer(skb),
		skb->len, skb->data_len, skb->truesize,
		skb_shinfo(skb)->nr_frags);
	BUG_ON(!len);

	if (abs(len) > PAGE_SIZE) {
		SS_WARN("Attempt to add or delete too much data: %u\n", len);
		return -EINVAL;
	}

	/*
	 * Use @it to hold the return values from __split_pgfrag()
	 * and __split_linear_data(). @it->ptr, @it->skb, and
	 * @it->flags may be set to actual values. If a new SKB is
	 * allocated, then it is stored in @it->skb. @it->ptr holds
	 * the pointer either to data after the deleted data, or to
	 * the area for new data. @it->flags is set when @it->ptr
	 * points to data in @it->skb. Otherwise, @it->ptr points
	 * to data in @skb.
	 *
	 * Determine where the split begins within the SKB, then do
	 * the job using the right function.
	 */

	/* See if the split starts in the linear data. */
	d_size = skb_headlen(skb);
	offset = pspt - (char *)skb->data;

	if ((offset >= 0) && (offset < d_size)) {
		int t_size = d_size - offset;
		len = max(len, -t_size);
		ret = __split_linear_data(skb, pspt, len, it);
		goto done;
	}

	/* See if the split starts in the page fragments data. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		d_size = skb_frag_size(frag);
		offset = pspt - (char *)skb_frag_address(frag);

		if ((offset >= 0) && (offset < d_size)) {
			int t_size = d_size - offset;
			len = max(len, -t_size);
			ret = __split_pgfrag(skb, i, offset, len, it);
			goto done;
		}
	}

	/* See if the split starts in the SKB fragments data. */
	skb_walk_frags(skb, f_skb) {
		ret = __skb_fragment(f_skb, skb, pspt, len, it);
		if (ret != -ENOENT)
			return ret;
	}
Example #25
/**
 * Delete @len (the value is positive now) bytes from @frag.
 *
 * @return 0 on success, -errno on failure.
 * @return SKB in @it->skb if new SKB is allocated.
 * @return pointer to data after the deleted area in @it->ptr.
 * @return @it->flags is set if @it->ptr points to data in it->skb.
 */
static int
__split_pgfrag_del(struct sk_buff *skb, int i, int off, int len, TfwStr *it)
{
	int tail_len;
	struct sk_buff *skb_dst;
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
	struct skb_shared_info *si = skb_shinfo(skb);

	SS_DBG("[%d]: %s: skb [%p] i [%d] off [%d] len [%d] fragsize [%d]\n",
		smp_processor_id(), __func__,
		skb, i, off, len, skb_frag_size(frag));

	if (unlikely(off + len > skb_frag_size(frag))) {
		SS_WARN("Attempt to delete too much\n");
		return -EFAULT;
	}

	/* Fast path: delete a full fragment. */
	if (!off && len == skb_frag_size(frag)) {
		ss_skb_adjust_data_len(skb, -len);
		__skb_frag_unref(frag);
		if (i + 1 < si->nr_frags)
			memmove(&si->frags[i], &si->frags[i + 1],
				(si->nr_frags - i - 1) * sizeof(skb_frag_t));
		--si->nr_frags;
		goto lookup_next_ptr;
	}
	/* Fast path: delete the head part of a fragment. */
	if (!off) {
		frag->page_offset += len;
		skb_frag_size_sub(frag, len);
		ss_skb_adjust_data_len(skb, -len);
		it->ptr = skb_frag_address(frag);
		return 0;
	}
	/* Fast path: delete the tail part of a fragment. */
	if (off + len == skb_frag_size(frag)) {
		skb_frag_size_sub(frag, len);
		ss_skb_adjust_data_len(skb, -len);
		++i;
		goto lookup_next_ptr;
	}

	/*
	 * Delete data in the middle of a fragment. After the data
	 * is deleted the fragment will contain only the head part,
	 * and the tail part is moved to another fragment.
	 * [frag @i] [frag @i+1 - tail data]
	 *
	 * Make room for a fragment right after the @i fragment
	 * to move the tail part of data there.
	 */
	if (__extend_pgfrags(skb, i + 1, 1, it))
		return -EFAULT;

	/* Find the SKB for tail data. */
	skb_dst = (i < MAX_SKB_FRAGS - 1) ? skb : it->skb;

	/* Calculate the length of the tail part. */
	tail_len = skb_frag_size(frag) - off - len;

	/* Trim the fragment with the head part. */
	skb_frag_size_sub(frag, len + tail_len);

	/* Make the fragment with the tail part. */
	i = (i + 1) % MAX_SKB_FRAGS;
	__skb_fill_page_desc(skb_dst, i, skb_frag_page(frag),
			     frag->page_offset + off + len, tail_len);
	__skb_frag_ref(frag);

	/* Adjust SKB data lengths. */
	ss_skb_adjust_data_len(skb, -len);
	if (skb != skb_dst) {
		ss_skb_adjust_data_len(skb, -tail_len);
		ss_skb_adjust_data_len(skb_dst, tail_len);
	}

	/* Get the SKB and the address of data after the deleted area. */
	it->flags = (skb != skb_dst);
	it->ptr = skb_frag_address(&skb_shinfo(skb_dst)->frags[i]);
	return 0;

lookup_next_ptr:
	/* Get the next fragment after the deleted fragment. */
	if (i < si->nr_frags)
		it->ptr = skb_frag_address(&si->frags[i]);
	return 0;
}
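
The head/tail split above can be shown with a bare offset/size descriptor instead of a real skb_frag_t. A minimal sketch under that assumption (no page refcounting, no SKB length adjustment):

#include <stdio.h>

/* Stand-in for a page fragment: an offset into the page and a size. */
struct desc {
	unsigned int off;
	unsigned int size;
};

/* Delete @len bytes at offset @del_off of @d.  The head part stays in @d,
 * the tail part (if any) ends up in @tail.  Returns -1 on bad input,
 * otherwise the number of descriptors left (0, 1 or 2). */
static int desc_del(struct desc *d, unsigned int del_off, unsigned int len,
		    struct desc *tail)
{
	unsigned int tail_len;

	if (del_off + len > d->size)
		return -1;			/* "delete too much" case */
	tail_len = d->size - del_off - len;

	if (del_off == 0 && tail_len == 0)
		return 0;			/* whole fragment deleted */
	if (del_off == 0) {			/* head of the fragment */
		d->off += len;
		d->size -= len;
		return 1;
	}
	if (tail_len == 0) {			/* tail of the fragment */
		d->size -= len;
		return 1;
	}
	/* Middle: keep the head in @d, describe the tail separately. */
	tail->off = d->off + del_off + len;
	tail->size = tail_len;
	d->size = del_off;
	return 2;
}

int main(void)
{
	struct desc d = { .off = 0, .size = 1000 }, tail;

	if (desc_del(&d, 100, 200, &tail) == 2)
		printf("head: off=%u size=%u, tail: off=%u size=%u\n",
		       d.off, d.size, tail.off, tail.size);
	return 0;
}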
Example #26
/**
 * Get room for @len bytes of data starting from offset @off
 * in fragment @i.
 *
 * The room may be found in the preceding fragment if @off is zero.
 * Otherwise, a new fragment is allocated and fragments around the
 * fragment @i are rearranged so that data is not actually split
 * and copied.
 *
 * Note: @off is always within the borders of fragment @i. It can
 * point at the start of a fragment, but it can never point at the
 * location right after the end of a fragment. In other words, @off
 * can be zero, but it can not be equal to the size of fragment @i.
 *
 * @return 0 on success, -errno on failure.
 * @return SKB in @it->skb if new SKB is allocated.
 * @return pointer to the room for new data in @it->ptr.
 * @return @it->flags is set if @it->ptr points to data in it->skb.
 */
static int
__split_pgfrag_add(struct sk_buff *skb, int i, int off, int len, TfwStr *it)
{
	int tail_len;
	struct sk_buff *skb_dst;
	skb_frag_t *frag_dst, *frag = &skb_shinfo(skb)->frags[i];

	SS_DBG("[%d]: %s: skb [%p] i [%d] off [%d] len [%d] fragsize [%d]\n",
		smp_processor_id(), __func__,
		skb, i, off, len, skb_frag_size(frag));
	/*
	 * If @off is zero and there's a preceding page fragment,
	 * then try to append data to that fragment. Go for other
	 * solutions if there's no room.
	 */
	if (!off && i) {
		frag_dst = __check_frag_room(skb, frag - 1, len);
		if (frag_dst) {
			/* Coalesce new data with the fragment. */
			off = skb_frag_size(frag_dst);
			skb_frag_size_add(frag_dst, len);
			ss_skb_adjust_data_len(skb, len);
			it->ptr = (char *)skb_frag_address(frag_dst) + off;
			return 0;
		}
	}

	/*
	 * Make a fragment that can hold @len bytes. If @off is
	 * zero, then data is added at the start of fragment @i.
	 * Make a fragment in slot @i, and the original fragment
	 * is shifted forward. If @off is not zero, then make
	 * a fragment in slot @i+1, and make an extra fragment
	 * in slot @i+2 to hold the tail data.
	 */
	if (__new_pgfrag(skb, len, i + !!off, 1 + !!off, it))
		return -EFAULT;

	/* If @off is zero, the job is done in __new_pgfrag(). */
	if (!off) {
		it->ptr = skb_frag_address(frag);
		return 0;
	}

	/*
	 * If data is added in the middle of a fragment, then split
	 * the fragment. The head of the fragment stays there, and
	 * the tail of the fragment is moved to a new fragment.
	 * The fragment for new data is placed in between.
	 * [frag @i] [frag @i+1 - new data] [frag @i+2 - tail data]
	 * If @i is close to MAX_SKB_FRAGS, then new fragments may
	 * be located in another SKB.
	 */

	/* Find the SKB for tail data. */
	skb_dst = (i < MAX_SKB_FRAGS - 2) ? skb : it->skb;

	/* Calculate the length of the tail part. */
	tail_len = skb_frag_size(frag) - off;

	/* Trim the fragment with the head part. */
	skb_frag_size_sub(frag, tail_len);

	/* Make the fragment with the tail part. */
	i = (i + 2) % MAX_SKB_FRAGS;
	__skb_fill_page_desc(skb_dst, i, skb_frag_page(frag),
			     frag->page_offset + off, tail_len);
	__skb_frag_ref(frag);

	/* Adjust SKB data lengths. */
	if (skb != skb_dst) {
		ss_skb_adjust_data_len(skb, -tail_len);
		ss_skb_adjust_data_len(skb_dst, tail_len);
	}

	/* Get the SKB and the address for new data. */
	it->flags = !(i < MAX_SKB_FRAGS - 1);
	frag_dst = it->flags ? &skb_shinfo(it->skb)->frags[0] : frag + 1;
	it->ptr = skb_frag_address(frag_dst);

	return 0;
}
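
Similarly, the head / new-data / tail layout built above can be sketched with plain descriptors. In this illustration the new-data slot's offset is simply 0 as a placeholder for a freshly allocated page, and no spill-over to another SKB is modelled:

#include <stdio.h>

/* Stand-in for a page fragment: an offset into its page and a size. */
struct frag {
	unsigned int off;
	unsigned int size;
};

/* Make room for @len new bytes at offset @off inside @f: the head keeps
 * its place, a @len-byte slot is reserved for the new data and the old
 * tail becomes a third fragment.  @out needs room for three entries;
 * the number actually used is returned.  Per the contract above, @off
 * is always smaller than f->size. */
static int frag_make_room(const struct frag *f, unsigned int off,
			  unsigned int len, struct frag *out)
{
	int n = 0;

	if (off)					/* head part */
		out[n++] = (struct frag){ f->off, off };

	out[n++] = (struct frag){ 0, len };		/* new data slot */

	out[n++] = (struct frag){ f->off + off, f->size - off }; /* tail */

	return n;
}

int main(void)
{
	struct frag f = { .off = 0, .size = 600 }, out[3];
	int i, n = frag_make_room(&f, 200, 50, out);

	for (i = 0; i < n; i++)
		printf("[%d] off=%u size=%u\n", i, out[i].off, out[i].size);
	return 0;
}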
Example #27
static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
{
	struct mlx5_wq_cyc       *wq   = &sq->wq;

	u16 pi = sq->pc & wq->sz_m1;
	struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);

	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
	struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
	struct mlx5_wqe_data_seg *dseg;

	u8  opcode = MLX5_OPCODE_SEND;
	dma_addr_t dma_addr = 0;
	bool bf = false;
	u16 headlen;
	u16 ds_cnt;
	u16 ihs;
	int i;

	memset(wqe, 0, sizeof(*wqe));

	if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
		eseg->cs_flags	= MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
	else
		sq->stats.csum_offload_none++;

	if (sq->cc != sq->prev_cc) {
		sq->prev_cc = sq->cc;
		sq->bf_budget = (sq->cc == sq->pc) ? MLX5E_SQ_BF_BUDGET : 0;
	}

	if (skb_is_gso(skb)) {
		u32 payload_len;

		eseg->mss    = cpu_to_be16(skb_shinfo(skb)->gso_size);
		opcode       = MLX5_OPCODE_LSO;
		ihs          = skb_transport_offset(skb) + tcp_hdrlen(skb);
		payload_len  = skb->len - ihs;
		MLX5E_TX_SKB_CB(skb)->num_bytes = skb->len +
					(skb_shinfo(skb)->gso_segs - 1) * ihs;
		sq->stats.tso_packets++;
		sq->stats.tso_bytes += payload_len;
	} else {
		bf = sq->bf_budget &&
		     !skb->xmit_more &&
		     !skb_shinfo(skb)->nr_frags;
		ihs = mlx5e_get_inline_hdr_size(sq, skb, bf);
		MLX5E_TX_SKB_CB(skb)->num_bytes = max_t(unsigned int, skb->len,
							ETH_ZLEN);
	}

	skb_copy_from_linear_data(skb, eseg->inline_hdr_start, ihs);
	skb_pull_inline(skb, ihs);

	eseg->inline_hdr_sz = cpu_to_be16(ihs);

	ds_cnt  = sizeof(*wqe) / MLX5_SEND_WQE_DS;
	ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr_start),
			       MLX5_SEND_WQE_DS);
	dseg    = (struct mlx5_wqe_data_seg *)cseg + ds_cnt;

	MLX5E_TX_SKB_CB(skb)->num_dma = 0;

	headlen = skb_headlen(skb);
	if (headlen) {
		dma_addr = dma_map_single(sq->pdev, skb->data, headlen,
					  DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			goto dma_unmap_wqe_err;

		dseg->addr       = cpu_to_be64(dma_addr);
		dseg->lkey       = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(headlen);

		mlx5e_dma_push(sq, dma_addr, headlen);
		MLX5E_TX_SKB_CB(skb)->num_dma++;

		dseg++;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		int fsz = skb_frag_size(frag);

		dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
					    DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			goto dma_unmap_wqe_err;

		dseg->addr       = cpu_to_be64(dma_addr);
		dseg->lkey       = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(fsz);

		mlx5e_dma_push(sq, dma_addr, fsz);
		MLX5E_TX_SKB_CB(skb)->num_dma++;

		dseg++;
	}

	ds_cnt += MLX5E_TX_SKB_CB(skb)->num_dma;

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
	cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | ds_cnt);

	sq->skb[pi] = skb;

	MLX5E_TX_SKB_CB(skb)->num_wqebbs = DIV_ROUND_UP(ds_cnt,
							MLX5_SEND_WQEBB_NUM_DS);
	sq->pc += MLX5E_TX_SKB_CB(skb)->num_wqebbs;

	netdev_tx_sent_queue(sq->txq, MLX5E_TX_SKB_CB(skb)->num_bytes);

	if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM))) {
		netif_tx_stop_queue(sq->txq);
		sq->stats.stopped++;
	}

	if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) {
		int bf_sz = 0;

		if (bf && sq->uar_bf_map)
			bf_sz = MLX5E_TX_SKB_CB(skb)->num_wqebbs << 3;

		cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
		mlx5e_tx_notify_hw(sq, wqe, bf_sz);
	}

	/* fill sq edge with nops to avoid wqe wrap around */
	while ((sq->pc & wq->sz_m1) > sq->edge)
		mlx5e_send_nop(sq, false);

	sq->bf_budget = bf ? sq->bf_budget - 1 : 0;

	sq->stats.packets++;
	return NETDEV_TX_OK;

dma_unmap_wqe_err:
	sq->stats.dropped++;
	mlx5e_dma_unmap_wqe_err(sq, skb);

	dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;
}
Example #28
static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
			 struct net_device *dev)
{
	struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
	struct iphdr *iph;
	struct sk_buff *fp, *head = qp->q.fragments;
	int len;
	int ihlen;
	int err;
	int sum_truesize;
	u8 ecn;

	ipq_kill(qp);

	ecn = ip_frag_ecn_table[qp->ecn];
	if (unlikely(ecn == 0xff)) {
		err = -EINVAL;
		goto out_fail;
	}
	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);
		if (!fp)
			goto out_nomem;

		fp->next = head->next;
		if (!fp->next)
			qp->q.fragments_tail = fp;
		prev->next = fp;

		skb_morph(head, qp->q.fragments);
		head->next = qp->q.fragments->next;

		consume_skb(qp->q.fragments);
		qp->q.fragments = head;
	}

	WARN_ON(head == NULL);
	WARN_ON(FRAG_CB(head)->offset != 0);

	/* Allocate a new buffer for the datagram. */
	ihlen = ip_hdrlen(head);
	len = ihlen + qp->q.len;

	err = -E2BIG;
	if (len > 65535)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_unclone(head, GFP_ATOMIC))
		goto out_nomem;

	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments. */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
			goto out_nomem;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		add_frag_mem_limit(&qp->q, clone->truesize);
	}

	skb_push(head, head->data - skb_network_header(head));

	sum_truesize = head->truesize;
	for (fp = head->next; fp;) {
		bool headstolen;
		int delta;
		struct sk_buff *next = fp->next;

		sum_truesize += fp->truesize;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);

		if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
			kfree_skb_partial(fp, headstolen);
		} else {
			if (!skb_shinfo(head)->frag_list)
				skb_shinfo(head)->frag_list = fp;
			head->data_len += fp->len;
			head->len += fp->len;
			head->truesize += fp->truesize;
		}
		fp = next;
	}
	sub_frag_mem_limit(&qp->q, sum_truesize);

	head->next = NULL;
	head->dev = dev;
	head->tstamp = qp->q.stamp;
	IPCB(head)->frag_max_size = qp->q.max_size;

	iph = ip_hdr(head);
	/* max_size != 0 implies at least one fragment had IP_DF set */
	iph->frag_off = qp->q.max_size ? htons(IP_DF) : 0;
	iph->tot_len = htons(len);
	iph->tos |= ecn;

	ip_send_check(iph);

	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
	qp->q.fragments = NULL;
	qp->q.fragments_tail = NULL;
	return 0;

out_nomem:
	LIMIT_NETDEBUG(KERN_ERR pr_fmt("queue_glue: no memory for gluing queue %p\n"),
		       qp);
	err = -ENOMEM;
	goto out_fail;
out_oversize:
	net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->saddr);
out_fail:
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
	return err;
}
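
The clone length computed in the "first fragment is fragmented itself" branch is just the frag_list share of the non-linear data, i.e. data_len minus whatever sits in the page fragments. A small standalone sketch of that arithmetic:

#include <stdio.h>

/* The clone takes over data_len minus the bytes held in page fragments;
 * that remainder is exactly what lives in the frag_list. */
static unsigned int clone_len(unsigned int data_len,
			      const unsigned int *frag_sizes, int nr_frags)
{
	unsigned int plen = 0;
	int i;

	for (i = 0; i < nr_frags; i++)
		plen += frag_sizes[i];

	return data_len - plen;
}

int main(void)
{
	/* 9000 non-linear bytes, two 4096-byte page fragments ->
	 * 808 bytes stay in the frag_list and move to the clone. */
	unsigned int frags[2] = { 4096, 4096 };

	printf("%u\n", clone_len(9000, frags, 2));
	return 0;
}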
Example #29
static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
			 struct net_device *dev)
{
	struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
	struct iphdr *iph;
	struct sk_buff *fp, *head = qp->q.fragments;
	int len;
	int ihlen;
	int err;
	u8 ecn;

	ipq_kill(qp);

	ecn = ip_frag_ecn_table[qp->ecn];
	if (unlikely(ecn == 0xff)) {
		err = -EINVAL;
		goto out_fail;
	}
	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);
		if (!fp)
			goto out_nomem;

		fp->next = head->next;
		if (!fp->next)
			qp->q.fragments_tail = fp;
		prev->next = fp;

		skb_morph(head, qp->q.fragments);
		head->next = qp->q.fragments->next;

		consume_skb(qp->q.fragments);
		qp->q.fragments = head;
	}

	WARN_ON(!head);
	WARN_ON(FRAG_CB(head)->offset != 0);

	/* Allocate a new buffer for the datagram. */
	ihlen = ip_hdrlen(head);
	len = ihlen + qp->q.len;

	err = -E2BIG;
	if (len > 65535)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_unclone(head, GFP_ATOMIC))
		goto out_nomem;

	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments. */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		clone = alloc_skb(0, GFP_ATOMIC);
		if (!clone)
			goto out_nomem;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		add_frag_mem_limit(qp->q.net, clone->truesize);
	}

	skb_shinfo(head)->frag_list = head->next;
	skb_push(head, head->data - skb_network_header(head));

	for (fp=head->next; fp; fp = fp->next) {
		head->data_len += fp->len;
		head->len += fp->len;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);
		head->truesize += fp->truesize;
	}
	sub_frag_mem_limit(qp->q.net, head->truesize);

	head->next = NULL;
	head->dev = dev;
	head->tstamp = qp->q.stamp;
	IPCB(head)->frag_max_size = max(qp->max_df_size, qp->q.max_size);

	iph = ip_hdr(head);
	iph->tot_len = htons(len);
	iph->tos |= ecn;

	/* When we set IP_DF on a refragmented skb we must also force a
	 * call to ip_fragment to avoid forwarding a DF-skb of size s while
	 * original sender only sent fragments of size f (where f < s).
	 *
	 * We only set DF/IPSKB_FRAG_PMTU if such DF fragment was the largest
	 * frag seen to avoid sending tiny DF-fragments in case skb was built
	 * from one very small df-fragment and one large non-df frag.
	 */
	if (qp->max_df_size == qp->q.max_size) {
		IPCB(head)->flags |= IPSKB_FRAG_PMTU;
		iph->frag_off = htons(IP_DF);
	} else {
		iph->frag_off = 0;
	}

	ip_send_check(iph);

	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
	qp->q.fragments = NULL;
	qp->q.fragments_tail = NULL;
	return 0;

out_nomem:
	net_dbg_ratelimited("queue_glue: no memory for gluing queue %p\n", qp);
	err = -ENOMEM;
	goto out_fail;
out_oversize:
	net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->saddr);
out_fail:
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
	return err;
}
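
The DF rule spelled out in the comment above reduces to one comparison. A standalone sketch returning a boolean instead of writing iph->frag_off (both sizes are nonzero after a successful reassembly):

#include <stdio.h>

/* max_size is the largest fragment seen, max_df_size the largest fragment
 * that carried DF.  DF is re-set on the reassembled packet only when the
 * largest DF fragment was also the largest fragment overall. */
static int set_df_on_reassembly(unsigned int max_df_size, unsigned int max_size)
{
	return max_df_size == max_size;
}

int main(void)
{
	printf("%d %d\n",
	       set_df_on_reassembly(1500, 1500),	/* largest frag had DF -> 1 */
	       set_df_on_reassembly(576, 1500));	/* only a small DF frag -> 0 */
	return 0;
}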
Example #30
/*	Check if this packet is complete.
 *	Returns NULL on failure by any reason, and pointer
 *	to current nexthdr field in reassembled frame.
 *
 *	It is called with locked fq, and caller must check that
 *	queue is eligible for reassembly i.e. it is not COMPLETE,
 *	the last and the first frames arrived and all the bits are here.
 */
static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
			     struct net_device *dev)
{
	struct sk_buff *fp, *head = fq->q.fragments;
	int sum_truesize;

	inet_frag_kill(&fq->q, &lowpan_frags);

	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);

		if (!fp)
			goto out_oom;

		fp->next = head->next;
		if (!fp->next)
			fq->q.fragments_tail = fp;
		prev->next = fp;

		skb_morph(head, fq->q.fragments);
		head->next = fq->q.fragments->next;

		consume_skb(fq->q.fragments);
		fq->q.fragments = head;
	}

	/* Head of list must not be cloned. */
	if (skb_unclone(head, GFP_ATOMIC))
		goto out_oom;

	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments.
	 */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		clone = alloc_skb(0, GFP_ATOMIC);
		if (!clone)
			goto out_oom;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->len = head->data_len - plen;
		clone->data_len = clone->len;
		head->data_len -= clone->len;
		head->len -= clone->len;
		add_frag_mem_limit(&fq->q, clone->truesize);
	}

	WARN_ON(head == NULL);

	sum_truesize = head->truesize;
	for (fp = head->next; fp;) {
		bool headstolen;
		int delta;
		struct sk_buff *next = fp->next;

		sum_truesize += fp->truesize;
		if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
			kfree_skb_partial(fp, headstolen);
		} else {
			if (!skb_shinfo(head)->frag_list)
				skb_shinfo(head)->frag_list = fp;
			head->data_len += fp->len;
			head->len += fp->len;
			head->truesize += fp->truesize;
		}
		fp = next;
	}
	sub_frag_mem_limit(&fq->q, sum_truesize);

	head->next = NULL;
	head->dev = dev;
	head->tstamp = fq->q.stamp;

	fq->q.fragments = NULL;
	fq->q.fragments_tail = NULL;

	return 1;
out_oom:
	net_dbg_ratelimited("lowpan_frag_reasm: no memory for reassembly\n");
	return -1;
}