Example #1
/*
 * Allocate an mbuf for sending flow stat (and latency) info.
 * m - original mbuf (may be a complex chained mbuf structure)
 * fsp_head - output parameter; set to the location where the flow stat
 *            payload header should be written
 * is_const - whether the given mbuf is const
 * Returns a new mbuf structure into which fsp_head points. If needed, the
 * original mbuf is freed.
 */
rte_mbuf_t * CGenNodeStateless::alloc_flow_stat_mbuf(rte_mbuf_t *m, struct flow_stat_payload_header *&fsp_head
                                                     , bool is_const) {
    rte_mbuf_t *m_ret = NULL, *m_lat = NULL;
    uint16_t fsp_head_size = sizeof(struct flow_stat_payload_header);

    if (is_const) {
        // const mbuf case
        if (rte_pktmbuf_data_len(m) > 128) {
            m_ret = CGlobalInfo::pktmbuf_alloc_small(get_socket_id());
            assert(m_ret);
            // alloc mbuf just for the latency header
            m_lat = CGlobalInfo::pktmbuf_alloc( get_socket_id(), fsp_head_size);
            assert(m_lat);
            fsp_head = (struct flow_stat_payload_header *)rte_pktmbuf_append(m_lat, fsp_head_size);
            rte_pktmbuf_attach(m_ret, m);
            rte_pktmbuf_trim(m_ret, sizeof(struct flow_stat_payload_header));
            utl_rte_pktmbuf_add_after2(m_ret, m_lat);
            // The ref count was updated when we took the (const) mbuf, and again
            // in rte_pktmbuf_attach, so decrease it now to avoid a leak.
            rte_pktmbuf_refcnt_update(m, -1);
            return m_ret;
        } else {
            // Short packet. Just copy all bytes.
            m_ret = CGlobalInfo::pktmbuf_alloc( get_socket_id(), rte_pktmbuf_data_len(m) );
            assert(m_ret);
            char *p = rte_pktmbuf_mtod(m, char*);
            char *p_new = rte_pktmbuf_append(m_ret, rte_pktmbuf_data_len(m));
            memcpy(p_new, p, rte_pktmbuf_data_len(m));
            fsp_head = (struct flow_stat_payload_header *)(p_new + rte_pktmbuf_data_len(m) - fsp_head_size);
            rte_pktmbuf_free(m);
            return m_ret;
        }
    } else {
        // Field engine (vm)
        if (rte_pktmbuf_is_contiguous(m)) {
            /* ... remainder of this example truncated in the source ... */
        }
    }
}
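For orientation, a minimal caller sketch for the const-template case; this is an assumption-laden illustration, not code from TRex itself (`node` is assumed to be a CGenNodeStateless, and the const template mbuf `m` comes from the surrounding scheduler code):

// Hypothetical caller sketch, under the assumptions above.
struct flow_stat_payload_header *fsp_head = NULL;
rte_mbuf_t *m_tx = node->alloc_flow_stat_mbuf(m, fsp_head, true /* is_const */);
// fsp_head now points at writable space at the tail of m_tx; fill in the
// flow-stat/latency fields there, then transmit m_tx. The original m must
// not be used again (it was either freed or attached to m_tx).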
Example #2
/**
 * IPv6 fragmentation.
 *
 * This function implements the fragmentation of IPv6 packets.
 *
 * @param pkt_in
 *   The input packet.
 * @param pkts_out
 *   Array storing the output fragments.
 * @param nb_pkts_out
 *   Number of entries in the pkts_out array.
 * @param mtu_size
 *   Size in bytes of the Maximum Transfer Unit (MTU) for the outgoing IPv6
 *   datagrams. This value includes the size of the IPv6 header.
 * @param pool_direct
 *   MBUF pool used for allocating direct buffers for the output fragments.
 * @param pool_indirect
 *   MBUF pool used for allocating indirect buffers for the output fragments.
 * @return
 *   Upon successful completion - number of output fragments placed
 *   in the pkts_out array.
 *   Otherwise - (-1) * <errno>.
 */
int32_t
rte_ipv6_fragment_packet(struct rte_mbuf *pkt_in,
	struct rte_mbuf **pkts_out,
	uint16_t nb_pkts_out,
	uint16_t mtu_size,
	struct rte_mempool *pool_direct,
	struct rte_mempool *pool_indirect)
{
	struct rte_mbuf *in_seg = NULL;
	struct ipv6_hdr *in_hdr;
	uint32_t out_pkt_pos, in_seg_data_pos;
	uint32_t more_in_segs;
	uint16_t fragment_offset, frag_size;

	frag_size = (uint16_t)(mtu_size - sizeof(struct ipv6_hdr));

	/* Fragment size should be a multiple of 8. */
	IP_FRAG_ASSERT((frag_size & IPV6_HDR_FO_MASK) == 0);

	/* Check that pkts_out is big enough to hold all fragments */
	if (unlikely (frag_size * nb_pkts_out <
	    (uint16_t)(pkt_in->pkt.pkt_len - sizeof (struct ipv6_hdr))))
		return (-EINVAL);

	in_hdr = (struct ipv6_hdr *) pkt_in->pkt.data;

	in_seg = pkt_in;
	in_seg_data_pos = sizeof(struct ipv6_hdr);
	out_pkt_pos = 0;
	fragment_offset = 0;

	more_in_segs = 1;
	while (likely(more_in_segs)) {
		struct rte_mbuf *out_pkt = NULL, *out_seg_prev = NULL;
		uint32_t more_out_segs;
		struct ipv6_hdr *out_hdr;

		/* Allocate direct buffer */
		out_pkt = rte_pktmbuf_alloc(pool_direct);
		if (unlikely(out_pkt == NULL)) {
			__free_fragments(pkts_out, out_pkt_pos);
			return (-ENOMEM);
		}

		/* Reserve space for the IP header that will be built later */
		out_pkt->pkt.data_len = sizeof(struct ipv6_hdr) + sizeof(struct ipv6_extension_fragment);
		out_pkt->pkt.pkt_len  = sizeof(struct ipv6_hdr) + sizeof(struct ipv6_extension_fragment);

		out_seg_prev = out_pkt;
		more_out_segs = 1;
		while (likely(more_out_segs && more_in_segs)) {
			struct rte_mbuf *out_seg = NULL;
			uint32_t len;

			/* Allocate indirect buffer */
			out_seg = rte_pktmbuf_alloc(pool_indirect);
			if (unlikely(out_seg == NULL)) {
				rte_pktmbuf_free(out_pkt);
				__free_fragments(pkts_out, out_pkt_pos);
				return (-ENOMEM);
			}
			out_seg_prev->pkt.next = out_seg;
			out_seg_prev = out_seg;

			/* Prepare indirect buffer */
			rte_pktmbuf_attach(out_seg, in_seg);
			len = mtu_size - out_pkt->pkt.pkt_len;
			if (len > (in_seg->pkt.data_len - in_seg_data_pos)) {
				len = in_seg->pkt.data_len - in_seg_data_pos;
			}
			out_seg->pkt.data = (char *) in_seg->pkt.data + (uint16_t) in_seg_data_pos;
			out_seg->pkt.data_len = (uint16_t)len;
			out_pkt->pkt.pkt_len = (uint16_t)(len +
			    out_pkt->pkt.pkt_len);
			out_pkt->pkt.nb_segs += 1;
			in_seg_data_pos += len;

			/* Current output packet (i.e. fragment) done ? */
			if (unlikely(out_pkt->pkt.pkt_len >= mtu_size)) {
				more_out_segs = 0;
			}

			/* Current input segment done ? */
			if (unlikely(in_seg_data_pos == in_seg->pkt.data_len)) {
				in_seg = in_seg->pkt.next;
				in_seg_data_pos = 0;

				if (unlikely(in_seg == NULL)) {
					more_in_segs = 0;
				}
			}
		}

		/* Build the IP header */

		out_hdr = (struct ipv6_hdr *) out_pkt->pkt.data;

		__fill_ipv6hdr_frag(out_hdr, in_hdr,
		    (uint16_t) out_pkt->pkt.pkt_len - sizeof(struct ipv6_hdr),
		    fragment_offset, more_in_segs);

		fragment_offset = (uint16_t)(fragment_offset +
		    out_pkt->pkt.pkt_len - sizeof(struct ipv6_hdr)
			- sizeof(struct ipv6_extension_fragment));

		/* Write the fragment to the output list */
		pkts_out[out_pkt_pos] = out_pkt;
		out_pkt_pos ++;
	}

	return (out_pkt_pos);
}
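A minimal usage sketch for the function above, under stated assumptions: the mbuf m holds a complete IPv6 datagram (no L2 header), the two mempools were created elsewhere (e.g. with rte_pktmbuf_pool_create()), and send_fragment() is a hypothetical TX helper. The MTU of 1432 keeps frag_size (mtu_size minus the 40-byte IPv6 header) a multiple of 8, as the IP_FRAG_ASSERT above requires.

#include <errno.h>
#include <rte_ip_frag.h>
#include <rte_mbuf.h>

static void send_fragment(struct rte_mbuf *f);  /* hypothetical TX helper */

/* Hypothetical wrapper: split m into at most 4 fragments. */
static void
fragment_and_send_ipv6(struct rte_mbuf *m,
                       struct rte_mempool *direct_pool,
                       struct rte_mempool *indirect_pool)
{
    struct rte_mbuf *frags[4];
    int32_t nb = rte_ipv6_fragment_packet(m, frags, 4, 1432,
                                          direct_pool, indirect_pool);
    /* The fragments reference m's data through indirect mbufs, so the
     * input packet can be freed either way; on error (-EINVAL, -ENOMEM)
     * any partially built fragments were already freed internally. */
    rte_pktmbuf_free(m);
    for (int32_t i = 0; i < nb; i++)
        send_fragment(frags[i]);
}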
Example #3
/**
 * IPv4 fragmentation.
 *
 * This function implements the fragmentation of IPv4 packets.
 *
 * @param pkt_in
 *   The input packet.
 * @param pkts_out
 *   Array storing the output fragments.
 * @param nb_pkts_out
 *   Number of entries in the pkts_out array.
 * @param mtu_size
 *   Size in bytes of the Maximum Transfer Unit (MTU) for the outgoing IPv4
 *   datagrams. This value includes the size of the IPv4 header.
 * @param pool_direct
 *   MBUF pool used for allocating direct buffers for the output fragments.
 * @param pool_indirect
 *   MBUF pool used for allocating indirect buffers for the output fragments.
 * @return
 *   Upon successful completion - number of output fragments placed
 *   in the pkts_out array.
 *   Otherwise - (-1) * <errno>.
 */
int32_t
rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
	struct rte_mbuf **pkts_out,
	uint16_t nb_pkts_out,
	uint16_t mtu_size,
	struct rte_mempool *pool_direct,
	struct rte_mempool *pool_indirect)
{
	struct rte_mbuf *in_seg = NULL;
	struct ipv4_hdr *in_hdr;
	uint32_t out_pkt_pos, in_seg_data_pos;
	uint32_t more_in_segs;
	uint16_t fragment_offset, flag_offset, frag_size;

	frag_size = (uint16_t)(mtu_size - sizeof(struct ipv4_hdr));

	/* Fragment size should be a multiple of 8. */
	IP_FRAG_ASSERT((frag_size & IPV4_HDR_FO_MASK) == 0);

	in_hdr = rte_pktmbuf_mtod(pkt_in, struct ipv4_hdr *);
	flag_offset = rte_cpu_to_be_16(in_hdr->fragment_offset);

	/* If Don't Fragment flag is set */
	if (unlikely ((flag_offset & IPV4_HDR_DF_MASK) != 0))
		return -ENOTSUP;

	/* Check that pkts_out is big enough to hold all fragments */
	if (unlikely(frag_size * nb_pkts_out <
	    (uint16_t)(pkt_in->pkt_len - sizeof (struct ipv4_hdr))))
		return -EINVAL;

	in_seg = pkt_in;
	in_seg_data_pos = sizeof(struct ipv4_hdr);
	out_pkt_pos = 0;
	fragment_offset = 0;

	more_in_segs = 1;
	while (likely(more_in_segs)) {
		struct rte_mbuf *out_pkt = NULL, *out_seg_prev = NULL;
		uint32_t more_out_segs;
		struct ipv4_hdr *out_hdr;

		/* Allocate direct buffer */
		out_pkt = rte_pktmbuf_alloc(pool_direct);
		if (unlikely(out_pkt == NULL)) {
			__free_fragments(pkts_out, out_pkt_pos);
			return -ENOMEM;
		}

		/* Reserve space for the IP header that will be built later */
		out_pkt->data_len = sizeof(struct ipv4_hdr);
		out_pkt->pkt_len = sizeof(struct ipv4_hdr);

		out_seg_prev = out_pkt;
		more_out_segs = 1;
		while (likely(more_out_segs && more_in_segs)) {
			struct rte_mbuf *out_seg = NULL;
			uint32_t len;

			/* Allocate indirect buffer */
			out_seg = rte_pktmbuf_alloc(pool_indirect);
			if (unlikely(out_seg == NULL)) {
				rte_pktmbuf_free(out_pkt);
				__free_fragments(pkts_out, out_pkt_pos);
				return -ENOMEM;
			}
			out_seg_prev->next = out_seg;
			out_seg_prev = out_seg;

			/* Prepare indirect buffer */
			rte_pktmbuf_attach(out_seg, in_seg);
			len = mtu_size - out_pkt->pkt_len;
			if (len > (in_seg->data_len - in_seg_data_pos)) {
				len = in_seg->data_len - in_seg_data_pos;
			}
			out_seg->data_off = in_seg->data_off + in_seg_data_pos;
			out_seg->data_len = (uint16_t)len;
			out_pkt->pkt_len = (uint16_t)(len +
			    out_pkt->pkt_len);
			out_pkt->nb_segs += 1;
			in_seg_data_pos += len;

			/* Current output packet (i.e. fragment) done ? */
			if (unlikely(out_pkt->pkt_len >= mtu_size))
				more_out_segs = 0;

			/* Current input segment done ? */
			if (unlikely(in_seg_data_pos == in_seg->data_len)) {
				in_seg = in_seg->next;
				in_seg_data_pos = 0;

				if (unlikely(in_seg == NULL))
					more_in_segs = 0;
			}
		}

		/* Build the IP header */

		out_hdr = rte_pktmbuf_mtod(out_pkt, struct ipv4_hdr *);

		__fill_ipv4hdr_frag(out_hdr, in_hdr,
		    (uint16_t)out_pkt->pkt_len,
		    flag_offset, fragment_offset, more_in_segs);

		fragment_offset = (uint16_t)(fragment_offset +
		    out_pkt->pkt_len - sizeof(struct ipv4_hdr));

		out_pkt->ol_flags |= PKT_TX_IP_CKSUM;
		out_pkt->l3_len = sizeof(struct ipv4_hdr);

		/* Write the fragment to the output list */
		pkts_out[out_pkt_pos] = out_pkt;
		out_pkt_pos ++;
	}

	return out_pkt_pos;
}
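The IPv4 variant is called the same way; the extra cases worth handling are the Don't Fragment bit, which makes the call fail with -ENOTSUP, and the checksum offload flags the function sets on each fragment. A hedged sketch under the same assumptions as the IPv6 one (hypothetical send_fragment() helper, mempools created elsewhere; MTU 1500 works here since 1500 - 20 = 1480 is a multiple of 8):

#include <errno.h>
#include <rte_ip_frag.h>
#include <rte_mbuf.h>

static void send_fragment(struct rte_mbuf *f);  /* hypothetical TX helper */

/* Hypothetical wrapper around rte_ipv4_fragment_packet(). */
static void
fragment_and_send_ipv4(struct rte_mbuf *m,
                       struct rte_mempool *direct_pool,
                       struct rte_mempool *indirect_pool)
{
    struct rte_mbuf *frags[4];
    int32_t nb = rte_ipv4_fragment_packet(m, frags, 4, 1500,
                                          direct_pool, indirect_pool);
    if (nb == -ENOTSUP) {
        /* DF bit set: forward unfragmented (or answer with ICMP
         * "fragmentation needed") instead of splitting the packet. */
        send_fragment(m);
        return;
    }
    /* Fragments (if any) hold their own references to m's data, and each
     * one carries PKT_TX_IP_CKSUM and l3_len so the NIC can fill in the
     * IPv4 checksum at transmit time. */
    rte_pktmbuf_free(m);
    for (int32_t i = 0; i < nb; i++)
        send_fragment(frags[i]);
}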