Example #1
/*
 * Allocate mbuf for flow stat (and latency) info sending
 * m - original mbuf (can be a complex, chained mbuf structure)
 * fsp_head - return pointer in which the flow stat info should be filled
 * is_const - is the given mbuf const
 * returns a new mbuf structure into which fsp_head can be written. If needed, the original mbuf is freed.
 */
rte_mbuf_t * CGenNodeStateless::alloc_flow_stat_mbuf(rte_mbuf_t *m, struct flow_stat_payload_header *&fsp_head
                                                     , bool is_const) {
    rte_mbuf_t *m_ret = NULL, *m_lat = NULL;
    uint16_t fsp_head_size = sizeof(struct flow_stat_payload_header);

    if (is_const) {
        // const mbuf case
        if (rte_pktmbuf_data_len(m) > 128) {
            m_ret = CGlobalInfo::pktmbuf_alloc_small(get_socket_id());
            assert(m_ret);
            // alloc mbuf just for the latency header
            m_lat = CGlobalInfo::pktmbuf_alloc( get_socket_id(), fsp_head_size);
            assert(m_lat);
            fsp_head = (struct flow_stat_payload_header *)rte_pktmbuf_append(m_lat, fsp_head_size);
            rte_pktmbuf_attach(m_ret, m);
            rte_pktmbuf_trim(m_ret, sizeof(struct flow_stat_payload_header));
            utl_rte_pktmbuf_add_after2(m_ret, m_lat);
            // ref count was updated when we took the (const) mbuf, and again in rte_pktmbuf_attach,
            // so we need to decrease it now to avoid a leak.
            rte_pktmbuf_refcnt_update(m, -1);
            return m_ret;
        } else {
            // Short packet. Just copy all bytes.
            m_ret = CGlobalInfo::pktmbuf_alloc( get_socket_id(), rte_pktmbuf_data_len(m) );
            assert(m_ret);
            char *p = rte_pktmbuf_mtod(m, char*);
            char *p_new = rte_pktmbuf_append(m_ret, rte_pktmbuf_data_len(m));
            memcpy(p_new , p, rte_pktmbuf_data_len(m));
            fsp_head = (struct flow_stat_payload_header *)(p_new + rte_pktmbuf_data_len(m) - fsp_head_size);
            rte_pktmbuf_free(m);
            return m_ret;
        }
    } else {
        // Field engine (vm)
        if (rte_pktmbuf_is_contiguous(m)) {
Example #2
static inline void
vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *txq)
{
	int completed = 0;
	struct rte_mbuf *mbuf;
	vmxnet3_comp_ring_t *comp_ring = &txq->comp_ring;
	struct Vmxnet3_TxCompDesc *tcd = (struct Vmxnet3_TxCompDesc *)
		(comp_ring->base + comp_ring->next2proc);

	while (tcd->gen == comp_ring->gen) {

		/* Release cmd_ring descriptor and free mbuf */
#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
		VMXNET3_ASSERT(txq->cmd_ring.base[tcd->txdIdx].txd.eop == 1);
#endif
		mbuf = txq->cmd_ring.buf_info[tcd->txdIdx].m;
		if (unlikely(mbuf == NULL))
			rte_panic("EOP desc does not point to a valid mbuf");
		else
			rte_pktmbuf_free(mbuf);
		txq->cmd_ring.buf_info[tcd->txdIdx].m = NULL;
		/* Mark the txd for which tcd was generated as completed */
		vmxnet3_cmd_ring_adv_next2comp(&txq->cmd_ring);

		vmxnet3_comp_ring_adv_next2proc(comp_ring);
		tcd = (struct Vmxnet3_TxCompDesc *)(comp_ring->base +
						    comp_ring->next2proc);
		completed++;
	}

	PMD_TX_LOG(DEBUG, "Processed %d tx comps & command descs.", completed);
}
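The while condition `tcd->gen == comp_ring->gen` is the usual generation-bit convention for completion rings: the device writes each descriptor with its current generation value and flips that value every time it wraps, so a descriptor is ready exactly when its gen bit matches the one the consumer expects. A self-contained sketch of the convention (all names hypothetical, not vmxnet3 API):

#include <stdint.h>

struct comp_desc { uint8_t gen; /* ...completion fields... */ };

struct comp_ring {
	struct comp_desc *base;
	uint32_t next;	/* next descriptor to examine */
	uint32_t size;
	uint8_t gen;	/* generation value meaning "ready" */
};

/* Returns 1 and advances the ring if the next descriptor is ready. */
static int
comp_ring_poll(struct comp_ring *r, struct comp_desc **out)
{
	struct comp_desc *d = &r->base[r->next];

	if (d->gen != r->gen)
		return 0;		/* device has not written it yet */
	*out = d;
	if (++r->next == r->size) {	/* wrapped: device flips gen too */
		r->next = 0;
		r->gen ^= 1;
	}
	return 1;
}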
Example #3
/* to test that the distributor does not lose packets, we use this worker
 * function which frees mbufs when it gets them. The distributor thread does
 * the mbuf allocation. If distributor drops packets we'll eventually run out
 * of mbufs.
 */
static int
handle_work_with_free_mbufs(void *arg)
{
	struct rte_mbuf *buf[8] __rte_cache_aligned;
	struct worker_params *wp = arg;
	struct rte_distributor *d = wp->dist;
	unsigned int count = 0;
	unsigned int i;
	unsigned int num = 0;
	unsigned int id = __sync_fetch_and_add(&worker_idx, 1);

	for (i = 0; i < 8; i++)
		buf[i] = NULL;
	num = rte_distributor_get_pkt(d, id, buf, buf, num);
	while (!quit) {
		worker_stats[id].handled_packets += num;
		count += num;
		for (i = 0; i < num; i++)
			rte_pktmbuf_free(buf[i]);
		num = rte_distributor_get_pkt(d,
				id, buf, buf, num);
	}
	worker_stats[id].handled_packets += num;
	count += num;
	rte_distributor_return_pkt(d, id, buf, num);
	return 0;
}
Example #4
static inline void
send_burst_nodrop(struct rte_port_fd_writer_nodrop *p)
{
	uint64_t n_retries;
	uint32_t i;

	n_retries = 0;
	for (i = 0; (i < p->tx_buf_count) && (n_retries < p->n_retries); i++) {
		struct rte_mbuf *pkt = p->tx_buf[i];
		void *pkt_data = rte_pktmbuf_mtod(pkt, void*);
		size_t n_bytes = rte_pktmbuf_data_len(pkt);

		for ( ; n_retries < p->n_retries; n_retries++) {
			ssize_t ret;

			ret = write(p->fd, pkt_data, n_bytes);
			if (ret)
				break;
		}
	}

	RTE_PORT_FD_WRITER_NODROP_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - i);

	for (i = 0; i < p->tx_buf_count; i++)
		rte_pktmbuf_free(p->tx_buf[i]);

	p->tx_buf_count = 0;
}
Example #5
static int
rte_port_fd_reader_rx(void *port, struct rte_mbuf **pkts, uint32_t n_pkts)
{
	struct rte_port_fd_reader *p = port;
	uint32_t i, j;

	if (rte_pktmbuf_alloc_bulk(p->mempool, pkts, n_pkts) != 0)
		return 0;

	for (i = 0; i < n_pkts; i++) {
		struct rte_mbuf *pkt = pkts[i];
		void *pkt_data = rte_pktmbuf_mtod(pkt, void *);
		ssize_t n_bytes;

		n_bytes = read(p->fd, pkt_data, (size_t) p->mtu);
		if (n_bytes <= 0)
			break;

		pkt->data_len = n_bytes;
		pkt->pkt_len = n_bytes;
	}

	for (j = i; j < n_pkts; j++)
		rte_pktmbuf_free(pkts[j]);

	RTE_PORT_FD_READER_STATS_PKTS_IN_ADD(p, i);

	return i;
}
Example #6
static void
send_paxos_message(paxos_message *pm) {
    uint8_t port_id = 0;
    struct rte_mbuf *created_pkt = rte_pktmbuf_alloc(mbuf_pool);
    if (created_pkt == NULL)
        return;
    created_pkt->l2_len = sizeof(struct ether_hdr);
    created_pkt->l3_len = sizeof(struct ipv4_hdr);
    created_pkt->l4_len = sizeof(struct udp_hdr) + sizeof(paxos_message);
    craft_new_packet(&created_pkt, IPv4(192,168,4,99), ACCEPTOR_ADDR,
                     PROPOSER_PORT, ACCEPTOR_PORT, sizeof(paxos_message), port_id);
    //struct udp_hdr *udp;
    size_t udp_offset = sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr);
    //udp  = rte_pktmbuf_mtod_offset(created_pkt, struct udp_hdr *, udp_offset);
    size_t paxos_offset = udp_offset + sizeof(struct udp_hdr);
    struct paxos_hdr *px = rte_pktmbuf_mtod_offset(created_pkt, struct paxos_hdr *, paxos_offset);
    px->msgtype = rte_cpu_to_be_16(pm->type);
    px->inst = rte_cpu_to_be_32(pm->u.accept.iid);
    px->rnd = rte_cpu_to_be_16(pm->u.accept.ballot);
    px->vrnd = rte_cpu_to_be_16(pm->u.accept.value_ballot);
    px->acptid = 0;
    rte_memcpy(px->paxosval, pm->u.accept.value.paxos_value_val, pm->u.accept.value.paxos_value_len);
    created_pkt->ol_flags = PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_UDP_CKSUM;
    const uint16_t nb_tx = rte_eth_tx_burst(port_id, 0, &created_pkt, 1);
    if (nb_tx == 0)
        rte_pktmbuf_free(created_pkt); /* tx ring full: the mbuf is still ours to free */
    rte_log(RTE_LOG_DEBUG, RTE_LOGTYPE_USER8, "Send %d messages\n", nb_tx);
}
Example #7
static inline void
__free_fragments(struct rte_mbuf *mb[], uint32_t num)
{
	uint32_t i;
	for (i = 0; i < num; i++)
		rte_pktmbuf_free(mb[i]);
}
Example #8
File: main.c Project: SpirentOrion/dpdk
/*
 * Removes MAC address and vlan tag from VMDQ. Ensures that nothing is adding buffers to the RX
 * queue before disabling RX on the device.
 */
static inline void
unlink_vmdq(struct virtio_net *dev)
{
	unsigned i = 0;
	unsigned rx_count;
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];

	if (dev->ready == DEVICE_READY) {
		/*clear MAC and VLAN settings*/
		rte_eth_dev_mac_addr_remove(ports[0], &dev->mac_address);
		for (i = 0; i < 6; i++)
			dev->mac_address.addr_bytes[i] = 0;

		dev->vlan_tag = 0;

		/*Clear out the receive buffers*/
		rx_count = rte_eth_rx_burst(ports[0],
					(uint16_t)dev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);

		while (rx_count) {
			for (i = 0; i < rx_count; i++)
				rte_pktmbuf_free(pkts_burst[i]);

			rx_count = rte_eth_rx_burst(ports[0],
					(uint16_t)dev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
		}

		dev->ready = DEVICE_NOT_READY;
	}
}
Example #9
static inline void
send_burst_nodrop(struct rte_port_ethdev_writer_nodrop *p)
{
	uint32_t nb_tx = 0, i;

	nb_tx = rte_eth_tx_burst(p->port_id, p->queue_id, p->tx_buf,
			p->tx_buf_count);

	/* We sent all the packets in a first try */
	if (nb_tx >= p->tx_buf_count) {
		p->tx_buf_count = 0;
		return;
	}

	for (i = 0; i < p->n_retries; i++) {
		nb_tx += rte_eth_tx_burst(p->port_id, p->queue_id,
							 p->tx_buf + nb_tx, p->tx_buf_count - nb_tx);

		/* We sent all the packets in more than one try */
		if (nb_tx >= p->tx_buf_count) {
			p->tx_buf_count = 0;
			return;
		}
	}

	/* We didn't send the packets in maximum allowed attempts */
	RTE_PORT_ETHDEV_WRITER_NODROP_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - nb_tx);
	for ( ; nb_tx < p->tx_buf_count; nb_tx++)
		rte_pktmbuf_free(p->tx_buf[nb_tx]);

	p->tx_buf_count = 0;
}
Example #10
/**
 * @brief           RX routine
 */
void DPDKAdapter::rxRoutine()
{
    uint8_t pkt = 0;
    uint8_t rxPktCount = 0;
    uint8_t devId = 0;

    uint8_t lcoreId = rte_lcore_id();
    LcoreInfo& coreInfo = cores[lcoreId];

    for(PortList_t::iterator itor = coreInfo.rxPortList.begin(); itor != coreInfo.rxPortList.end(); itor++)
    {
        devId = *itor;

        DeviceInfo& devInfo = devices[devId];

        struct rte_eth_dev *dev = &rte_eth_devices[devId];
        if(!dev || !dev->data->dev_started)
        {
            continue;
        }

        rxPktCount = rte_eth_rx_burst(devId, 0, devInfo.rxBurstBuf, DPDK_RX_MAX_PKT_BURST);

        if(isRxStarted(devId))
        {
            saveToBuf(devId, devInfo.rxBurstBuf, rxPktCount);
        }

        for(pkt = 0; pkt < rxPktCount; pkt++)
        {
            rte_pktmbuf_free(devInfo.rxBurstBuf[pkt]);
        }
    }
}
Example #11
/**
 * @brief           Copy all mbuf segments to a buffer
 *
 * @param devId     port number
 * @param pMbuf     mbuf
 * @param data      Data buffer
 * @param dataLen   Data buffer length
 *
 * @return          true on success
 */
bool DPDKAdapter::copyMbufToBuf(uint8_t devId, MBuf_t* pMbuf, char* data, unsigned int& dataLen)
{
    qDebug("pkt_len %u, data_len %u, nb_segs %u", pMbuf->pkt.pkt_len, pMbuf->pkt.data_len, pMbuf->pkt.nb_segs);

    unsigned int segCnt = pMbuf->pkt.nb_segs;
    unsigned int offset = 0;

    MBuf_t* pCurMbuf = pMbuf;
    dataLen = pMbuf->pkt.pkt_len;

    while (segCnt > 0)
    {
        qDebug("segCnt %u, offset %u", segCnt, offset);

        rte_memcpy(data + offset, pCurMbuf->pkt.data, pCurMbuf->pkt.data_len);

        qDebug("pkt_len %u, data_len %u", pCurMbuf->pkt.pkt_len, pCurMbuf->pkt.data_len);

        offset += pCurMbuf->pkt.data_len;
        pCurMbuf = pCurMbuf->pkt.next;

        segCnt--;
    }

    /* rte_pktmbuf_free() walks pkt.next and frees the entire chain, so free
     * the head once here; freeing each segment inside the loop would
     * double-free every segment after the first. */
    rte_pktmbuf_free(pMbuf);

    return true;
}
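The same copy-then-free pattern with the modern mbuf layout, where the pkt substruct is gone and segments are walked through m->next directly (a sketch assuming DPDK 2.x+ field names; the helper is illustrative, not from the adapter above):

#include <rte_mbuf.h>
#include <rte_memcpy.h>

/* Copy every segment of a chain into dst, then free the chain once. */
static uint32_t
copy_chain_and_free(struct rte_mbuf *m, char *dst, size_t dst_len)
{
	uint32_t off = 0;
	struct rte_mbuf *seg;

	if (rte_pktmbuf_pkt_len(m) > dst_len) {
		rte_pktmbuf_free(m);	/* too big for the buffer: drop whole */
		return 0;
	}
	for (seg = m; seg != NULL; seg = seg->next) {
		rte_memcpy(dst + off, rte_pktmbuf_mtod(seg, void *),
			   seg->data_len);
		off += seg->data_len;
	}
	rte_pktmbuf_free(m);	/* walks m->next and frees all segments */
	return off;
}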
Example #12
int pcap_next_ex(pcap_t *p, struct pcap_pkthdr **pkt_header,
    const u_char **pkt_data)
{
    struct rte_mbuf* mbuf = NULL;
    int              len  = 0;

    if (p == NULL || pkt_header == NULL || pkt_data == NULL ||
        p->deviceId < 0 || p->deviceId >= RTE_MAX_ETHPORTS)
    {
        snprintf (errbuf_g, PCAP_ERRBUF_SIZE, "Invalid parameter");
        return DPDKPCAP_FAILURE;
    }

    debug("Receiving a packet on port %d\n", p->deviceId);

    while (!rte_eth_rx_burst(p->deviceId, 0, &mbuf, 1))
    {
    }

    len = rte_pktmbuf_pkt_len(mbuf);

    pktHeader_g.len = len;
    pktHeader_g.caplen = len;
    *pkt_header = &pktHeader_g;

    rte_memcpy((void*)data_g, rte_pktmbuf_mtod(mbuf, void*), len);
    *pkt_data = data_g;

    rte_pktmbuf_free(mbuf);

    return 1;
}
Example #13
static void
kni_allocate_mbufs(struct rte_kni *kni)
{
	int i, ret;
	struct rte_mbuf *pkts[MAX_MBUF_BURST_NUM];

	/* Check if pktmbuf pool has been configured */
	if (kni->pktmbuf_pool == NULL) {
		RTE_LOG(ERR, KNI, "No valid mempool for allocating mbufs\n");
		return;
	}

	for (i = 0; i < MAX_MBUF_BURST_NUM; i++) {
		pkts[i] = rte_pktmbuf_alloc(kni->pktmbuf_pool);
		if (unlikely(pkts[i] == NULL)) {
			/* Out of memory */
			RTE_LOG(ERR, KNI, "Out of memory\n");
			break;
		}
	}

	/* No pkt mbufs allocated */
	if (i <= 0)
		return;

	ret = kni_fifo_put(kni->alloc_q, (void **)pkts, i);

	/* Check if any mbufs not put into alloc_q, and then free them */
	if (ret >= 0 && ret < i && ret < MAX_MBUF_BURST_NUM) {
		int j;

		for (j = ret; j < i; j++)
			rte_pktmbuf_free(pkts[j]);
	}
}
Example #14
File: main.c Project: mengxiang0811/dpdk
/*
 * Main thread that does the work, reading from INPUT_PORT
 * and writing to OUTPUT_PORT
 */
	static  __attribute__((noreturn)) void
lcore_main(void)
{
	uint8_t port = 0;

	if (rte_eth_dev_socket_id(port) > 0 &&
			rte_eth_dev_socket_id(port) !=
			(int)rte_socket_id())
		printf("WARNING, port %u is on remote NUMA node to "
				"polling thread.\n\tPerformance will "
				"not be optimal.\n", port);

	printf("\nCore %u forwarding packets. [Ctrl+C to quit]\n",
			rte_lcore_id());
	for (;;) {
		struct rte_mbuf *bufs[BURST_SIZE];
		const uint16_t nb_rx = rte_eth_rx_burst(port, 0,
				bufs, BURST_SIZE);
		uint16_t buf;

		if (unlikely(nb_rx == 0))
			continue;

		for (buf = 0; buf < nb_rx; buf++) {
			struct rte_mbuf *mbuf = bufs[buf];
			unsigned int len = rte_pktmbuf_data_len(mbuf);
			rte_pktmbuf_dump(stdout, mbuf, len);
			rte_pktmbuf_free(mbuf);
		}
	}
}
Example #15
File: mempool.c Project: fazhar/e2d2
static void set_mempool(struct rte_mempool *mempool) {
#if (!PER_CORE)
	int initialized[RTE_MAX_NUMA_NODES];
	for (int i = 0; i < RTE_MAX_NUMA_NODES; i++) {
		initialized[i] = 0;
	}
#endif
	if (mempool == NULL) {
		rte_panic("Got a NULL mempool");
	}
	/* Loop through all cores, to see if any of them belong to this
	 * socket. */
	for (int i = 0; i < RTE_MAX_LCORE; i++) {
		int sid = rte_lcore_to_socket_id(i);
#if (!PER_CORE)
		if (!initialized[sid]) {
#endif
			struct rte_mbuf *mbuf = NULL;
#if (PER_CORE)
			pframe_pool[i] = mempool;
#else
			pframe_pool[sid] = mempool;
#endif
			/* Initialize mbuf template */
#if PER_CORE
			mbuf = rte_pktmbuf_alloc(pframe_pool[i]);
			if (mbuf == NULL) {
				rte_panic("Bad mbuf");
			}
			mbuf_template[i] = *mbuf;
			rte_pktmbuf_free(mbuf);
#else
			mbuf = rte_pktmbuf_alloc(pframe_pool[sid]);
			if (mbuf == NULL || 
			    mbuf->next != NULL || 
			    mbuf->pool == NULL) {
				rte_panic("Bad mbuf");
			}
			mbuf_template[sid] = *mbuf;
			rte_pktmbuf_free(mbuf);
#endif
#if (!PER_CORE)
			initialized[sid] = 1;
		}
#endif
	}
}
Example #16
void CGenNodeStateless::free_stl_node(){
    /* if we have a cached mbuf, free it */
    rte_mbuf_t * m=get_cache_mbuf();
    if (m) {
        rte_pktmbuf_free(m);
        m_cache_mbuf=0;
    }
}
Example #17
File: main.c Project: Cosios/dpdk
static inline void
pktmbuf_free_bulk(struct rte_mbuf *mbuf_table[], unsigned n)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		rte_pktmbuf_free(mbuf_table[i]);
}
Example #18
static int
testclone_testupdate_testdetach(void)
{
#ifndef RTE_MBUF_SCATTER_GATHER
	return 0;
#else
	struct rte_mbuf *mc = NULL;
	struct rte_mbuf *clone = NULL;

	/* alloc a mbuf */

	mc = rte_pktmbuf_alloc(pktmbuf_pool);
	if (mc == NULL)
		GOTO_FAIL("ooops not allocating mbuf");

	if (rte_pktmbuf_pkt_len(mc) != 0)
		GOTO_FAIL("Bad length");


	/* clone the allocated mbuf */
	clone = rte_pktmbuf_clone(mc, pktmbuf_pool);
	if (clone == NULL)
		GOTO_FAIL("cannot clone data\n");
	rte_pktmbuf_free(clone);

	mc->pkt.next = rte_pktmbuf_alloc(pktmbuf_pool);
	if(mc->pkt.next == NULL)
		GOTO_FAIL("Next Pkt Null\n");

	clone = rte_pktmbuf_clone(mc, pktmbuf_pool);
	if (clone == NULL)
		GOTO_FAIL("cannot clone data\n");

	/* free mbuf */
	rte_pktmbuf_free(mc);
	rte_pktmbuf_free(clone);
	mc = NULL;
	clone = NULL;
	return 0;

fail:
	if (mc)
		rte_pktmbuf_free(mc);
	return -1;
#endif /* RTE_MBUF_SCATTER_GATHER */
}
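The test relies on clone reference counting: rte_pktmbuf_clone() produces indirect mbufs that bump the donor's reference count, so the underlying data mbuf returns to the pool only after both the clone and the original are freed. A compact sketch of that behaviour (demo function hypothetical):

#include <rte_mbuf.h>

static void
clone_refcnt_demo(struct rte_mempool *pool)
{
	struct rte_mbuf *m = rte_pktmbuf_alloc(pool);
	struct rte_mbuf *c;

	if (m == NULL)
		return;
	c = rte_pktmbuf_clone(m, pool);	/* indirect mbuf; m's refcnt++ */
	if (c != NULL)
		rte_pktmbuf_free(c);	/* drops the clone's reference */
	rte_pktmbuf_free(m);		/* data mbuf now returns to the pool */
}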
Example #19
static void
enic_free_rq_buf(__rte_unused struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
	if (!buf->os_buf)
		return;

	rte_pktmbuf_free((struct rte_mbuf *)buf->os_buf);
	buf->os_buf = NULL;
}
Example #20
/*
 * Softnic packet forward
 */
static void
softnic_fwd(struct fwd_stream *fs)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	uint16_t nb_rx;
	uint16_t nb_tx;
	uint32_t retry;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t start_tsc;
	uint64_t end_tsc;
	uint64_t core_cycles;
#endif

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	start_tsc = rte_rdtsc();
#endif

	/*  Packets Receive */
	nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue,
			pkts_burst, nb_pkt_per_burst);
	fs->rx_packets += nb_rx;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	fs->rx_burst_stats.pkt_burst_spread[nb_rx]++;
#endif

	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
			pkts_burst, nb_rx);

	/* Retry if necessary */
	if (unlikely(nb_tx < nb_rx) && fs->retry_enabled) {
		retry = 0;
		while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) {
			rte_delay_us(burst_tx_delay_time);
			nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
					&pkts_burst[nb_tx], nb_rx - nb_tx);
		}
	}
	fs->tx_packets += nb_tx;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
#endif

	if (unlikely(nb_tx < nb_rx)) {
		fs->fwd_dropped += (nb_rx - nb_tx);
		do {
			rte_pktmbuf_free(pkts_burst[nb_tx]);
		} while (++nb_tx < nb_rx);
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	end_tsc = rte_rdtsc();
	core_cycles = (end_tsc - start_tsc);
	fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles);
#endif
}
Example #21
File: util.c Project: jhjgithub/fw
void
util_free_mbufs_burst(struct rte_mbuf **pkts, unsigned count)
{
	unsigned i;

	for (i = 0; i < count; i++) {
		rte_pktmbuf_free(pkts[i]);
	}
}
Example #22
File: main.c Project: weixu8/dpdk-ovs
/*
 * Enqueue single packet to a port
 */
static void
send_to_port(uint8_t vportid, struct rte_mbuf *buf)
{
	struct port_queue *pq = &port_queues[vportid & PORT_MASK];

	if (rte_ring_mp_enqueue(pq->tx_q, (void *)buf) < 0) {
		rte_pktmbuf_free(buf);
	}
}
Example #23
File: enic_main.c Project: donlet/dpdk
static void
enic_free_rq_buf(struct rte_mbuf **mbuf)
{
	if (*mbuf == NULL)
		return;

	rte_pktmbuf_free(*mbuf);
	*mbuf = NULL;	/* clear the caller's pointer, not the local copy */
}
Example #24
void CGenNodeStateless::cache_mbuf_array_free(){

    assert(m_cache_mbuf);
    int i;
    for (i=0; i<(int)m_cache_size; i++) {
        rte_mbuf_t * m=cache_mbuf_array_get((uint16_t)i);
        assert(m);
        rte_pktmbuf_free(m); 
    }

    /* free the const */
    rte_mbuf_t * m=cache_mbuf_array_get_const_mbuf() ;
    if (m) {
        rte_pktmbuf_free(m); 
    }

    free(m_cache_mbuf);
    m_cache_mbuf=0;
}
Example #25
void
xmit_arp_req(struct gatekeeper_if *iface, const struct ipaddr *addr,
	const struct ether_addr *ha, uint16_t tx_queue)
{
	struct rte_mbuf *created_pkt;
	struct ether_hdr *eth_hdr;
	struct arp_hdr *arp_hdr;
	size_t pkt_size;
	struct lls_config *lls_conf = get_lls_conf();
	int ret;

	struct rte_mempool *mp = lls_conf->net->gatekeeper_pktmbuf_pool[
		rte_lcore_to_socket_id(lls_conf->lcore_id)];
	created_pkt = rte_pktmbuf_alloc(mp);
	if (created_pkt == NULL) {
		LLS_LOG(ERR, "Could not alloc a packet for an ARP request\n");
		return;
	}

	pkt_size = iface->l2_len_out + sizeof(struct arp_hdr);
	created_pkt->data_len = pkt_size;
	created_pkt->pkt_len = pkt_size;

	/* Set-up Ethernet header. */
	eth_hdr = rte_pktmbuf_mtod(created_pkt, struct ether_hdr *);
	ether_addr_copy(&iface->eth_addr, &eth_hdr->s_addr);
	if (ha == NULL)
		memset(&eth_hdr->d_addr, 0xFF, ETHER_ADDR_LEN);
	else
		ether_addr_copy(ha, &eth_hdr->d_addr);

	/* Set-up VLAN header. */
	if (iface->vlan_insert)
		fill_vlan_hdr(eth_hdr, iface->vlan_tag_be, ETHER_TYPE_ARP);
	else
		eth_hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_ARP);

	/* Set-up ARP header. */
	arp_hdr = pkt_out_skip_l2(iface, eth_hdr);
	arp_hdr->arp_hrd = rte_cpu_to_be_16(ARP_HRD_ETHER);
	arp_hdr->arp_pro = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
	arp_hdr->arp_hln = ETHER_ADDR_LEN;
	arp_hdr->arp_pln = sizeof(struct in_addr);
	arp_hdr->arp_op = rte_cpu_to_be_16(ARP_OP_REQUEST);
	ether_addr_copy(&iface->eth_addr, &arp_hdr->arp_data.arp_sha);
	arp_hdr->arp_data.arp_sip = iface->ip4_addr.s_addr;
	memset(&arp_hdr->arp_data.arp_tha, 0, ETHER_ADDR_LEN);
	arp_hdr->arp_data.arp_tip = addr->ip.v4.s_addr;

	ret = rte_eth_tx_burst(iface->id, tx_queue, &created_pkt, 1);
	if (ret <= 0) {
		rte_pktmbuf_free(created_pkt);
		LLS_LOG(ERR, "Could not transmit an ARP request\n");
	}
}
Example #26
void
counter_firewall_pkt(void *arg, struct rte_mbuf **buffer, int nb_rx) {
	struct counter_t *counter = (struct counter_t *) arg;

	poll_counter(counter);

	if (nb_rx != 0) {
    uint64_t start_c = rte_get_tsc_cycles(), diff_c;

	// check table and send packet 
	// check if <drop_at> votes are to drop the packet
	// if yes: drop it!
	// else send it

	struct indextable_entry *entry;
	struct rte_mbuf *ok_pkt;
	struct metadata_t *meta;

	for (unsigned i = 0; i < nb_rx; ++i) {
		struct ether_hdr *eth = rte_pktmbuf_mtod(buffer[i], struct ether_hdr *);
		if (!is_same_ether_addr(&counter->fw_port_mac, &eth->d_addr)) {
			RTE_LOG(INFO, COUNTER, "Wrong d_MAC... "FORMAT_MAC"\n", ARG_V_MAC(eth->d_addr));
			continue;
		}

		entry = indextable_get(counter->indextable, buffer[i]);

		if (entry != NULL) {
			ok_pkt = entry->packet;
			meta = &entry->meta;
			meta->decissions |= 1 << counter->chain_index;

			int decission_count = count_decissions(meta->decissions);

            counter->pkts_received_fw += nb_rx;
			if (decission_count >= counter->drop_at) {
				fwd_to_wrapper(counter, ok_pkt, meta);
			} else {
				rte_pktmbuf_free(ok_pkt);
				counter->pkts_dropped++;
			}

			indextable_delete(counter->indextable, entry);
			counter->nb_mbuf--;

		} else {
			RTE_LOG(WARNING, COUNTER, "Received unregistered packet.\n");

			// print_packet_hex(buffer[i]);
		}
	}
    diff_c = rte_get_tsc_cycles() - start_c;
    counter->cTime += diff_c;//* 1000.0 / rte_get_tsc_hz();
	}
}
Example #27
/*
 * Get mbuf off of interface, copy it into memory provided by the
 * TCP/IP stack.  TODO: share TCP/IP stack mbufs with DPDK mbufs to avoid
 * data copy.
 */
int
rumpcomp_virtif_recv(struct virtif_user *viu,
                     void *data, size_t dlen, size_t *rcvp)
{
    void *cookie = rumpuser_component_unschedule();
    uint8_t *p = data;
    struct rte_mbuf *m, *m0;
    struct rte_pktmbuf *mp;
    int nb_rx, rv;

    for (;;) {
        nb_rx = rte_eth_rx_burst(IF_PORTID, 0, &m, 1);

        if (nb_rx) {
            assert(nb_rx == 1);

            mp = &m->pkt;
            if (mp->pkt_len > dlen) {
                /* for now, just drop packets we can't handle */
                printf("warning: virtif recv packet too big "
                       "%d vs. %zu\n", mp->pkt_len, dlen);
                rte_pktmbuf_free(m);
                continue;
            }
            *rcvp = mp->pkt_len;
            m0 = m;
            do {
                mp = &m->pkt;
                memcpy(p, mp->data, mp->data_len);
                p += mp->data_len;
            } while ((m = mp->next) != NULL);
            rte_pktmbuf_free(m0);
            rv = 0;
            break;
        } else {
            usleep(10000); /* XXX: don't 100% busyloop */
        }
    }

    rumpuser_component_schedule(cookie);
    return rv;
}
Example #28
/*
 * functional description:
 * Packets that do not fulfil the forwarding rules are dropped here, and
 * their system resources are deallocated.
 * input mod: RX_MOD_DROP
 * output mod: RX_MOD_IDLE
 * module stack pos: last module
 * date: 2014-05-10
 * author: jzheng
 */
dbg_local enum RX_MOD_INDEX rx_module_drop(dbg_local struct rte_mbuf *pktbuf, dbg_local enum RX_MOD_INDEX imodid)
{
	/* default mod id; any mod may follow, as this module is the last one */
	dbg_local enum RX_MOD_INDEX nextmodid = RX_MOD_IDLE;
	int iport_in;

	if (imodid != RX_MOD_DROP) /* check entry legality */
		goto local_ret;
	iport_in = pktbuf->pkt.in_port;
	rte_pktmbuf_free(pktbuf);
local_ret:
	return nextmodid;
}
Example #29
/*
 * Return a buffered packet.
 */
int
onvm_nf_return_pkt(struct rte_mbuf* pkt) {
        /* FIXME: should we get a batch of buffered packets and then enqueue? Can we keep stats? */
        if(unlikely(rte_ring_enqueue(tx_ring, pkt) == -ENOBUFS)) {
                rte_pktmbuf_free(pkt);
                tx_stats->tx_drop[nf_info->instance_id]++;
                return -ENOBUFS;
        }
        else tx_stats->tx_returned[nf_info->instance_id]++;
        return 0;
}
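The enqueue-or-free rule generalizes to bursts: whatever a full ring rejects stays the caller's to free, otherwise the mempool slowly drains. A sketch (assuming the DPDK 17.05+ rte_ring_enqueue_burst signature with the trailing free_space argument):

#include <rte_mbuf.h>
#include <rte_ring.h>

/* Enqueue as many packets as fit and free the rest; returns count enqueued. */
static unsigned int
ring_enqueue_or_free(struct rte_ring *r, struct rte_mbuf **pkts, unsigned int n)
{
	unsigned int sent = rte_ring_enqueue_burst(r, (void **)pkts, n, NULL);
	unsigned int i;

	for (i = sent; i < n; i++)
		rte_pktmbuf_free(pkts[i]);
	return sent;
}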
Example #30
static void
kni_free_mbufs(struct rte_kni *kni)
{
	int i, ret;
	struct rte_mbuf *pkts[MAX_MBUF_BURST_NUM];

	ret = kni_fifo_get(kni->free_q, (void **)pkts, MAX_MBUF_BURST_NUM);
	if (likely(ret > 0)) {
		for (i = 0; i < ret; i++)
			rte_pktmbuf_free(pkts[i]);
	}
}