/*
 * Softnic packet forwarding
 */
static void
softnic_fwd(struct fwd_stream *fs)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	uint16_t nb_rx;
	uint16_t nb_tx;
	uint32_t retry;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t start_tsc;
	uint64_t end_tsc;
	uint64_t core_cycles;

	start_tsc = rte_rdtsc();
#endif

	/* Receive a burst of packets */
	nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst,
			nb_pkt_per_burst);
	fs->rx_packets += nb_rx;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	fs->rx_burst_stats.pkt_burst_spread[nb_rx]++;
#endif

	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);

	/* Retry if necessary */
	if (unlikely(nb_tx < nb_rx) && fs->retry_enabled) {
		retry = 0;
		while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) {
			rte_delay_us(burst_tx_delay_time);
			nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
					&pkts_burst[nb_tx], nb_rx - nb_tx);
		}
	}
	fs->tx_packets += nb_tx;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
#endif

	/* Drop (free) the packets that could not be sent */
	if (unlikely(nb_tx < nb_rx)) {
		fs->fwd_dropped += (nb_rx - nb_tx);
		do {
			rte_pktmbuf_free(pkts_burst[nb_tx]);
		} while (++nb_tx < nb_rx);
	}

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	end_tsc = rte_rdtsc();
	core_cycles = end_tsc - start_tsc;
	fs->core_cycles += core_cycles;
#endif
}
/**
 * @brief Send a burst of packets from the specified device without freeing them
 *
 * @param devId uint8_t, ID of the DPDK device
 * @param burst MBuf_t**, array of memory buffers to transmit
 * @param size  uint8_t, number of buffers in the burst
 *
 * @return int, count of transmitted packets, or a negative error code
 */
int DPDKAdapter::sendMbufBurstWithoutFree(uint8_t devId, MBuf_t** burst, uint8_t size)
{
    if (devId >= RTE_MAX_ETHPORTS)
    {
        qCritical("Device ID is out of range");
        return -1;
    }

    if (size > DPDK_TX_MAX_PKT_BURST)
    {
        qCritical("Maximum packet burst value is exceeded");
        return -2;
    }

    // Nothing to do
    if (!burst)
    {
        qWarning("There is nothing to send");
        return 0;
    }

    // Bump each refcount so the NIC's implicit free on TX completion
    // does not release the caller's buffers.
    for (uint8_t i = 0; i < size; i++)
    {
        rte_pktmbuf_refcnt_update(burst[i], 1);
    }

    return rte_eth_tx_burst(devId, 0, burst, size);
}
/* smallboy: the whole function was rewritten here. */
int ip_local_out(struct sk_buff *skb)
{
	s32 ret = US_RET_OK;
	u32 n;
	struct rte_mbuf *mbuf = (struct rte_mbuf *)(skb->head);
	struct net *pnet = skb->pnet;

	if (skb->nohdr) {
		if (skb->nf_trace || skb->used > 0)
			rte_pktmbuf_prepend(mbuf, skb->mac_len);

		mbuf_rebuild(skb, pnet->port);

		recv_pkt_dump(&mbuf, 1);

		n = rte_eth_tx_burst(pnet->port_id, pnet->send_queue_id, &mbuf, 1);
		if (n < 1) {
			US_ERR("TH:%u, tx_burst failed on skb_id:%u sk_id:%u\n",
					US_GET_LCORE(), skb->skb_id, skb->sk->sk_id);
			IP_ADD_STATS(skb->pnet, IPSTATS_MIB_OUTDISCARDS, 1);
			ret = US_ENETDOWN;
		} else {
			ret = US_RET_OK;
			IP_ADD_STATS(skb->pnet, IPSTATS_MIB_OUTPKTS, 1);
		}

		/* smallboy: pay attention to users, clone, used and nf_trace. */
		if (skb->users == 1) {
			/* users == 1: not cloned, so the skb can be freed here. */
			__kfree_skb(skb, US_MBUF_FREE_BY_OTHER);
		} else {
			/* smallboy: if the send failed, do not drop the reference. */
			if (ret == US_RET_OK)
				skb->users--;
			skb->used++;
			skb_reset_data_header(skb);
		}
	} else {
		if (skb_can_gso(skb))
			ret = ip_send_out_batch(skb, pnet);

		if (skb->users == 1) {
			__kfree_skb(skb, US_MBUF_FREE_BY_STACK);
		} else {
			if (ret == US_RET_OK)
				skb->users--;
			skb->used++;
			skb_reset_data_header(skb);
		}
	}

	return ret;
}
static void
send_paxos_message(paxos_message *pm)
{
	uint8_t port_id = 0;
	struct rte_mbuf *created_pkt = rte_pktmbuf_alloc(mbuf_pool);
	if (created_pkt == NULL) {
		rte_log(RTE_LOG_DEBUG, RTE_LOGTYPE_USER8,
			"Failed to allocate mbuf\n");
		return;
	}

	created_pkt->l2_len = sizeof(struct ether_hdr);
	created_pkt->l3_len = sizeof(struct ipv4_hdr);
	created_pkt->l4_len = sizeof(struct udp_hdr) + sizeof(paxos_message);

	craft_new_packet(&created_pkt, IPv4(192, 168, 4, 99), ACCEPTOR_ADDR,
			PROPOSER_PORT, ACCEPTOR_PORT, sizeof(paxos_message), port_id);

	size_t udp_offset = sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr);
	size_t paxos_offset = udp_offset + sizeof(struct udp_hdr);
	struct paxos_hdr *px = rte_pktmbuf_mtod_offset(created_pkt,
			struct paxos_hdr *, paxos_offset);

	px->msgtype = rte_cpu_to_be_16(pm->type);
	px->inst = rte_cpu_to_be_32(pm->u.accept.iid);
	px->rnd = rte_cpu_to_be_16(pm->u.accept.ballot);
	px->vrnd = rte_cpu_to_be_16(pm->u.accept.value_ballot);
	px->acptid = 0;
	rte_memcpy(px->paxosval, pm->u.accept.value.paxos_value_val,
			pm->u.accept.value.paxos_value_len);

	created_pkt->ol_flags = PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_UDP_CKSUM;

	const uint16_t nb_tx = rte_eth_tx_burst(port_id, 0, &created_pkt, 1);
	/* Free the mbuf only if the driver did not take ownership of it. */
	if (nb_tx == 0)
		rte_pktmbuf_free(created_pkt);

	rte_log(RTE_LOG_DEBUG, RTE_LOGTYPE_USER8, "Sent %d message(s)\n", nb_tx);
}
static void
app_lcore_arp_tx_gratuitous(struct app_lcore_params_io *lp)
{
	uint32_t i;

	for (i = 0; i < lp->tx.n_nic_queues; i++) {
		uint8_t port = lp->tx.nic_queues[i].port;
		uint8_t queue = lp->tx.nic_queues[i].queue;

		struct rte_mbuf *tmpbuf = rte_ctrlmbuf_alloc(app.pools[0]);
		if (!tmpbuf) {
			puts("Error creating gratuitous ARP");
			exit(-1);
		}
		tmpbuf->pkt_len = arppktlen;
		tmpbuf->data_len = arppktlen;
		tmpbuf->port = port;

		memcpy(rte_ctrlmbuf_data(tmpbuf), arppkt, arppktlen);
		/* Source MAC in the Ethernet header (offset 6)... */
		rte_eth_macaddr_get(port,
				(struct ether_addr *)(rte_ctrlmbuf_data(tmpbuf) + 6));
		/* ...and the sender hardware address in the ARP payload. */
		rte_eth_macaddr_get(port,
				(struct ether_addr *)(rte_ctrlmbuf_data(tmpbuf) + 6 + 6 + 2 + 8));
		/* Sender protocol address, taken from the ICMP packet template. */
		memcpy(rte_ctrlmbuf_data(tmpbuf) + 6 + 6 + 2 + 14,
				icmppkt + 6 + 6 + 2 + 4 * 4, 4);

		if (!rte_eth_tx_burst(port, queue, &tmpbuf, 1)) {
			puts("Error sending gratuitous ARP");
			exit(-1);
		}
	}
}
// FIXME: support packet sizes here
static inline void main_loop_poisson(struct rte_ring* ring, uint8_t device, uint16_t queue, uint32_t target, uint32_t link_speed) {
	uint64_t tsc_hz = rte_get_tsc_hz();
	// control IPGs instead of IDTs as IDTs < packet_time are physically impossible
	std::default_random_engine rand;
	uint64_t next_send = 0;
	struct rte_mbuf* bufs[batch_size];
	while (1) {
		int rc = ring_dequeue(ring, reinterpret_cast<void**>(bufs), batch_size);
		uint64_t cur = rte_get_tsc_cycles();
		// nothing sent for 10 ms, restart rate control
		if (((int64_t) cur - (int64_t) next_send) > (int64_t) tsc_hz / 100) {
			next_send = cur;
		}
		if (rc == 0) {
			uint32_t sent = 0;
			while (sent < batch_size) {
				// wire time of the frame (framing overhead adds 24 bytes),
				// assuming link_speed is given in Mbit/s
				uint64_t pkt_time = (bufs[sent]->pkt.pkt_len + 24) * 8 / (link_speed / 1000);
				// mean gap such that gap + packet time hits the target inter-departure time
				uint64_t avg = (uint64_t) (tsc_hz / (1000000000 / target) - pkt_time);
				std::exponential_distribution<double> distribution(1.0 / avg);
				// busy-wait until the scheduled send time, then draw the next gap
				while ((cur = rte_get_tsc_cycles()) < next_send);
				next_send += distribution(rand) + pkt_time;
				sent += rte_eth_tx_burst(device, queue, bufs + sent, 1);
			}
		}
	}
}
/**
 * Interface to dequeue mbufs from tx_q and burst tx
 */
static void
kni_kni_to_eth(struct kni_port_params *p)
{
	uint8_t port_id;
	unsigned nb_tx, num;
	struct rte_mbuf *pkts_burst[PKT_BURST_SZ];

	if (p == NULL)
		return;

	port_id = p->port_id;

	/* Burst rx from kni */
	num = rte_kni_rx_burst(p->kni, pkts_burst, PKT_BURST_SZ);
	if (unlikely(num > PKT_BURST_SZ)) {
		RTE_LOG(ERR, APP, "Error receiving from KNI\n");
		return;
	}

	/* Burst tx to eth */
	nb_tx = rte_eth_tx_burst(port_id, 0, pkts_burst, (uint16_t)num);
	if (unlikely(nb_tx < num)) {
		/* Free mbufs not tx to NIC */
		kni_burst_free_mbufs(&pkts_burst[nb_tx], num - nb_tx);
	}
}
static int
test_send_basic_packets(void)
{
	struct rte_mbuf bufs[RING_SIZE];
	struct rte_mbuf *pbufs[RING_SIZE];
	int i;

	printf("Testing ring pmd RX/TX\n");

	for (i = 0; i < RING_SIZE / 2; i++)
		pbufs[i] = &bufs[i];

	if (rte_eth_tx_burst(TX_PORT, 0, pbufs, RING_SIZE / 2) < RING_SIZE / 2) {
		printf("Failed to transmit packet burst\n");
		return -1;
	}

	if (rte_eth_rx_burst(RX_PORT, 0, pbufs, RING_SIZE) != RING_SIZE / 2) {
		printf("Failed to receive packet burst\n");
		return -1;
	}

	for (i = 0; i < RING_SIZE / 2; i++)
		if (pbufs[i] != &bufs[i]) {
			printf("Error: received data does not match that transmitted\n");
			return -1;
		}

	return 0;
}
/**
 * Interface to dequeue mbufs from tx_q and burst tx
 */
static void
kni_egress(struct kni_port_params *p, uint32_t lcore_id)
{
	uint8_t i, port_id;
	unsigned nb_tx, num;
	uint32_t nb_kni;
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	uint16_t queue_num;

	if (p == NULL)
		return;

	nb_kni = p->nb_kni;
	port_id = p->port_id;
	queue_num = p->tx_queue_id;

	for (i = 0; i < nb_kni; i++) {
		/* Burst rx from kni */
		num = rte_kni_rx_burst(p->kni[i], pkts_burst, MAX_PKT_BURST);
		if (unlikely(num > MAX_PKT_BURST)) {
			RTE_LOG(ERR, KNI, "Error receiving from KNI\n");
			return;
		}

		/* Burst tx to eth */
		nb_tx = rte_eth_tx_burst(port_id, queue_num, pkts_burst, (uint16_t)num);
		rte_kni_handle_request(p->kni[i]);

		stats[lcore_id].nb_kni_rx += num;
		stats[lcore_id].nb_tx += nb_tx;

		if (unlikely(nb_tx < num)) {
			/* Free mbufs not tx to NIC */
			kni_burst_free_mbufs(&pkts_burst[nb_tx], num - nb_tx);
			stats[lcore_id].nb_kni_dropped += num - nb_tx;
		}
	}
}
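/*
 * Both KNI egress snippets above call kni_burst_free_mbufs() without showing
 * it. A minimal sketch, assuming the helper does nothing more than free each
 * mbuf in the array:
 */
static void
kni_burst_free_mbufs(struct rte_mbuf **pkts, unsigned num)
{
	unsigned i;

	if (pkts == NULL)
		return;

	for (i = 0; i < num; i++) {
		rte_pktmbuf_free(pkts[i]);
		pkts[i] = NULL;
	}
}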
static inline void
flush_one_port(struct output_buffer *outbuf, uint8_t outp)
{
	unsigned nb_tx = rte_eth_tx_burst(outp, 0, outbuf->mbufs,
			outbuf->count);

	if (unlikely(nb_tx < outbuf->count)) {
		pktmbuf_free_bulk(&outbuf->mbufs[nb_tx], outbuf->count - nb_tx);
	}
	outbuf->count = 0;
}
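/*
 * flush_one_port() relies on pktmbuf_free_bulk(), which is not shown. A
 * minimal sketch, assuming it simply walks the array and frees every mbuf:
 */
static inline void
pktmbuf_free_bulk(struct rte_mbuf *mbuf_table[], unsigned n)
{
	unsigned i;

	for (i = 0; i < n; i++)
		rte_pktmbuf_free(mbuf_table[i]);
}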
static inline void
app_lcore_io_tx_kni(struct app_lcore_params_io *lp, uint32_t bsz)
{
	struct rte_mbuf *pkts_burst[bsz];
	unsigned num;
	uint8_t portid;
	uint16_t nb_tx;
	unsigned i, j;

	return; /* XXX: early return leaves this KNI TX path disabled; kept as in the original */

	for (i = 0; i < lp->tx.n_nic_ports; i++) {
		portid = lp->tx.nic_ports[i];
		if (lagopus_kni[portid] == NULL) {
			continue;
		}
		num = rte_kni_rx_burst(lagopus_kni[portid], pkts_burst, bsz);
		if (num == 0 || (uint32_t)num > bsz) {
			continue;
		}
		nb_tx = rte_eth_tx_burst(portid, 0, pkts_burst, (uint16_t)num);
		if (unlikely(nb_tx < (uint16_t)num)) {
			/* Free mbufs not tx to NIC */
			for (j = nb_tx; j < num; j++) {
				rte_pktmbuf_free(pkts_burst[j]);
			}
		}
	}
}
void
xmit_arp_req(struct gatekeeper_if *iface, const struct ipaddr *addr,
	const struct ether_addr *ha, uint16_t tx_queue)
{
	struct rte_mbuf *created_pkt;
	struct ether_hdr *eth_hdr;
	struct arp_hdr *arp_hdr;
	size_t pkt_size;
	struct lls_config *lls_conf = get_lls_conf();
	int ret;

	struct rte_mempool *mp = lls_conf->net->gatekeeper_pktmbuf_pool[
		rte_lcore_to_socket_id(lls_conf->lcore_id)];
	created_pkt = rte_pktmbuf_alloc(mp);
	if (created_pkt == NULL) {
		LLS_LOG(ERR, "Could not alloc a packet for an ARP request\n");
		return;
	}

	pkt_size = iface->l2_len_out + sizeof(struct arp_hdr);
	created_pkt->data_len = pkt_size;
	created_pkt->pkt_len = pkt_size;

	/* Set up Ethernet header. */
	eth_hdr = rte_pktmbuf_mtod(created_pkt, struct ether_hdr *);
	ether_addr_copy(&iface->eth_addr, &eth_hdr->s_addr);
	if (ha == NULL)
		memset(&eth_hdr->d_addr, 0xFF, ETHER_ADDR_LEN);
	else
		ether_addr_copy(ha, &eth_hdr->d_addr);

	/* Set up VLAN header. */
	if (iface->vlan_insert)
		fill_vlan_hdr(eth_hdr, iface->vlan_tag_be, ETHER_TYPE_ARP);
	else
		eth_hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_ARP);

	/* Set up ARP header. */
	arp_hdr = pkt_out_skip_l2(iface, eth_hdr);
	arp_hdr->arp_hrd = rte_cpu_to_be_16(ARP_HRD_ETHER);
	arp_hdr->arp_pro = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
	arp_hdr->arp_hln = ETHER_ADDR_LEN;
	arp_hdr->arp_pln = sizeof(struct in_addr);
	arp_hdr->arp_op = rte_cpu_to_be_16(ARP_OP_REQUEST);
	ether_addr_copy(&iface->eth_addr, &arp_hdr->arp_data.arp_sha);
	arp_hdr->arp_data.arp_sip = iface->ip4_addr.s_addr;
	memset(&arp_hdr->arp_data.arp_tha, 0, ETHER_ADDR_LEN);
	arp_hdr->arp_data.arp_tip = addr->ip.v4.s_addr;

	ret = rte_eth_tx_burst(iface->id, tx_queue, &created_pkt, 1);
	if (ret <= 0) {
		rte_pktmbuf_free(created_pkt);
		LLS_LOG(ERR, "Could not transmit an ARP request\n");
	}
}
static int
rte_port_ethdev_writer_nodrop_tx_bulk(void *port,
		struct rte_mbuf **pkts,
		uint64_t pkts_mask)
{
	struct rte_port_ethdev_writer_nodrop *p =
		(struct rte_port_ethdev_writer_nodrop *)port;
	uint64_t bsz_mask = p->bsz_mask;
	uint32_t tx_buf_count = p->tx_buf_count;
	/*
	 * expr == 0 iff pkts_mask is a contiguous run of low-order 1-bits
	 * (first term) that contains at least a full burst (second term).
	 */
	uint64_t expr = (pkts_mask & (pkts_mask + 1)) |
			((pkts_mask & bsz_mask) ^ bsz_mask);

	if (expr == 0) {
		uint64_t n_pkts = __builtin_popcountll(pkts_mask);
		uint32_t n_pkts_ok;

		if (tx_buf_count)
			send_burst_nodrop(p);

		RTE_PORT_ETHDEV_WRITER_NODROP_STATS_PKTS_IN_ADD(p, n_pkts);
		n_pkts_ok = rte_eth_tx_burst(p->port_id, p->queue_id, pkts,
			n_pkts);

		if (n_pkts_ok >= n_pkts)
			return 0;

		/*
		 * If we didn't manage to send all packets in a single burst,
		 * move the remaining packets to the buffer and call send burst.
		 */
		for (; n_pkts_ok < n_pkts; n_pkts_ok++) {
			struct rte_mbuf *pkt = pkts[n_pkts_ok];

			p->tx_buf[p->tx_buf_count++] = pkt;
		}
		send_burst_nodrop(p);
	} else {
		for ( ; pkts_mask; ) {
			uint32_t pkt_index = __builtin_ctzll(pkts_mask);
			uint64_t pkt_mask = 1LLU << pkt_index;
			struct rte_mbuf *pkt = pkts[pkt_index];

			p->tx_buf[tx_buf_count++] = pkt;
			RTE_PORT_ETHDEV_WRITER_NODROP_STATS_PKTS_IN_ADD(p, 1);
			pkts_mask &= ~pkt_mask;
		}

		p->tx_buf_count = tx_buf_count;
		if (tx_buf_count >= p->tx_burst_sz)
			send_burst_nodrop(p);
	}

	return 0;
}
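/*
 * send_burst_nodrop() is referenced above but not shown. A plausible sketch,
 * modeled on the DPDK rte_port ethdev writer: retry the burst up to an
 * assumed n_retries field before giving up and freeing the leftovers.
 */
static inline void
send_burst_nodrop(struct rte_port_ethdev_writer_nodrop *p)
{
	uint32_t nb_tx = 0, i;

	nb_tx = rte_eth_tx_burst(p->port_id, p->queue_id, p->tx_buf,
			p->tx_buf_count);

	/* Everything went out on the first try. */
	if (nb_tx >= p->tx_buf_count) {
		p->tx_buf_count = 0;
		return;
	}

	/* Retry the remainder a bounded number of times. */
	for (i = 0; i < p->n_retries; i++) {
		nb_tx += rte_eth_tx_burst(p->port_id, p->queue_id,
				p->tx_buf + nb_tx, p->tx_buf_count - nb_tx);
		if (nb_tx >= p->tx_buf_count) {
			p->tx_buf_count = 0;
			return;
		}
	}

	/* Out of retries: count and free what could not be sent. */
	RTE_PORT_ETHDEV_WRITER_NODROP_STATS_PKTS_DROP_ADD(p,
			p->tx_buf_count - nb_tx);
	for ( ; nb_tx < p->tx_buf_count; nb_tx++)
		rte_pktmbuf_free(p->tx_buf[nb_tx]);
	p->tx_buf_count = 0;
}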
// FIXME: actually do the right thing
static inline void main_loop(struct rte_ring* ring, uint8_t device, uint16_t queue) {
	struct rte_mbuf* bufs[batch_size];
	while (1) {
		int rc = ring_dequeue(ring, reinterpret_cast<void**>(bufs), batch_size);
		if (rc == 0) {
			uint32_t sent = 0;
			// retry until the whole batch has been handed to the NIC
			while (sent < batch_size) {
				sent += rte_eth_tx_burst(device, queue, bufs + sent, batch_size - sent);
			}
		}
	}
}
static int
exec_burst(uint32_t flags, int lcore)
{
	unsigned i, portid, nb_tx = 0;
	struct lcore_conf *conf;
	uint32_t pkt_per_port;
	int num, idx = 0;
	int diff_tsc;

	conf = &lcore_conf[lcore];

	pkt_per_port = MAX_TRAFFIC_BURST;
	num = pkt_per_port;

	rte_atomic64_init(&start);

	/* start polling thread, but not actually poll yet */
	rte_eal_remote_launch(poll_burst, (void *)&pkt_per_port, lcore);

	/* Only when polling first */
	if (flags == SC_BURST_POLL_FIRST)
		rte_atomic64_set(&start, 1);

	/* start xmit */
	while (num) {
		nb_tx = RTE_MIN(MAX_PKT_BURST, num);
		for (i = 0; i < conf->nb_ports; i++) {
			portid = conf->portlist[i];
			rte_eth_tx_burst(portid, 0, &tx_burst[idx], nb_tx);
			idx += nb_tx;
		}
		num -= nb_tx;
	}

	sleep(5);

	/* only when polling second */
	if (flags == SC_BURST_XMIT_FIRST)
		rte_atomic64_set(&start, 1);

	/* wait for polling finished */
	diff_tsc = rte_eal_wait_lcore(lcore);
	if (diff_tsc < 0) {
		printf("exec_burst: Failed to measure cycles per packet\n");
		return -1;
	}

	printf("Result: %d cycles per packet\n", diff_tsc);

	return 0;
}
static inline void
app_lcore_io_tx(struct app_lcore_params_io *lp)
{
	uint32_t i;

	for (i = 0; i < lp->tx.n_nic_queues; i++) {
		uint8_t port = lp->tx.nic_queues[i].port;
		uint8_t queue = lp->tx.nic_queues[i].queue;
		uint32_t n_mbufs, n_pkts;

		n_mbufs = 1;
		struct rte_mbuf *tmpbuf = rte_ctrlmbuf_alloc(app.pools[0]);
		if (!tmpbuf) {
			continue;
		}
		tmpbuf->pkt_len = sndpktlen;
		tmpbuf->data_len = sndpktlen;
		tmpbuf->port = port;

		/* Bump the sequence-number field of the ICMP template. */
		if (autoIncNum) {
			(*((uint16_t *)(icmppkt + icmpStart + 2 + 2 + 2)))++;
		}
		memcpy(rte_ctrlmbuf_data(tmpbuf), icmppkt, icmppktlen - 8);
		/* Timestamp the packet just before transmission. */
		*((hptl_t *)(rte_ctrlmbuf_data(tmpbuf) + tsoffset)) = hptl_get();

		if (doChecksum) {
			uint16_t cksum;
			cksum = rte_raw_cksum(rte_ctrlmbuf_data(tmpbuf) + icmpStart,
					sndpktlen - icmpStart);
			*((uint16_t *)(rte_ctrlmbuf_data(tmpbuf) + icmpStart + 2)) =
					((cksum == 0xffff) ? cksum : ~cksum);
		}

		n_pkts = rte_eth_tx_burst(port, queue, &tmpbuf, n_mbufs);
		if (trainSleep) {
			hptl_waitns(trainSleep);
		}

		if (unlikely(n_pkts < n_mbufs)) {
			rte_ctrlmbuf_free(tmpbuf);
		} else {
			lp->tx.mbuf_out[port].n_mbufs++;
			if (trainLen && lp->tx.mbuf_out[port].n_mbufs >= trainLen) {
				/* Train finished: stop RX and exit. */
				hptl_waitns(waitTime);
				continueRX = 0;
				hptl_waitns(waitTime);
				exit(1);
			}
		}
	}
}
/* Transmit packets after encapsulating */
int
vxlan_tx_pkts(uint8_t port_id, uint16_t queue_id,
		struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	int ret = 0;
	uint16_t i;

	for (i = 0; i < nb_pkts; i++)
		vxlan_tx_process(queue_id, tx_pkts[i]);

	ret = rte_eth_tx_burst(port_id, queue_id, tx_pkts, nb_pkts);

	return ret;
}
static inline void
send_burst(struct rte_port_ethdev_writer *p)
{
	uint32_t nb_tx;

	nb_tx = rte_eth_tx_burst(p->port_id, p->queue_id, p->tx_buf,
			p->tx_buf_count);

	/* Free the packets the device did not accept. */
	for ( ; nb_tx < p->tx_buf_count; nb_tx++)
		rte_pktmbuf_free(p->tx_buf[nb_tx]);

	p->tx_buf_count = 0;
}
/*
 * Flush packets scheduled for transmit on ports
 */
static void
flush_pkts(unsigned action)
{
	unsigned i = 0;
	struct rte_mbuf *pkts[PKT_BURST_SIZE] = {0};
	struct port_queue *pq = &port_queues[action & PORT_MASK];
	struct statistics *s = &vport_stats[action];
	const uint64_t drain_tsc =
		(rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
	uint64_t diff_tsc = 0;
	static uint64_t prev_tsc[MAX_PHYPORTS] = {0};
	uint64_t cur_tsc = rte_rdtsc();
	unsigned num_pkts;

	diff_tsc = cur_tsc - prev_tsc[action & PORT_MASK];

	if (unlikely(rte_ring_count(pq->tx_q) >= PKT_BURST_SIZE)) {
		num_pkts = PKT_BURST_SIZE;
	} else {
		/* If queue idles with less than PKT_BURST packets, drain it */
		if (unlikely(diff_tsc > drain_tsc)) {
			num_pkts = rte_ring_count(pq->tx_q);
		} else {
			return;
		}
	}

	if (unlikely(rte_ring_dequeue_bulk(pq->tx_q, (void **)pkts, num_pkts) != 0))
		return;

	const uint16_t sent = rte_eth_tx_burst(ports->id[action & PORT_MASK], 0,
			pkts, num_pkts);

	prev_tsc[action & PORT_MASK] = cur_tsc;

	if (unlikely(sent < num_pkts)) {
		for (i = sent; i < num_pkts; i++)
			rte_pktmbuf_free(pkts[i]);
		s->tx_drop += (num_pkts - sent);
	} else {
		s->tx += sent;
	}
}
static int
test_stats_reset(void)
{
	struct rte_eth_stats stats;
	struct rte_mbuf buf, *pbuf = &buf;

	printf("Testing ring PMD stats reset\n");

	rte_eth_stats_reset(RXTX_PORT);

	/* check stats of RXTX port, should all be zero */
	rte_eth_stats_get(RXTX_PORT, &stats);
	if (stats.ipackets != 0 || stats.opackets != 0 ||
			stats.ibytes != 0 || stats.obytes != 0 ||
			stats.ierrors != 0 || stats.oerrors != 0) {
		printf("Error: RXTX port stats are not zero\n");
		return -1;
	}

	/* send and receive 1 packet and check for stats update */
	if (rte_eth_tx_burst(RXTX_PORT, 0, &pbuf, 1) != 1) {
		printf("Error sending packet to RXTX port\n");
		return -1;
	}

	if (rte_eth_rx_burst(RXTX_PORT, 0, &pbuf, 1) != 1) {
		printf("Error receiving packet from RXTX port\n");
		return -1;
	}

	rte_eth_stats_get(RXTX_PORT, &stats);
	if (stats.ipackets != 1 || stats.opackets != 1 ||
			stats.ibytes != 0 || stats.obytes != 0 ||
			stats.ierrors != 0 || stats.oerrors != 0) {
		printf("Error: RXTX port stats are not as expected\n");
		return -1;
	}

	rte_eth_stats_reset(RXTX_PORT);

	/* check stats of RXTX port, should all be zero */
	rte_eth_stats_get(RXTX_PORT, &stats);
	if (stats.ipackets != 0 || stats.opackets != 0 ||
			stats.ibytes != 0 || stats.obytes != 0 ||
			stats.ierrors != 0 || stats.oerrors != 0) {
		printf("Error: RXTX port stats are not zero\n");
		return -1;
	}

	return 0;
}
/**
 * A call to tx_sync_ring will try to empty a Netmap TX ring by converting its
 * buffers into rte_mbufs and sending them out on the ring's DPDK port.
 */
static int
tx_sync_ring(struct netmap_ring *ring, uint8_t port, uint16_t ring_number,
	struct rte_mempool *pool, uint16_t max_burst)
{
	uint32_t i, n_tx;
	uint16_t burst_size;
	uint32_t cur_slot, n_used_slots;
	struct rte_mbuf *tx_mbufs[COMPAT_NETMAP_MAX_BURST];

	n_used_slots = ring->num_slots - ring->avail;
	n_used_slots = RTE_MIN(n_used_slots, max_burst);
	cur_slot = (ring->cur + ring->avail) & (ring->num_slots - 1);

	while (n_used_slots) {
		burst_size = (uint16_t)RTE_MIN(n_used_slots, RTE_DIM(tx_mbufs));

		for (i = 0; i < burst_size; i++) {
			tx_mbufs[i] = rte_pktmbuf_alloc(pool);
			if (tx_mbufs[i] == NULL)
				goto err;

			slot_to_mbuf(ring, cur_slot, tx_mbufs[i]);
			cur_slot = NETMAP_RING_NEXT(ring, cur_slot);
		}

		n_tx = rte_eth_tx_burst(port, ring_number, tx_mbufs,
			burst_size);

		/* Update the Netmap ring structure to reflect the change */
		ring->avail += n_tx;
		n_used_slots -= n_tx;

		/* Return the mbufs that failed to transmit to their pool */
		if (unlikely(n_tx != burst_size)) {
			for (i = n_tx; i < burst_size; i++)
				rte_pktmbuf_free(tx_mbufs[i]);
			break;
		}
	}

	return 0;

err:
	/* Free whatever was allocated before the failed rte_pktmbuf_alloc(). */
	while (i-- > 0)
		rte_pktmbuf_free(tx_mbufs[i]);

	RTE_LOG(ERR, USER1,
		"Couldn't get mbuf from mempool; is the mempool too small?\n");
	return -1;
}
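/*
 * tx_sync_ring() above depends on slot_to_mbuf() to convert a Netmap slot
 * into an rte_mbuf. A minimal sketch, assuming the slot's buffer only needs
 * to be copied into the freshly allocated mbuf:
 */
static inline void
slot_to_mbuf(struct netmap_ring *r, uint32_t index, struct rte_mbuf *mbuf)
{
	char *data;
	uint16_t length;

	rte_pktmbuf_reset(mbuf);
	length = r->slot[index].len;
	data = rte_pktmbuf_append(mbuf, length);
	if (data != NULL)
		rte_memcpy(data, NETMAP_BUF(r, r->slot[index].buf_idx), length);
}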
/*
 * The lcore main. This is the main thread that does the work, reading from
 * an input port and writing to an output port.
 */
static __attribute__((noreturn)) void
lcore_main(void)
{
	const uint8_t nb_ports = rte_eth_dev_count();
	uint8_t port;

	/*
	 * Check that the port is on the same NUMA node as the polling thread
	 * for best performance.
	 */
	for (port = 0; port < nb_ports; port++)
		if (rte_eth_dev_socket_id(port) > 0 &&
				rte_eth_dev_socket_id(port) != (int)rte_socket_id())
			printf("WARNING, port %u is on remote NUMA node to "
					"polling thread.\n\tPerformance will "
					"not be optimal.\n", port);

	printf("\nCore %u forwarding packets. [Ctrl+C to quit]\n",
			rte_lcore_id());

	/* Run until the application is quit or killed. */
	for (;;) {
		/*
		 * Receive packets on a port and forward them on the paired
		 * port. The mapping is 0 -> 1, 1 -> 0, 2 -> 3, 3 -> 2, etc.
		 */
		for (port = 0; port < nb_ports; port++) {

			/* Get burst of RX packets, from first port of pair. */
			struct rte_mbuf *bufs[BURST_SIZE];
			const uint16_t nb_rx = rte_eth_rx_burst(port, 0,
					bufs, BURST_SIZE);

			if (unlikely(nb_rx == 0))
				continue;

			/* Send burst of TX packets, to second port of pair. */
			const uint16_t nb_tx = rte_eth_tx_burst(port ^ 1, 0,
					bufs, nb_rx);

			/* Free any unsent packets. */
			if (unlikely(nb_tx < nb_rx)) {
				uint16_t buf;
				for (buf = nb_tx; buf < nb_rx; buf++)
					rte_pktmbuf_free(bufs[buf]);
			}
		}
	}
}
static inline void
send_burst(struct rte_port_ethdev_writer *p)
{
	uint32_t nb_tx;

	nb_tx = rte_eth_tx_burst(p->port_id, p->queue_id, p->tx_buf,
			p->tx_buf_count);

	RTE_PORT_ETHDEV_WRITER_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - nb_tx);
	for ( ; nb_tx < p->tx_buf_count; nb_tx++)
		rte_pktmbuf_free(p->tx_buf[nb_tx]);

	p->tx_buf_count = 0;
}
static inline void
flush_one_port(struct output_buffer *outbuf, uint8_t outp)
{
	unsigned nb_tx = rte_eth_tx_burst(outp, 0, outbuf->mbufs,
			outbuf->count);

	app_stats.tx.ro_tx_pkts += nb_tx;

	if (unlikely(nb_tx < outbuf->count)) {
		/* free the mbufs which failed from transmit */
		app_stats.tx.ro_tx_failed_pkts += (outbuf->count - nb_tx);
		LOG_DEBUG(REORDERAPP, "%s:Packet loss with tx_burst\n", __func__);
		pktmbuf_free_bulk(&outbuf->mbufs[nb_tx], outbuf->count - nb_tx);
	}
	outbuf->count = 0;
}
void
app_main_loop_tx(void)
{
	uint32_t i;

	RTE_LOG(INFO, USER1, "Core %u is doing TX\n", rte_lcore_id());

	for (i = 0; ; i = ((i + 1) & (app.n_ports - 1))) {
		uint16_t n_mbufs, n_pkts;
		int ret;

		n_mbufs = app.mbuf_tx[i].n_mbufs;

		ret = rte_ring_sc_dequeue_bulk(
			app.rings_tx[i],
			(void **) &app.mbuf_tx[i].array[n_mbufs],
			app.burst_size_tx_read);

		if (ret == -ENOENT)
			continue;

		n_mbufs += app.burst_size_tx_read;

		if (n_mbufs < app.burst_size_tx_write) {
			app.mbuf_tx[i].n_mbufs = n_mbufs;
			continue;
		}

		n_pkts = rte_eth_tx_burst(
			app.ports[i],
			0,
			app.mbuf_tx[i].array,
			n_mbufs);

		if (n_pkts < n_mbufs) {
			uint16_t k;

			/* Free the packets the port did not accept. */
			for (k = n_pkts; k < n_mbufs; k++) {
				struct rte_mbuf *pkt_to_free;

				pkt_to_free = app.mbuf_tx[i].array[k];
				rte_pktmbuf_free(pkt_to_free);
			}
		}

		app.mbuf_tx[i].n_mbufs = 0;
	}
}
static int
rte_port_ethdev_writer_tx_bulk(void *port,
		struct rte_mbuf **pkts,
		uint64_t pkts_mask)
{
	struct rte_port_ethdev_writer *p =
		(struct rte_port_ethdev_writer *)port;
	uint64_t bsz_mask = p->bsz_mask;
	uint32_t tx_buf_count = p->tx_buf_count;
	/*
	 * expr == 0 iff pkts_mask is a contiguous run of low-order 1-bits
	 * containing at least a full burst (see the nodrop variant above).
	 */
	uint64_t expr = (pkts_mask & (pkts_mask + 1)) |
			((pkts_mask & bsz_mask) ^ bsz_mask);

	if (expr == 0) {
		uint64_t n_pkts = __builtin_popcountll(pkts_mask);
		uint32_t n_pkts_ok;

		if (tx_buf_count)
			send_burst(p);

		RTE_PORT_ETHDEV_WRITER_STATS_PKTS_IN_ADD(p, n_pkts);
		n_pkts_ok = rte_eth_tx_burst(p->port_id, p->queue_id, pkts,
			n_pkts);

		RTE_PORT_ETHDEV_WRITER_STATS_PKTS_DROP_ADD(p, n_pkts - n_pkts_ok);
		for ( ; n_pkts_ok < n_pkts; n_pkts_ok++) {
			struct rte_mbuf *pkt = pkts[n_pkts_ok];

			rte_pktmbuf_free(pkt);
		}
	} else {
		for ( ; pkts_mask; ) {
			uint32_t pkt_index = __builtin_ctzll(pkts_mask);
			uint64_t pkt_mask = 1LLU << pkt_index;
			struct rte_mbuf *pkt = pkts[pkt_index];

			p->tx_buf[tx_buf_count++] = pkt;
			RTE_PORT_ETHDEV_WRITER_STATS_PKTS_IN_ADD(p, 1);
			pkts_mask &= ~pkt_mask;
		}

		p->tx_buf_count = tx_buf_count;
		if (tx_buf_count >= p->tx_burst_sz)
			send_burst(p);
	}

	return 0;
}
static void
l2sw_send_burst(struct lcore_env *env, uint8_t dst_port, unsigned n)
{
	struct rte_mbuf **m_table;
	unsigned ret;

	m_table = (struct rte_mbuf **)env->tx_mbufs[dst_port].m_table;

	ret = rte_eth_tx_burst(dst_port, (uint16_t)env->lcore_id, m_table, n);
	port_statistics[dst_port].tx += ret;
	if (unlikely(ret < n)) {
		port_statistics[dst_port].dropped += (n - ret);
		do {
			rte_pktmbuf_free(m_table[ret]);
		} while (++ret < n);
	}
}
int pcap_sendpacket(pcap_t *p, const u_char *buf, int size)
{
    struct rte_mbuf* mbuf = NULL;

    if (p == NULL || buf == NULL ||
        p->deviceId < 0 || p->deviceId >= RTE_MAX_ETHPORTS)
    {
        snprintf(errbuf_g, PCAP_ERRBUF_SIZE, "Invalid parameter");
        return DPDKPCAP_FAILURE;
    }

    mbuf = rte_pktmbuf_alloc(txPool);
    if (mbuf == NULL)
    {
        snprintf(errbuf_g, PCAP_ERRBUF_SIZE,
                 "Could not allocate buffer on port %d\n", p->deviceId);
        return DPDKPCAP_FAILURE;
    }

    if (mbuf->buf_len < size)
    {
        snprintf(errbuf_g, PCAP_ERRBUF_SIZE,
                 "Can not copy packet data : packet size %d, mbuf length %d, port %d\n",
                 size, mbuf->buf_len, p->deviceId);
        rte_pktmbuf_free(mbuf);
        return DPDKPCAP_FAILURE;
    }

    rte_memcpy(mbuf->pkt.data, buf, size);
    mbuf->pkt.data_len = size;
    mbuf->pkt.pkt_len = size;
    mbuf->pkt.nb_segs = 1;

    /* Retry until the single-packet burst is accepted. */
    while (rte_eth_tx_burst(p->deviceId, 0, &mbuf, 1) != 1)
        ;

    debug("Sent a packet to port %d\n", p->deviceId);
    return DPDKPCAP_OK;
}
static int txLoop(void* arg)
{
    dpdkpcap_tx_args_t* args_p = (dpdkpcap_tx_args_t*)arg;
    int number = args_p->number;
    int portId = args_p->portId;
    int lcoreId = rte_lcore_id();
    int packets = 0;
    int ret = 0;
    int i = 0;

    debug("Starting transmit: core %u, port %u, packets num %d\n",
          lcoreId, portId, number);

    while (1)
    {
        ret = rte_eth_tx_burst(portId, 0, mbuf_g, DEF_PKT_BURST);
        if (ret < DEF_PKT_BURST)
        {
            debug("Transmitted %u packets\n", ret);
        }

        /* The driver frees transmitted mbufs, so restore the reference
           counts to keep reusing the same template burst. */
        for (i = 0; i < ret; i++)
        {
            rte_pktmbuf_refcnt_update(mbuf_g[i], 1);
        }

        packets += ret;

        if (args_p->number > 0)
        {
            if (number < 1)
                break;
            number -= ret;
        }
    }

    debug("Finished transmit on core %u\n", lcoreId);
    return DPDKPCAP_OK;
}
/*
 * Send a burst of packets on an output interface.
 */
static inline int
odp_send_burst(struct odp_lcore_config *qconf, uint16_t n, uint8_t port)
{
	struct rte_mbuf **m_table;
	int ret;
	uint16_t queueid;

	queueid = qconf->tx_queue_id[port];
	m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;

	ret = rte_eth_tx_burst(port, queueid, m_table, n);
	if (unlikely(ret < n)) {
		/* Free the packets the port did not accept. */
		do {
			rte_pktmbuf_free(m_table[ret]);
		} while (++ret < n);
	}

	return 0;
}