static void
app_lcore_arp_tx_gratuitous (struct app_lcore_params_io *lp)
{
  uint32_t i;

  for (i = 0; i < lp->tx.n_nic_queues; i++) {
    uint8_t port = lp->tx.nic_queues[i].port;
    uint8_t queue = lp->tx.nic_queues[i].queue;

    struct rte_mbuf *tmpbuf = rte_ctrlmbuf_alloc (app.pools[0]);
    if (!tmpbuf) {
      puts ("Error creating gratuitous ARP");
      exit (-1);
    }
    tmpbuf->pkt_len = arppktlen;
    tmpbuf->data_len = arppktlen;
    tmpbuf->port = port;

    /* Copy the ARP template, then patch in this port's addresses */
    memcpy (rte_ctrlmbuf_data (tmpbuf), arppkt, arppktlen);
    /* Ethernet source MAC (offset 6) */
    rte_eth_macaddr_get (port, (struct ether_addr *)(rte_ctrlmbuf_data (tmpbuf) + 6));
    /* ARP sender hardware address (Ethernet header + 8 bytes into the ARP header) */
    rte_eth_macaddr_get (port, (struct ether_addr *)(rte_ctrlmbuf_data (tmpbuf) + 6 + 6 + 2 + 8));
    /* ARP sender protocol address, taken from the ICMP template's IPv4 destination field */
    memcpy (rte_ctrlmbuf_data (tmpbuf) + 6 + 6 + 2 + 14, icmppkt + 6 + 6 + 2 + 4 * 4, 4);

    if (!rte_eth_tx_burst (port, queue, &tmpbuf, 1)) {
      puts ("Error sending gratuitous ARP");
      exit (-1);
    }
  }
}
void
app_ping(void)
{
	unsigned i;
	uint64_t timestamp, diff_tsc;
	const uint64_t timeout = rte_get_tsc_hz() * APP_PING_TIMEOUT_SEC;

	for (i = 0; i < RTE_MAX_LCORE; i++) {
		struct app_core_params *p = &app.cores[i];
		struct rte_ring *ring_req, *ring_resp;
		void *msg;
		struct app_msg_req *req;
		int status;

		if ((p->core_type != APP_CORE_FC) &&
		    (p->core_type != APP_CORE_FW) &&
		    (p->core_type != APP_CORE_RT) &&
		    (p->core_type != APP_CORE_RX))
			continue;

		ring_req = app_get_ring_req(p->core_id);
		ring_resp = app_get_ring_resp(p->core_id);

		/* Fill request message */
		msg = (void *)rte_ctrlmbuf_alloc(app.msg_pool);
		if (msg == NULL)
			rte_panic("Unable to allocate new message\n");

		req = (struct app_msg_req *)
			rte_ctrlmbuf_data((struct rte_mbuf *)msg);
		req->type = APP_MSG_REQ_PING;

		/* Send request */
		do {
			status = rte_ring_sp_enqueue(ring_req, msg);
		} while (status == -ENOBUFS);

		/* Wait for response */
		timestamp = rte_rdtsc();
		do {
			status = rte_ring_sc_dequeue(ring_resp, &msg);
			diff_tsc = rte_rdtsc() - timestamp;

			if (unlikely(diff_tsc > timeout))
				rte_panic("Core %u of type %d does not respond "
					"to requests\n", p->core_id, p->core_type);
		} while (status != 0);

		/* Free message buffer */
		rte_ctrlmbuf_free(msg);
	}
}
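/*
 * For context: app_ping() above relies on each target core running a message
 * loop that returns ping requests on its response ring. The responder below is
 * a minimal illustrative sketch, not code from the source; it assumes the same
 * ring helpers and message layout, and the name app_message_handle is
 * hypothetical.
 */
static void
app_message_handle(struct rte_ring *ring_req, struct rte_ring *ring_resp)
{
	void *msg;
	struct app_msg_req *req;

	/* Non-blocking poll: nothing to do if no request is pending */
	if (rte_ring_sc_dequeue(ring_req, &msg) != 0)
		return;

	req = (struct app_msg_req *)rte_ctrlmbuf_data((struct rte_mbuf *)msg);

	if (req->type == APP_MSG_REQ_PING) {
		/* Echo the request buffer back so app_ping() sees a response */
		while (rte_ring_sp_enqueue(ring_resp, msg) == -ENOBUFS)
			;
	} else {
		/* Unknown request: drop it */
		rte_ctrlmbuf_free(msg);
	}
}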
static inline void
app_lcore_io_tx (struct app_lcore_params_io *lp)
{
  uint32_t i;

  for (i = 0; i < lp->tx.n_nic_queues; i++) {
    uint8_t port = lp->tx.nic_queues[i].port;
    uint8_t queue = lp->tx.nic_queues[i].queue;
    uint32_t n_mbufs, n_pkts;

    n_mbufs = 1;
    struct rte_mbuf *tmpbuf = rte_ctrlmbuf_alloc (app.pools[0]);
    if (!tmpbuf) {
      continue;
    }
    tmpbuf->pkt_len = sndpktlen;
    tmpbuf->data_len = sndpktlen;
    tmpbuf->port = port;

    /* Optionally bump the ICMP sequence number in the template */
    if (autoIncNum) {
      (*((uint16_t *)(icmppkt + icmpStart + 2 + 2 + 2)))++;
    }
    memcpy (rte_ctrlmbuf_data (tmpbuf), icmppkt, icmppktlen - 8);

    /* Embed the transmit timestamp in the payload */
    *((hptl_t *)(rte_ctrlmbuf_data (tmpbuf) + tsoffset)) = hptl_get ();

    if (doChecksum) {
      uint16_t cksum;
      cksum = rte_raw_cksum (rte_ctrlmbuf_data (tmpbuf) + icmpStart, sndpktlen - icmpStart);
      *((uint16_t *)(rte_ctrlmbuf_data (tmpbuf) + icmpStart + 2)) =
          ((cksum == 0xffff) ? cksum : ~cksum);
    }

    n_pkts = rte_eth_tx_burst (port, queue, &tmpbuf, n_mbufs);

    if (trainSleep) {
      hptl_waitns (trainSleep);
    }

    if (unlikely (n_pkts < n_mbufs)) {
      rte_ctrlmbuf_free (tmpbuf);
    } else {
      lp->tx.mbuf_out[port].n_mbufs++;
      if (trainLen && lp->tx.mbuf_out[port].n_mbufs >= trainLen) {
        hptl_waitns (waitTime);
        continueRX = 0;
        hptl_waitns (waitTime);
        exit (1);
      }
    }
  }
}
/*
 * Send a reply message to the vswitchd
 */
static void
send_reply_to_vswitchd(struct dpdk_message *reply)
{
	int rslt = 0;
	struct rte_mbuf *mbuf = NULL;
	void *ctrlmbuf_data = NULL;
	struct client *vswd = NULL;
	struct statistics *vswd_stat = NULL;

	vswd = &clients[VSWITCHD];
	vswd_stat = &vport_stats[VSWITCHD];

	/* Preparing the buffer to send */
	mbuf = rte_ctrlmbuf_alloc(pktmbuf_pool);
	if (!mbuf) {
		RTE_LOG(WARNING, APP,
		        "Error : Unable to allocate an mbuf : %s : %d",
		        __FUNCTION__, __LINE__);
		switch_tx_drop++;
		vswd_stat->rx_drop++;
		return;
	}

	ctrlmbuf_data = rte_ctrlmbuf_data(mbuf);
	rte_memcpy(ctrlmbuf_data, reply, sizeof(*reply));
	rte_ctrlmbuf_len(mbuf) = sizeof(*reply);

	/* Sending the buffer to vswitchd */
	rslt = rte_ring_sp_enqueue(vswd->rx_q, mbuf);
	if (rslt < 0) {
		if (rslt == -ENOBUFS) {
			rte_ctrlmbuf_free(mbuf);
			switch_tx_drop++;
			vswd_stat->rx_drop++;
		} else {
			overruns++;
		}
	}

	vswd_stat->tx++;
}
/*
 * test control mbuf
 */
static int
test_one_ctrlmbuf(void)
{
	struct rte_mbuf *m = NULL;
	char message[] = "This is a message carried by a ctrlmbuf";

	printf("Test ctrlmbuf API\n");

	/* alloc a mbuf */
	m = rte_ctrlmbuf_alloc(ctrlmbuf_pool);
	if (m == NULL)
		GOTO_FAIL("Cannot allocate mbuf");
	if (rte_ctrlmbuf_len(m) != 0)
		GOTO_FAIL("Bad length");

	/* set data */
	rte_ctrlmbuf_data(m) = &message;
	rte_ctrlmbuf_len(m) = sizeof(message);

	/* read data */
	if (rte_ctrlmbuf_data(m) != message)
		GOTO_FAIL("Invalid data pointer");
	if (rte_ctrlmbuf_len(m) != sizeof(message))
		GOTO_FAIL("Invalid len");

	rte_mbuf_sanity_check(m, RTE_MBUF_CTRL, 0);

	/* free mbuf */
	rte_ctrlmbuf_free(m);
	m = NULL;
	return 0;

fail:
	if (m)
		rte_ctrlmbuf_free(m);
	return -1;
}
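/*
 * The test above assumes ctrlmbuf_pool has already been created. What follows
 * is a minimal sketch of creating such a pool with the legacy ctrl-mbuf API;
 * the pool size NB_CTRLMBUF and the helper name setup_ctrlmbuf_pool are
 * illustrative, not taken from the source.
 */
#include <rte_mbuf.h>
#include <rte_mempool.h>

#define NB_CTRLMBUF 128 /* illustrative pool size */

static struct rte_mempool *ctrlmbuf_pool;

static int
setup_ctrlmbuf_pool(void)
{
	/* rte_ctrlmbuf_init() initializes each pool object as a control mbuf */
	ctrlmbuf_pool = rte_mempool_create("test_ctrlmbuf_pool", NB_CTRLMBUF,
					   sizeof(struct rte_mbuf), 32, 0,
					   NULL, NULL,
					   rte_ctrlmbuf_init, NULL,
					   SOCKET_ID_ANY, 0);
	return (ctrlmbuf_pool == NULL) ? -1 : 0;
}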
static inline void
app_lcore_io_tx_sts (struct app_lcore_params_io *lp, uint32_t bsz_wr)
{
  uint32_t i;
  uint32_t k;

  for (i = 0; i < lp->tx.n_nic_queues; i++) {
    uint8_t port = lp->tx.nic_queues[i].port;
    uint8_t queue = lp->tx.nic_queues[i].queue;
    uint32_t n_mbufs, n_pkts;

    /* Allocate and fill a burst of bsz_wr packets from the ICMP template */
    n_mbufs = bsz_wr;
    for (k = 0; k < n_mbufs; k++) {
      lp->tx.mbuf_out[port].array[k] = rte_ctrlmbuf_alloc (app.pools[0]);
      if (lp->tx.mbuf_out[port].array[k] == NULL) {
        n_mbufs = k;
        break;
      }
      lp->tx.mbuf_out[port].array[k]->pkt_len = sndpktlen;
      lp->tx.mbuf_out[port].array[k]->data_len = sndpktlen;
      lp->tx.mbuf_out[port].array[k]->port = port;
      memcpy (rte_ctrlmbuf_data (lp->tx.mbuf_out[port].array[k]), icmppkt,
              icmppktlen > sndpktlen ? sndpktlen : icmppktlen);
    }

    /* Only the first packet of queue 0 carries the id, counter and timestamp */
    if (queue == 0) {
      *((uint16_t *)(rte_ctrlmbuf_data (lp->tx.mbuf_out[port].array[0]) + idoffset)) =
          (TSIDTYPE)tspacketId;
      if (autoIncNum) {
        *((uint16_t *)(rte_ctrlmbuf_data (lp->tx.mbuf_out[port].array[0]) + cntroffset)) =
            pktcounter++;
        if (pktcounter > trainLen) {
          hptl_waitns (waitTime);
          continueRX = 0;
          hptl_waitns (waitTime);
          exit (1);
        }
      }
      *(hptl_t *)(rte_ctrlmbuf_data (lp->tx.mbuf_out[port].array[0]) + tsoffset) = hptl_get ();
    }

    if (doChecksum) {
      uint16_t cksum;
      cksum = rte_raw_cksum (rte_ctrlmbuf_data (lp->tx.mbuf_out[port].array[0]) + icmpStart,
                             sndpktlen - icmpStart);
      *((uint16_t *)(rte_ctrlmbuf_data (lp->tx.mbuf_out[port].array[0]) + icmpStart + 2)) =
          ((cksum == 0xffff) ? cksum : ~cksum);
    }

    n_pkts = rte_eth_tx_burst (port, queue, lp->tx.mbuf_out[port].array, n_mbufs);

    if (n_pkts == 0) {
      /* Nothing was sent: undo the counter and free the whole burst */
      pktcounter--;
      for (k = n_pkts; k < n_mbufs; k++) {
        struct rte_mbuf *pkt_to_free = lp->tx.mbuf_out[port].array[k];
        rte_ctrlmbuf_free (pkt_to_free);
      }
    } else {
      /* Retry until the remainder of the burst has been transmitted */
      while (unlikely (n_pkts < n_mbufs)) {
        uint64_t tmp;
        tmp = rte_eth_tx_burst (port, queue,
                                lp->tx.mbuf_out[port].array + n_pkts,
                                n_mbufs - n_pkts);
        n_pkts += tmp;
      }
    }
  }
}
static inline void
app_lcore_io_tx_bw (struct app_lcore_params_io *lp, uint32_t bsz_wr)
{
  uint32_t i;
  uint32_t k;

  for (i = 0; i < lp->tx.n_nic_queues; i++) {
    uint8_t port = lp->tx.nic_queues[i].port;
    uint8_t queue = lp->tx.nic_queues[i].queue;
    uint32_t n_mbufs, n_pkts;

    n_mbufs = bsz_wr;
    for (k = 0; k < n_mbufs; k++) {
      lp->tx.mbuf_out[port].array[k] = rte_ctrlmbuf_alloc (app.pools[0]);
      if (lp->tx.mbuf_out[port].array[k] == NULL) {
        n_mbufs = k;
        break;
      }
      lp->tx.mbuf_out[port].array[k]->pkt_len = sndpktlen;
      lp->tx.mbuf_out[port].array[k]->data_len = sndpktlen;
      lp->tx.mbuf_out[port].array[k]->port = port;
      memcpy (rte_ctrlmbuf_data (lp->tx.mbuf_out[port].array[k]), icmppkt, icmppktlen);
    }

    n_pkts = rte_eth_tx_burst (port, queue, lp->tx.mbuf_out[port].array, n_mbufs);

#if APP_STATS
    lp->tx.nic_queues_iters[i]++;
    lp->tx.nic_queues_count[i] += n_mbufs;
    if (unlikely (lp->tx.nic_queues_iters[i] == APP_STATS)) {
      struct rte_eth_stats stats;
      struct timeval start_ewr, end_ewr;

      rte_eth_stats_get (port, &stats);
      gettimeofday (&lp->tx.end_ewr, NULL);
      start_ewr = lp->tx.start_ewr;
      end_ewr = lp->tx.end_ewr;

      if (queue == 0) {
        /* Report drop ratio, payload ("useful") throughput, wire-level
         * throughput and packet rate over the elapsed measurement window */
        printf (
            "NIC TX port %u: drop ratio = %.2f (%lu/%lu) useful-speed: %lf Gbps, "
            "link-speed: %lf Gbps (%.1lf pkts/s)\n",
            (unsigned)port,
            (double)stats.oerrors / (double)(stats.oerrors + stats.opackets),
            (uint64_t)stats.opackets, (uint64_t)stats.oerrors,
            (stats.obytes /
             (((end_ewr.tv_sec * 1000000. + end_ewr.tv_usec) -
               (start_ewr.tv_sec * 1000000. + start_ewr.tv_usec)) / 1000000.)) /
                (1000 * 1000 * 1000. / 8.),
            (((stats.obytes) +
              stats.opackets * (/* 4 crc + 8 preamble + 12 ifg */ (8 + 12))) /
             (((end_ewr.tv_sec * 1000000. + end_ewr.tv_usec) -
               (start_ewr.tv_sec * 1000000. + start_ewr.tv_usec)) / 1000000.)) /
                (1000 * 1000 * 1000. / 8.),
            stats.opackets /
                (((end_ewr.tv_sec * 1000000. + end_ewr.tv_usec) -
                  (start_ewr.tv_sec * 1000000. + start_ewr.tv_usec)) / 1000000.));
        rte_eth_stats_reset (port);
        lp->tx.start_ewr = end_ewr; /* Restart the measurement window */
      }
      lp->tx.nic_queues_iters[i] = 0;
      lp->tx.nic_queues_count[i] = 0;
    }
#endif

    if (unlikely (n_pkts < n_mbufs)) {
      for (k = n_pkts; k < n_mbufs; k++) {
        struct rte_mbuf *pkt_to_free = lp->tx.mbuf_out[port].array[k];
        rte_ctrlmbuf_free (pkt_to_free);
      }
    }
  }
}