/* Allocate and initialize the packet buffers. */
static int init_pkts_memory(void)
{
  int i;

  /* Number of buffers is the worst case to fill up TX and RX queues.
   * For bi-directional forwarding need buffers for both VIs. */
  pbs.num = RX_RING_SIZE + TX_RING_SIZE;
  if( ! cfg_unidirectional )
    pbs.num = 2 * pbs.num;
  pbs.mem_size = pbs.num * PKT_BUF_SIZE;
  pbs.mem_size = ROUND_UP(pbs.mem_size, huge_page_size);

  /* Allocate memory for DMA transfers. Try mmap() with MAP_HUGETLB to get
   * huge pages. If that fails, fall back to posix_memalign() and hope that
   * we do get them. */
  pbs.mem = mmap(NULL, pbs.mem_size, PROT_READ | PROT_WRITE,
                 MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB, -1, 0);
  if( pbs.mem == MAP_FAILED ) {
    fprintf(stderr, "mmap() failed. Are huge pages configured?\n");
    /* Allocate huge-page-aligned memory to give best chance of allocating
     * transparent huge-pages. */
    TEST(posix_memalign(&pbs.mem, huge_page_size, pbs.mem_size) == 0);
  }

  for( i = 0; i < pbs.num; ++i ) {
    struct pkt_buf* pkt_buf = pkt_buf_from_id(i);
    pkt_buf->id = i;
    pkt_buf_free(pkt_buf);
  }
  return 0;
}
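/* Minimal sketch of the buffer bookkeeping the function above assumes. The
 * real example defines these elsewhere; the struct pkt_bufs field names and
 * the bounds check here are illustrative, not the example's actual code.
 * Each buffer is carved out of the contiguous region at a fixed stride. */
struct pkt_bufs {
  void*  mem;       /* base of the DMA-able region */
  size_t mem_size;  /* region size, rounded up to whole huge pages */
  int    num;       /* number of PKT_BUF_SIZE buffers carved from mem */
};

static struct pkt_bufs pbs;

static inline struct pkt_buf* pkt_buf_from_id(int pkt_buf_i)
{
  assert((unsigned) pkt_buf_i < (unsigned) pbs.num);
  return (struct pkt_buf*) ((char*) pbs.mem + (size_t) pkt_buf_i * PKT_BUF_SIZE);
}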
/* Handle an RX event on a VI. */
static void handle_rx(struct vi_state* vi_state, int pkt_buf_i, int len)
{
  struct pkt_buf* pkt_buf = pkt_buf_from_id(vi_state, pkt_buf_i);
  ++vi_state->n_pkts;
  if( cfg_hexdump )
    hexdump(pkt_buf->rx_ptr, len);
  pkt_buf_free(vi_state, pkt_buf);
}
static void pkt_buf_init(struct vi* vi, int pkt_buf_i)
{
  struct pkt_buf* pkt_buf;

  pkt_buf = pkt_buf_from_id(vi, pkt_buf_i);
  pkt_buf->vi_owner = vi;
  pkt_buf->addr[vi->net_if->id] =
    ef_memreg_dma_addr(&vi->memreg, pkt_buf_i * PKT_BUF_SIZE);
  pkt_buf->id = pkt_buf_i;
  pkt_buf->n_refs = 0;
  pkt_buf_free(pkt_buf);
}
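/* Sketch of the matching pkt_buf_free() used by the snippet above, assuming
 * each owning VI keeps a singly-linked free list of buffers. The 'next',
 * 'free_pkt_bufs' and 'free_pkt_bufs_n' fields are illustrative names, not
 * necessarily the example's actual layout. */
static void pkt_buf_free(struct pkt_buf* pkt_buf)
{
  struct vi* vi = pkt_buf->vi_owner;
  pkt_buf->next = vi->free_pkt_bufs;
  vi->free_pkt_bufs = pkt_buf;
  ++vi->free_pkt_bufs_n;
}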
/* Event loop callback for outgoing packets. */
static bool event_loop_pkt_out_cb(void *pcap_port_, struct list_node *pkt_buf_) {
    struct pcap_port *pcap_port = (struct pcap_port *)pcap_port_;
    struct pkt_buf *pkt_buf = (struct pkt_buf *)pkt_buf_;

    int ret = pcap_inject(pcap_port->pcap, pkt_buf->data, pkt_buf->data_len);

    if (ret == -1) {
        logger_log(pcap_port->logger, LOG_WARN, "Error in pcap_inject: %s.",
                   pcap_geterr(pcap_port->pcap));
        pthread_mutex_lock(pcap_port->stats_mutex);
        pcap_port->of_stats->tx_dropped++;
        pcap_port->of_stats->tx_errors++;
        pthread_mutex_unlock(pcap_port->stats_mutex);
        pkt_buf_free(pkt_buf); //TODO perhaps should buffer for later write
        return false; // wait a little with the next packet
    } else if (ret != pkt_buf->data_len) {
        logger_log(pcap_port->logger, LOG_WARN,
                   "Pcap_inject could not send the whole packet: %d (%d).",
                   ret, pkt_buf->data_len);
        pthread_mutex_lock(pcap_port->stats_mutex);
        pcap_port->of_stats->tx_dropped++;
        pcap_port->of_stats->tx_errors++;
        pthread_mutex_unlock(pcap_port->stats_mutex);
        pkt_buf_free(pkt_buf);
        return false; // wait a little with the next packet
    } else {
        logger_log(pcap_port->logger, LOG_DEBUG, "Sent packet of length %d.",
                   pkt_buf->data_len);
        pthread_mutex_lock(pcap_port->stats_mutex);
        pcap_port->of_stats->tx_bytes += pkt_buf->data_len;
        pcap_port->of_stats->tx_packets++;
        pthread_mutex_unlock(pcap_port->stats_mutex);
        pkt_buf_free(pkt_buf);
        return true;
    }
}
/* Handle an RX event on a VI. We forward the packet on the other VI. */
static void handle_rx(int rx_vi_i, int pkt_buf_i, int len)
{
  int rc;
  int tx_vi_i = 2 - 1 - rx_vi_i;
  struct vi* rx_vi = &vis[rx_vi_i];
  struct vi* tx_vi = &vis[tx_vi_i];
  struct pkt_buf* pkt_buf = pkt_buf_from_id(pkt_buf_i);

  ++rx_vi->n_pkts;
  rc = ef_vi_transmit(&tx_vi->vi, pkt_buf->tx_ef_addr[tx_vi_i], len,
                      pkt_buf->id);
  if( rc != 0 ) {
    assert(rc == -EAGAIN);
    /* TXQ is full. A real app might consider implementing an overflow
     * queue in software. We simply choose not to send. */
    pkt_buf_free(pkt_buf);
  }
}
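/* Sketch of a poll loop that could drive handle_rx(), handle_rx_discard()
 * and complete_tx() above, using the ef_vi event API. Error handling, the
 * receive-prefix adjustment of the RX length and ring refill details are
 * omitted; vi_refill_rx_ring() is an illustrative helper name, not part of
 * ef_vi. */
static void main_loop(void)
{
  ef_event evs[32];
  ef_request_id ids[EF_VI_TRANSMIT_BATCH];
  int i, j, n_ev, n_tx;

  while( 1 )
    for( i = 0; i < 2; ++i ) {
      n_ev = ef_eventq_poll(&vis[i].vi, evs, sizeof(evs) / sizeof(evs[0]));
      for( j = 0; j < n_ev; ++j )
        switch( EF_EVENT_TYPE(evs[j]) ) {
        case EF_EVENT_TYPE_RX:
          handle_rx(i, EF_EVENT_RX_RQ_ID(evs[j]), EF_EVENT_RX_BYTES(evs[j]));
          break;
        case EF_EVENT_TYPE_RX_DISCARD:
          handle_rx_discard(EF_EVENT_RX_DISCARD_RQ_ID(evs[j]),
                            EF_EVENT_RX_DISCARD_TYPE(evs[j]));
          break;
        case EF_EVENT_TYPE_TX:
          n_tx = ef_vi_transmit_unbundle(&vis[i].vi, &evs[j], ids);
          for( ; n_tx > 0; --n_tx )
            complete_tx(i, ids[n_tx - 1]);
          break;
        }
      vi_refill_rx_ring(&vis[i]);  /* illustrative: re-post freed buffers */
    }
}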
/* Allocate and initialize the packet buffers. */
static int init_pkts_memory(void)
{
  int i;

  /* Number of buffers is the worst case needed to fill up all the queues,
   * assuming 2 VIs are allocated, each with an RXQ and a TXQ of the default
   * capacity of 512. */
  pbs.num = 4 * 512;
  pbs.mem_size = pbs.num * PKT_BUF_SIZE;
  pbs.mem_size = ROUND_UP(pbs.mem_size, huge_page_size);

  /* Allocate huge-page-aligned memory to give best chance of allocating
   * transparent huge-pages. */
  TEST(posix_memalign(&pbs.mem, huge_page_size, pbs.mem_size) == 0);

  for( i = 0; i < pbs.num; ++i ) {
    struct pkt_buf* pkt_buf = pkt_buf_from_id(i);
    pkt_buf->id = i;
    pkt_buf_free(pkt_buf);
  }
  return 0;
}
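/* If the VIs have already been allocated, the hard-coded 4 * 512 above could
 * instead be derived from the actual ring capacities. A sketch only, assuming
 * a global vis[] array like the forwarding snippets use. */
static int worst_case_pkt_bufs(void)
{
  int i, n = 0;
  for( i = 0; i < 2; ++i )
    n += ef_vi_receive_capacity(&vis[i].vi) +
         ef_vi_transmit_capacity(&vis[i].vi);
  return n;
}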
void pkt_buf_release(struct pkt_buf* pkt_buf)
{
  assert(pkt_buf->n_refs > 0);
  if( --pkt_buf->n_refs == 0 )
    pkt_buf_free(pkt_buf);
}
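/* Companion reference-count helper assumed by the release path above; a
 * forwarding path that posts the same buffer to several TX queues would take
 * one reference per queue and drop each on TX completion via
 * pkt_buf_release(). The helper name and signature are illustrative. */
static inline void pkt_buf_addref(struct pkt_buf* pkt_buf, int n)
{
  pkt_buf->n_refs += n;
}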
static void handle_rx_discard(struct vi_state* vi_state, int pkt_buf_i,
                              int discard_type)
{
  struct pkt_buf* pkt_buf = pkt_buf_from_id(vi_state, pkt_buf_i);
  pkt_buf_free(vi_state, pkt_buf);
}
static void complete_tx(int vi_i, int pkt_buf_i)
{
  struct pkt_buf* pkt_buf = pkt_buf_from_id(pkt_buf_i);
  pkt_buf_free(pkt_buf);
}
static void handle_rx_discard(int pkt_buf_i, int discard_type)
{
  struct pkt_buf* pkt_buf = pkt_buf_from_id(pkt_buf_i);
  pkt_buf_free(pkt_buf);
}