/* Transmit a batch on the queue this queue_out module is bound to.
 * Packets the driver does not accept are counted as dropped and freed. */
static void queue_out_process_batch(struct module *m, struct pkt_batch *batch)
{
	struct queue_out_priv *priv = get_priv(m);
	struct port *p = priv->port;

	const queue_t qid = priv->qid;

	uint64_t sent_bytes = 0;
	int sent_pkts;

	sent_pkts = priv->send_pkts(p, qid, batch->pkts, batch->cnt);

	/* Update per-queue TX stats, unless the driver keeps its own. */
	if (!(p->driver->flags & DRIVER_FLAG_SELF_OUT_STATS)) {
		const packet_dir_t dir = PACKET_DIR_OUT;

		for (int i = 0; i < sent_pkts; i++)
			sent_bytes += snb_total_len(batch->pkts[i]);

		p->queue_stats[dir][qid].packets += sent_pkts;
		p->queue_stats[dir][qid].dropped += (batch->cnt - sent_pkts);
		p->queue_stats[dir][qid].bytes += sent_bytes;
	}

	/* Free the tail of the batch that the driver did not take. */
	if (sent_pkts < batch->cnt)
		snb_free_bulk(batch->pkts + sent_pkts,
			      batch->cnt - sent_pkts);
}
/* Like queue_out above, but for port_out modules: the queue is hardcoded
 * to 0 (note the XXX) and stats are updated unconditionally. */
static void port_out_process_batch(struct module *m, struct pkt_batch *batch)
{
	struct port_out_priv *priv = get_priv(m);
	struct port *p = priv->port;

	const queue_t qid = 0;	/* XXX */

	uint64_t sent_bytes = 0;
	int sent_pkts;

	sent_pkts = p->driver->send_pkts(p, qid, batch->pkts, batch->cnt);

	for (int i = 0; i < sent_pkts; i++)
		sent_bytes += snb_total_len(batch->pkts[i]);

	p->queue_stats[PACKET_DIR_OUT][qid].packets += sent_pkts;
	p->queue_stats[PACKET_DIR_OUT][qid].dropped += (batch->cnt - sent_pkts);
	p->queue_stats[PACKET_DIR_OUT][qid].bytes += sent_bytes;

	if (sent_pkts < batch->cnt)
		snb_free_bulk(batch->pkts + sent_pkts,
			      batch->cnt - sent_pkts);
}
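/*
 * Both TX paths above depend on the same driver contract. This is a sketch
 * inferred from the calls and the cleanup logic, not an authoritative spec:
 *
 *   int send_pkts(struct port *p, queue_t qid, snb_array_t pkts, int cnt);
 *
 * send_pkts() attempts to transmit cnt packets on queue qid and returns
 * how many it accepted, with 0 <= ret <= cnt. Ownership of the first ret
 * packets transfers to the driver; the caller keeps the remaining
 * (cnt - ret) packets and is responsible for freeing them, which is why
 * both functions end with snb_free_bulk() on the unsent tail.
 */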
/* returns nonzero if RX interrupt is needed */
static int put_rx_q(struct port *p, queue_t qid, snb_array_t pkts, int cnt)
{
	struct vport_priv *priv = get_port_priv(p);
	struct queue *rx_queue = &priv->out_qs[qid];

	void *objs[SLOTS_PER_LLRING * 2];
	uint64_t bytes = 0;

	int ret;
	int i;

	for (i = 0; i < cnt; i++) {
		struct sn_rx_metadata *rx_meta;

		struct snbuf *pkt = pkts[i];
		struct rte_mbuf *mbuf;

		int total_len;

		total_len = snb_total_len(pkt);

		/* The RX metadata lives immediately before the packet data. */
		rx_meta = (struct sn_rx_metadata *)snb_head_data(pkt) - 1;

		rx_meta->length = total_len;
#if OLD_METADATA
		rx_meta->gso_mss = pkt->rx.gso_mss;
		rx_meta->csum_state = pkt->rx.csum_state;
#else
		rx_meta->gso_mss = 0;
		rx_meta->csum_state = SN_RX_CSUM_UNEXAMINED;
#endif
		rx_meta->host.seg_len = snb_head_len(pkt);
		rx_meta->host.seg_next = 0;

		/* Chain the remaining segments of a multi-segment packet:
		 * each segment's metadata records its own length, and the
		 * previous segment points to it by DMA address. */
		for (mbuf = pkt->mbuf.next; mbuf; mbuf = mbuf->next) {
			struct sn_rx_metadata *next_rx_meta;

			next_rx_meta = rte_pktmbuf_mtod(mbuf,
					struct sn_rx_metadata *) - 1;

			next_rx_meta->host.seg_len = rte_pktmbuf_data_len(mbuf);
			next_rx_meta->host.seg_next = 0;

			rx_meta->host.seg_next = snb_seg_dma_addr(mbuf) -
					sizeof(struct sn_rx_metadata);

			rx_meta = next_rx_meta;
		}

		/* Each packet occupies two ring slots: the buffer pointer
		 * and the DMA address of its metadata. */
		objs[i * 2 + 0] = (void *)pkt;
		objs[i * 2 + 1] = (void *)(snb_dma_addr(pkt) -
				sizeof(struct sn_rx_metadata));

		bytes += total_len;
	}

	ret = llring_enqueue_bulk(rx_queue->sn_to_drv, objs, cnt * 2);
	if (ret == -LLRING_ERR_NOBUF)
		return 0;

	/* TODO: generic notification architecture */
	if (__sync_bool_compare_and_swap(&rx_queue->rx_regs->irq_disabled,
				0, 1)) {
		ret = ioctl(priv->fd, SN_IOC_KICK_RX,
				1 << priv->map.rxq_to_cpu[qid]);
		if (ret)
			perror("ioctl_kick_rx");
	}

	/* TODO: defer this */
	/* Lazy deallocation of packet buffers returned by the driver */
	ret = llring_dequeue_burst(rx_queue->drv_to_sn, objs,
			SLOTS_PER_LLRING);
	if (ret > 0)
		snb_free_bulk((snb_array_t) objs, ret);

	return cnt;
}
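/*
 * Buffer layout assumed by put_rx_q() above. This is a sketch inferred from
 * the pointer arithmetic (and assumes snb_dma_addr() is the DMA address of
 * snb_head_data()), not an authoritative spec:
 *
 *   ... headroom ... | struct sn_rx_metadata | packet data (seg_len bytes)
 *                    ^                       ^
 *                    objs[i*2+1]             snb_head_data(pkt)
 *                    (DMA address handed
 *                     to the driver)
 *
 * The driver side would then dequeue (snbuf pointer, metadata DMA address)
 * pairs from sn_to_drv, follow host.seg_next for chained segments, and push
 * consumed buffer pointers back on drv_to_sn, where they are lazily freed
 * at the end of the next put_rx_q() call.
 */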