	/* Free skb and update statistics */
	dev_kfree_skb(skb);
	kni->stats.tx_dropped++;

	return NETDEV_TX_OK;
}
#else /*RTE_LIBRW_PIOT*/
static int
kni_net_tx(struct sk_buff *skb, struct net_device *dev)
{
	int len = 0;
	unsigned ret;
	struct kni_dev *kni = netdev_priv(dev);
	struct rte_kni_mbuf *pkt_kva = NULL;
	struct rte_kni_mbuf *pkt_va = NULL;

	dev->trans_start = jiffies; /* save the timestamp */

	/* Drop the skb if it does not fit into a single mbuf */
	if (skb->len > kni->mbuf_size)
		goto drop;

	/**
	 * Check if it has at least one free entry in tx_q and
	 * one entry in alloc_q.
	 */
	if (kni_fifo_free_count(kni->tx_q) == 0 ||
	    kni_fifo_count(kni->alloc_q) == 0) {
		/**
		 * If no free entry in tx_q or no entry in alloc_q,
		 * drop the skb and go out.
		 */
		goto drop;
	}

	/* dequeue a mbuf from alloc_q */
	ret = kni_fifo_get(kni->alloc_q, (void **)&pkt_va, 1);
	if (likely(ret == 1)) {
		void *data_kva;

		pkt_kva = (void *)pkt_va - kni->mbuf_va + kni->mbuf_kva;
		data_kva = pkt_kva->buf_addr + pkt_kva->data_off
			   - kni->mbuf_va + kni->mbuf_kva;

		len = skb->len;
		memcpy(data_kva, skb->data, len);
		if (unlikely(len < ETH_ZLEN)) {
			/* Pad short frames to the minimum Ethernet length */
			memset(data_kva + len, 0, ETH_ZLEN - len);
			len = ETH_ZLEN;
		}
		pkt_kva->pkt_len = len;
		pkt_kva->data_len = len;

		/* enqueue mbuf into tx_q */
		ret = kni_fifo_put(kni->tx_q, (void **)&pkt_va, 1);
		if (unlikely(ret != 1)) {
			/* Failing should not happen */
			KNI_ERR("Fail to enqueue mbuf into tx_q\n");
			goto drop;
		}
	} else {
		/* Failing should not happen */
		KNI_ERR("Fail to dequeue mbuf from alloc_q\n");
		goto drop;
	}

	/* Free skb and update statistics */
	dev_kfree_skb(skb);
	kni->stats.tx_bytes += len;
	kni->stats.tx_packets++;

	return NETDEV_TX_OK;

drop:
	/* Free skb and update statistics */
	dev_kfree_skb(skb);
	kni->stats.tx_dropped++;

	return NETDEV_TX_OK;
}
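
/*
 * Note on pointer translation: mbuf pointers exchanged through the
 * rx_q/tx_q/alloc_q/free_q FIFOs are userspace virtual addresses into
 * the mbuf pool, so kernel code rebases them before use. A minimal
 * sketch of the arithmetic used throughout this file (the helper name
 * is illustrative, not part of this driver; it assumes kni->mbuf_va and
 * kni->mbuf_kva map the same contiguous pool):
 *
 *	static inline void *kni_va_to_kva(struct kni_dev *kni, void *va)
 *	{
 *		return va - kni->mbuf_va + kni->mbuf_kva;
 *	}
 *
 * The same offset is applied again to buf_addr + data_off to reach the
 * packet data from the kernel side.
 */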

/*
 * RX: loopback with enqueue/dequeue fifos and sk buffer copies.
 */
static void
kni_net_rx_lo_fifo_skb(struct kni_dev *kni)
{
	unsigned ret;
	uint32_t len;
	unsigned i, num_rq, num_fq, num;
	struct rte_kni_mbuf *kva;
	struct rte_kni_mbuf *va[MBUF_BURST_SZ];
	void *data_kva;
	struct sk_buff *skb;
	struct net_device *dev = kni->net_dev;

	/* Get the number of entries in rx_q */
	num_rq = kni_fifo_count(kni->rx_q);

	/* Get the number of free entries in free_q */
	num_fq = kni_fifo_free_count(kni->free_q);

	/* Calculate the number of entries to dequeue from rx_q */
	num = min(num_rq, num_fq);
	num = min(num, (unsigned)MBUF_BURST_SZ);

	/* Return if no entry to dequeue from rx_q */
	if (num == 0)
		return;

	/* Burst dequeue mbufs from rx_q */
	ret = kni_fifo_get(kni->rx_q, (void **)va, num);
	if (ret == 0)
		return;

	/* Copy mbufs to sk buffer and then call tx interface */
	for (i = 0; i < num; i++) {
		kva = (void *)va[i] - kni->mbuf_va + kni->mbuf_kva;
		len = kva->data_len;
		data_kva = kva->buf_addr + kva->data_off - kni->mbuf_va
			   + kni->mbuf_kva;

		skb = dev_alloc_skb(len + 2);
		if (skb == NULL) {
			KNI_ERR("Out of mem, dropping pkts\n");
		} else {
			/* Align IP on 16B boundary */
			skb_reserve(skb, 2);
			memcpy(skb_put(skb, len), data_kva, len);
			skb->dev = dev;
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			dev_kfree_skb(skb);
		}

		/* Simulate real usage, allocate/copy skb twice */
		skb = dev_alloc_skb(len + 2);
		if (skb == NULL) {
			KNI_ERR("Out of mem, dropping pkts\n");
			kni->stats.rx_dropped++;
		} else {
			/* Align IP on 16B boundary */
			skb_reserve(skb, 2);
			memcpy(skb_put(skb, len), data_kva, len);
			skb->dev = dev;
			skb->ip_summed = CHECKSUM_UNNECESSARY;

			kni->stats.rx_bytes += len;
			kni->stats.rx_packets++;

			/* call tx interface */
			kni_net_tx(skb, dev);
		}
	}

	/* enqueue all the mbufs from rx_q into free_q */
	ret = kni_fifo_put(kni->free_q, (void **)&va, num);
	if (ret != num)
		/* Failing should not happen */
		KNI_ERR("Fail to enqueue mbufs into free_q\n");
}
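
/*
 * The kni_net_tx() variant below extends the single-mbuf version above:
 * an skb larger than kni->mbuf_size is split across up to
 * RW_FPATH_KNI_MAX_SEGS mbufs chained through the mbuf next pointer.
 * For illustration (assumed numbers, not from this driver): with a
 * 2048-byte mbuf_size, a 3000-byte skb needs (3000 / 2048) + 1 = 2 mbufs.
 */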
static int
kni_net_tx(struct sk_buff *skb, struct net_device *dev)
{
	int len = 0;
	unsigned ret;
	struct kni_dev *kni = netdev_priv(dev);
	struct rte_kni_mbuf *pkt_kva;
	struct rte_kni_mbuf *pkt_va[RW_FPATH_KNI_MAX_SEGS];
	int num_req_mbuf = 1;
	int err;

	kni->tx_attempted++;

	err = skb_linearize(skb);
	if (unlikely(err))
		goto drop;

#ifdef RTE_LIBRW_NOHUGE
	if (kni->nohuge) {
		kni->nl_tx_queued++;
		skb_queue_tail(&kni->skb_tx_queue, skb);
		return NETDEV_TX_OK;
	}
#endif
	dev->trans_start = jiffies; /* save the timestamp */

	/* Work out how many mbufs are needed to hold the skb */
	if (skb->len > kni->mbuf_size) {
		num_req_mbuf = (skb->len / kni->mbuf_size) + 1;
		if (num_req_mbuf > RW_FPATH_KNI_MAX_SEGS)
			goto drop;
	}

	if (kni->no_tx || kni->no_data)
		goto drop;

	if (kni_fifo_free_count(kni->tx_q) < num_req_mbuf) {
		kni->tx_no_txq++;
		/* Not enough free entries in tx_q, drop skb and go out. */
		goto drop;
	}

	if (kni_fifo_count(kni->alloc_q) < num_req_mbuf) {
		kni->tx_no_allocq++;
		/* Not enough entries in alloc_q, drop skb and go out. */
		goto drop;
	}

	/* dequeue the required mbufs from alloc_q */
	ret = kni_fifo_get(kni->alloc_q, (void **)&pkt_va[0], num_req_mbuf);
	if (likely(ret == num_req_mbuf)) {
		int seg_no = 0;
		int copylen, remlen;
		unsigned char *to, *from;
		int next;
		struct rte_kni_mbuf **prev;

		len = skb->len;
		pkt_kva = (void *)pkt_va[seg_no] - kni->mbuf_va
			  + kni->mbuf_kva;
		/* The chain link is written through the kernel mapping;
		 * the stored value stays a userspace VA for the consumer. */
		prev = (struct rte_kni_mbuf **)&pkt_kva->next;
		pkt_kva->pkt_len = len;
		RW_KNI_VF_SET_MDATA_PAYLOAD(&pkt_kva->meta_data,
					    skb->protocol);
		from = (unsigned char *)skb->data;
		to = (unsigned char *)(pkt_kva->buf_addr + pkt_kva->data_off
				       - kni->mbuf_va + kni->mbuf_kva);
		remlen = kni->mbuf_size;
		next = 0;
		while (len > 0) {
			copylen = len;
			if (copylen > remlen) {
				/* Segment full; continue in the next one */
				next = 1;
				copylen = remlen;
			}
			memcpy(to, from, copylen);
			to += copylen;
			from += copylen;
			len -= copylen;
			remlen -= copylen;
			if (unlikely(len < ETH_ZLEN)) {
#if 0 //AKKI
				memset(data_kva + len, 0, ETH_ZLEN - len);
				len = ETH_ZLEN;
#endif
			}
			/* alloc_q mbufs are expected to arrive with
			 * data_len == 0 */
			pkt_kva->data_len += copylen;
			if (next) {
				seg_no++;
				*prev = pkt_va[seg_no];
				pkt_kva = (void *)pkt_va[seg_no]
					  - kni->mbuf_va + kni->mbuf_kva;
				prev = (struct rte_kni_mbuf **)&pkt_kva->next;
				to = (unsigned char *)(pkt_kva->buf_addr
						       + pkt_kva->data_off
						       - kni->mbuf_va
						       + kni->mbuf_kva);
				remlen = kni->mbuf_size;
				next = 0;
				//AKKI increment the nb_segs.. bug bug
			}
		}

		/* enqueue the head mbuf of the chain into tx_q */
		ret = kni_fifo_put(kni->tx_q, (void **)&pkt_va[0], 1);
		if (unlikely(ret != 1)) {
			/* Failing should not happen */
			KNI_ERR("Fail to enqueue mbuf into tx_q\n");
			goto drop;
		}
	} else {
		/* Failing should not happen */
		KNI_ERR("Fail to dequeue mbuf from alloc_q\n");
		goto drop;
	}

	/* Update statistics and free skb; len was consumed by the copy
	 * loop, so account the full packet length from the skb. */
	kni->stats.tx_bytes += skb->len;
	dev_kfree_skb(skb);
	kni->stats.tx_packets++;

	return NETDEV_TX_OK;

drop:
	/* Free skb and update statistics */
	dev_kfree_skb(skb);
	kni->stats.tx_dropped++;

	return NETDEV_TX_OK;
}
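
/*
 * The loopback path below touches all four FIFOs, so the burst size is
 * clamped to the most constrained queue before anything is dequeued,
 * conceptually:
 *
 *	num = min(count(rx_q), free(tx_q), count(alloc_q), free(free_q),
 *		  MBUF_BURST_SZ)
 *
 * (pseudo-notation; the code uses successive min() calls). Checking all
 * queues up front is why the enqueues at the end are treated as
 * "should not fail".
 */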

/*
 * RX: loopback with enqueue/dequeue fifos.
 */
static void
kni_net_rx_lo_fifo(struct kni_dev *kni)
{
	unsigned ret;
	uint32_t len;
	unsigned i, num, num_rq, num_tq, num_aq, num_fq;
	struct rte_kni_mbuf *kva;
	struct rte_kni_mbuf *va[MBUF_BURST_SZ];
	void *data_kva;
	struct rte_kni_mbuf *alloc_kva;
	struct rte_kni_mbuf *alloc_va[MBUF_BURST_SZ];
	void *alloc_data_kva;

	/* Get the number of entries in rx_q */
	num_rq = kni_fifo_count(kni->rx_q);

	/* Get the number of free entries in tx_q */
	num_tq = kni_fifo_free_count(kni->tx_q);

	/* Get the number of entries in alloc_q */
	num_aq = kni_fifo_count(kni->alloc_q);

	/* Get the number of free entries in free_q */
	num_fq = kni_fifo_free_count(kni->free_q);

	/* Calculate the number of entries to be dequeued from rx_q */
	num = min(num_rq, num_tq);
	num = min(num, num_aq);
	num = min(num, num_fq);
	num = min(num, (unsigned)MBUF_BURST_SZ);

	/* Return if no entry to dequeue from rx_q */
	if (num == 0)
		return;

	/* Burst dequeue from rx_q */
	ret = kni_fifo_get(kni->rx_q, (void **)va, num);
	if (ret == 0)
		return; /* Failing should not happen */

	/* Dequeue entries from alloc_q */
	ret = kni_fifo_get(kni->alloc_q, (void **)alloc_va, num);
	if (ret) {
		num = ret;
		/* Copy mbufs */
		for (i = 0; i < num; i++) {
			kva = (void *)va[i] - kni->mbuf_va + kni->mbuf_kva;
			len = kva->pkt_len;
			data_kva = kva->buf_addr + kva->data_off
				   - kni->mbuf_va + kni->mbuf_kva;

			alloc_kva = (void *)alloc_va[i] - kni->mbuf_va
				    + kni->mbuf_kva;
			alloc_data_kva = alloc_kva->buf_addr
					 + alloc_kva->data_off
					 - kni->mbuf_va + kni->mbuf_kva;
			memcpy(alloc_data_kva, data_kva, len);
			alloc_kva->pkt_len = len;
			alloc_kva->data_len = len;

			kni->stats.tx_bytes += len;
			kni->stats.rx_bytes += len;
		}

		/* Burst enqueue mbufs into tx_q */
		ret = kni_fifo_put(kni->tx_q, (void **)alloc_va, num);
		if (ret != num)
			/* Failing should not happen */
			KNI_ERR("Fail to enqueue mbufs into tx_q\n");
	}

	/* Burst enqueue mbufs into free_q */
	ret = kni_fifo_put(kni->free_q, (void **)va, num);
	if (ret != num)
		/* Failing should not happen */
		KNI_ERR("Fail to enqueue mbufs into free_q\n");

	/**
	 * Update statistics; enqueue/dequeue failures are impossible,
	 * as all queues were checked first.
	 */
	kni->stats.tx_packets += num;
	kni->stats.rx_packets += num;
}
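
/*
 * The normal RX path below hands packets to the kernel stack:
 * eth_type_trans() consumes the Ethernet header and sets skb->protocol,
 * and netif_rx() queues the skb for protocol processing in softirq
 * context.
 */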

/*
 * RX: normal working mode
 */
static void
kni_net_rx_normal(struct kni_dev *kni)
{
	unsigned ret;
	uint32_t len;
	unsigned i, num, num_rq, num_fq;
	struct rte_kni_mbuf *kva;
	struct rte_kni_mbuf *va[MBUF_BURST_SZ];
	void *data_kva;
	struct sk_buff *skb;
	struct net_device *dev = kni->net_dev;

	/* Get the number of entries in rx_q */
	num_rq = kni_fifo_count(kni->rx_q);

	/* Get the number of free entries in free_q */
	num_fq = kni_fifo_free_count(kni->free_q);

	/* Calculate the number of entries to dequeue from rx_q */
	num = min(num_rq, num_fq);
	num = min(num, (unsigned)MBUF_BURST_SZ);

	/* Return if no entry in rx_q or no free entry in free_q */
	if (num == 0)
		return;

	/* Burst dequeue from rx_q */
	ret = kni_fifo_get(kni->rx_q, (void **)va, num);
	if (ret == 0)
		return; /* Failing should not happen */

	/* Transfer received packets to netif */
	for (i = 0; i < num; i++) {
		kva = (void *)va[i] - kni->mbuf_va + kni->mbuf_kva;
		len = kva->data_len;
		data_kva = kva->buf_addr + kva->data_off - kni->mbuf_va
			   + kni->mbuf_kva;

		skb = dev_alloc_skb(len + 2);
		if (!skb) {
			KNI_ERR("Out of mem, dropping pkts\n");
			/* Update statistics */
			kni->stats.rx_dropped++;
		} else {
			/* Align IP on 16B boundary */
			skb_reserve(skb, 2);
			memcpy(skb_put(skb, len), data_kva, len);
			skb->dev = dev;
			skb->protocol = eth_type_trans(skb, dev);
			skb->ip_summed = CHECKSUM_UNNECESSARY;

			/* Call netif interface */
			netif_rx(skb);

			/* Update statistics */
			kni->stats.rx_bytes += len;
			kni->stats.rx_packets++;
		}
	}

	/* Burst enqueue mbufs into free_q */
	ret = kni_fifo_put(kni->free_q, (void **)va, num);
	if (ret != num)
		/* Failing should not happen */
		KNI_ERR("Fail to enqueue entries into free_q\n");
}
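
/*
 * The second kni_net_rx_normal() variant below handles multi-segment
 * packets: it allocates one skb of pkt_len bytes, then walks the mbuf
 * next chain, translating each stored userspace pointer to a kernel
 * address before copying that segment into the skb.
 */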

/*
 * RX: normal working mode
 */
static void
kni_net_rx_normal(struct kni_dev *kni)
{
	unsigned ret;
	uint32_t pkt_len;
	uint32_t data_len;
	unsigned i, num, num_rq, num_fq;
	struct rte_kni_mbuf *kva;
	struct rte_kni_mbuf *va[MBUF_BURST_SZ];
	void *data_kva;
	int copied_len = 0;
	int num_segs = 0;
	struct sk_buff *skb;
	struct net_device *dev = kni->net_dev;

	if (kni->no_data)
		return;

	/* Get the number of entries in rx_q */
	num_rq = kni_fifo_count(kni->rx_q);

	/* Get the number of free entries in free_q */
	num_fq = kni_fifo_free_count(kni->free_q);

	/* Calculate the number of entries to dequeue from rx_q */
	num = min(num_rq, num_fq);
	num = min(num, (unsigned)MBUF_BURST_SZ);

	/* Return if no entry in rx_q or no free entry in free_q */
	if (num == 0)
		return;

	/* Burst dequeue from rx_q */
	ret = kni_fifo_get(kni->rx_q, (void **)va, num);
	if (ret == 0)
		return; /* Failing should not happen */

	/* Transfer received packets to netif */
	for (i = 0; i < num; i++) {
		kva = (void *)va[i] - kni->mbuf_va + kni->mbuf_kva;
		pkt_len = kva->pkt_len;

		skb = dev_alloc_skb(pkt_len + 2);
		if (!skb) {
			KNI_ERR("Out of mem, dropping pkts\n");
			/* Update statistics */
			kni->stats.rx_dropped++;
			continue;
		}
		/* Align IP on 16B boundary */
		skb_reserve(skb, 2);

		/* Copy every segment of the chain; the stored next
		 * pointers are userspace VAs and must be translated on
		 * each hop. */
		copied_len = 0;
		num_segs = 0;
		kva = (void *)va[i];
		do {
			kva = (void *)kva - kni->mbuf_va + kni->mbuf_kva;
			data_kva = kva->buf_addr + kva->data_off
				   - kni->mbuf_va + kni->mbuf_kva;
			data_len = kva->data_len;
			memcpy(skb_put(skb, data_len), data_kva, data_len);
			copied_len += data_len;
			num_segs++;
		} while ((kva = (void *)kva->next) != NULL);

		/* Go back to the head of the chain */
		kva = (void *)va[i] - kni->mbuf_va + kni->mbuf_kva;
		kni_net_process_rx_packet(skb, dev, &kva->meta_data);

		kni->stats.rx_bytes += pkt_len;
	}

	/* Burst enqueue mbufs into free_q */
	ret = kni_fifo_put(kni->free_q, (void **)va, num);
	if (ret != num)
		/* Failing should not happen */
		KNI_ERR("Fail to enqueue entries into free_q\n");
}
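
/*
 * The vhost paths below copy packet data directly between the mbuf pool
 * and a userspace msghdr. Kernels with the iov_iter-based msghdr
 * (HAVE_IOV_ITER_MSGHDR) use copy_from_iter()/copy_to_iter(); older
 * kernels use memcpy_fromiovecend()/memcpy_toiovecend() with an
 * explicit offset.
 */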
static inline int
kni_vhost_net_tx(struct kni_dev *kni, struct msghdr *m,
		 unsigned offset, unsigned len)
{
	struct rte_kni_mbuf *pkt_kva = NULL;
	struct rte_kni_mbuf *pkt_va = NULL;
	int ret;

	KNI_DBG_TX("tx offset=%d, len=%d, iovlen=%d\n",
#ifdef HAVE_IOV_ITER_MSGHDR
		   offset, len, (int)m->msg_iter.iov->iov_len);
#else
		   offset, len, (int)m->msg_iov->iov_len);
#endif

	/**
	 * Check if it has at least one free entry in tx_q and
	 * one entry in alloc_q.
	 */
	if (kni_fifo_free_count(kni->tx_q) == 0 ||
	    kni_fifo_count(kni->alloc_q) == 0) {
		/**
		 * If no free entry in tx_q or no entry in alloc_q,
		 * drop the packet and go out.
		 */
		goto drop;
	}

	/* dequeue a mbuf from alloc_q */
	ret = kni_fifo_get(kni->alloc_q, (void **)&pkt_va, 1);
	if (likely(ret == 1)) {
		void *data_kva;

		pkt_kva = (void *)pkt_va - kni->mbuf_va + kni->mbuf_kva;
		data_kva = pkt_kva->buf_addr + pkt_kva->data_off
			   - kni->mbuf_va + kni->mbuf_kva;

#ifdef HAVE_IOV_ITER_MSGHDR
		copy_from_iter(data_kva, len, &m->msg_iter);
#else
		memcpy_fromiovecend(data_kva, m->msg_iov, offset, len);
#endif

		if (unlikely(len < ETH_ZLEN)) {
			/* Pad short frames to the minimum Ethernet length */
			memset(data_kva + len, 0, ETH_ZLEN - len);
			len = ETH_ZLEN;
		}
		pkt_kva->pkt_len = len;
		pkt_kva->data_len = len;

		/* enqueue mbuf into tx_q */
		ret = kni_fifo_put(kni->tx_q, (void **)&pkt_va, 1);
		if (unlikely(ret != 1)) {
			/* Failing should not happen */
			KNI_ERR("Fail to enqueue mbuf into tx_q\n");
			goto drop;
		}
	} else {
		/* Failing should not happen */
		KNI_ERR("Fail to dequeue mbuf from alloc_q\n");
		goto drop;
	}

	/* update statistics */
	kni->stats.tx_bytes += len;
	kni->stats.tx_packets++;

	return 0;

drop:
	/* update statistics */
	kni->stats.tx_dropped++;

	return 0;
}
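
/*
 * In the vhost RX path below, the skbs queued on sk_receive_queue are
 * only carriers: skb->data holds the kernel virtual address of an mbuf
 * rather than packet bytes. Once the pointer is extracted, skb->data is
 * cleared and the skb itself is recycled through q->fifo.
 */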
static inline int
kni_vhost_net_rx(struct kni_dev *kni, struct msghdr *m,
		 unsigned offset, unsigned len)
{
	uint32_t pkt_len;
	struct rte_kni_mbuf *kva;
	struct rte_kni_mbuf *va;
	void *data_kva;
	struct sk_buff *skb;
	struct kni_vhost_queue *q = kni->vhost_queue;

	if (unlikely(q == NULL))
		return 0;

	/* ensure at least one entry in free_q */
	if (unlikely(kni_fifo_free_count(kni->free_q) == 0))
		return 0;

	skb = skb_dequeue(&q->sk.sk_receive_queue);
	if (unlikely(skb == NULL))
		return 0;

	kva = (struct rte_kni_mbuf *)skb->data;

	/* free skb to cache */
	skb->data = NULL;
	if (unlikely(1 != kni_fifo_put(q->fifo, (void **)&skb, 1)))
		/* Failing should not happen */
		KNI_ERR("Fail to enqueue entries into rx cache fifo\n");

	pkt_len = kva->data_len;
	if (unlikely(pkt_len > len))
		goto drop;

	KNI_DBG_RX("rx offset=%d, len=%d, pkt_len=%d, iovlen=%d\n",
#ifdef HAVE_IOV_ITER_MSGHDR
		   offset, len, pkt_len, (int)m->msg_iter.iov->iov_len);
#else
		   offset, len, pkt_len, (int)m->msg_iov->iov_len);
#endif

	data_kva = kva->buf_addr + kva->data_off - kni->mbuf_va
		   + kni->mbuf_kva;

#ifdef HAVE_IOV_ITER_MSGHDR
	if (unlikely(copy_to_iter(data_kva, pkt_len, &m->msg_iter) !=
		     pkt_len))
#else
	if (unlikely(memcpy_toiovecend(m->msg_iov, data_kva, offset,
				       pkt_len)))
#endif
		goto drop;

	/* Update statistics */
	kni->stats.rx_bytes += pkt_len;
	kni->stats.rx_packets++;

	/* enqueue mbufs into free_q */
	va = (void *)kva - kni->mbuf_kva + kni->mbuf_va;
	if (unlikely(1 != kni_fifo_put(kni->free_q, (void **)&va, 1)))
		/* Failing should not happen */
		KNI_ERR("Fail to enqueue entries into free_q\n");

	KNI_DBG_RX("receive done %d\n", pkt_len);

	return pkt_len;

drop:
	/* Update drop statistics */
	kni->stats.rx_dropped++;

	return 0;
}