/**
 * Registered rx_handler, invoked before the last deferred delivery of the
 * skb to the ETH_P_ALL taps. At this point we restore the original pkt_type
 * that was stashed in skb->mark by rw_fpath_kni_set_skb_packet_type().
 *
 * @param[in] pskb - double pointer to the skb, in case we need to clone it.
 *
 * @returns the action to be taken on the skb; we may consume it here.
 */
rx_handler_result_t rw_fpath_kni_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct kni_dev *kni;
	rx_handler_result_t ret = RX_HANDLER_PASS;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return RX_HANDLER_CONSUMED;

	if (!skb->dev) {
		KNI_ERR("No device in the skb in rx_handler\n");
		return RX_HANDLER_PASS;
	}

	kni = netdev_priv(skb->dev);
	if (!kni) {
		KNI_ERR("No kni private data in the device in rx_handler\n");
		return RX_HANDLER_PASS;
	}

	*pskb = skb;

	switch (skb->pkt_type) {
	case PACKET_OUTGOING:
		skb->pkt_type = PACKET_OTHERHOST;
		kni->rx_treat_as_tx_filtered++;
		consume_skb(skb);
		ret = RX_HANDLER_CONSUMED;
		break;
	case PACKET_LOOPBACK:
		/* Restore the original packet type stashed in the mark */
		skb->pkt_type = skb->mark;
		if (skb->pkt_type == PACKET_OTHERHOST) {
			/* Force the packet to be accepted by the IP stack */
			skb->pkt_type = PACKET_HOST;
		}
		kni->rx_treat_as_tx_delivered++;
		skb->mark = 0;
		break;
	case PACKET_OTHERHOST:
		kni->rx_filtered++;
		consume_skb(skb);
		ret = RX_HANDLER_CONSUMED;
		break;
	default:
		kni->rx_delivered++;
		break;
	}

	return ret;
}
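/*
 * Illustrative sketch only (the registration code is not part of this
 * excerpt): a handler with the signature above is normally attached to the
 * KNI net_device through the kernel's rx_handler hook, and both registration
 * and unregistration must run under rtnl_lock(). The function name below is
 * hypothetical.
 */
#if 0
static int rw_fpath_kni_attach_rx_handler(struct net_device *dev)
{
	int err;

	rtnl_lock();
	/* rw_fpath_kni_handle_frame() is then invoked for every skb the
	 * device receives, before delivery to the protocol handlers. */
	err = netdev_rx_handler_register(dev, rw_fpath_kni_handle_frame, NULL);
	rtnl_unlock();

	return err;
}
#endif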
/**
 * Called before delivering the skb to the core network stack in dev.c.
 * Depending on the mbuf flags, we modify the packet type. The original
 * packet type cannot be stored in the skb's control block, since the control
 * block is used by different layers; if we needed the control block we would
 * have to clone the skb. Instead, the original packet type is stashed in
 * skb->mark.
 *
 * @param[in] mbuf - metadata of the originating mbuf
 * @param[in] skb  - pointer to the skb
 *
 * @returns none
 */
static void rw_fpath_kni_set_skb_packet_type(struct rw_kni_mbuf_metadata *mbuf,
					     struct sk_buff *skb)
{
	int pkt_type;
	struct kni_dev *kni;

	if (!mbuf || !skb)
		return;

	if (!skb->dev) {
		KNI_ERR("No device in the skb on receive\n");
		return;
	}

	kni = netdev_priv(skb->dev);
	if (!kni) {
		KNI_ERR("No kni private data in the device on receive\n");
		return;
	}

	skb->vlan_tci = 0;
	/* Store the original packet type */
	pkt_type = skb->pkt_type;

	if (RW_KNI_VF_VALID_MDATA_ACTION_FLAGS(mbuf)) {
		if (RW_KNI_VF_GET_MDATA_ACTION_FLAGS(mbuf) &
		    RW_FPATH_PKT_KNI_NEED_FLOW_LOOKUP) {
			skb->vlan_tci = 1;
		}
		if (RW_KNI_VF_GET_MDATA_ACTION_FLAGS(mbuf) &
		    RW_FPATH_PKT_KNI_TREAT_AS_TX) {
			/*
			 * The read lock is held at this point. Ideally the
			 * write lock should be held here, but this is the
			 * only place where these counters are incremented.
			 */
			kni->rx_treat_as_tx++;
			if (RW_KNI_VF_GET_MDATA_ACTION_FLAGS(mbuf) &
			    RW_FPATH_PKT_KNI_DISCARD_ON_RX) {
				skb->pkt_type = PACKET_OUTGOING;
			} else {
				skb->pkt_type = PACKET_LOOPBACK;
			}
		} else {
			kni->rx_only++;
			if (RW_KNI_VF_GET_MDATA_ACTION_FLAGS(mbuf) &
			    RW_FPATH_PKT_KNI_DISCARD_ON_RX) {
				skb->pkt_type = PACKET_OTHERHOST;
			}
		}
	} else {
		kni->rx_only++;
	}
	/* Stash the original packet type in the mark */
	skb->mark = pkt_type;
}
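/*
 * Summary of the mapping applied above (derived from the code; flag names
 * abbreviated by dropping the RW_FPATH_PKT_KNI_ prefix):
 *
 *   TREAT_AS_TX | DISCARD_ON_RX -> PACKET_OUTGOING  (consumed in rx_handler)
 *   TREAT_AS_TX                 -> PACKET_LOOPBACK  (delivered after the
 *                                                    original type is restored)
 *   DISCARD_ON_RX               -> PACKET_OTHERHOST (consumed in rx_handler)
 *   no action flags             -> pkt_type left unchanged
 *
 * The pre-modification pkt_type is stashed in skb->mark and restored by
 * rw_fpath_kni_handle_frame() before the final ETH_P_ALL delivery.
 */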
/*
 * Send a request to userspace via req_q and wait (up to 3*HZ) for the
 * response on resp_q. The request/response payload is exchanged through the
 * shared sync buffer.
 */
static int
kni_net_process_request(struct kni_dev *kni, struct rte_kni_request *req)
{
	int ret = -1;
	void *resp_va;
	unsigned num;
	int ret_val;

	if (!kni || !req) {
		KNI_ERR("No kni instance or request\n");
		return -EINVAL;
	}

	mutex_lock(&kni->sync_lock);

	/* Construct data */
	memcpy(kni->sync_kva, req, sizeof(struct rte_kni_request));
	num = kni_fifo_put(kni->req_q, &kni->sync_va, 1);
	if (num < 1) {
		KNI_ERR("Cannot send to req_q\n");
		ret = -EBUSY;
		goto fail;
	}

	ret_val = wait_event_interruptible_timeout(kni->wq,
			kni_fifo_count(kni->resp_q), 3 * HZ);
	if (signal_pending(current) || ret_val <= 0) {
		ret = -ETIME;
		goto fail;
	}

	num = kni_fifo_get(kni->resp_q, (void **)&resp_va, 1);
	if (num != 1 || resp_va != kni->sync_va) {
		/* This should never happen */
		KNI_ERR("No data in resp_q\n");
		ret = -ENODATA;
		goto fail;
	}

	memcpy(req, kni->sync_kva, sizeof(struct rte_kni_request));
	ret = 0;

fail:
	mutex_unlock(&kni->sync_lock);

	return ret;
}
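/*
 * Illustrative sketch only (assumption: the real handlers live elsewhere in
 * this file): a typical caller, such as a change-MTU ndo, builds an
 * rte_kni_request, passes it to kni_net_process_request(), and then checks
 * req.result filled in by the userspace side.
 */
#if 0
static int kni_net_change_mtu_example(struct net_device *dev, int new_mtu)
{
	int ret;
	struct rte_kni_request req;
	struct kni_dev *kni = netdev_priv(dev);

	memset(&req, 0, sizeof(req));
	req.req_id = RTE_KNI_REQ_CHANGE_MTU;
	req.new_mtu = new_mtu;

	/* Blocks for up to 3*HZ waiting for the userspace response. */
	ret = kni_net_process_request(kni, &req);
	if (ret == 0 && req.result == 0)
		dev->mtu = new_mtu;

	return (ret == 0) ? req.result : ret;
}
#endif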
	/* Free skb and update statistics */
	dev_kfree_skb(skb);
	kni->stats.tx_dropped++;

	return NETDEV_TX_OK;
}
#else /*RTE_LIBRW_PIOT*/
static int
kni_net_tx(struct sk_buff *skb, struct net_device *dev)
{
	int len = 0;
	unsigned ret;
	struct kni_dev *kni = netdev_priv(dev);
	struct rte_kni_mbuf *pkt_kva = NULL;
	struct rte_kni_mbuf *pkt_va = NULL;

	dev->trans_start = jiffies; /* save the timestamp */

	/* Check if the length of skb is less than mbuf size */
	if (skb->len > kni->mbuf_size)
		goto drop;

	/**
	 * Check if it has at least one free entry in tx_q and
	 * one entry in alloc_q.
	 */
	if (kni_fifo_free_count(kni->tx_q) == 0 ||
			kni_fifo_count(kni->alloc_q) == 0) {
		/**
		 * If no free entry in tx_q or no entry in alloc_q,
		 * drops skb and goes out.
		 */
		goto drop;
	}

	/* dequeue a mbuf from alloc_q */
	ret = kni_fifo_get(kni->alloc_q, (void **)&pkt_va, 1);
	if (likely(ret == 1)) {
		void *data_kva;

		pkt_kva = (void *)pkt_va - kni->mbuf_va + kni->mbuf_kva;
		data_kva = pkt_kva->buf_addr + pkt_kva->data_off -
				kni->mbuf_va + kni->mbuf_kva;

		len = skb->len;
		memcpy(data_kva, skb->data, len);
		if (unlikely(len < ETH_ZLEN)) {
			memset(data_kva + len, 0, ETH_ZLEN - len);
			len = ETH_ZLEN;
		}
		pkt_kva->pkt_len = len;
		pkt_kva->data_len = len;

		/* enqueue mbuf into tx_q */
		ret = kni_fifo_put(kni->tx_q, (void **)&pkt_va, 1);
		if (unlikely(ret != 1)) {
			/* Failing should not happen */
			KNI_ERR("Fail to enqueue mbuf into tx_q\n");
			goto drop;
		}
	} else {
		/* Failing should not happen */
		KNI_ERR("Fail to dequeue mbuf from alloc_q\n");
		goto drop;
	}

	/* Free skb and update statistics */
	dev_kfree_skb(skb);
	kni->stats.tx_bytes += len;
	kni->stats.tx_packets++;

	return NETDEV_TX_OK;

drop:
	/* Free skb and update statistics */
	dev_kfree_skb(skb);
	kni->stats.tx_dropped++;

	return NETDEV_TX_OK;
}
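/*
 * The expression "(void *)va - kni->mbuf_va + kni->mbuf_kva", repeated in the
 * TX and RX paths above and below, rebases a userspace mbuf pointer into the
 * kernel mapping of the same shared memory region. A hypothetical helper,
 * shown only as an illustration of that arithmetic:
 */
#if 0
static inline void *kni_mbuf_va_to_kva(struct kni_dev *kni, void *va)
{
	/* Offset of the mbuf within the shared region, re-applied to the
	 * kernel-side base address. */
	return (void *)((char *)va - (char *)kni->mbuf_va +
			(char *)kni->mbuf_kva);
}
#endif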
static int
kni_net_tx(struct sk_buff *skb, struct net_device *dev)
{
	int len = 0;
	unsigned ret;
	struct kni_dev *kni = netdev_priv(dev);
	struct rte_kni_mbuf *pkt_kva;
	struct rte_kni_mbuf *pkt_va[RW_FPATH_KNI_MAX_SEGS];
	int num_req_mbuf = 1;
	int err;

	kni->tx_attempted++;

	err = skb_linearize(skb);
	if (unlikely(err))
		goto drop;

#ifdef RTE_LIBRW_NOHUGE
	if (kni->nohuge) {
		kni->nl_tx_queued++;
		skb_queue_tail(&kni->skb_tx_queue, skb);
		return NETDEV_TX_OK;
	}
#endif
	dev->trans_start = jiffies; /* save the timestamp */

	/* Check whether the skb fits in a single mbuf */
	if (skb->len > kni->mbuf_size) {
		num_req_mbuf = (skb->len / kni->mbuf_size) + 1;
		if (num_req_mbuf > RW_FPATH_KNI_MAX_SEGS)
			goto drop;
	}

	if (kni->no_tx || kni->no_data)
		goto drop;

	if (kni_fifo_free_count(kni->tx_q) < num_req_mbuf) {
		kni->tx_no_txq++;
		/* Not enough free entries in tx_q; drop skb and go out. */
		goto drop;
	}

	if (kni_fifo_count(kni->alloc_q) < num_req_mbuf) {
		kni->tx_no_allocq++;
		/* Not enough entries in alloc_q; drop skb and go out. */
		goto drop;
	}

	/* dequeue the required number of mbufs from alloc_q */
	ret = kni_fifo_get(kni->alloc_q, (void **)&pkt_va[0], num_req_mbuf);
	if (likely(ret == num_req_mbuf)) {
		int seg_no = 0;
		int copylen, remlen;
		unsigned char *to, *from;
		int next;
		struct rte_kni_mbuf **prev;

		len = skb->len;
		prev = (struct rte_kni_mbuf **)&pkt_va[seg_no]->next;
		pkt_kva = (void *)pkt_va[seg_no] - kni->mbuf_va + kni->mbuf_kva;
		pkt_kva->pkt_len = len;
		RW_KNI_VF_SET_MDATA_PAYLOAD(&pkt_kva->meta_data,
					    skb->protocol);
		from = (unsigned char *)skb->data;
		to = (unsigned char *)(pkt_kva->buf_addr + pkt_kva->data_off -
				       kni->mbuf_va + kni->mbuf_kva);
		remlen = kni->mbuf_size;
		next = 0;
		/* Copy the linearized skb into a chain of mbuf segments */
		while (len > 0) {
			copylen = len;
			if (copylen > remlen) {
				next = 1;
				copylen = remlen;
			}
			memcpy(to, from, copylen);
			to += copylen;
			from += copylen;
			len -= copylen;
			remlen -= copylen;
			if (unlikely(len < ETH_ZLEN)) {
#if 0 //AKKI
				memset(data_kva + len, 0, ETH_ZLEN - len);
				len = ETH_ZLEN;
#endif
			}
			pkt_kva->data_len += copylen;
			if (next) {
				/* Current segment is full; link the next one */
				seg_no++;
				*prev = pkt_va[seg_no];
				prev = (struct rte_kni_mbuf **)
						&pkt_va[seg_no]->next;
				pkt_kva = (void *)pkt_va[seg_no] -
						kni->mbuf_va + kni->mbuf_kva;
				to = (unsigned char *)(pkt_kva->buf_addr +
						       pkt_kva->data_off -
						       kni->mbuf_va +
						       kni->mbuf_kva);
				remlen = kni->mbuf_size;
				next = 0;
				//AKKI increment the nb_segs.. bug bug
			}
		}

		/* enqueue the head mbuf of the chain into tx_q */
		ret = kni_fifo_put(kni->tx_q, (void **)&pkt_va[0], 1);
		if (unlikely(ret != 1)) {
			/* Failing should not happen */
			KNI_ERR("Fail to enqueue mbuf into tx_q\n");
			goto drop;
		}
	} else {
		/* Failing should not happen */
		KNI_ERR("Fail to dequeue mbuf from alloc_q\n");
		goto drop;
	}

	/* Update statistics, then free skb; len was consumed by the copy
	 * loop, so account the original skb length before freeing. */
	kni->stats.tx_bytes += skb->len;
	kni->stats.tx_packets++;
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;

drop:
	/* Free skb and update statistics */
	dev_kfree_skb(skb);
	kni->stats.tx_dropped++;

	return NETDEV_TX_OK;
}
/*
 * RX: loopback with enqueue/dequeue fifos and sk buffer copies.
 */
static void
kni_net_rx_lo_fifo_skb(struct kni_dev *kni)
{
	unsigned ret;
	uint32_t len;
	unsigned i, num_rq, num_fq, num;
	struct rte_kni_mbuf *kva;
	struct rte_kni_mbuf *va[MBUF_BURST_SZ];
	void *data_kva;
	struct sk_buff *skb;
	struct net_device *dev = kni->net_dev;

	/* Get the number of entries in rx_q */
	num_rq = kni_fifo_count(kni->rx_q);

	/* Get the number of free entries in free_q */
	num_fq = kni_fifo_free_count(kni->free_q);

	/* Calculate the number of entries to dequeue from rx_q */
	num = min(num_rq, num_fq);
	num = min(num, (unsigned)MBUF_BURST_SZ);

	/* Return if no entry to dequeue from rx_q */
	if (num == 0)
		return;

	/* Burst dequeue mbufs from rx_q */
	ret = kni_fifo_get(kni->rx_q, (void **)va, num);
	if (ret == 0)
		return;

	/* Copy mbufs to sk buffer and then call tx interface */
	for (i = 0; i < num; i++) {
		kva = (void *)va[i] - kni->mbuf_va + kni->mbuf_kva;
		len = kva->data_len;
		data_kva = kva->buf_addr + kva->data_off - kni->mbuf_va +
				kni->mbuf_kva;

		skb = dev_alloc_skb(len + 2);
		if (skb == NULL)
			KNI_ERR("Out of mem, dropping pkts\n");
		else {
			/* Align IP on 16B boundary */
			skb_reserve(skb, 2);
			memcpy(skb_put(skb, len), data_kva, len);
			skb->dev = dev;
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			dev_kfree_skb(skb);
		}

		/* Simulate real usage, allocate/copy skb twice */
		skb = dev_alloc_skb(len + 2);
		if (skb == NULL) {
			KNI_ERR("Out of mem, dropping pkts\n");
			kni->stats.rx_dropped++;
		} else {
			/* Align IP on 16B boundary */
			skb_reserve(skb, 2);
			memcpy(skb_put(skb, len), data_kva, len);
			skb->dev = dev;
			skb->ip_summed = CHECKSUM_UNNECESSARY;

			kni->stats.rx_bytes += len;
			kni->stats.rx_packets++;

			/* call tx interface */
			kni_net_tx(skb, dev);
		}
	}

	/* enqueue all the mbufs from rx_q into free_q */
	ret = kni_fifo_put(kni->free_q, (void **)&va, num);
	if (ret != num)
		/* Failing should not happen */
		KNI_ERR("Fail to enqueue mbufs into free_q\n");
}
/*
 * RX: loopback with enqueue/dequeue fifos.
 */
static void
kni_net_rx_lo_fifo(struct kni_dev *kni)
{
	unsigned ret;
	uint32_t len;
	unsigned i, num, num_rq, num_tq, num_aq, num_fq;
	struct rte_kni_mbuf *kva;
	struct rte_kni_mbuf *va[MBUF_BURST_SZ];
	void *data_kva;
	struct rte_kni_mbuf *alloc_kva;
	struct rte_kni_mbuf *alloc_va[MBUF_BURST_SZ];
	void *alloc_data_kva;

	/* Get the number of entries in rx_q */
	num_rq = kni_fifo_count(kni->rx_q);

	/* Get the number of free entries in tx_q */
	num_tq = kni_fifo_free_count(kni->tx_q);

	/* Get the number of entries in alloc_q */
	num_aq = kni_fifo_count(kni->alloc_q);

	/* Get the number of free entries in free_q */
	num_fq = kni_fifo_free_count(kni->free_q);

	/* Calculate the number of entries to be dequeued from rx_q */
	num = min(num_rq, num_tq);
	num = min(num, num_aq);
	num = min(num, num_fq);
	num = min(num, (unsigned)MBUF_BURST_SZ);

	/* Return if no entry to dequeue from rx_q */
	if (num == 0)
		return;

	/* Burst dequeue from rx_q */
	ret = kni_fifo_get(kni->rx_q, (void **)va, num);
	if (ret == 0)
		return; /* Failing should not happen */

	/* Dequeue entries from alloc_q */
	ret = kni_fifo_get(kni->alloc_q, (void **)alloc_va, num);
	if (ret) {
		num = ret;
		/* Copy mbufs */
		for (i = 0; i < num; i++) {
			kva = (void *)va[i] - kni->mbuf_va + kni->mbuf_kva;
			len = kva->pkt_len;
			data_kva = kva->buf_addr + kva->data_off -
					kni->mbuf_va + kni->mbuf_kva;

			alloc_kva = (void *)alloc_va[i] - kni->mbuf_va +
					kni->mbuf_kva;
			alloc_data_kva = alloc_kva->buf_addr +
					alloc_kva->data_off - kni->mbuf_va +
					kni->mbuf_kva;
			memcpy(alloc_data_kva, data_kva, len);
			alloc_kva->pkt_len = len;
			alloc_kva->data_len = len;

			kni->stats.tx_bytes += len;
			kni->stats.rx_bytes += len;
		}

		/* Burst enqueue mbufs into tx_q */
		ret = kni_fifo_put(kni->tx_q, (void **)alloc_va, num);
		if (ret != num)
			/* Failing should not happen */
			KNI_ERR("Fail to enqueue mbufs into tx_q\n");
	}

	/* Burst enqueue mbufs into free_q */
	ret = kni_fifo_put(kni->free_q, (void **)va, num);
	if (ret != num)
		/* Failing should not happen */
		KNI_ERR("Fail to enqueue mbufs into free_q\n");

	/**
	 * Update statistics; enqueue/dequeue failure is impossible,
	 * as all queues were checked first.
	 */
	kni->stats.tx_packets += num;
	kni->stats.rx_packets += num;
}
/*
 * RX: normal working mode
 */
static void
kni_net_rx_normal(struct kni_dev *kni)
{
	unsigned ret;
	uint32_t len;
	unsigned i, num, num_rq, num_fq;
	struct rte_kni_mbuf *kva;
	struct rte_kni_mbuf *va[MBUF_BURST_SZ];
	void *data_kva;
	struct sk_buff *skb;
	struct net_device *dev = kni->net_dev;

	/* Get the number of entries in rx_q */
	num_rq = kni_fifo_count(kni->rx_q);

	/* Get the number of free entries in free_q */
	num_fq = kni_fifo_free_count(kni->free_q);

	/* Calculate the number of entries to dequeue from rx_q */
	num = min(num_rq, num_fq);
	num = min(num, (unsigned)MBUF_BURST_SZ);

	/* Return if no entry in rx_q and no free entry in free_q */
	if (num == 0)
		return;

	/* Burst dequeue from rx_q */
	ret = kni_fifo_get(kni->rx_q, (void **)va, num);
	if (ret == 0)
		return; /* Failing should not happen */

	/* Transfer received packets to netif */
	for (i = 0; i < num; i++) {
		kva = (void *)va[i] - kni->mbuf_va + kni->mbuf_kva;
		len = kva->data_len;
		data_kva = kva->buf_addr + kva->data_off - kni->mbuf_va +
				kni->mbuf_kva;

		skb = dev_alloc_skb(len + 2);
		if (!skb) {
			KNI_ERR("Out of mem, dropping pkts\n");
			/* Update statistics */
			kni->stats.rx_dropped++;
		} else {
			/* Align IP on 16B boundary */
			skb_reserve(skb, 2);
			memcpy(skb_put(skb, len), data_kva, len);
			skb->dev = dev;
			skb->protocol = eth_type_trans(skb, dev);
			skb->ip_summed = CHECKSUM_UNNECESSARY;

			/* Call netif interface */
			netif_rx(skb);

			/* Update statistics */
			kni->stats.rx_bytes += len;
			kni->stats.rx_packets++;
		}
	}

	/* Burst enqueue mbufs into free_q */
	ret = kni_fifo_put(kni->free_q, (void **)va, num);
	if (ret != num)
		/* Failing should not happen */
		KNI_ERR("Fail to enqueue entries into free_q\n");
}
/*
 * RX: normal working mode
 */
static void
kni_net_rx_normal(struct kni_dev *kni)
{
	unsigned ret;
	uint32_t pkt_len;
	uint32_t data_len;
	unsigned i, num, num_rq, num_fq;
	struct rte_kni_mbuf *kva;
	struct rte_kni_mbuf *va[MBUF_BURST_SZ];
	void *data_kva;
	int copied_len = 0;
	int num_segs = 0;
	struct sk_buff *skb;
	struct net_device *dev = kni->net_dev;

	if (kni->no_data)
		return;

	/* Get the number of entries in rx_q */
	num_rq = kni_fifo_count(kni->rx_q);

	/* Get the number of free entries in free_q */
	num_fq = kni_fifo_free_count(kni->free_q);

	/* Calculate the number of entries to dequeue from rx_q */
	num = min(num_rq, num_fq);
	num = min(num, (unsigned)MBUF_BURST_SZ);

	/* Return if no entry in rx_q and no free entry in free_q */
	if (num == 0)
		return;

	/* Burst dequeue from rx_q */
	ret = kni_fifo_get(kni->rx_q, (void **)va, num);
	if (ret == 0)
		return; /* Failing should not happen */

	/* Transfer received packets to netif */
	for (i = 0; i < num; i++) {
		kva = (void *)va[i] - kni->mbuf_va + kni->mbuf_kva;
		pkt_len = kva->pkt_len;

		skb = dev_alloc_skb(pkt_len + 2);
		if (!skb) {
			KNI_ERR("Out of mem, dropping pkts\n");
			/* Update statistics */
			kni->stats.rx_dropped++;
			continue;
		}
		/* Align IP on 16B boundary */
		skb_reserve(skb, 2);
		copied_len = 0;
		num_segs = 0;
		kva = (void *)va[i];
		/* Walk the mbuf chain, copying each segment into the skb */
		do {
			kva = (void *)kva - kni->mbuf_va + kni->mbuf_kva;
			data_kva = kva->buf_addr + kva->data_off -
					kni->mbuf_va + kni->mbuf_kva;
			data_len = kva->data_len;
			memcpy(skb_put(skb, data_len), data_kva, data_len);
			copied_len += data_len;
			num_segs++;
		} while ((kva = (void *)kva->next) != NULL);

		/* Go back to the head */
		kva = (void *)va[i] - kni->mbuf_va + kni->mbuf_kva;
		kni_net_process_rx_packet(skb, dev, &kva->meta_data);

		kni->stats.rx_bytes += pkt_len;
	}

	/* Burst enqueue mbufs into free_q */
	ret = kni_fifo_put(kni->free_q, (void **)va, num);
	if (ret != num)
		/* Failing should not happen */
		KNI_ERR("Fail to enqueue entries into free_q\n");
}
static inline int
kni_vhost_net_tx(struct kni_dev *kni, struct msghdr *m,
		 unsigned offset, unsigned len)
{
	struct rte_kni_mbuf *pkt_kva = NULL;
	struct rte_kni_mbuf *pkt_va = NULL;
	int ret;

	KNI_DBG_TX("tx offset=%d, len=%d, iovlen=%d\n",
#ifdef HAVE_IOV_ITER_MSGHDR
		   offset, len, (int)m->msg_iter.iov->iov_len);
#else
		   offset, len, (int)m->msg_iov->iov_len);
#endif

	/**
	 * Check if it has at least one free entry in tx_q and
	 * one entry in alloc_q.
	 */
	if (kni_fifo_free_count(kni->tx_q) == 0 ||
			kni_fifo_count(kni->alloc_q) == 0) {
		/**
		 * If no free entry in tx_q or no entry in alloc_q,
		 * drops skb and goes out.
		 */
		goto drop;
	}

	/* dequeue a mbuf from alloc_q */
	ret = kni_fifo_get(kni->alloc_q, (void **)&pkt_va, 1);
	if (likely(ret == 1)) {
		void *data_kva;

		pkt_kva = (void *)pkt_va - kni->mbuf_va + kni->mbuf_kva;
		data_kva = pkt_kva->buf_addr + pkt_kva->data_off -
				kni->mbuf_va + kni->mbuf_kva;

#ifdef HAVE_IOV_ITER_MSGHDR
		copy_from_iter(data_kva, len, &m->msg_iter);
#else
		memcpy_fromiovecend(data_kva, m->msg_iov, offset, len);
#endif

		if (unlikely(len < ETH_ZLEN)) {
			memset(data_kva + len, 0, ETH_ZLEN - len);
			len = ETH_ZLEN;
		}
		pkt_kva->pkt_len = len;
		pkt_kva->data_len = len;

		/* enqueue mbuf into tx_q */
		ret = kni_fifo_put(kni->tx_q, (void **)&pkt_va, 1);
		if (unlikely(ret != 1)) {
			/* Failing should not happen */
			KNI_ERR("Fail to enqueue mbuf into tx_q\n");
			goto drop;
		}
	} else {
		/* Failing should not happen */
		KNI_ERR("Fail to dequeue mbuf from alloc_q\n");
		goto drop;
	}

	/* update statistics */
	kni->stats.tx_bytes += len;
	kni->stats.tx_packets++;

	return 0;

drop:
	/* update statistics */
	kni->stats.tx_dropped++;

	return 0;
}
int
kni_chk_vhost_rx(struct kni_dev *kni)
{
	struct kni_vhost_queue *q = kni->vhost_queue;
	unsigned nb_in, nb_mbuf, nb_skb;
	const unsigned BURST_MASK = RX_BURST_SZ - 1;
	unsigned nb_burst, nb_backlog, i;
	struct sk_buff *skb[RX_BURST_SZ];
	struct rte_kni_mbuf *va[RX_BURST_SZ];

	if (unlikely(BE_STOP & kni->vq_status)) {
		kni->vq_status |= BE_FINISH;
		return 0;
	}
	if (unlikely(q == NULL))
		return 0;

	nb_skb = kni_fifo_count(q->fifo);
	nb_mbuf = kni_fifo_count(kni->rx_q);

	nb_in = min(nb_mbuf, nb_skb);
	nb_in = min(nb_in, (unsigned)RX_BURST_SZ);
	nb_burst   = (nb_in & ~BURST_MASK);
	nb_backlog = (nb_in & BURST_MASK);

	/* enqueue skb_queue per BURST_SIZE bulk */
	if (0 != nb_burst) {
		if (unlikely(RX_BURST_SZ != kni_fifo_get(
				     kni->rx_q, (void **)&va, RX_BURST_SZ)))
			goto except;

		if (unlikely(RX_BURST_SZ != kni_fifo_get(
				     q->fifo, (void **)&skb, RX_BURST_SZ)))
			goto except;

		kni_vhost_enqueue_burst(kni, q, skb, va);
	}

	/* all leftover, do one by one */
	for (i = 0; i < nb_backlog; ++i) {
		if (unlikely(1 != kni_fifo_get(
				     kni->rx_q, (void **)&va, 1)))
			goto except;

		if (unlikely(1 != kni_fifo_get(
				     q->fifo, (void **)&skb, 1)))
			goto except;

		kni_vhost_enqueue(kni, q, *skb, *va);
	}

	/* On-demand wake up */
	if ((nb_in == RX_BURST_SZ) || (nb_skb == 0) ||
	    ((nb_mbuf < RX_BURST_SZ) && (nb_mbuf != 0))) {
		wake_up_interruptible_poll(sk_sleep(&q->sk),
					   POLLIN | POLLRDNORM | POLLRDBAND);
		KNI_DBG_RX("RX CHK KICK nb_mbuf %d, nb_skb %d, nb_in %d\n",
			   nb_mbuf, nb_skb, nb_in);
	}

	return 0;

except:
	/* Failing should not happen */
	KNI_ERR("Fail to enqueue fifo, it shouldn't happen\n");
	BUG_ON(1);

	return 0;
}
static inline int
kni_vhost_net_rx(struct kni_dev *kni, struct msghdr *m,
		 unsigned offset, unsigned len)
{
	uint32_t pkt_len;
	struct rte_kni_mbuf *kva;
	struct rte_kni_mbuf *va;
	void *data_kva;
	struct sk_buff *skb;
	struct kni_vhost_queue *q = kni->vhost_queue;

	if (unlikely(q == NULL))
		return 0;

	/* ensure at least one entry in free_q */
	if (unlikely(kni_fifo_free_count(kni->free_q) == 0))
		return 0;

	skb = skb_dequeue(&q->sk.sk_receive_queue);
	if (unlikely(skb == NULL))
		return 0;

	kva = (struct rte_kni_mbuf *)skb->data;

	/* free skb to cache */
	skb->data = NULL;
	if (unlikely(1 != kni_fifo_put(q->fifo, (void **)&skb, 1)))
		/* Failing should not happen */
		KNI_ERR("Fail to enqueue entries into rx cache fifo\n");

	pkt_len = kva->data_len;
	if (unlikely(pkt_len > len))
		goto drop;

	KNI_DBG_RX("rx offset=%d, len=%d, pkt_len=%d, iovlen=%d\n",
#ifdef HAVE_IOV_ITER_MSGHDR
		   offset, len, pkt_len, (int)m->msg_iter.iov->iov_len);
#else
		   offset, len, pkt_len, (int)m->msg_iov->iov_len);
#endif

	data_kva = kva->buf_addr + kva->data_off - kni->mbuf_va + kni->mbuf_kva;
#ifdef HAVE_IOV_ITER_MSGHDR
	/* copy_to_iter() returns the number of bytes copied, so treat a short
	 * copy as failure */
	if (unlikely(copy_to_iter(data_kva, pkt_len, &m->msg_iter) != pkt_len))
#else
	if (unlikely(memcpy_toiovecend(m->msg_iov, data_kva, offset, pkt_len)))
#endif
		goto drop;

	/* Update statistics */
	kni->stats.rx_bytes += pkt_len;
	kni->stats.rx_packets++;

	/* enqueue mbufs into free_q */
	va = (void *)kva - kni->mbuf_kva + kni->mbuf_va;
	if (unlikely(1 != kni_fifo_put(kni->free_q, (void **)&va, 1)))
		/* Failing should not happen */
		KNI_ERR("Fail to enqueue entries into free_q\n");

	KNI_DBG_RX("receive done %d\n", pkt_len);

	return pkt_len;

drop:
	/* Update drop statistics */
	kni->stats.rx_dropped++;

	return 0;
}
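/*
 * Note, derived from the code above: the skb dequeued from sk_receive_queue
 * carries the kernel-side mbuf pointer in skb->data (see kni_chk_vhost_rx(),
 * which pairs cached skbs with rx_q mbufs). After the payload is copied to
 * the iov, the skb is recycled into q->fifo for reuse, and the mbuf itself,
 * rebased back to its userspace address, is returned to free_q.
 */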