/*
 * Drain all skbs queued by the netlink receive path and inject them
 * into the network stack via kni_net_process_rx_packet().
 *
 * Each queued skb carries a struct rw_kni_mbuf_metadata header in front
 * of the packet payload; it is stripped here, but a pointer to it is
 * passed on so the processing routine can still read it.
 */
void kni_net_rx_netlink(struct kni_dev *kni)
{
	struct sk_buff *skb;
	struct rw_kni_mbuf_metadata *meta;

	for (skb = skb_dequeue(&kni->skb_rx_queue); skb != NULL;
	     skb = skb_dequeue(&kni->skb_rx_queue)) {
		kni->nl_rx_dequeued++;

		/* Metadata sits at the head of the buffer. */
		meta = (struct rw_kni_mbuf_metadata *)skb->data;
		skb->dev = kni->net_dev;

		/* Strip the metadata header; meta still points at it
		 * (pull only advances skb->data, memory stays valid). */
		__skb_pull(skb, sizeof(struct rw_kni_mbuf_metadata));

		kni_net_process_rx_packet(skb, kni->net_dev, meta);
	}
}
/* * RX: normal working mode */ static void kni_net_rx_normal(struct kni_dev *kni) { unsigned ret; uint32_t pkt_len; uint32_t data_len; unsigned i, num, num_rq, num_fq; struct rte_kni_mbuf *kva; struct rte_kni_mbuf *va[MBUF_BURST_SZ]; void * data_kva; int copied_len = 0; int num_segs = 0; struct sk_buff *skb; struct net_device *dev = kni->net_dev; if (kni->no_data){ return; } /* Get the number of entries in rx_q */ num_rq = kni_fifo_count(kni->rx_q); /* Get the number of free entries in free_q */ num_fq = kni_fifo_free_count(kni->free_q); /* Calculate the number of entries to dequeue in rx_q */ num = min(num_rq, num_fq); num = min(num, (unsigned)MBUF_BURST_SZ); /* Return if no entry in rx_q and no free entry in free_q */ if (num == 0) return; /* Burst dequeue from rx_q */ ret = kni_fifo_get(kni->rx_q, (void **)va, num); if (ret == 0) return; /* Failing should not happen */ /* Transfer received packets to netif */ for (i = 0; i < num; i++) { kva = (void *)va[i] - kni->mbuf_va + kni->mbuf_kva; pkt_len = kva->pkt_len; skb = dev_alloc_skb(pkt_len + 2); if (!skb) { KNI_ERR("Out of mem, dropping pkts\n"); /* Update statistics */ kni->stats.rx_dropped++; continue; } /* Align IP on 16B boundary */ skb_reserve(skb, 2); copied_len = 0; num_segs = 0; kva = (void *)va[i]; do { kva = (void *)kva - kni->mbuf_va + kni->mbuf_kva; data_kva = kva->buf_addr + kva->data_off - kni->mbuf_va + kni->mbuf_kva; data_len = kva->data_len; memcpy(skb_put(skb, data_len), data_kva, data_len); copied_len += data_len; num_segs++; }while((kva = (void *)kva->next) != NULL); /*Go back to the head*/ kva = (void *)va[i] - kni->mbuf_va + kni->mbuf_kva; kni_net_process_rx_packet(skb, dev, &kva->meta_data); kni->stats.rx_bytes += pkt_len; } /* Burst enqueue mbufs into free_q */ ret = kni_fifo_put(kni->free_q, (void **)va, num); if (ret != num) /* Failing should not happen */ KNI_ERR("Fail to enqueue entries into free_q\n"); }