// Called when a VM starts a vhost-user device
static int new_device(struct virtio_net *dev)
{
    struct virtio_net_ll *lldev;
    struct virtio_net_ll **pp;
    int q_no;

    lldev = (struct virtio_net_ll *) malloc(sizeof(struct virtio_net_ll));
    if (!lldev) {
        log_crit("Failed to allocate memory for device (%ld)\n",
                 dev->device_fh);
        return -1;
    }

    lldev->nb_queues = dev->virt_qp_nb;
    lldev->queue = (struct virtqueue *) malloc(sizeof(struct virtqueue) *
                                               lldev->nb_queues * VIRTIO_QNUM);
    if (!lldev->queue) {
        log_crit("Failed to allocate queues for device (%ld)\n",
                 dev->device_fh);
        free(lldev);
        return -1;
    }

    // Link the device into the global list
    pthread_mutex_lock(&ll_virtio_net_lock);
    lldev->dev = dev;
    lldev->next = ll_virtio_net_root;
    ll_virtio_net_root = lldev;
    dev->priv = lldev;
    pthread_mutex_unlock(&ll_virtio_net_lock);

#define VIRTIO_RXQ_NO(X) ((X) * VIRTIO_QNUM + VIRTIO_RXQ)
#define VIRTIO_TXQ_NO(X) ((X) * VIRTIO_QNUM + VIRTIO_TXQ)

    for (q_no = 0; q_no < lldev->nb_queues; q_no++) {
        // The guest's TX queue is our RX queue and vice versa,
        // hence the deliberate RX/TX cross-over below.
        lldev->queue[q_no].callfd = dev->virtqueue[VIRTIO_RXQ_NO(q_no)]->callfd;
        lldev->queue[q_no].kickfd = dev->virtqueue[VIRTIO_TXQ_NO(q_no)]->kickfd;
        lldev->queue[q_no].rxq = dev->virtqueue[VIRTIO_TXQ_NO(q_no)];
        lldev->queue[q_no].txq = dev->virtqueue[VIRTIO_RXQ_NO(q_no)];
        rte_atomic64_clear(&lldev->queue[q_no].rx_packets);
        rte_atomic64_clear(&lldev->queue[q_no].tx_packets);
        rte_atomic64_clear(&lldev->queue[q_no].dropped_packets);
        rte_atomic64_clear(&lldev->queue[q_no].error_packets);
        lldev->queue[q_no].entry_read = 0;
        rte_atomic32_clear(&lldev->queue[q_no].taxi_count);
    }

    // Link up
    dev->flags |= VIRTIO_DEV_RUNNING;

    // Schedule the BH for fixups
    if (schedule_work(virtio_new_device_bh, lldev) < 0) {
        log_crit("Failed to schedule work for new device (%ld)\n",
                 dev->device_fh);
        dev->flags &= ~VIRTIO_DEV_RUNNING;

        // Unlink from the global list before freeing so no dangling
        // node is left behind.
        pthread_mutex_lock(&ll_virtio_net_lock);
        for (pp = &ll_virtio_net_root; *pp; pp = &(*pp)->next) {
            if (*pp == lldev) {
                *pp = lldev->next;
                break;
            }
        }
        dev->priv = NULL;
        pthread_mutex_unlock(&ll_virtio_net_lock);

        free(lldev->queue);
        free(lldev);
        return -1;
    }

    return 0;
}
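/*
 * For context, a minimal sketch of how new_device() would be hooked up,
 * assuming the pre-16.07 DPDK vhost API that this code appears to target
 * (struct virtio_net_device_ops, rte_vhost_driver_callback_register() and
 * rte_vhost_driver_session_start()). The destroy_device() callback and the
 * start_vhost_session() wrapper named here are assumptions, not part of
 * this file.
 */
#include <rte_virtio_net.h>

static void destroy_device(volatile struct virtio_net *dev); /* assumed elsewhere */

static const struct virtio_net_device_ops virtio_net_device_ops = {
    .new_device     = new_device,
    .destroy_device = destroy_device,
};

static int start_vhost_session(void)
{
    // Register the callbacks invoked when a VM connects/disconnects
    if (rte_vhost_driver_callback_register(&virtio_net_device_ops) < 0)
        return -1;

    // Blocks, dispatching vhost-user messages to the callbacks above
    rte_vhost_driver_session_start();
    return 0;
}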
// dev_ops stats_reset handler: clears the per-ring HW stat contexts and
// the driver's software mbuf-allocation-failure counter.
void bnxt_stats_reset_op(struct rte_eth_dev *eth_dev)
{
    struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

    bnxt_clear_all_hwrm_stat_ctxs(bp);
    rte_atomic64_clear(&bp->rx_mbuf_alloc_fail);
}
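/*
 * Sketch only: in the bnxt PMD this handler is installed in the port's
 * eth_dev_ops table, so an application-level reset reaches it through the
 * generic ethdev API. The struct below is illustrative, not the driver's
 * full ops table, and reset_port_stats() is a hypothetical helper.
 */
static const struct eth_dev_ops bnxt_dev_ops = {
    .stats_reset = bnxt_stats_reset_op,
    /* ... remaining callbacks elided ... */
};

static void reset_port_stats(uint8_t port_id)
{
    // Dispatches to bnxt_stats_reset_op() via dev->dev_ops->stats_reset
    rte_eth_stats_reset(port_id);
}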
// Attach a VIF to a VRF
struct vif *vif_add(char *name, uint8_t *ip, uint8_t mask, uint8_t *macaddr,
                    uint32_t label, char *path, int cpus, int cpusets[])
{
    int i;
    struct vif *vif = (struct vif *) malloc(sizeof(struct vif));

    if (!vif) {
        log_crit("Failed to allocate memory for vif struct (%s)\n", name);
        return NULL;
    }

    snprintf(vif->name, sizeof(vif->name), "%s", name);
    vif->label = label;
    vif->mask = mask;
    memcpy(vif->ip, ip, 4);
    memcpy(vif->macaddr, macaddr, 6);   /* a MAC address is 6 bytes */
    snprintf(vif->path, sizeof(vif->path), "%s", path);

    rte_atomic64_clear(&vif->rx_packets);
    rte_atomic64_clear(&vif->tx_packets);
    rte_atomic64_clear(&vif->dropped_packets);
    rte_atomic64_clear(&vif->error_packets);

    vif->cpus = cpus;
    for (i = 0; i < cpus; i++) {
        CPU_ZERO(&vif->cpusets[i]);
        CPU_SET(cpusets[i], &vif->cpusets[i]);
    }
    vif->lldev = NULL;

    /* Create the vhost-user socket first, so a registration failure
     * leaves no dangling next-hop behind in the RIB. */
    unlink(vif->path);
    if (rte_vhost_driver_register(vif->path) < 0) {
        free(vif);
        return NULL;
    }

    // Add route to this VM in VRF/RIB.
    vif->nh.data = vif;
    vif->nh.fn = virtio_tx_packet;
    ipv4_route_add(label, ip, &vif->nh);

    return vif;
}
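/*
 * Hypothetical usage sketch for vif_add(). Every value below (addresses,
 * label, socket path, CPU list) is illustrative and not taken from this
 * code base.
 */
static struct vif *add_example_vif(void)
{
    uint8_t ip[4]  = { 10, 0, 0, 2 };
    uint8_t mac[6] = { 0x52, 0x54, 0x00, 0x12, 0x34, 0x56 };
    int cpusets[2] = { 2, 3 };          /* pin workers to cores 2 and 3 */

    // VRF label 100, /24 mask, two worker CPUs, vhost-user socket in /var/run
    return vif_add("vm0", ip, 24, mac, 100,
                   "/var/run/vhost/vm0.sock", 2, cpusets);
}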