/*
 * Execute service number @i on behalf of lcore state @cs, provided the
 * service is valid, both the component and application run-states are
 * RUNNING, and bit @i is set in @service_mask.
 *
 * Returns 0 on success, -EINVAL for an invalid service id, -ENOEXEC when
 * the service is not runnable on this core, and -EBUSY when another core
 * currently holds the execute lock of a non-MT-safe service.
 */
static inline int32_t
service_run(uint32_t i, struct core_state *cs, uint64_t service_mask)
{
	if (!service_valid(i))
		return -EINVAL;

	struct rte_service_spec_impl *spec = &rte_services[i];

	const int runnable =
		spec->comp_runstate == RUNSTATE_RUNNING &&
		spec->app_runstate == RUNSTATE_RUNNING &&
		(service_mask & (UINT64_C(1) << i)) != 0;
	if (!runnable)
		return -ENOEXEC;

	/* The execute lock is only needed when the service is not MT safe
	 * and more than one core is mapped to it; otherwise a plain call
	 * is sufficient and we skip the atomic ops entirely.
	 */
	const int need_lock = !service_mt_safe(spec) &&
			      rte_atomic32_read(&spec->num_mapped_cores) > 1;

	if (!need_lock) {
		rte_service_runner_do_callback(spec, cs, i);
		return 0;
	}

	if (!rte_atomic32_cmpset((uint32_t *)&spec->execute_lock, 0, 1))
		return -EBUSY;

	rte_service_runner_do_callback(spec, cs, i);
	rte_atomic32_clear(&spec->execute_lock);
	return 0;
}
// Called when a VM starts a vhost-user device static int new_device(struct virtio_net *dev) { struct virtio_net_ll* lldev = (struct virtio_net_ll*) malloc( sizeof(struct virtio_net_ll) ); int q_no; pthread_mutex_lock(&ll_virtio_net_lock); lldev->dev = dev; lldev->next = ll_virtio_net_root; ll_virtio_net_root = lldev; dev->priv = lldev; pthread_mutex_unlock(&ll_virtio_net_lock); lldev->nb_queues = dev->virt_qp_nb; lldev->queue = (struct virtqueue*) malloc(sizeof(struct virtqueue) * lldev->nb_queues * VIRTIO_QNUM); #define VIRTIO_RXQ_NO(X) ((X) * VIRTIO_QNUM + VIRTIO_RXQ) #define VIRTIO_TXQ_NO(X) ((X) * VIRTIO_QNUM + VIRTIO_TXQ) for (q_no = 0; q_no < lldev->nb_queues; q_no++) { lldev->queue[q_no].callfd = dev->virtqueue[VIRTIO_RXQ_NO(q_no)]->callfd; lldev->queue[q_no].kickfd = dev->virtqueue[VIRTIO_TXQ_NO(q_no)]->kickfd; lldev->queue[q_no].rxq = dev->virtqueue[VIRTIO_TXQ_NO(q_no)]; lldev->queue[q_no].txq = dev->virtqueue[VIRTIO_RXQ_NO(q_no)]; rte_atomic64_clear(&lldev->queue[q_no].rx_packets); rte_atomic64_clear(&lldev->queue[q_no].tx_packets); rte_atomic64_clear(&lldev->queue[q_no].dropped_packets); rte_atomic64_clear(&lldev->queue[q_no].error_packets); lldev->queue[q_no].entry_read = 0; rte_atomic32_clear(&lldev->queue[q_no].taxi_count); } // Link up dev->flags |= VIRTIO_DEV_RUNNING; // Schedule the BH for fixups if (schedule_work(virtio_new_device_bh, lldev) < 0) { log_crit("Failed to schedul work for new device (%ld)\n", dev->device_fh); dev->flags &= ~VIRTIO_DEV_RUNNING; free(lldev->queue); free(lldev); return -1; } return 0; }