Example no. 1
/* Called with iotlb_lock read-locked */
uint64_t
__vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
		    uint64_t iova, uint64_t size, uint8_t perm)
{
	uint64_t vva, tmp_size;

	if (unlikely(!size))
		return 0;

	tmp_size = size;

	/* On return, tmp_size holds how many bytes starting at iova
	 * are covered by a contiguous cached mapping.
	 */
	vva = vhost_user_iotlb_cache_find(vq, iova, &tmp_size, perm);
	if (tmp_size == size)
		return vva;

	if (!vhost_user_iotlb_pending_miss(vq, iova + tmp_size, perm)) {
		/*
		 * iotlb_lock is read-locked for a full burst,
		 * but it only protects the iotlb cache.
		 * In case of IOTLB miss, we might block on the socket,
		 * which could cause a deadlock with QEMU if an IOTLB update
		 * is being handled. We can safely unlock here to avoid it.
		 */
		vhost_user_iotlb_rd_unlock(vq);

		/* Record the pending entry and ask the frontend for the
		 * missing translation.
		 */
		vhost_user_iotlb_pending_insert(vq, iova + tmp_size, perm);
		vhost_user_iotlb_miss(dev, iova + tmp_size, perm);

		vhost_user_iotlb_rd_lock(vq);
	}

	return 0;
}
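
Callers do not normally invoke __vhost_iova_to_vva directly: when
VIRTIO_F_IOMMU_PLATFORM has not been negotiated, ring addresses are guest
physical and need no IOTLB lookup. A minimal sketch of such a dispatching
wrapper follows; the actual wrapper in the library may differ, and
gpa_to_vva() is a hypothetical stand-in for the non-IOMMU translation path.

/* Sketch: use the IOTLB path only when the IOMMU feature was
 * negotiated; otherwise fall back to guest-physical translation.
 * gpa_to_vva() is a placeholder, not a real library symbol.
 */
static inline uint64_t
vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
		  uint64_t iova, uint64_t size, uint8_t perm)
{
	if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
		return gpa_to_vva(dev, iova, size);

	return __vhost_iova_to_vva(dev, vq, iova, size, perm);
}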
Example no. 2
/*
 * Converts ring address to Vhost virtual address.
 * If IOMMU is enabled, the ring address is a guest IO virtual address,
 * else it is a QEMU virtual address.
 */
static uint64_t
ring_addr_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
		uint64_t ra, uint64_t *size)
{
	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) {
		uint64_t vva;

		vva = vhost_user_iotlb_cache_find(vq, ra,
					size, VHOST_ACCESS_RW);
		/* Not cached yet: post an IOTLB miss so the frontend
		 * supplies the translation; report failure for now.
		 */
		if (!vva)
			vhost_user_iotlb_miss(dev, ra, VHOST_ACCESS_RW);

		return vva;
	}

	return qva_to_vva(dev, ra, size);
}
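
Here size is an in/out parameter: on return it holds how many bytes of the
requested range were covered by a contiguous mapping, so callers must check
that the whole ring was translated. A hedged sketch of such a check follows;
translate_desc_ring() and the desc_user_addr argument are illustrative names,
not taken from the library.

/* Sketch: translate the descriptor ring and verify it is fully
 * covered by one contiguous mapping. Names are illustrative.
 */
static int
translate_desc_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
		    uint64_t desc_user_addr)
{
	uint64_t expected = sizeof(struct vring_desc) * vq->size;
	uint64_t size = expected;

	vq->desc = (struct vring_desc *)(uintptr_t)
		ring_addr_to_vva(dev, vq, desc_user_addr, &size);
	if (!vq->desc || size != expected)
		return -1; /* miss posted, or mapping not contiguous */

	return 0;
}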