/*
 * Release backend resources held by the device: the guest memory
 * region table and the dirty-page logging mmap, if present.
 *
 * NOTE(review): a second, extended definition of this function appears
 * later in this file — looks like two versions were concatenated;
 * confirm only one is actually compiled.
 */
void
vhost_backend_cleanup(struct virtio_net *dev)
{
	if (dev->mem) {
		/* Unmap the regions first, then drop the table itself. */
		free_mem_region(dev);
		free(dev->mem);
		dev->mem = NULL;
	}

	if (dev->log_addr) {
		munmap((void *)(uintptr_t)dev->log_addr, dev->log_size);
		dev->log_addr = 0;
	}
}
/*
 * Tear down all per-device backend state: the guest memory map, the
 * guest page array, the dirty-log mapping, and the slave request fd.
 */
void
vhost_backend_cleanup(struct virtio_net *dev)
{
	if (dev->mem) {
		/* dev->mem is rte_malloc'd; unmap regions before freeing. */
		free_mem_region(dev);
		rte_free(dev->mem);
		dev->mem = NULL;
	}

	/* free(NULL) is a no-op, so no guard is needed here. */
	free(dev->guest_pages);
	dev->guest_pages = NULL;

	if (dev->log_addr) {
		munmap((void *)(uintptr_t)dev->log_addr, dev->log_size);
		dev->log_addr = 0;
	}

	if (dev->slave_req_fd >= 0) {
		close(dev->slave_req_fd);
		dev->slave_req_fd = -1;
	}
}
int user_set_mem_table(int vid, struct VhostUserMsg *pmsg) { struct VhostUserMemory memory = pmsg->payload.memory; struct virtio_memory_regions *pregion; uint64_t mapped_address, mapped_size; struct virtio_net *dev; unsigned int idx = 0; struct orig_region_map *pregion_orig; uint64_t alignment; /* unmap old memory regions one by one*/ dev = get_device(vid); if (dev == NULL) return -1; /* Remove from the data plane. */ if (dev->flags & VIRTIO_DEV_RUNNING) { dev->flags &= ~VIRTIO_DEV_RUNNING; notify_ops->destroy_device(vid); } if (dev->mem) { free_mem_region(dev); free(dev->mem); dev->mem = NULL; } dev->mem = calloc(1, sizeof(struct virtio_memory) + sizeof(struct virtio_memory_regions) * memory.nregions + sizeof(struct orig_region_map) * memory.nregions); if (dev->mem == NULL) { RTE_LOG(ERR, VHOST_CONFIG, "(%d) failed to allocate memory for dev->mem\n", dev->vid); return -1; } dev->mem->nregions = memory.nregions; pregion_orig = orig_region(dev->mem, memory.nregions); for (idx = 0; idx < memory.nregions; idx++) { pregion = &dev->mem->regions[idx]; pregion->guest_phys_address = memory.regions[idx].guest_phys_addr; pregion->guest_phys_address_end = memory.regions[idx].guest_phys_addr + memory.regions[idx].memory_size; pregion->memory_size = memory.regions[idx].memory_size; pregion->userspace_address = memory.regions[idx].userspace_addr; /* This is ugly */ mapped_size = memory.regions[idx].memory_size + memory.regions[idx].mmap_offset; /* mmap() without flag of MAP_ANONYMOUS, should be called * with length argument aligned with hugepagesz at older * longterm version Linux, like 2.6.32 and 3.2.72, or * mmap() will fail with EINVAL. * * to avoid failure, make sure in caller to keep length * aligned. 
*/ alignment = get_blk_size(pmsg->fds[idx]); if (alignment == (uint64_t)-1) { RTE_LOG(ERR, VHOST_CONFIG, "couldn't get hugepage size through fstat\n"); goto err_mmap; } mapped_size = RTE_ALIGN_CEIL(mapped_size, alignment); mapped_address = (uint64_t)(uintptr_t)mmap(NULL, mapped_size, PROT_READ | PROT_WRITE, MAP_SHARED, pmsg->fds[idx], 0); RTE_LOG(INFO, VHOST_CONFIG, "mapped region %d fd:%d to:%p sz:0x%"PRIx64" " "off:0x%"PRIx64" align:0x%"PRIx64"\n", idx, pmsg->fds[idx], (void *)(uintptr_t)mapped_address, mapped_size, memory.regions[idx].mmap_offset, alignment); if (mapped_address == (uint64_t)(uintptr_t)MAP_FAILED) { RTE_LOG(ERR, VHOST_CONFIG, "mmap qemu guest failed.\n"); goto err_mmap; } pregion_orig[idx].mapped_address = mapped_address; pregion_orig[idx].mapped_size = mapped_size; pregion_orig[idx].blksz = alignment; pregion_orig[idx].fd = pmsg->fds[idx]; mapped_address += memory.regions[idx].mmap_offset; pregion->address_offset = mapped_address - pregion->guest_phys_address; if (memory.regions[idx].guest_phys_addr == 0) { dev->mem->base_address = memory.regions[idx].userspace_addr; dev->mem->mapped_address = pregion->address_offset; } LOG_DEBUG(VHOST_CONFIG, "REGION: %u GPA: %p QEMU VA: %p SIZE (%"PRIu64")\n", idx, (void *)(uintptr_t)pregion->guest_phys_address, (void *)(uintptr_t)pregion->userspace_address, pregion->memory_size); } return 0; err_mmap: while (idx--) { munmap((void *)(uintptr_t)pregion_orig[idx].mapped_address, pregion_orig[idx].mapped_size); close(pregion_orig[idx].fd); } free(dev->mem); dev->mem = NULL; return -1; }