static void hostmem_client_set_memory(CPUPhysMemoryClient *client,
                                      target_phys_addr_t start_addr,
                                      ram_addr_t size,
                                      ram_addr_t phys_offset)
{
    HostMem *hostmem = container_of(client, HostMem, client);
    ram_addr_t flags = phys_offset & ~TARGET_PAGE_MASK;
    size_t s = offsetof(struct vhost_memory, regions) +
               (hostmem->mem->nregions + 1) * sizeof hostmem->mem->regions[0];

    /* TODO: this is a hack.
     * At least one VGA card (cirrus) changes its gpa-to-hva
     * memory mappings on the data path, which slows us down.
     * Since we should never need to DMA into VGA memory
     * anyway, let's just skip these regions. */
    if (ranges_overlap(start_addr, size, 0xa0000, 0x10000)) {
        return;
    }

    qemu_mutex_lock(&hostmem->mem_lock);

    hostmem->mem = qemu_realloc(hostmem->mem, s);

    assert(size);

    vhost_mem_unassign_memory(hostmem->mem, start_addr, size);
    if (flags == IO_MEM_RAM) {
        /* Add given mapping, merging adjacent regions if any */
        vhost_mem_assign_memory(hostmem->mem, start_addr, size,
                                (uintptr_t)qemu_get_ram_ptr(phys_offset));
    }

    qemu_mutex_unlock(&hostmem->mem_lock);
}
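
For context, here is a minimal sketch of how a tracker like HostMem might be wired up, assuming the old (pre-MemoryRegion) QEMU CPUPhysMemoryClient API in which a client registers its set_memory callback through cpu_register_phys_memory_client(). The hostmem_init() helper itself is hypothetical and not part of the example above.

/* Sketch only: hook hostmem_client_set_memory() into the old
 * CPUPhysMemoryClient API.  hostmem_init() is a hypothetical helper. */
static void hostmem_init(HostMem *hostmem)
{
    qemu_mutex_init(&hostmem->mem_lock);

    /* Start with an empty vhost_memory table; regions are filled in as
     * hostmem_client_set_memory() is called for each guest RAM mapping. */
    hostmem->mem = qemu_mallocz(offsetof(struct vhost_memory, regions));

    /* Only the set_memory callback is shown here; a real client would
     * typically also provide dirty-logging hooks. */
    hostmem->client.set_memory = hostmem_client_set_memory;
    cpu_register_phys_memory_client(&hostmem->client);
}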
Example #2
static void vhost_client_set_memory(CPUPhysMemoryClient *client,
                                    target_phys_addr_t start_addr,
                                    ram_addr_t size,
                                    ram_addr_t phys_offset)
{
    struct vhost_dev *dev = container_of(client, struct vhost_dev, client);
    ram_addr_t flags = phys_offset & ~TARGET_PAGE_MASK;
    int s = offsetof(struct vhost_memory, regions) +
        (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
    uint64_t log_size;
    int r;

    /* TODO: this is a hack.
     * At least one VGA card (cirrus) changes its gpa-to-hva
     * memory mappings on the data path, which slows us down.
     * Since we should never need to DMA into VGA memory
     * anyway, let's just skip these regions. */
    if (ranges_overlap(start_addr, size, 0xa0000, 0x10000)) {
        return;
    }

    dev->mem = qemu_realloc(dev->mem, s);

    assert(size);

    /* Drop any existing mapping for this range; re-add it only if it is
     * RAM, merging adjacent regions if any. */
    vhost_mem_unassign_memory(dev->mem, start_addr, size);
    if (flags == IO_MEM_RAM) {
        vhost_mem_assign_memory(dev->mem, start_addr, size,
                                (uintptr_t)qemu_get_ram_ptr(phys_offset));
    }

    if (!dev->started) {
        return;
    }

    /* The device is running: its ring mappings must still be valid. */
    r = vhost_verify_ring_mappings(dev, start_addr, size);
    assert(r >= 0);

    if (!dev->log_enabled) {
        r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem);
        assert(r >= 0);
        return;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes of log
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem);
    assert(r >= 0);
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }
}
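
For reference, a hedged sketch of what the vhost_dev_log_resize() helper used above might look like, assuming dev->log is a zero-filled array of vhost_log_chunk_t entries whose base address is handed to the kernel with the VHOST_SET_LOG_BASE ioctl; the dirty-bit flush before freeing the old log is an assumption about the surrounding code, not something shown in this example.

static void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    vhost_log_chunk_t *log = NULL;
    uint64_t log_base;
    int r;

    /* Allocate the new zero-filled log first, so the kernel never logs
     * writes into memory that has not been mapped yet. */
    if (size) {
        log = qemu_mallocz(size * sizeof *log);
    }
    log_base = (uint64_t)(uintptr_t)log;
    r = ioctl(dev->control, VHOST_SET_LOG_BASE, &log_base);
    assert(r >= 0);

    /* Assumed step: dirty bits accumulated in the old log would be
     * synced into the migration bitmap here before the log is freed. */
    qemu_free(dev->log);
    dev->log = log;
    dev->log_size = size;
}

Growing the log before VHOST_SET_MEM_TABLE and shrinking it only afterwards, as the caller above does, ensures every logged write lands inside the currently mapped log buffer.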