Example #1
File: vring.c Project: JMR-b/qemu
void vring_teardown(Vring *vring, VirtIODevice *vdev, int n)
{
    virtio_queue_set_last_avail_idx(vdev, n, vring->last_avail_idx);
    virtio_queue_invalidate_signalled_used(vdev, n);

    /* Drop the references taken by vring_setup() when the rings were mapped */
    memory_region_unref(vring->mr_desc);
    memory_region_unref(vring->mr_avail);
    memory_region_unref(vring->mr_used);
}
Example #2
static void virtio_balloon_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOBalloon *s = VIRTIO_BALLOON(vdev);
    VirtQueueElement *elem;
    MemoryRegionSection section;

    for (;;) {
        size_t offset = 0;
        uint32_t pfn;
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            return;
        }

        while (iov_to_buf(elem->out_sg, elem->out_num, offset, &pfn, 4) == 4) {
            hwaddr pa;
            int p = virtio_ldl_p(vdev, &pfn);

            pa = (hwaddr) p << VIRTIO_BALLOON_PFN_SHIFT;
            offset += 4;

            section = memory_region_find(get_system_memory(), pa,
                                         BALLOON_PAGE_SIZE);
            if (!section.mr) {
                trace_virtio_balloon_bad_addr(pa);
                continue;
            }
            if (!memory_region_is_ram(section.mr) ||
                memory_region_is_rom(section.mr) ||
                memory_region_is_romd(section.mr)) {
                trace_virtio_balloon_bad_addr(pa);
                memory_region_unref(section.mr);
                continue;
            }

            trace_virtio_balloon_handle_output(memory_region_name(section.mr),
                                               pa);
            if (!qemu_balloon_is_inhibited()) {
                if (vq == s->ivq) {
                    balloon_inflate_page(s, section.mr,
                                         section.offset_within_region);
                } else if (vq == s->dvq) {
                    balloon_deflate_page(s, section.mr, section.offset_within_region);
                } else {
                    g_assert_not_reached();
                }
            }
            memory_region_unref(section.mr);
        }

        virtqueue_push(vq, elem, offset);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}
Example #3
static void virtio_balloon_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOBalloon *s = VIRTIO_BALLOON(vdev);
    VirtQueueElement *elem;
    MemoryRegionSection section;

    for (;;) {
        size_t offset = 0;
        uint32_t pfn;
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            return;
        }

        while (iov_to_buf(elem->out_sg, elem->out_num, offset, &pfn, 4) == 4) {
            ram_addr_t pa;
            ram_addr_t addr;
            int p = virtio_ldl_p(vdev, &pfn);

            pa = (ram_addr_t) p << VIRTIO_BALLOON_PFN_SHIFT;
            offset += 4;

            /* FIXME: remove get_system_memory(), but how? */
            section = memory_region_find(get_system_memory(), pa, 1);
            if (!int128_nz(section.size) ||
                !memory_region_is_ram(section.mr) ||
                memory_region_is_rom(section.mr) ||
                memory_region_is_romd(section.mr)) {
                trace_virtio_balloon_bad_addr(pa);
                memory_region_unref(section.mr);
                continue;
            }

            trace_virtio_balloon_handle_output(memory_region_name(section.mr),
                                               pa);
            /* Using memory_region_get_ram_ptr is bending the rules a bit, but
               should be OK because we only want a single page.  */
            addr = section.offset_within_region;
            balloon_page(memory_region_get_ram_ptr(section.mr) + addr,
                         !!(vq == s->dvq));
            memory_region_unref(section.mr);
        }

        virtqueue_push(vq, elem, offset);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}
Example #4
static void virtio_balloon_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOBalloon *s = VIRTIO_BALLOON(vdev);
    VirtQueueElement elem;
    MemoryRegionSection section;

    while (virtqueue_pop(vq, &elem)) {
        size_t offset = 0;
        uint32_t pfn;

        while (iov_to_buf(elem.out_sg, elem.out_num, offset, &pfn, 4) == 4) {
            ram_addr_t pa;
            ram_addr_t addr;

            pa = (ram_addr_t)ldl_p(&pfn) << VIRTIO_BALLOON_PFN_SHIFT;
            offset += 4;

            /* FIXME: remove get_system_memory(), but how? */
            section = memory_region_find(get_system_memory(), pa, 1);
            if (!int128_nz(section.size) || !memory_region_is_ram(section.mr)) {
                /* Release the reference from memory_region_find() even on
                 * the skip path (memory_region_unref(NULL) is a no-op);
                 * compare the later version in Example #3.
                 */
                memory_region_unref(section.mr);
                continue;
            }

            /* Using memory_region_get_ram_ptr is bending the rules a bit, but
               should be OK because we only want a single page.  */
            addr = section.offset_within_region;
            balloon_page(memory_region_get_ram_ptr(section.mr) + addr,
                         !!(vq == s->dvq));
            memory_region_unref(section.mr);
        }

        virtqueue_push(vq, &elem, offset);
        virtio_notify(vdev, vq);
    }
}
Example #5
/* vring_map can be coupled with vring_unmap or (if you still have the
 * value returned in *mr) memory_region_unref.
 */
static void *vring_map(MemoryRegion **mr, hwaddr phys, hwaddr len,
                       bool is_write)
{
    MemoryRegionSection section = memory_region_find(get_system_memory(), phys, len);

    if (!section.mr || int128_get64(section.size) < len) {
        goto out;
    }
    if (is_write && section.readonly) {
        goto out;
    }
    if (!memory_region_is_ram(section.mr)) {
        goto out;
    }

    /* Ignore regions with dirty logging, we cannot mark them dirty */
    if (memory_region_is_logging(section.mr)) {
        goto out;
    }

    *mr = section.mr;
    return memory_region_get_ram_ptr(section.mr) + section.offset_within_region;

out:
    memory_region_unref(section.mr);
    *mr = NULL;
    return NULL;
}
Example #6
File: vring.c Project: JMR-b/qemu
static void vring_unmap(void *buffer, bool is_write)
{
    ram_addr_t addr;
    MemoryRegion *mr;

    /* Look up the MemoryRegion backing this host pointer and drop the
     * reference that vring_map() took via memory_region_find().
     */
    mr = qemu_ram_addr_from_host(buffer, &addr);
    memory_region_unref(mr);
}
Example #7
static int load_netboot_image(Error **errp)
{
    S390IPLState *ipl = get_ipl_device();
    char *netboot_filename;
    MemoryRegion *sysmem = get_system_memory();
    MemoryRegion *mr = NULL;
    void *ram_ptr = NULL;
    int img_size = -1;

    mr = memory_region_find(sysmem, 0, 1).mr;
    if (!mr) {
        error_setg(errp, "Failed to find memory region at address 0");
        return -1;
    }

    ram_ptr = memory_region_get_ram_ptr(mr);
    if (!ram_ptr) {
        error_setg(errp, "No RAM found");
        goto unref_mr;
    }

    netboot_filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, ipl->netboot_fw);
    if (netboot_filename == NULL) {
        error_setg(errp, "Could not find network bootloader '%s'",
                   ipl->netboot_fw);
        goto unref_mr;
    }

    img_size = load_elf_ram(netboot_filename, NULL, NULL, &ipl->start_addr,
                            NULL, NULL, 1, EM_S390, 0, 0, NULL, false);

    if (img_size < 0) {
        img_size = load_image_size(netboot_filename, ram_ptr, ram_size);
        ipl->start_addr = KERN_IMAGE_START;
    }

    if (img_size < 0) {
        error_setg(errp, "Failed to load network bootloader");
    }

    g_free(netboot_filename);

unref_mr:
    memory_region_unref(mr);
    return img_size;
}
Example #8
/**
 * Install new regions list
 */
static void hostmem_listener_commit(MemoryListener *listener)
{
    HostMem *hostmem = container_of(listener, HostMem, listener);
    int i;

    qemu_mutex_lock(&hostmem->current_regions_lock);
    for (i = 0; i < hostmem->num_current_regions; i++) {
        memory_region_unref(hostmem->current_regions[i].mr);
    }
    g_free(hostmem->current_regions);
    hostmem->current_regions = hostmem->new_regions;
    hostmem->num_current_regions = hostmem->num_new_regions;
    qemu_mutex_unlock(&hostmem->current_regions_lock);

    /* Reset new regions list */
    hostmem->new_regions = NULL;
    hostmem->num_new_regions = 0;
}
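The commit hook above only drops references: it assumes the listener's region_add path took one reference per section it recorded in new_regions. Below is a minimal sketch of such an add hook; the name hostmem_listener_append_region, the HostMemRegion element type, and the g_renew()-based growth are illustrative assumptions, and only the ->mr field is implied by the code above.

static void hostmem_listener_append_region(MemoryListener *listener,
                                           MemoryRegionSection *section)
{
    HostMem *hostmem = container_of(listener, HostMem, listener);
    int i = hostmem->num_new_regions;

    /* Keep the region alive until a later commit unrefs this list entry */
    memory_region_ref(section->mr);

    hostmem->new_regions = g_renew(HostMemRegion, hostmem->new_regions, i + 1);
    hostmem->new_regions[i].mr = section->mr;
    hostmem->num_new_regions = i + 1;
}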
Example #9
File: vring.c Project: JMR-b/qemu
/* vring_map can be coupled with vring_unmap or (if you still have the
 * value returned in *mr) memory_region_unref.
 * Returns NULL on failure.
 * Callers that can handle a partial mapping must supply mapped_len pointer to
 * get the actual length mapped.
 * Passing mapped_len == NULL requires either a full mapping or a failure.
 */
static void *vring_map(MemoryRegion **mr, hwaddr phys,
                       hwaddr len, hwaddr *mapped_len,
                       bool is_write)
{
    MemoryRegionSection section = memory_region_find(get_system_memory(), phys, len);
    uint64_t size;

    if (!section.mr) {
        goto out;
    }

    size = int128_get64(section.size);
    assert(size);

    /* Passing mapped_len == NULL requires either a full mapping or a failure. */
    if (!mapped_len && size < len) {
        goto out;
    }

    if (is_write && section.readonly) {
        goto out;
    }
    if (!memory_region_is_ram(section.mr)) {
        goto out;
    }

    /* Ignore regions with dirty logging, we cannot mark them dirty */
    if (memory_region_get_dirty_log_mask(section.mr)) {
        goto out;
    }

    if (mapped_len) {
        *mapped_len = MIN(size, len);
    }

    *mr = section.mr;
    return memory_region_get_ram_ptr(section.mr) + section.offset_within_region;

out:
    memory_region_unref(section.mr);
    *mr = NULL;
    return NULL;
}
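As the header comment says, the pointer returned by vring_map() stays valid only while the reference taken by memory_region_find() is held, and the caller releases it with either vring_unmap() or memory_region_unref() on *mr. Here is a minimal caller sketch under that contract; copy_from_guest() is hypothetical, not part of the project, and assumes the usual qemu/osdep.h includes.

static bool copy_from_guest(hwaddr gpa, void *dst, hwaddr len)
{
    MemoryRegion *mr;
    hwaddr mapped;
    void *ptr = vring_map(&mr, gpa, len, &mapped, false);

    if (!ptr) {
        return false;               /* vring_map() already dropped the ref */
    }
    memcpy(dst, ptr, mapped);       /* mapped may be shorter than len */
    memory_region_unref(mr);        /* balances the ref held by the mapping */
    return mapped == len;
}

vring_setup() in Example #11 uses the same function with mapped_len == NULL, so any partial mapping there is reported as a failure.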
Example #10
static int kvm_its_send_msi(GICv3ITSState *s, uint32_t value, uint16_t devid)
{
    struct kvm_msi msi;

    if (unlikely(!s->translater_gpa_known)) {
        MemoryRegion *mr = &s->iomem_its_translation;
        MemoryRegionSection mrs;

        mrs = memory_region_find(mr, 0, 1);
        /* Only the guest physical address is needed, so the reference taken
         * by memory_region_find() can be dropped immediately.
         */
        memory_region_unref(mrs.mr);
        s->gits_translater_gpa = mrs.offset_within_address_space + 0x40;
        s->translater_gpa_known = true;
    }

    msi.address_lo = extract64(s->gits_translater_gpa, 0, 32);
    msi.address_hi = extract64(s->gits_translater_gpa, 32, 32);
    msi.data = le32_to_cpu(value);
    msi.flags = KVM_MSI_VALID_DEVID;
    msi.devid = devid;
    memset(msi.pad, 0, sizeof(msi.pad));

    return kvm_vm_ioctl(kvm_state, KVM_SIGNAL_MSI, &msi);
}
Example #11
File: vring.c Project: JMR-b/qemu
/* Map the guest's vring to host memory */
bool vring_setup(Vring *vring, VirtIODevice *vdev, int n)
{
    struct vring *vr = &vring->vr;
    hwaddr addr;
    hwaddr size;
    void *ptr;

    vring->broken = false;
    vr->num = virtio_queue_get_num(vdev, n);

    addr = virtio_queue_get_desc_addr(vdev, n);
    size = virtio_queue_get_desc_size(vdev, n);
    /* Map the descriptor area as read only */
    ptr = vring_map(&vring->mr_desc, addr, size, NULL, false);
    if (!ptr) {
        error_report("Failed to map 0x%" HWADDR_PRIx " byte for vring desc "
                     "at 0x%" HWADDR_PRIx,
                      size, addr);
        goto out_err_desc;
    }
    vr->desc = ptr;

    addr = virtio_queue_get_avail_addr(vdev, n);
    size = virtio_queue_get_avail_size(vdev, n);
    /* Add the size of the used_event_idx */
    size += sizeof(uint16_t);
    /* Map the driver area as read only */
    ptr = vring_map(&vring->mr_avail, addr, size, NULL, false);
    if (!ptr) {
        error_report("Failed to map 0x%" HWADDR_PRIx " byte for vring avail "
                     "at 0x%" HWADDR_PRIx,
                      size, addr);
        goto out_err_avail;
    }
    vr->avail = ptr;

    addr = virtio_queue_get_used_addr(vdev, n);
    size = virtio_queue_get_used_size(vdev, n);
    /* Add the size of the avail_event_idx */
    size += sizeof(uint16_t);
    /* Map the device area as read-write */
    ptr = vring_map(&vring->mr_used, addr, size, NULL, true);
    if (!ptr) {
        error_report("Failed to map 0x%" HWADDR_PRIx " byte for vring used "
                     "at 0x%" HWADDR_PRIx,
                      size, addr);
        goto out_err_used;
    }
    vr->used = ptr;

    vring->last_avail_idx = virtio_queue_get_last_avail_idx(vdev, n);
    vring->last_used_idx = vring_get_used_idx(vdev, vring);
    vring->signalled_used = 0;
    vring->signalled_used_valid = false;

    trace_vring_setup(virtio_queue_get_ring_addr(vdev, n),
                      vring->vr.desc, vring->vr.avail, vring->vr.used);
    return true;

out_err_used:
    memory_region_unref(vring->mr_avail);
out_err_avail:
    memory_region_unref(vring->mr_desc);
out_err_desc:
    vring->broken = true;
    return false;
}
Example #12
void framebuffer_update_display(
    DisplaySurface *ds,
    MemoryRegion *address_space,
    hwaddr base,
    int cols, /* Width in pixels.  */
    int rows, /* Height in pixels.  */
    int src_width, /* Length of source line, in bytes.  */
    int dest_row_pitch, /* Bytes between adjacent output rows.  */
    int dest_col_pitch, /* Bytes between adjacent output pixels within a row.  */
    int invalidate, /* nonzero to redraw the whole image.  */
    drawfn fn,
    void *opaque,
    int *first_row, /* Input and output.  */
    int *last_row /* Output only */)
{
    hwaddr src_len;
    uint8_t *dest;
    uint8_t *src;
    uint8_t *src_base;
    int first, last = 0;
    int dirty;
    int i;
    ram_addr_t addr;
    MemoryRegionSection mem_section;
    MemoryRegion *mem;

    i = *first_row;
    *first_row = -1;
    src_len = src_width * rows;

    mem_section = memory_region_find(address_space, base, src_len);
    mem = mem_section.mr;
    if (int128_get64(mem_section.size) != src_len ||
            !memory_region_is_ram(mem_section.mr)) {
        goto out;
    }
    assert(mem);
    assert(mem_section.offset_within_address_space == base);

    memory_region_sync_dirty_bitmap(mem);
    if (!memory_region_is_logging(mem, DIRTY_MEMORY_VGA)) {
        invalidate = true;
    }

    src_base = cpu_physical_memory_map(base, &src_len, 0);
    /* If we can't map the framebuffer then bail.  We could try harder,
       but it's not really worth it as dirty flag tracking will probably
       already have failed above.  */
    if (!src_base)
        goto out;
    if (src_len != src_width * rows) {
        cpu_physical_memory_unmap(src_base, src_len, 0, 0);
        goto out;
    }
    src = src_base;
    dest = surface_data(ds);
    if (dest_col_pitch < 0)
        dest -= dest_col_pitch * (cols - 1);
    if (dest_row_pitch < 0) {
        dest -= dest_row_pitch * (rows - 1);
    }
    first = -1;
    addr = mem_section.offset_within_region;

    addr += i * src_width;
    src += i * src_width;
    dest += i * dest_row_pitch;

    for (; i < rows; i++) {
        dirty = memory_region_get_dirty(mem, addr, src_width,
                                        DIRTY_MEMORY_VGA);
        if (dirty || invalidate) {
            fn(opaque, dest, src, cols, dest_col_pitch);
            if (first == -1)
                first = i;
            last = i;
        }
        addr += src_width;
        src += src_width;
        dest += dest_row_pitch;
    }
    cpu_physical_memory_unmap(src_base, src_len, 0, 0);
    if (first < 0) {
        goto out;
    }
    memory_region_reset_dirty(mem, mem_section.offset_within_region, src_len,
                              DIRTY_MEMORY_VGA);
    *first_row = first;
    *last_row = last;
out:
    memory_region_unref(mem);
}
Example #13
static void whpx_region_del(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    whpx_process_section(section, 0);
    memory_region_unref(section->mr);
}
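The del hook above releases a reference, which implies the matching region_add hook takes one when the section is installed. A sketch of that counterpart follows, assuming whpx_process_section(section, 1) is the add-side call; the body is an assumption, and only the ref/unref pairing is implied by the code above.

static void whpx_region_add(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    /* Take the reference that whpx_region_del() releases later */
    memory_region_ref(section->mr);
    whpx_process_section(section, 1);
}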