Example #1
File: ipl.c, Project: bear987978897/qemu
static void s390_ipl_prepare_qipl(S390CPU *cpu)
{
    S390IPLState *ipl = get_ipl_device();
    uint8_t *addr;
    uint64_t len = 4096;

    addr = cpu_physical_memory_map(cpu->env.psa, &len, 1);
    if (!addr || len < QIPL_ADDRESS + sizeof(QemuIplParameters)) {
        error_report("Cannot set QEMU IPL parameters");
        return;
    }
    memcpy(addr + QIPL_ADDRESS, &ipl->qipl, sizeof(QemuIplParameters));
    cpu_physical_memory_unmap(addr, len, 1, len);
}
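
The pattern above (map, check the returned length, copy, unmap) is the core contract of this API: cpu_physical_memory_map() may hand back a mapping shorter than requested, or a bounce buffer, so the caller must verify the length before touching the memory and must report via unmap's access_len how much was actually written. A minimal sketch of that pattern as a reusable helper, assuming current QEMU headers; the helper name and error handling are illustrative, not from the source:

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "exec/cpu-common.h"    /* cpu_physical_memory_map/unmap */

/* Hypothetical helper: copy host data into guest physical memory,
 * refusing to proceed on a failed or short mapping. */
static int copy_to_guest_phys(hwaddr gpa, const void *buf, hwaddr size)
{
    hwaddr len = size;
    void *p = cpu_physical_memory_map(gpa, &len, true);

    if (!p || len < size) {
        if (p) {
            /* access_len = 0: nothing was actually written back. */
            cpu_physical_memory_unmap(p, len, true, 0);
        }
        error_report("cannot map guest memory at 0x%" PRIx64, (uint64_t)gpa);
        return -1;
    }
    memcpy(p, buf, size);
    /* access_len = size: lets dirty tracking and bounce-buffer write-back
     * cover exactly the bytes we touched. */
    cpu_physical_memory_unmap(p, len, true, size);
    return 0;
}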
Example #2
static void rtas_nvram_fetch(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                             uint32_t token, uint32_t nargs,
                             target_ulong args,
                             uint32_t nret, target_ulong rets)
{
    sPAPRNVRAM *nvram = spapr->nvram;
    hwaddr offset, buffer, len;
    int alen;
    void *membuf;

    if ((nargs != 3) || (nret != 2)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    if (!nvram) {
        rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
        rtas_st(rets, 1, 0);
        return;
    }

    offset = rtas_ld(args, 0);
    buffer = rtas_ld(args, 1);
    len = rtas_ld(args, 2);

    if (((offset + len) < offset)
        || ((offset + len) > nvram->size)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        rtas_st(rets, 1, 0);
        return;
    }

    membuf = cpu_physical_memory_map(buffer, &len, 1);
    if (nvram->drive) {
        alen = bdrv_pread(nvram->drive, offset, membuf, len);
    } else {
        assert(nvram->buf);

        memcpy(membuf, nvram->buf + offset, len);
        alen = len;
    }
    cpu_physical_memory_unmap(membuf, len, 1, len);

    rtas_st(rets, 0, (alen < len) ? RTAS_OUT_HW_ERROR : RTAS_OUT_SUCCESS);
    rtas_st(rets, 1, (alen < 0) ? 0 : alen);
}
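
Worth flagging: unlike Example #1, this handler never checks the result of cpu_physical_memory_map(), so a guest passing an unmappable buffer address leaves membuf NULL and the subsequent memcpy/bdrv_pread crashes. A guarded version of just the mapping step, as a sketch (maplen is an illustrative local, not in the source):

    hwaddr maplen = len;

    membuf = cpu_physical_memory_map(buffer, &maplen, 1);
    if (!membuf || maplen < len) {
        /* Failed or short mapping: fail the RTAS call cleanly instead
         * of dereferencing NULL below. */
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        rtas_st(rets, 1, 0);
        return;
    }

The unmap at the end would then pass maplen rather than len.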
Example #3
/* S390 hardware initialisation */
static void s390_init(MachineState *machine)
{
    ram_addr_t my_ram_size;
    void *virtio_region;
    hwaddr virtio_region_len;
    hwaddr virtio_region_start;

    if (machine->ram_slots) {
        error_report("Memory hotplug not supported by the selected machine.");
        exit(EXIT_FAILURE);
    }
    s390_sclp_init();
    my_ram_size = machine->ram_size;

    /* get a BUS */
    s390_bus = s390_virtio_bus_init(&my_ram_size);
    s390_init_ipl_dev(machine->kernel_filename, machine->kernel_cmdline,
                      machine->initrd_filename, ZIPL_FILENAME, false);
    s390_flic_init();

    /* register hypercalls */
    s390_virtio_register_hcalls();

    /* allocate RAM */
    s390_memory_init(my_ram_size);

    /* clear virtio region */
    virtio_region_len = my_ram_size - ram_size;
    virtio_region_start = ram_size;
    virtio_region = cpu_physical_memory_map(virtio_region_start,
                                            &virtio_region_len, true);
    memset(virtio_region, 0, virtio_region_len);
    cpu_physical_memory_unmap(virtio_region, virtio_region_len, 1,
                              virtio_region_len);

    /* init CPUs */
    s390_init_cpus(machine->cpu_model);

    /* Create VirtIO network adapters */
    s390_create_virtio_net((BusState *)s390_bus, "virtio-net-s390");

    /* Register savevm handler for guest TOD clock */
    register_savevm(NULL, "todclock", 0, 1, gtod_save, gtod_load, NULL);
}
Example #4
static void mvc_fast_memset(CPUS390XState *env, uint32_t l, uint64_t dest,
                            uint8_t byte)
{
    hwaddr dest_phys;
    hwaddr len = l;
    void *dest_p;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
    int flags;

    if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags)) {
        cpu_stb_data(env, dest, byte);
        cpu_abort(env, "should never reach here");
    }
    dest_phys |= dest & ~TARGET_PAGE_MASK;

    dest_p = cpu_physical_memory_map(dest_phys, &len, 1);

    memset(dest_p, byte, len);

    cpu_physical_memory_unmap(dest_p, len, 1, len);
}
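
For reference, the contract these examples rely on, sketched from QEMU's exec headers (the is_write parameter was an int in this era and is a bool in current trees):

/* Map guest physical memory into the host address space. *plen is updated
 * to the length actually mapped, which may be shorter than requested. */
void *cpu_physical_memory_map(hwaddr addr, hwaddr *plen, int is_write);

/* Release a mapping. access_len reports how many bytes were really
 * accessed, so dirty tracking and bounce-buffer write-back cover
 * exactly the touched range. */
void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len);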
Example #5
void yagl_compiled_transfer_destroy(struct yagl_compiled_transfer *ct)
{
    int i;

    if (ct->in_list) {
        QLIST_REMOVE(ct, entry);
        ct->in_list = false;
    }

    for (i = 0; i < ct->num_sections; ++i) {
        cpu_physical_memory_unmap(ct->sections[i].map_base,
                                  ct->sections[i].map_len,
                                  0,
                                  ct->sections[i].map_len);
    }

    g_free(ct->sections);

    ct->sections = NULL;
    ct->num_sections = 0;

    g_free(ct);
}
Example #6
static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct VirtIODevice *vdev,
                                struct vhost_virtqueue *vq,
                                unsigned idx)
{
    target_phys_addr_t s, l, a;
    int r;
    struct vhost_vring_file file = {
        .index = idx,
    };
    struct vhost_vring_state state = {
        .index = idx,
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    if (!vdev->binding->set_host_notifier) {
        fprintf(stderr, "binding does not support host notifiers\n");
        return -ENOSYS;
    }

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = ioctl(dev->control, VHOST_SET_VRING_NUM, &state);
    if (r) {
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = ioctl(dev->control, VHOST_SET_VRING_BASE, &state);
    if (r) {
        return -errno;
    }

    s = l = virtio_queue_get_desc_size(vdev, idx);
    a = virtio_queue_get_desc_addr(vdev, idx);
    vq->desc = cpu_physical_memory_map(a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    s = l = virtio_queue_get_avail_size(vdev, idx);
    a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = cpu_physical_memory_map(a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = cpu_physical_memory_map(a, &l, 1);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    vq->ring_size = s = l = virtio_queue_get_ring_size(vdev, idx);
    vq->ring_phys = a = virtio_queue_get_ring_addr(vdev, idx);
    vq->ring = cpu_physical_memory_map(a, &l, 1);
    if (!vq->ring || l != s) {
        r = -ENOMEM;
        goto fail_alloc_ring;
    }

    r = vhost_virtqueue_set_addr(dev, vq, idx, dev->log_enabled);
    if (r < 0) {
        r = -errno;
        goto fail_alloc;
    }
    r = vdev->binding->set_host_notifier(vdev->binding_opaque, idx, true);
    if (r < 0) {
        fprintf(stderr, "Error binding host notifier: %d\n", -r);
        goto fail_host_notifier;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = ioctl(dev->control, VHOST_SET_VRING_KICK, &file);
    if (r) {
        r = -errno;
        goto fail_kick;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    r = ioctl(dev->control, VHOST_SET_VRING_CALL, &file);
    if (r) {
        r = -errno;
        goto fail_call;
    }

    return 0;

fail_call:
fail_kick:
    vdev->binding->set_host_notifier(vdev->binding_opaque, idx, false);
fail_host_notifier:
fail_alloc:
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, 0);
fail_alloc_ring:
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              0, 0);
fail_alloc_used:
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, 0);
fail_alloc_avail:
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, 0);
fail_alloc_desc:
    return r;
}
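
The error handling above is the classic goto-unwind ladder: each mapping gets a label placed just below its own failure point, so a failure at step N releases steps N-1 down to 1 in reverse order, and an access_len of 0 records that nothing was written back. A stripped-down standalone illustration of the idiom (plain C, nothing QEMU-specific, all names invented):

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins for the four ring mappings above. */
static int acquire(const char *name, void **out)
{
    *out = malloc(16);
    printf("acquire %s\n", name);
    return *out ? 0 : -1;
}

static void release(const char *name, void *p)
{
    printf("release %s\n", name);
    free(p);
}

int setup(void)
{
    void *desc, *avail, *used, *ring;

    if (acquire("desc", &desc))   goto fail_desc;
    if (acquire("avail", &avail)) goto fail_avail;
    if (acquire("used", &used))   goto fail_used;
    if (acquire("ring", &ring))   goto fail_ring;
    return 0;                     /* all four stay acquired on success */

    /* Unwind in strict reverse order of acquisition. */
fail_ring:
    release("used", used);
fail_used:
    release("avail", avail);
fail_avail:
    release("desc", desc);
fail_desc:
    return -1;
}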
Example #7
/* S390 hardware initialisation */
static void s390_init(ram_addr_t my_ram_size,
                      const char *boot_device,
                      const char *kernel_filename,
                      const char *kernel_cmdline,
                      const char *initrd_filename,
                      const char *cpu_model)
{
    CPUState *env = NULL;
    MemoryRegion *sysmem = get_system_memory();
    MemoryRegion *ram = g_new(MemoryRegion, 1);
    ram_addr_t kernel_size = 0;
    ram_addr_t initrd_offset;
    ram_addr_t initrd_size = 0;
    int shift = 0;
    uint8_t *storage_keys;
    void *virtio_region;
    target_phys_addr_t virtio_region_len;
    target_phys_addr_t virtio_region_start;
    int i;

    /* s390x ram size detection needs a 16bit multiplier + an increment. So
       guests > 64GB can be specified in 2MB steps etc. */
    while ((my_ram_size >> (20 + shift)) > 65535) {
        shift++;
    }
    my_ram_size = my_ram_size >> (20 + shift) << (20 + shift);

    /* let's propagate the changed ram size into the global variable. */
    ram_size = my_ram_size;

    /* get a BUS */
    s390_bus = s390_virtio_bus_init(&my_ram_size);

    /* allocate RAM */
    memory_region_init_ram(ram, "s390.ram", my_ram_size);
    vmstate_register_ram_global(ram);
    memory_region_add_subregion(sysmem, 0, ram);

    /* clear virtio region */
    virtio_region_len = my_ram_size - ram_size;
    virtio_region_start = ram_size;
    virtio_region = cpu_physical_memory_map(virtio_region_start,
                                            &virtio_region_len, true);
    memset(virtio_region, 0, virtio_region_len);
    cpu_physical_memory_unmap(virtio_region, virtio_region_len, 1,
                              virtio_region_len);

    /* allocate storage keys */
    storage_keys = g_malloc0(my_ram_size / TARGET_PAGE_SIZE);

    /* init CPUs */
    if (cpu_model == NULL) {
        cpu_model = "host";
    }

    ipi_states = g_malloc(sizeof(CPUState *) * smp_cpus);

    for (i = 0; i < smp_cpus; i++) {
        CPUState *tmp_env;

        tmp_env = cpu_init(cpu_model);
        if (!env) {
            env = tmp_env;
        }
        ipi_states[i] = tmp_env;
        tmp_env->halted = 1;
        tmp_env->exception_index = EXCP_HLT;
        tmp_env->storage_keys = storage_keys;
    }

    /* One CPU has to run */
    s390_add_running_cpu(env);

    if (kernel_filename) {

        kernel_size = load_elf(kernel_filename, NULL, NULL, NULL, NULL,
                               NULL, 1, ELF_MACHINE, 0);
        if (kernel_size == -1UL) {
            kernel_size = load_image_targphys(kernel_filename, 0, ram_size);
        }
        /*
         * we cannot rely on the ELF entry point, since up to 3.2 this
         * value was 0x800 (the SALIPL loader) and it won't work. For
         * all (Linux) cases 0x10000 (KERN_IMAGE_START) should be fine.
         */
        env->psw.addr = KERN_IMAGE_START;
        env->psw.mask = 0x0000000180000000ULL;
    } else {
        ram_addr_t bios_size = 0;
        char *bios_filename;

        /* Load zipl bootloader */
        if (bios_name == NULL) {
            bios_name = ZIPL_FILENAME;
        }

        bios_filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
        bios_size = load_image_targphys(bios_filename, ZIPL_LOAD_ADDR, 4096);
        g_free(bios_filename);

        if ((long)bios_size < 0) {
            hw_error("could not load bootloader '%s'\n", bios_name);
        }

        if (bios_size > 4096) {
            hw_error("stage1 bootloader is > 4k\n");
        }

        env->psw.addr = ZIPL_START;
        env->psw.mask = 0x0000000180000000ULL;
    }

    if (initrd_filename) {
        initrd_offset = INITRD_START;
        while (kernel_size + 0x100000 > initrd_offset) {
            initrd_offset += 0x100000;
        }
        initrd_size = load_image_targphys(initrd_filename, initrd_offset,
                                          ram_size - initrd_offset);
        /* we have to overwrite values in the kernel image, which are "rom" */
        memcpy(rom_ptr(INITRD_PARM_START), &initrd_offset, 8);
        memcpy(rom_ptr(INITRD_PARM_SIZE), &initrd_size, 8);
    }

    if (kernel_cmdline) {
        /* we have to overwrite values in the kernel image, which are "rom" */
        memcpy(rom_ptr(KERN_PARM_AREA), kernel_cmdline,
               strlen(kernel_cmdline) + 1);
    }

    /* Create VirtIO network adapters */
    for(i = 0; i < nb_nics; i++) {
        NICInfo *nd = &nd_table[i];
        DeviceState *dev;

        if (!nd->model) {
            nd->model = g_strdup("virtio");
        }

        if (strcmp(nd->model, "virtio")) {
            fprintf(stderr, "S390 only supports VirtIO nics\n");
            exit(1);
        }

        dev = qdev_create((BusState *)s390_bus, "virtio-net-s390");
        qdev_set_nic_properties(dev, nd);
        qdev_init_nofail(dev);
    }

    /* Create VirtIO disk drives */
    for(i = 0; i < MAX_BLK_DEVS; i++) {
        DriveInfo *dinfo;
        DeviceState *dev;

        dinfo = drive_get(IF_IDE, 0, i);
        if (!dinfo) {
            continue;
        }

        dev = qdev_create((BusState *)s390_bus, "virtio-blk-s390");
        qdev_prop_set_drive_nofail(dev, "drive", dinfo->bdrv);
        qdev_init_nofail(dev);
    }
}
Example #8
void yagl_compiled_transfer_prepare(struct yagl_compiled_transfer *ct)
{
    struct yagl_vector v;
    target_ulong last_page_va = YAGL_TARGET_PAGE_VA(ct->va + ct->len - 1);
    target_ulong cur_va = ct->va;
    uint32_t len = ct->len;
    int i, num_sections;

    YAGL_LOG_FUNC_ENTER(yagl_compiled_transfer_prepare,
                        "va = 0x%X, len = 0x%X, is_write = %u",
                        (uint32_t)ct->va,
                        ct->len,
                        (uint32_t)ct->is_write);

    if (ct->in_list) {
        QLIST_REMOVE(ct, entry);
        ct->in_list = false;
    }

    yagl_vector_init(&v, sizeof(struct yagl_compiled_transfer_section), 0);

    while (len) {
        target_ulong start_page_va = YAGL_TARGET_PAGE_VA(cur_va);
        hwaddr start_page_pa = yagl_pa(start_page_va);
        target_ulong end_page_va;
        struct yagl_compiled_transfer_section section;

        if (!start_page_pa) {
            YAGL_LOG_ERROR("yagl_pa of va 0x%X failed", (uint32_t)start_page_va);
            goto fail;
        }

        end_page_va = start_page_va;

        while (end_page_va < last_page_va) {
            target_ulong next_page_va = end_page_va + TARGET_PAGE_SIZE;
            hwaddr next_page_pa = yagl_pa(next_page_va);

            if (!next_page_pa) {
                YAGL_LOG_ERROR("yagl_pa of va 0x%X failed", (uint32_t)next_page_va);
                goto fail;
            }

            /*
             * If the target pages are not linearly spaced, stop.
             */

            if ((next_page_pa < start_page_pa) ||
                ((next_page_pa - start_page_pa) >
                 (next_page_va - start_page_va))) {
                break;
            }

            end_page_va = next_page_va;
        }

        section.map_len = end_page_va + TARGET_PAGE_SIZE - start_page_va;
        section.map_base = cpu_physical_memory_map(start_page_pa, &section.map_len, 0);

        if (!section.map_base || !section.map_len) {
            YAGL_LOG_ERROR("cpu_physical_memory_map(0x%X, %u) failed",
                           (uint32_t)start_page_pa,
                           (uint32_t)section.map_len);
            goto fail;
        }

        section.len = end_page_va + TARGET_PAGE_SIZE - cur_va;

        if (section.len > len) {
            section.len = len;
        }

        section.base = (char*)section.map_base + YAGL_TARGET_PAGE_OFFSET(cur_va);

        yagl_vector_push_back(&v, &section);

        len -= section.len;
        cur_va += section.len;
    }

    ct->num_sections = yagl_vector_size(&v);
    ct->sections = yagl_vector_detach(&v);

    YAGL_LOG_FUNC_EXIT("num_sections = %d", ct->num_sections);

    return;

fail:
    num_sections = yagl_vector_size(&v);

    for (i = 0; i < num_sections; ++i) {
        struct yagl_compiled_transfer_section *section =
            (struct yagl_compiled_transfer_section*)
                ((char*)yagl_vector_data(&v) +
                 (i * sizeof(struct yagl_compiled_transfer_section)));

        cpu_physical_memory_unmap(section->map_base,
                                  section->map_len,
                                  0,
                                  section->map_len);
    }

    yagl_vector_cleanup(&v);

    YAGL_LOG_FUNC_EXIT(NULL);
}
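
The inner while loop above grows a section for as long as guest-physical pages stay linearly spaced relative to their virtual addresses; once the spacing diverges, the section is closed and the next cpu_physical_memory_map() starts a new one. The intended invariant, written out as a small standalone predicate (illustrative names, not from the source):

#include <stdint.h>
#include <stdbool.h>

/* True if [start_va, next_va] can sit in one linear physical run, i.e.
 * the physical delta equals the virtual delta. The loop above uses a
 * slightly looser test (it rejects only backwards jumps and pa deltas
 * larger than the va delta), but lockstep advance is the case it wants. */
static bool pages_linear(uint64_t start_va, uint64_t start_pa,
                         uint64_t next_va, uint64_t next_pa)
{
    return next_pa >= start_pa &&
           (next_pa - start_pa) == (next_va - start_va);
}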
Example #9
void framebuffer_update_display(
    DisplayState *ds,
    MemoryRegion *address_space,
    target_phys_addr_t base,
    int cols, /* Width in pixels.  */
    int rows, /* Height in pixels.  */
    int src_width, /* Length of source line, in bytes.  */
    int dest_row_pitch, /* Bytes between adjacent horizontal output pixels.  */
    int dest_col_pitch, /* Bytes between adjacent vertical output pixels.  */
    int invalidate, /* nonzero to redraw the whole image.  */
    drawfn fn,
    void *opaque,
    int *first_row, /* Input and output.  */
    int *last_row /* Output only */)
{
    target_phys_addr_t src_len;
    uint8_t *dest;
    uint8_t *src;
    uint8_t *src_base;
    int first, last = 0;
    int dirty;
    int i;
    ram_addr_t addr;
    MemoryRegionSection mem_section;
    MemoryRegion *mem;

    i = *first_row;
    *first_row = -1;
    src_len = src_width * rows;

    mem_section = memory_region_find(address_space, base, src_len);
    if (mem_section.size != src_len || !memory_region_is_ram(mem_section.mr)) {
        return;
    }
    mem = mem_section.mr;
    assert(mem);
    assert(mem_section.offset_within_address_space == base);

    memory_region_sync_dirty_bitmap(mem);
    src_base = cpu_physical_memory_map(base, &src_len, 0);
    /* If we can't map the framebuffer then bail.  We could try harder,
       but it's not really worth it as dirty flag tracking will probably
       already have failed above.  */
    if (!src_base)
        return;
    if (src_len != src_width * rows) {
        cpu_physical_memory_unmap(src_base, src_len, 0, 0);
        return;
    }
    src = src_base;
    dest = ds_get_data(ds);
    if (dest_col_pitch < 0)
        dest -= dest_col_pitch * (cols - 1);
    if (dest_row_pitch < 0) {
        dest -= dest_row_pitch * (rows - 1);
    }
    first = -1;
    addr = mem_section.offset_within_region;

    addr += i * src_width;
    src += i * src_width;
    dest += i * dest_row_pitch;

    for (; i < rows; i++) {
        dirty = memory_region_get_dirty(mem, addr, src_width,
                                             DIRTY_MEMORY_VGA);
        if (dirty || invalidate) {
            fn(opaque, dest, src, cols, dest_col_pitch);
            if (first == -1)
                first = i;
            last = i;
        }
        addr += src_width;
        src += src_width;
        dest += dest_row_pitch;
    }
    cpu_physical_memory_unmap(src_base, src_len, 0, 0);
    if (first < 0) {
        return;
    }
    memory_region_reset_dirty(mem, mem_section.offset_within_region, src_len,
                              DIRTY_MEMORY_VGA);
    *first_row = first;
    *last_row = last;
    return;
}
Example #10
File: framebuffer.c, Project: 3a9LL/panda
void framebuffer_update_display(
    DisplayState *ds,
    target_phys_addr_t base,
    int cols, /* Width in pixels.  */
    int rows, /* Height in pixels.  */
    int src_width, /* Length of source line, in bytes.  */
    int dest_row_pitch, /* Bytes between adjacent horizontal output pixels.  */
    int dest_col_pitch, /* Bytes between adjacent vertical output pixels.  */
    int invalidate, /* nonzero to redraw the whole image.  */
    drawfn fn,
    void *opaque,
    int *first_row, /* Input and output.  */
    int *last_row /* Output only */)
{
    target_phys_addr_t src_len;
    uint8_t *dest;
    uint8_t *src;
    uint8_t *src_base;
    int first, last = 0;
    int dirty;
    int i;
    ram_addr_t addr;
    ram_addr_t pd;
    ram_addr_t pd2;

    i = *first_row;
    *first_row = -1;
    src_len = src_width * rows;

    cpu_physical_sync_dirty_bitmap(base, base + src_len);
    pd = cpu_get_physical_page_desc(base);
    pd2 = cpu_get_physical_page_desc(base + src_len - 1);
    /* We should really check that this is a continuous ram region.
       Instead we just check that the first and last pages are
       both ram, and the right distance apart.  */
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM
        || (pd2 & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
        return;
    }
    pd = (pd & TARGET_PAGE_MASK) + (base & ~TARGET_PAGE_MASK);
    if (((pd + src_len - 1) & TARGET_PAGE_MASK) != (pd2 & TARGET_PAGE_MASK)) {
        return;
    }

    src_base = cpu_physical_memory_map(base, &src_len, 0);
    /* If we can't map the framebuffer then bail.  We could try harder,
       but it's not really worth it as dirty flag tracking will probably
       already have failed above.  */
    if (!src_base)
        return;
    if (src_len != src_width * rows) {
        cpu_physical_memory_unmap(src_base, src_len, 0, 0);
        return;
    }
    src = src_base;
    dest = ds_get_data(ds);
    if (dest_col_pitch < 0)
        dest -= dest_col_pitch * (cols - 1);
    if (dest_row_pitch < 0) {
        dest -= dest_row_pitch * (rows - 1);
    }
    first = -1;
    addr = pd;

    addr += i * src_width;
    src += i * src_width;
    dest += i * dest_row_pitch;

    for (; i < rows; i++) {
        target_phys_addr_t dirty_offset;
        dirty = 0;
        dirty_offset = 0;
        while (addr + dirty_offset < TARGET_PAGE_ALIGN(addr + src_width)) {
            dirty |= cpu_physical_memory_get_dirty(addr + dirty_offset,
                                                   VGA_DIRTY_FLAG);
            dirty_offset += TARGET_PAGE_SIZE;
        }

        if (dirty || invalidate) {
            fn(opaque, dest, src, cols, dest_col_pitch);
            if (first == -1)
                first = i;
            last = i;
        }
        addr += src_width;
        src += src_width;
        dest += dest_row_pitch;
    }
    cpu_physical_memory_unmap(src_base, src_len, 0, 0);
    if (first < 0) {
        return;
    }
    cpu_physical_memory_reset_dirty(pd, pd + src_len, VGA_DIRTY_FLAG);
    *first_row = first;
    *last_row = last;
    return;
}
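
Examples #9 and #10 are recognizably the same framebuffer_update_display() at two points in QEMU's history: #10 predates the memory API and pokes at raw page descriptors (cpu_get_physical_page_desc(), cpu_physical_memory_get_dirty()), which is why it can only spot-check that the first and last pages are RAM and the right distance apart, while #9 asks memory_region_find() one question that answers both "is this a single RAM region?" and where to do dirty tracking. The map, short-length check, and unmap handling around cpu_physical_memory_map() is identical in both.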
Example #11
File: helper.c, Project: CTU-IIG/qemu
void cpu_unmap_lowcore(LowCore *lowcore)
{
    cpu_physical_memory_unmap(lowcore, sizeof(LowCore), 1, sizeof(LowCore));
}
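
For context, the matching map side of this pair lives alongside it in helper.c. A sketch of its usual shape, hedged from memory of the tree (the exact abort path varies across versions):

LowCore *cpu_map_lowcore(CPUS390XState *env)
{
    LowCore *lowcore;
    hwaddr len = sizeof(LowCore);

    lowcore = cpu_physical_memory_map(env->psa, &len, 1);
    if (!lowcore || len < sizeof(LowCore)) {
        /* The lowcore must be fully mappable; failure is fatal
         * (sketch: real versions abort via cpu_abort/hw_error). */
        hw_error("could not map lowcore");
    }
    return lowcore;
}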
Example #12
/* FIXME: add a vq index parameter */
static void vhost_virtqueue_cleanup(struct vhost_dev *dev,
                                    struct VirtIODevice *vdev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx)
{
    struct vhost_vring_state state = {
        .index = idx % dev->nvqs,
    };
    int r;
    r = ioctl(dev->control, VHOST_GET_VRING_BASE, &state);
    if (r < 0) {
        fprintf(stderr, "vhost VQ %d ring restore failed: %d\n", idx, r);
        fflush(stderr);
    }
    virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    assert (r >= 0);
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, virtio_queue_get_ring_size(vdev, idx));
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              1, virtio_queue_get_used_size(vdev, idx));
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, virtio_queue_get_avail_size(vdev, idx));
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, virtio_queue_get_desc_size(vdev, idx));
}

static void vhost_eventfd_add(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, int fd)
{
}

static void vhost_eventfd_del(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, int fd)
{
}

int vhost_dev_init(struct vhost_dev *hdev, int devfd, bool force)
{
    uint64_t features;
    int r;
    if (devfd >= 0) {
        hdev->control = devfd;
    } else {
        hdev->control = open("/dev/vhost-net", O_RDWR);
        if (hdev->control < 0) {
            return -errno;
        }
    }
    r = ioctl(hdev->control, VHOST_SET_OWNER, NULL);
    if (r < 0) {
        goto fail;
    }

    r = ioctl(hdev->control, VHOST_GET_FEATURES, &features);
    if (r < 0) {
        goto fail;
    }
    hdev->features = features;

    hdev->memory_listener = (MemoryListener) {
        .begin = vhost_begin,
        .commit = vhost_commit,
        .region_add = vhost_region_add,
        .region_del = vhost_region_del,
        .region_nop = vhost_region_nop,
        .log_start = vhost_log_start,
        .log_stop = vhost_log_stop,
        .log_sync = vhost_log_sync,
        .log_global_start = vhost_log_global_start,
        .log_global_stop = vhost_log_global_stop,
        .eventfd_add = vhost_eventfd_add,
        .eventfd_del = vhost_eventfd_del,
        .priority = 10
    };
    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
    hdev->n_mem_sections = 0;
    hdev->mem_sections = NULL;
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    memory_listener_register(&hdev->memory_listener, NULL);
    hdev->force = force;
    return 0;
fail:
    r = -errno;
    close(hdev->control);
    return r;
}

void vhost_dev_cleanup(struct vhost_dev *hdev)
{
    memory_listener_unregister(&hdev->memory_listener);
    g_free(hdev->mem);
    g_free(hdev->mem_sections);
    close(hdev->control);
}
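
Note how the unmap flags here differ from Example #6's failure path. In this teardown the rings were actually in use, so each unmap passes the full ring size as access_len, and the used ring (the one the device writes) is unmapped with is_write = 1 so its contents reach guest memory and dirty tracking, while desc and avail are unmapped read-only. In Example #6 the mappings had never been accessed when the error hit, so every unmap there passes 0 for access_len.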
Example #13
static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
{
    int ret;
    VqInfoBlock info;
    uint8_t status;
    VirtioFeatDesc features;
    void *config;
    hwaddr indicators;
    VqConfigBlock vq_config;
    VirtioCcwDevice *dev = sch->driver_data;
    bool check_len;
    int len;
    hwaddr hw_len;

    if (!dev) {
        return -EINVAL;
    }

    trace_virtio_ccw_interpret_ccw(sch->cssid, sch->ssid, sch->schid,
                                   ccw.cmd_code);
    check_len = !((ccw.flags & CCW_FLAG_SLI) && !(ccw.flags & CCW_FLAG_DC));

    /* Look at the command. */
    switch (ccw.cmd_code) {
    case CCW_CMD_SET_VQ:
        if (check_len) {
            if (ccw.count != sizeof(info)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(info)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            info.queue = ldq_phys(ccw.cda);
            info.align = ldl_phys(ccw.cda + sizeof(info.queue));
            info.index = lduw_phys(ccw.cda + sizeof(info.queue)
                                   + sizeof(info.align));
            info.num = lduw_phys(ccw.cda + sizeof(info.queue)
                                 + sizeof(info.align)
                                 + sizeof(info.index));
            ret = virtio_ccw_set_vqs(sch, info.queue, info.align, info.index,
                                     info.num);
            sch->curr_status.scsw.count = 0;
        }
        break;
    case CCW_CMD_VDEV_RESET:
        virtio_reset(dev->vdev);
        ret = 0;
        break;
    case CCW_CMD_READ_FEAT:
        if (check_len) {
            if (ccw.count != sizeof(features)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(features)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            features.index = ldub_phys(ccw.cda + sizeof(features.features));
            if (features.index < ARRAY_SIZE(dev->host_features)) {
                features.features = dev->host_features[features.index];
            } else {
                /* Return zeroes if the guest supports more feature bits. */
                features.features = 0;
            }
            stl_le_phys(ccw.cda, features.features);
            sch->curr_status.scsw.count = ccw.count - sizeof(features);
            ret = 0;
        }
        break;
    case CCW_CMD_WRITE_FEAT:
        if (check_len) {
            if (ccw.count != sizeof(features)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(features)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            features.index = ldub_phys(ccw.cda + sizeof(features.features));
            features.features = ldl_le_phys(ccw.cda);
            if (features.index < ARRAY_SIZE(dev->host_features)) {
                if (dev->vdev->set_features) {
                    dev->vdev->set_features(dev->vdev, features.features);
                }
                dev->vdev->guest_features = features.features;
            } else {
                /*
                 * If the guest supports more feature bits, assert that it
                 * passes us zeroes for those we don't support.
                 */
                if (features.features) {
                    fprintf(stderr, "Guest bug: features[%i]=%x (expected 0)\n",
                            features.index, features.features);
                    /* XXX: do a unit check here? */
                }
            }
            sch->curr_status.scsw.count = ccw.count - sizeof(features);
            ret = 0;
        }
        break;
    case CCW_CMD_READ_CONF:
        if (check_len) {
            if (ccw.count > dev->vdev->config_len) {
                ret = -EINVAL;
                break;
            }
        }
        len = MIN(ccw.count, dev->vdev->config_len);
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            dev->vdev->get_config(dev->vdev, dev->vdev->config);
            /* XXX config space endianness */
            cpu_physical_memory_write(ccw.cda, dev->vdev->config, len);
            sch->curr_status.scsw.count = ccw.count - len;
            ret = 0;
        }
        break;
    case CCW_CMD_WRITE_CONF:
        if (check_len) {
            if (ccw.count > dev->vdev->config_len) {
                ret = -EINVAL;
                break;
            }
        }
        len = MIN(ccw.count, dev->vdev->config_len);
        hw_len = len;
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            config = cpu_physical_memory_map(ccw.cda, &hw_len, 0);
            if (!config) {
                ret = -EFAULT;
            } else {
                len = hw_len;
                /* XXX config space endianness */
                memcpy(dev->vdev->config, config, len);
                cpu_physical_memory_unmap(config, hw_len, 0, hw_len);
                if (dev->vdev->set_config) {
                    dev->vdev->set_config(dev->vdev, dev->vdev->config);
                }
                sch->curr_status.scsw.count = ccw.count - len;
                ret = 0;
            }
        }
        break;
    case CCW_CMD_WRITE_STATUS:
        if (check_len) {
            if (ccw.count != sizeof(status)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(status)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            status = ldub_phys(ccw.cda);
            virtio_set_status(dev->vdev, status);
            if (dev->vdev->status == 0) {
                virtio_reset(dev->vdev);
            }
            sch->curr_status.scsw.count = ccw.count - sizeof(status);
            ret = 0;
        }
        break;
    case CCW_CMD_SET_IND:
        if (check_len) {
            if (ccw.count != sizeof(indicators)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(indicators)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        indicators = ldq_phys(ccw.cda);
        if (!indicators) {
            ret = -EFAULT;
        } else {
            dev->indicators = indicators;
            sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
            ret = 0;
        }
        break;
    case CCW_CMD_SET_CONF_IND:
        if (check_len) {
            if (ccw.count != sizeof(indicators)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(indicators)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        indicators = ldq_phys(ccw.cda);
        if (!indicators) {
            ret = -EFAULT;
        } else {
            dev->indicators2 = indicators;
            sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
            ret = 0;
        }
        break;
    case CCW_CMD_READ_VQ_CONF:
        if (check_len) {
            if (ccw.count != sizeof(vq_config)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(vq_config)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            vq_config.index = lduw_phys(ccw.cda);
            vq_config.num_max = virtio_queue_get_num(dev->vdev,
                                                     vq_config.index);
            stw_phys(ccw.cda + sizeof(vq_config.index), vq_config.num_max);
            sch->curr_status.scsw.count = ccw.count - sizeof(vq_config);
            ret = 0;
        }
        break;
    default:
        ret = -ENOSYS;
        break;
    }
    return ret;
}
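
The check_len dance at the top of each case follows channel-program semantics: normally (check_len true) the CCW byte count must match the structure size exactly, but when the CCW sets the suppress-length-indication flag without data chaining (CCW_FLAG_SLI set, CCW_FLAG_DC clear), a larger count is tolerated and only the structure-sized prefix is consumed, which is why every branch still rejects counts that are too small.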