Example 1
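ivshmem_io_read() is the MMIO read handler for the ivshmem device registers. On an IVPOSITION read it uses memory_region_is_mapped() to report the VM ID only once the shared-memory region has been mapped, and -1 before that.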
static uint64_t ivshmem_io_read(void *opaque, hwaddr addr,
                                unsigned size)
{

    IVShmemState *s = opaque;
    uint32_t ret;

    switch (addr)
    {
        case INTRMASK:
            ret = ivshmem_IntrMask_read(s);
            break;

        case INTRSTATUS:
            ret = ivshmem_IntrStatus_read(s);
            break;

        case IVPOSITION:
            /* return my VM ID if the memory is mapped */
            if (memory_region_is_mapped(&s->ivshmem)) {
                ret = s->vm_id;
            } else {
                ret = -1;
            }
            break;

        default:
            IVSHMEM_DPRINTF("why are we reading " TARGET_FMT_plx "\n", addr);
            ret = 0;
    }

    return ret;
}
Example 2
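ppc440_pcix_clear_region() tears down a PCI bridge window: the region is deleted from its parent and unparented only if it is currently mapped, which makes the helper safe to call unconditionally.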
static void ppc440_pcix_clear_region(MemoryRegion *parent,
                                     MemoryRegion *mem)
{
    if (memory_region_is_mapped(mem)) {
        memory_region_del_subregion(parent, mem);
        object_unparent(OBJECT(mem));
    }
}
Example 3
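pci_ivshmem_exit() guards the entire shared-memory teardown (munmap, closing the backing fd, vmstate unregistration, subregion removal) behind memory_region_is_mapped(), so cleanup only runs if BAR2 was actually set up.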
static void pci_ivshmem_exit(PCIDevice *dev)
{
    IVShmemState *s = IVSHMEM(dev);
    int i;

    fifo8_destroy(&s->incoming_fifo);

    if (s->migration_blocker) {
        migrate_del_blocker(s->migration_blocker);
        error_free(s->migration_blocker);
    }

    if (memory_region_is_mapped(&s->ivshmem)) {
        if (!s->hostmem) {
            void *addr = memory_region_get_ram_ptr(&s->ivshmem);
            int fd;

            if (munmap(addr, s->ivshmem_size) == -1) {
                error_report("Failed to munmap shared memory %s",
                             strerror(errno));
            }

            fd = qemu_get_ram_fd(memory_region_get_ram_addr(&s->ivshmem));
            if (fd != -1) {
                close(fd);
            }
        }

        vmstate_unregister_ram(&s->ivshmem, DEVICE(dev));
        memory_region_del_subregion(&s->bar, &s->ivshmem);
    }

    if (s->eventfd_chr) {
        for (i = 0; i < s->vectors; i++) {
            if (s->eventfd_chr[i]) {
                qemu_chr_free(s->eventfd_chr[i]);
            }
        }
        g_free(s->eventfd_chr);
    }

    if (s->peers) {
        for (i = 0; i < s->nb_peers; i++) {
            close_peer_eventfds(s, i);
        }
        g_free(s->peers);
    }

    if (ivshmem_has_feature(s, IVSHMEM_MSI)) {
        msix_uninit_exclusive_bar(dev);
    }

    g_free(s->msi_vectors);
}
Example 4
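pc_dimm_check_memdev_is_busy() is a link-property check callback: a memory backend whose region is already mapped is in use by another device, so setting it as this DIMM's memdev is refused.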
static void pc_dimm_check_memdev_is_busy(Object *obj, const char *name,
                                         Object *val, Error **errp)
{
    MemoryRegion *mr;

    mr = host_memory_backend_get_memory(MEMORY_BACKEND(val), errp);
    if (memory_region_is_mapped(mr)) {
        char *path = object_get_canonical_path_component(val);
        error_setg(errp, "can't use already busy memdev: %s", path);
        g_free(path);
    } else {
        qdev_prop_allow_set_link_before_realize(obj, name, val, errp);
    }
}
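For context, a checker like this is installed when the memdev link property is created on the device. A minimal sketch of that wiring, modeled on the pc-dimm instance_init of the same era; treat the exact property names and the hostmem field as assumptions rather than a verbatim quote:

static void pc_dimm_init(Object *obj)
{
    PCDIMMDevice *dimm = PC_DIMM(obj);

    /* The check callback runs before the link is set, while the device
     * is still unrealized (assumed wiring, see the note above). */
    object_property_add_link(obj, PC_DIMM_MEMDEV_PROP, TYPE_MEMORY_BACKEND,
                             (Object **)&dimm->hostmem,
                             pc_dimm_check_memdev_is_busy,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
}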
Example 5
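gsc_to_pci_forwarding() recomputes which GSC-to-PCI windows should be visible. memory_region_is_mapped() keeps the loop idempotent: each alias is added only if it is absent and removed only if it is present.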
/*
 * Dino can forward memory accesses from the CPU in the range between
 * 0xf0800000 and 0xff000000 to the PCI bus.
 */
static void gsc_to_pci_forwarding(DinoState *s)
{
    uint32_t io_addr_en, tmp;
    int enabled, i;

    tmp = extract32(s->io_control, 7, 2);
    enabled = (tmp == 0x01);
    io_addr_en = s->io_addr_en;

    memory_region_transaction_begin();
    for (i = 1; i < 31; i++) {
        MemoryRegion *mem = &s->pci_mem_alias[i];
        if (enabled && (io_addr_en & (1U << i))) {
            if (!memory_region_is_mapped(mem)) {
                uint32_t addr = 0xf0000000 + i * DINO_MEM_CHUNK_SIZE;
                memory_region_add_subregion(get_system_memory(), addr, mem);
            }
        } else if (memory_region_is_mapped(mem)) {
            memory_region_del_subregion(get_system_memory(), mem);
        }
    }
    memory_region_transaction_commit();
}
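Wrapping the loop in memory_region_transaction_begin()/commit() batches up to 30 add/del operations into a single topology update, so QEMU rebuilds the flattened memory view once rather than once per window.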
Example 6
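ivshmem_exit() is a later revision of the teardown in Example 3, with the same mapped-region guard but using memory_region_size() and memory_region_get_fd() in place of cached state.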
static void ivshmem_exit(PCIDevice *dev)
{
    IVShmemState *s = IVSHMEM_COMMON(dev);
    int i;

    if (s->migration_blocker) {
        migrate_del_blocker(s->migration_blocker);
        error_free(s->migration_blocker);
    }

    if (memory_region_is_mapped(s->ivshmem_bar2)) {
        if (!s->hostmem) {
            void *addr = memory_region_get_ram_ptr(s->ivshmem_bar2);
            int fd;

            if (munmap(addr, memory_region_size(s->ivshmem_bar2)) == -1) {
                error_report("Failed to munmap shared memory %s",
                             strerror(errno));
            }

            fd = memory_region_get_fd(s->ivshmem_bar2);
            close(fd);
        }

        vmstate_unregister_ram(s->ivshmem_bar2, DEVICE(dev));
    }

    if (s->peers) {
        for (i = 0; i < s->nb_peers; i++) {
            close_peer_eventfds(s, i);
        }
        g_free(s->peers);
    }

    if (ivshmem_has_feature(s, IVSHMEM_MSI)) {
        msix_uninit_exclusive_bar(dev);
    }

    g_free(s->msi_vectors);
}
Example 7
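memory_region_allocate_system_memory() builds guest RAM out of per-node memdevs. A backend region that is already mapped means the same memdev was given to more than one -numa option, which is reported as a fatal configuration error.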
void memory_region_allocate_system_memory(MemoryRegion *mr, Object *owner,
                                          const char *name,
                                          uint64_t ram_size)
{
    uint64_t addr = 0;
    int i;

    if (nb_numa_nodes == 0 || !have_memdevs) {
        allocate_system_memory_nonnuma(mr, owner, name, ram_size);
        return;
    }

    memory_region_init(mr, owner, name, ram_size);
    for (i = 0; i < MAX_NODES; i++) {
        Error *local_err = NULL;
        uint64_t size = numa_info[i].node_mem;
        HostMemoryBackend *backend = numa_info[i].node_memdev;
        if (!backend) {
            continue;
        }
        MemoryRegion *seg = host_memory_backend_get_memory(backend, &local_err);
        if (local_err) {
            error_report_err(local_err);
            exit(1);
        }

        if (memory_region_is_mapped(seg)) {
            char *path = object_get_canonical_path_component(OBJECT(backend));
            error_report("memory backend %s is used multiple times. Each "
                         "-numa option must use a different memdev value.",
                         path);
            exit(1);
        }

        memory_region_add_subregion(mr, addr, seg);
        vmstate_register_ram_global(seg);
        addr += size;
    }
}
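On the command line this corresponds to giving each node its own backend, e.g. -object memory-backend-ram,id=mem0,size=1G -numa node,nodeid=0,memdev=mem0, with a distinct id/memdev pair per node.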
Example 8
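ivshmem_read() handles messages from the ivshmem server: peer IDs, peer eventfds, and the shared-memory fd. When the shared-memory fd arrives (posn == -1), memory_region_is_mapped() rejects a second initialization before the fd is mmapped and the region is added to BAR2.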
static void ivshmem_read(void *opaque, const uint8_t *buf, int size)
{
    IVShmemState *s = opaque;
    int incoming_fd;
    int new_eventfd;
    int64_t incoming_posn;
    Error *err = NULL;
    Peer *peer;

    if (!fifo_update_and_get_i64(s, buf, size, &incoming_posn)) {
        return;
    }

    if (incoming_posn < -1) {
        IVSHMEM_DPRINTF("invalid incoming_posn %" PRId64 "\n", incoming_posn);
        return;
    }

    /* pick off s->server_chr->msgfd and store it, posn should accompany msg */
    incoming_fd = qemu_chr_fe_get_msgfd(s->server_chr);
    IVSHMEM_DPRINTF("posn is %" PRId64 ", fd is %d\n",
                    incoming_posn, incoming_fd);

    /* make sure we have enough space for this peer */
    if (incoming_posn >= s->nb_peers) {
        if (resize_peers(s, incoming_posn + 1) < 0) {
            error_report("failed to resize peers array");
            if (incoming_fd != -1) {
                close(incoming_fd);
            }
            return;
        }
    }

    peer = &s->peers[incoming_posn];

    if (incoming_fd == -1) {
        /* if posn is positive and unseen before then this is our posn */
        if (incoming_posn >= 0 && s->vm_id == -1) {
            /* receive our posn */
            s->vm_id = incoming_posn;
        } else {
            /* otherwise an fd == -1 means an existing peer has gone away */
            IVSHMEM_DPRINTF("posn %" PRId64 " has gone away\n", incoming_posn);
            close_peer_eventfds(s, incoming_posn);
        }
        return;
    }

    /* if the position is -1, then it's shared memory region fd */
    if (incoming_posn == -1) {
        void *map_ptr;

        if (memory_region_is_mapped(&s->ivshmem)) {
            error_report("shm already initialized");
            close(incoming_fd);
            return;
        }

        if (check_shm_size(s, incoming_fd, &err) == -1) {
            error_report_err(err);
            close(incoming_fd);
            return;
        }

        /* mmap the region and map into the BAR2 */
        map_ptr = mmap(0, s->ivshmem_size, PROT_READ|PROT_WRITE, MAP_SHARED,
                       incoming_fd, 0);
        if (map_ptr == MAP_FAILED) {
            error_report("Failed to mmap shared memory %s", strerror(errno));
            close(incoming_fd);
            return;
        }
        memory_region_init_ram_ptr(&s->ivshmem, OBJECT(s),
                                   "ivshmem.bar2", s->ivshmem_size, map_ptr);
        qemu_set_ram_fd(s->ivshmem.ram_addr, incoming_fd);
        vmstate_register_ram(&s->ivshmem, DEVICE(s));

        IVSHMEM_DPRINTF("guest h/w addr = %p, size = %" PRIu64 "\n",
                        map_ptr, s->ivshmem_size);

        memory_region_add_subregion(&s->bar, 0, &s->ivshmem);

        return;
    }

    /* each peer has an associated array of eventfds, and we keep
     * track of how many eventfds received so far */
    /* get a new eventfd: */
    if (peer->nb_eventfds >= s->vectors) {
        error_report("Too many eventfd received, device has %d vectors",
                     s->vectors);
        close(incoming_fd);
        return;
    }

    new_eventfd = peer->nb_eventfds++;

    /* this is an eventfd for a particular peer VM */
    IVSHMEM_DPRINTF("eventfds[%" PRId64 "][%d] = %d\n", incoming_posn,
                    new_eventfd, incoming_fd);
    event_notifier_init_fd(&peer->eventfds[new_eventfd], incoming_fd);
    fcntl_setfl(incoming_fd, O_NONBLOCK); /* msix/irqfd poll non block */

    if (incoming_posn == s->vm_id) {
        setup_interrupt(s, new_eventfd);
    }

    if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD)) {
        ivshmem_add_eventfd(s, incoming_posn, new_eventfd);
    }
}
Example 9
microblaze_generic_fdt_init() is the board init function for an FDT-driven MicroBlaze machine: peripherals are instantiated from the device tree, and main RAM is mapped into system memory only if the FDT machinery has not already mapped it.
static void
microblaze_generic_fdt_init(MachineState *machine)
{
    CPUState *cpu;
    ram_addr_t ram_kernel_base = 0, ram_kernel_size = 0;
    void *fdt = NULL;
    const char *dtb_arg, *hw_dtb_arg;
    QemuOpts *machine_opts;
    int fdt_size;

    /* for memory node */
    char node_path[DT_PATH_LENGTH];
    FDTMachineInfo *fdti;
    MemoryRegion *main_mem;

    /* For DMA node */
    char dma_path[DT_PATH_LENGTH] = { 0 };
    uint32_t memory_phandle;

    /* For Ethernet nodes */
    char **eth_paths;
    char *phy_path;
    char *mdio_path;
    uint32_t n_eth;
    uint32_t prop_val;

    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (!machine_opts) {
        goto no_dtb_arg;
    }
    dtb_arg = qemu_opt_get(machine_opts, "dtb");
    hw_dtb_arg = qemu_opt_get(machine_opts, "hw-dtb");
    if (!dtb_arg && !hw_dtb_arg) {
        goto no_dtb_arg;
    }

    /* If the user only provided a -dtb, use it as the hw description.  */
    if (!hw_dtb_arg) {
        hw_dtb_arg = dtb_arg;
    }

    fdt = load_device_tree(hw_dtb_arg, &fdt_size);
    if (!fdt) {
        hw_error("Error: Unable to load Device Tree %s\n", hw_dtb_arg);
        return;
    }

    if (IS_PETALINUX_MACHINE) {
        /* Mark the simple-bus as incompatible as it breaks the Microblaze
         * PetaLinux boot
         */
        add_to_compat_table(NULL, "compatible:simple-bus", NULL);
    }

    /* find memory node or add new one if needed */
    while (qemu_fdt_get_node_by_name(fdt, node_path, "memory")) {
        qemu_fdt_add_subnode(fdt, "/memory@0");
        qemu_fdt_setprop_cells(fdt, "/memory@0", "reg", 0, machine->ram_size);
    }

    if (!qemu_fdt_getprop(fdt, "/memory", "compatible", NULL, 0, NULL)) {
        qemu_fdt_setprop_string(fdt, "/memory", "compatible",
                                "qemu:memory-region");
        qemu_fdt_setprop_cells(fdt, "/memory", "qemu,ram", 1);
    }

    if (IS_PETALINUX_MACHINE) {
        /* If using a *-plnx machine, the AXI DMA memory links are not included
         * in the DTB by default. To avoid seg faults, add the links in here if
         * they have not already been added by the user
         */
        qemu_fdt_get_node_by_name(fdt, dma_path, "dma");

        if (strcmp(dma_path, "") != 0) {
            memory_phandle = qemu_fdt_check_phandle(fdt, node_path);

            if (!memory_phandle) {
                memory_phandle = qemu_fdt_alloc_phandle(fdt);

                qemu_fdt_setprop_cells(fdt, "/memory", "linux,phandle",
                                       memory_phandle);
                qemu_fdt_setprop_cells(fdt, "/memory", "phandle",
                                       memory_phandle);
            }

            if (!qemu_fdt_getprop(fdt, dma_path, "sg", NULL, 0, NULL)) {
                qemu_fdt_setprop_phandle(fdt, dma_path, "sg", node_path);
            }

            if (!qemu_fdt_getprop(fdt, dma_path, "s2mm", NULL, 0, NULL)) {
                qemu_fdt_setprop_phandle(fdt, dma_path, "s2mm", node_path);
            }

            if (!qemu_fdt_getprop(fdt, dma_path, "mm2s", NULL, 0, NULL)) {
                qemu_fdt_setprop_phandle(fdt, dma_path, "mm2s", node_path);
            }
        }

        /* Copy phyaddr value from phy node reg property */
        n_eth = qemu_fdt_get_n_nodes_by_name(fdt, &eth_paths, "ethernet");

        while (n_eth--) {
            mdio_path = qemu_fdt_get_child_by_name(fdt, eth_paths[n_eth],
                                                       "mdio");
            if (mdio_path) {
                phy_path = qemu_fdt_get_child_by_name(fdt, mdio_path,
                                                          "phy");
                if (phy_path) {
                    prop_val = qemu_fdt_getprop_cell(fdt, phy_path, "reg", NULL, 0,
                                                     NULL, &error_abort);
                    qemu_fdt_setprop_cell(fdt, eth_paths[n_eth], "xlnx,phyaddr",
                                          prop_val);
                    g_free(phy_path);
                } else {
                    qemu_log_mask(LOG_GUEST_ERROR, "phy not found in %s",
                                  mdio_path);
                }
                g_free(mdio_path);
            }
            g_free(eth_paths[n_eth]);
        }
        g_free(eth_paths);
    }

    /* Instantiate peripherals from the FDT.  */
    fdti = fdt_generic_create_machine(fdt, NULL);
    main_mem = MEMORY_REGION(object_resolve_path(node_path, NULL));

    ram_kernel_base = object_property_get_int(OBJECT(main_mem), "addr", NULL);
    ram_kernel_size = object_property_get_int(OBJECT(main_mem), "size", NULL);

    if (!memory_region_is_mapped(main_mem)) {
        /* If the memory region is not mapped, map it here.
         * It has to be mapped somewhere, so guess that the base address
         * is where the kernel starts
         */
        memory_region_add_subregion(get_system_memory(), ram_kernel_base,
                                    main_mem);

        if (ram_kernel_base && IS_PETALINUX_MACHINE) {
            /* If the memory added is at an offset from zero QEMU will error
             * when an ISR/exception is triggered. Add a small amount of hack
             * RAM to handle this.
             */
            MemoryRegion *hack_ram = g_new(MemoryRegion, 1);
            memory_region_init_ram(hack_ram, NULL, "hack_ram", 0x1000,
                                   &error_abort);
            vmstate_register_ram_global(hack_ram);
            memory_region_add_subregion(get_system_memory(), 0, hack_ram);
        }
    }

    fdt_init_destroy_fdti(fdti);

    fdt_g = fdt;
    microblaze_load_kernel(MICROBLAZE_CPU(first_cpu), ram_kernel_base,
                           ram_kernel_size, machine->initrd_filename, NULL,
                           microblaze_generic_fdt_reset, 0, fdt, fdt_size);

    /* Register FDT to prop mapper for secondary cores.  */
    cpu = CPU_NEXT(first_cpu);
    while (cpu) {
        qemu_register_reset(secondary_cpu_reset, cpu);
        cpu = CPU_NEXT(cpu);
    }

    return;
no_dtb_arg:
    if (!QTEST_RUNNING) {
        hw_error("DTB must be specified for %s machine model\n", MACHINE_NAME);
    }
    return;
}
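Taken together, the examples use memory_region_is_mapped() for two purposes: making map/unmap paths idempotent (Examples 2, 5, and 9) and detecting that a region is already in use (Examples 4, 7, and 8). A minimal sketch of the first idiom, assuming parent and mem are already-initialized MemoryRegions; the helper name is hypothetical, not from the QEMU tree:

/* Toggle a window without double-mapping or double-removing it. */
static void set_window_enabled(MemoryRegion *parent, MemoryRegion *mem,
                               hwaddr addr, bool enable)
{
    memory_region_transaction_begin();
    if (enable && !memory_region_is_mapped(mem)) {
        memory_region_add_subregion(parent, addr, mem);
    } else if (!enable && memory_region_is_mapped(mem)) {
        memory_region_del_subregion(parent, mem);
    }
    memory_region_transaction_commit();
}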