Example #1
/* warning: addr must be aligned. The RAM page is not marked dirty for
   DIRTY_MEMORY_CODE, so translated code on the page is not invalidated.
   This is useful if the dirty bits are used to track modified PTEs. */
void glue(address_space_stl_notdirty, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    uint8_t dirty_log_mask;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true);
    if (l < 4 || !IS_DIRECT(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        ptr = MAP_RAM(mr, addr1);
        stl_p(ptr, val);

        dirty_log_mask = memory_region_get_dirty_log_mask(mr);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
        cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
                                            4, dirty_log_mask);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
}
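
As a usage illustration only: a minimal sketch of how the plain (non-_cached) instantiation of this helper, address_space_stl_notdirty(), might be called to update a guest PTE without invalidating translated code on that page. The wrapper name update_pte and its arguments (as, pte_addr, pte_val) are hypothetical and not taken from QEMU.

/* Hypothetical caller: store a 32-bit PTE; the DIRTY_MEMORY_CODE bit is
 * deliberately left unset, so TBs on the page are not invalidated. */
static void update_pte(AddressSpace *as, hwaddr pte_addr, uint32_t pte_val)
{
    MemTxResult res;

    address_space_stl_notdirty(as, pte_addr, pte_val,
                               MEMTXATTRS_UNSPECIFIED, &res);
    if (res != MEMTX_OK) {
        /* the store went through MMIO dispatch or failed; handle it here */
    }
}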
Example #2
static void pci_ivshmem_exit(PCIDevice *dev)
{
    IVShmemState *s = IVSHMEM(dev);
    int i;

    fifo8_destroy(&s->incoming_fifo);

    if (s->migration_blocker) {
        migrate_del_blocker(s->migration_blocker);
        error_free(s->migration_blocker);
    }

    if (memory_region_is_mapped(&s->ivshmem)) {
        if (!s->hostmem) {
            void *addr = memory_region_get_ram_ptr(&s->ivshmem);
            int fd;

            if (munmap(addr, s->ivshmem_size) == -1) {
                error_report("Failed to munmap shared memory %s",
                             strerror(errno));
            }

            fd = qemu_get_ram_fd(memory_region_get_ram_addr(&s->ivshmem));
            if (fd != -1) {
                close(fd);
            }
        }

        vmstate_unregister_ram(&s->ivshmem, DEVICE(dev));
        memory_region_del_subregion(&s->bar, &s->ivshmem);
    }

    if (s->eventfd_chr) {
        for (i = 0; i < s->vectors; i++) {
            if (s->eventfd_chr[i]) {
                qemu_chr_free(s->eventfd_chr[i]);
            }
        }
        g_free(s->eventfd_chr);
    }

    if (s->peers) {
        for (i = 0; i < s->nb_peers; i++) {
            close_peer_eventfds(s, i);
        }
        g_free(s->peers);
    }

    if (ivshmem_has_feature(s, IVSHMEM_MSI)) {
        msix_uninit_exclusive_bar(dev);
    }

    g_free(s->msi_vectors);
}
Example #3
/* Create the shared memory BAR when we are not using the server, so that the
 * BAR can be set up and the memory mapped immediately. */
static int create_shared_memory_BAR(IVShmemState *s, int fd, uint8_t attr,
                                    Error **errp)
{
    void *ptr;

    ptr = mmap(0, s->ivshmem_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
    if (ptr == MAP_FAILED) {
        error_setg_errno(errp, errno, "Failed to mmap shared memory");
        return -1;
    }

    memory_region_init_ram_ptr(&s->ivshmem, OBJECT(s), "ivshmem.bar2",
                               s->ivshmem_size, ptr);
    qemu_set_ram_fd(memory_region_get_ram_addr(&s->ivshmem), fd);
    vmstate_register_ram(&s->ivshmem, DEVICE(s));
    memory_region_add_subregion(&s->bar, 0, &s->ivshmem);

    /* region for shared memory */
    pci_register_bar(PCI_DEVICE(s), 2, attr, &s->bar);

    return 0;
}
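
A hedged caller sketch for the function above: obtain a file descriptor for a named POSIX shared memory object and hand it to create_shared_memory_BAR(). The wrapper name setup_shmem_without_server and its shmobj parameter are invented for this illustration; only shm_open(), ftruncate() and the QEMU error helpers are real APIs.

static void setup_shmem_without_server(IVShmemState *s, const char *shmobj,
                                       uint8_t attr, Error **errp)
{
    int fd = shm_open(shmobj, O_RDWR | O_CREAT, S_IRWXU);

    if (fd < 0) {
        error_setg_errno(errp, errno, "could not open %s", shmobj);
        return;
    }
    if (ftruncate(fd, s->ivshmem_size) < 0) {
        error_setg_errno(errp, errno, "could not resize %s", shmobj);
        close(fd);
        return;
    }
    /* on success the fd is kept by the memory region via qemu_set_ram_fd() */
    if (create_shared_memory_BAR(s, fd, attr, errp) < 0) {
        close(fd);
    }
}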
Example #4
/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te, tn;
    hwaddr iotlb, xlat, sz, paddr_page;
    target_ulong vaddr_page;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);

    assert_cpu_is_self(cpu);

    if (size <= TARGET_PAGE_SIZE) {
        sz = TARGET_PAGE_SIZE;
    } else {
        tlb_add_large_page(env, mmu_idx, vaddr, size);
        sz = size;
    }
    vaddr_page = vaddr & TARGET_PAGE_MASK;
    paddr_page = paddr & TARGET_PAGE_MASK;

    section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
                                                &xlat, &sz, attrs, &prot);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr_page;
    if (size < TARGET_PAGE_SIZE) {
        /*
         * Slow-path the TLB entries; we will repeat the MMU check and TLB
         * fill on every access.
         */
        address |= TLB_RECHECK;
    }
    if (!memory_region_is_ram(section->mr) &&
        !memory_region_is_romd(section->mr)) {
        /* IO memory case */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* TLB_MMIO for rom/romd handled below */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr_page,
                                            paddr_page, xlat, prot, &address);

    index = tlb_index(env, mmu_idx, vaddr_page);
    te = tlb_entry(env, mmu_idx, vaddr_page);

    /*
     * Hold the TLB lock for the rest of the function. We could acquire/release
     * the lock several times in the function, but it is faster to amortize the
     * acquisition cost by acquiring it just once. Note that this leads to
     * a longer critical section, but this is not a concern since the TLB lock
     * is unlikely to be contended.
     */
    qemu_spin_lock(&env->tlb_c.lock);

    /* Note that the tlb is no longer clean.  */
    env->tlb_c.dirty |= 1 << mmu_idx;

    /* Make sure there's no cached translation for the new page.  */
    tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page);

    /*
     * Only evict the old entry to the victim tlb if it's for a
     * different page; otherwise just overwrite the stale data.
     */
    if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) {
        unsigned vidx = env->tlb_d[mmu_idx].vindex++ % CPU_VTLB_SIZE;
        CPUTLBEntry *tv = &env->tlb_v_table[mmu_idx][vidx];

        /* Evict the old entry into the victim tlb.  */
        copy_tlb_helper_locked(tv, te);
        env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];
        tlb_n_used_entries_dec(env, mmu_idx);
    }

    /* refill the tlb */
    /*
     * At this point iotlb contains a physical section number in the lower
     * TARGET_PAGE_BITS, and either
     *  + the ram_addr_t of the page base of the target RAM (if NOTDIRTY or ROM)
     *  + the offset within section->mr of the page base (otherwise)
     * We subtract the vaddr_page (which is page aligned and thus won't
     * disturb the low bits) to give an offset which can be added to the
     * (non-page-aligned) vaddr of the eventual memory access to get
     * the MemoryRegion offset for the access. Note that the vaddr we
     * subtract here is that of the page base, and not the same as the
     * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
     */
    env->iotlb[mmu_idx][index].addr = iotlb - vaddr_page;
    env->iotlb[mmu_idx][index].attrs = attrs;

    /* Now calculate the new entry */
    tn.addend = addend - vaddr_page;
    if (prot & PAGE_READ) {
        tn.addr_read = address;
    } else {
        tn.addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        tn.addr_code = code_address;
    } else {
        tn.addr_code = -1;
    }

    tn.addr_write = -1;
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback.  */
            tn.addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(
                       memory_region_get_ram_addr(section->mr) + xlat)) {
            tn.addr_write = address | TLB_NOTDIRTY;
        } else {
            tn.addr_write = address;
        }
        if (prot & PAGE_WRITE_INV) {
            tn.addr_write |= TLB_INVALID_MASK;
        }
    }

    copy_tlb_helper_locked(te, &tn);
    tlb_n_used_entries_inc(env, mmu_idx);
    qemu_spin_unlock(&env->tlb_c.lock);
}
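
The offset arithmetic described in the long comment above can be checked with a small standalone program. The page size, section index and addresses below are made-up values; the recovery step mirrors the (iotlb_entry.addr & TARGET_PAGE_MASK) + vaddr computation performed later on the access path.

#include <assert.h>
#include <stdint.h>

int main(void)
{
    const uint64_t page_mask  = ~(uint64_t)0xfff;  /* 4 KiB target pages */
    uint64_t section_index    = 5;        /* lives in the low 12 bits */
    uint64_t page_base_offset = 0x3000;   /* page-aligned offset into section->mr */
    uint64_t vaddr_page       = 0x7f0000; /* guest virtual page base */
    uint64_t vaddr            = 0x7f0a24; /* the eventual, unaligned access */

    uint64_t iotlb  = page_base_offset + section_index;
    uint64_t stored = iotlb - vaddr_page;   /* what ends up in iotlb[...].addr */

    /* the access path adds the full vaddr back in to get the MR offset */
    uint64_t mr_offset = (stored & page_mask) + vaddr;
    assert(mr_offset == 0x3a24);            /* 0x3000 + the 0xa24 page offset */
    return 0;
}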
Example #5
static void ivshmem_read(void *opaque, const uint8_t *buf, int size)
{
    IVShmemState *s = opaque;
    int incoming_fd;
    int new_eventfd;
    int64_t incoming_posn;
    Error *err = NULL;
    Peer *peer;

    if (!fifo_update_and_get_i64(s, buf, size, &incoming_posn)) {
        return;
    }

    if (incoming_posn < -1) {
        IVSHMEM_DPRINTF("invalid incoming_posn %" PRId64 "\n", incoming_posn);
        return;
    }

    /* pick off s->server_chr->msgfd and store it; the posn should accompany the msg */
    incoming_fd = qemu_chr_fe_get_msgfd(s->server_chr);
    IVSHMEM_DPRINTF("posn is %" PRId64 ", fd is %d\n",
                    incoming_posn, incoming_fd);

    /* make sure we have enough space for this peer */
    if (incoming_posn >= s->nb_peers) {
        if (resize_peers(s, incoming_posn + 1) < 0) {
            error_report("failed to resize peers array");
            if (incoming_fd != -1) {
                close(incoming_fd);
            }
            return;
        }
    }

    peer = &s->peers[incoming_posn];

    if (incoming_fd == -1) {
        /* if posn is non-negative and we have no ID yet, this is our posn */
        if (incoming_posn >= 0 && s->vm_id == -1) {
            /* receive our posn */
            s->vm_id = incoming_posn;
        } else {
            /* otherwise an fd == -1 means an existing peer has gone away */
            IVSHMEM_DPRINTF("posn %" PRId64 " has gone away\n", incoming_posn);
            close_peer_eventfds(s, incoming_posn);
        }
        return;
    }

    /* if the position is -1, then it's the shared memory region fd */
    if (incoming_posn == -1) {
        void *map_ptr;

        if (memory_region_is_mapped(&s->ivshmem)) {
            error_report("shm already initialized");
            close(incoming_fd);
            return;
        }

        if (check_shm_size(s, incoming_fd, &err) == -1) {
            error_report_err(err);
            close(incoming_fd);
            return;
        }

        /* mmap the region and map it into BAR 2 */
        map_ptr = mmap(0, s->ivshmem_size, PROT_READ|PROT_WRITE, MAP_SHARED,
                       incoming_fd, 0);
        if (map_ptr == MAP_FAILED) {
            error_report("Failed to mmap shared memory %s", strerror(errno));
            close(incoming_fd);
            return;
        }
        memory_region_init_ram_ptr(&s->ivshmem, OBJECT(s),
                                   "ivshmem.bar2", s->ivshmem_size, map_ptr);
        qemu_set_ram_fd(memory_region_get_ram_addr(&s->ivshmem),
                        incoming_fd);
        vmstate_register_ram(&s->ivshmem, DEVICE(s));

        IVSHMEM_DPRINTF("guest h/w addr = %p, size = %" PRIu64 "\n",
                        map_ptr, s->ivshmem_size);

        memory_region_add_subregion(&s->bar, 0, &s->ivshmem);

        return;
    }

    /* each peer has an associated array of eventfds, and we keep
     * track of how many eventfds have been received so far */
    /* get a new eventfd: */
    if (peer->nb_eventfds >= s->vectors) {
        error_report("Too many eventfd received, device has %d vectors",
                     s->vectors);
        close(incoming_fd);
        return;
    }

    new_eventfd = peer->nb_eventfds++;

    /* this is an eventfd for a particular peer VM */
    IVSHMEM_DPRINTF("eventfds[%" PRId64 "][%d] = %d\n", incoming_posn,
                    new_eventfd, incoming_fd);
    event_notifier_init_fd(&peer->eventfds[new_eventfd], incoming_fd);
    fcntl_setfl(incoming_fd, O_NONBLOCK); /* msix/irqfd polling must not block */

    if (incoming_posn == s->vm_id) {
        setup_interrupt(s, new_eventfd);
    }

    if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD)) {
        ivshmem_add_eventfd(s, incoming_posn, new_eventfd);
    }
}
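
For reference, the (incoming_posn, incoming_fd) cases that ivshmem_read() distinguishes above can be condensed into a tiny classifier. The enum and function below are invented for this sketch and do not exist in QEMU; posn values below -1 are rejected before this logic is reached.

#include <stdint.h>

typedef enum {
    MSG_OWN_ID,        /* fd == -1, posn >= 0 and we have no ID yet       */
    MSG_PEER_GONE,     /* fd == -1, posn names a peer that went away      */
    MSG_SHMEM_FD,      /* posn == -1, fd carries the shared memory region */
    MSG_PEER_EVENTFD,  /* posn >= 0, fd is one of that peer's eventfds    */
} IvshmemMsgKind;

static IvshmemMsgKind classify_msg(int64_t posn, int fd, int64_t own_id)
{
    if (fd == -1) {
        return (posn >= 0 && own_id == -1) ? MSG_OWN_ID : MSG_PEER_GONE;
    }
    return (posn == -1) ? MSG_SHMEM_FD : MSG_PEER_EVENTFD;
}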