Example 1
static void mvc_fast_memmove(CPUS390XState *env, uint32_t l, uint64_t dest,
                             uint64_t src)
{
    hwaddr dest_phys;
    hwaddr src_phys;
    hwaddr len = l;
    void *dest_p;
    void *src_p;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
    int flags;

    if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags)) {
        cpu_stb_data(env, dest, 0);
        cpu_abort(env, "should never reach here");
    }
    dest_phys |= dest & ~TARGET_PAGE_MASK;

    if (mmu_translate(env, src, 0, asc, &src_phys, &flags)) {
        cpu_ldub_data(env, src);
        cpu_abort(env, "should never reach here");
    }
    src_phys |= src & ~TARGET_PAGE_MASK;

    dest_p = cpu_physical_memory_map(dest_phys, &len, 1);
    src_p = cpu_physical_memory_map(src_phys, &len, 0);

    memmove(dest_p, src_p, len);

    cpu_physical_memory_unmap(dest_p, len, 1, len);
    cpu_physical_memory_unmap(src_p, len, 0, len);
}
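
QEMU declares this pair in include/exec/cpu-common.h as void *cpu_physical_memory_map(hwaddr addr, hwaddr *plen, bool is_write) and void cpu_physical_memory_unmap(void *buffer, hwaddr len, bool is_write, hwaddr access_len); on return, *plen holds how much of the range was actually mapped. The snippet above reuses one len for both mappings, so a destination mapping that comes back short goes unnoticed. A minimal sketch of the same copy with one length per mapping and explicit checks; phys_memmove is an illustrative name, not a QEMU API:

#include "qemu/osdep.h"
#include "exec/cpu-common.h"

/* Illustrative helper: copy 'size' bytes between two guest-physical
 * ranges, bailing out unless both ranges map in full. */
static int phys_memmove(hwaddr dest, hwaddr src, hwaddr size)
{
    hwaddr dlen = size;                 /* one length per mapping */
    int ret = -1;
    void *dp = cpu_physical_memory_map(dest, &dlen, true);

    if (!dp) {
        return -1;
    }
    if (dlen == size) {
        hwaddr slen = size;
        void *sp = cpu_physical_memory_map(src, &slen, false);

        if (sp) {
            if (slen == size) {
                memmove(dp, sp, size);
                ret = 0;
            }
            /* nothing was written through the source mapping */
            cpu_physical_memory_unmap(sp, slen, false, 0);
        }
    }
    /* access_len tells unmap how much data to write back and mark dirty */
    cpu_physical_memory_unmap(dp, dlen, true, ret == 0 ? size : 0);
    return ret;
}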
Example 2
static int onedram_write_shm(S5pc1xxOneDRAMState *s,
                             const uint8_t *buf, uint32_t offset,
                             uint32_t size)
{
    uint8_t *src_base;
    target_phys_addr_t phy_base, src_len;
    phy_base = ONEDRAM_SHARED_BASE + offset;
    src_len = size;

    if (!onedram_can_access_shm(s)) {
        fprintf(stderr,
                "onedram_write_shm : can't access to fmt\n");
        return 0;
    }

    if (size > ONEDRAM_IN_FMT_SIZE) {
        fprintf(stderr,
                "onedram_write_shm : size exceeds the maximum\n");
        return 0;
    }

    src_base = cpu_physical_memory_map(phy_base, &src_len, 1);

    if (!src_base) {
        fprintf(stderr,
                "onedram_write_shm : src_base is NULL\n");
        return 0;
    }

    if (src_len < size) {
        /* the mapping came back shorter than requested; writing the
         * full 'size' would run past its end */
        fprintf(stderr,
                "onedram_write_shm : mapped length too small\n");
        cpu_physical_memory_unmap(src_base, src_len, 1, 0);
        return 0;
    }

    memcpy(src_base, buf, size);

    cpu_physical_memory_unmap(src_base, src_len, 1, src_len);

    return 1;
}
Example 3
static size_t write_run(WinDumpPhyMemRun64 *run, int fd, Error **errp)
{
    void *buf;
    uint64_t addr = run->BasePage << TARGET_PAGE_BITS;
    uint64_t size = run->PageCount << TARGET_PAGE_BITS;
    uint64_t len, l;
    size_t total = 0;

    while (size) {
        len = size;

        buf = cpu_physical_memory_map(addr, &len, false);
        if (!buf) {
            error_setg(errp, "win-dump: failed to map physical range"
                             " 0x%016" PRIx64 "-0x%016" PRIx64, addr, addr + size - 1);
            return 0;
        }

        l = qemu_write_full(fd, buf, len);
        cpu_physical_memory_unmap(buf, len, false, len);
        if (l != len) {
            error_setg(errp, QERR_IO_ERROR);
            return 0;
        }

        addr += l;
        size -= l;
        total += l;
    }

    return total;
}
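
The loop above is the standard way to consume a range that may not map in one piece: take whatever length the memory core returns, use it, advance, repeat. The same chunking skeleton in generic form (same headers as the sketch under Example 1); for_each_mapped_chunk and its callback are hypothetical names used for illustration:

static int for_each_mapped_chunk(hwaddr addr, hwaddr size,
                                 int (*process)(void *buf, hwaddr len,
                                                void *opaque),
                                 void *opaque)
{
    while (size) {
        hwaddr len = size;
        void *buf = cpu_physical_memory_map(addr, &len, false);

        if (!buf) {
            return -1;              /* e.g. the range hit MMIO */
        }
        if (process(buf, len, opaque) < 0) {
            cpu_physical_memory_unmap(buf, len, false, 0);
            return -1;
        }
        cpu_physical_memory_unmap(buf, len, false, 0);

        addr += len;                /* advance by what was mapped */
        size -= len;
    }
    return 0;
}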
Example 4
static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int i;
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        target_phys_addr_t l;
        void *p;

        if (!ranges_overlap(start_addr, size, vq->ring_phys, vq->ring_size)) {
            continue;
        }
        l = vq->ring_size;
        p = cpu_physical_memory_map(vq->ring_phys, &l, 1);
        if (!p || l != vq->ring_size) {
            fprintf(stderr, "Unable to map ring buffer for ring %d\n", i);
            return -ENOMEM;
        }
        if (p != vq->ring) {
            fprintf(stderr, "Ring buffer relocated for ring %d\n", i);
            return -EBUSY;
        }
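        /* Only verified, never written through: unmap with is_write = 0
         * and access_len = 0, so nothing is written back or marked dirty. */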
        cpu_physical_memory_unmap(p, l, 0, 0);
    }
    return 0;
}
Example 5
typedef struct SigpSaveArea {
    uint64_t    fprs[16];                       /* 0x0000 */
    uint64_t    grs[16];                        /* 0x0080 */
    PSW         psw;                            /* 0x0100 */
    uint8_t     pad_0x0110[0x0118 - 0x0110];    /* 0x0110 */
    uint32_t    prefix;                         /* 0x0118 */
    uint32_t    fpc;                            /* 0x011c */
    uint8_t     pad_0x0120[0x0124 - 0x0120];    /* 0x0120 */
    uint32_t    todpr;                          /* 0x0124 */
    uint64_t    cputm;                          /* 0x0128 */
    uint64_t    ckc;                            /* 0x0130 */
    uint8_t     pad_0x0138[0x0140 - 0x0138];    /* 0x0138 */
    uint32_t    ars[16];                        /* 0x0140 */
    uint64_t    crs[16];                        /* 0x0180 */
} SigpSaveArea;
QEMU_BUILD_BUG_ON(sizeof(SigpSaveArea) != 512);

int s390_store_status(S390CPU *cpu, hwaddr addr, bool store_arch)
{
    static const uint8_t ar_id = 1;
    SigpSaveArea *sa;
    hwaddr len = sizeof(*sa);
    int i;

    sa = cpu_physical_memory_map(addr, &len, 1);
    if (!sa) {
        return -EFAULT;
    }
    if (len != sizeof(*sa)) {
        cpu_physical_memory_unmap(sa, len, 1, 0);
        return -EFAULT;
    }

    if (store_arch) {
        cpu_physical_memory_write(offsetof(LowCore, ar_access_id), &ar_id, 1);
    }
    for (i = 0; i < 16; ++i) {
        sa->fprs[i] = cpu_to_be64(get_freg(&cpu->env, i)->ll);
    }
    for (i = 0; i < 16; ++i) {
        sa->grs[i] = cpu_to_be64(cpu->env.regs[i]);
    }
    sa->psw.addr = cpu_to_be64(cpu->env.psw.addr);
    sa->psw.mask = cpu_to_be64(get_psw_mask(&cpu->env));
    sa->prefix = cpu_to_be32(cpu->env.psa);
    sa->fpc = cpu_to_be32(cpu->env.fpc);
    sa->todpr = cpu_to_be32(cpu->env.todpr);
    sa->cputm = cpu_to_be64(cpu->env.cputm);
    sa->ckc = cpu_to_be64(cpu->env.ckc >> 8);
    for (i = 0; i < 16; ++i) {
        sa->ars[i] = cpu_to_be32(cpu->env.aregs[i]);
    }
    for (i = 0; i < 16; ++i) {
        sa->crs[i] = cpu_to_be64(cpu->env.cregs[i]);
    }

    cpu_physical_memory_unmap(sa, len, 1, len);

    return 0;
}

typedef struct SigpAdtlSaveArea {
    uint64_t    vregs[32][2];                     /* 0x0000 */
    uint8_t     pad_0x0200[0x0400 - 0x0200];      /* 0x0200 */
    uint64_t    gscb[4];                          /* 0x0400 */
    uint8_t     pad_0x0420[0x1000 - 0x0420];      /* 0x0420 */
} SigpAdtlSaveArea;
QEMU_BUILD_BUG_ON(sizeof(SigpAdtlSaveArea) != 4096);

#define ADTL_GS_MIN_SIZE 2048 /* minimal size of adtl save area for GS */
int s390_store_adtl_status(S390CPU *cpu, hwaddr addr, hwaddr len)
{
    SigpAdtlSaveArea *sa;
    hwaddr save = len;
    int i;

    sa = cpu_physical_memory_map(addr, &save, 1);
    if (!sa) {
        return -EFAULT;
    }
    if (save != len) {
        cpu_physical_memory_unmap(sa, save, 1, 0);
        return -EFAULT;
    }

    if (s390_has_feat(S390_FEAT_VECTOR)) {
        for (i = 0; i < 32; i++) {
            sa->vregs[i][0] = cpu_to_be64(cpu->env.vregs[i][0].ll);
            sa->vregs[i][1] = cpu_to_be64(cpu->env.vregs[i][1].ll);
        }
    }
    if (s390_has_feat(S390_FEAT_GUARDED_STORAGE) && len >= ADTL_GS_MIN_SIZE) {
        for (i = 0; i < 4; i++) {
            sa->gscb[i] = cpu_to_be64(cpu->env.gscb[i]);
        }
    }

    cpu_physical_memory_unmap(sa, len, 1, len);
    return 0;
}
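
Note the two unmap calls in each function above: the early-exit path passes access_len 0 because nothing was stored through the mapping, while the success path passes the full length so the written range is flushed back and marked dirty.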
Example 6
struct sigp_save_area {
    uint64_t    fprs[16];                       /* 0x0000 */
    uint64_t    grs[16];                        /* 0x0080 */
    PSW         psw;                            /* 0x0100 */
    uint8_t     pad_0x0110[0x0118 - 0x0110];    /* 0x0110 */
    uint32_t    prefix;                         /* 0x0118 */
    uint32_t    fpc;                            /* 0x011c */
    uint8_t     pad_0x0120[0x0124 - 0x0120];    /* 0x0120 */
    uint32_t    todpr;                          /* 0x0124 */
    uint64_t    cputm;                          /* 0x0128 */
    uint64_t    ckc;                            /* 0x0130 */
    uint8_t     pad_0x0138[0x0140 - 0x0138];    /* 0x0138 */
    uint32_t    ars[16];                        /* 0x0140 */
    uint64_t    crs[16];                        /* 0x0180 */
};
QEMU_BUILD_BUG_ON(sizeof(struct sigp_save_area) != 512);

int s390_store_status(S390CPU *cpu, hwaddr addr, bool store_arch)
{
    static const uint8_t ar_id = 1;
    struct sigp_save_area *sa;
    hwaddr len = sizeof(*sa);
    int i;

    sa = cpu_physical_memory_map(addr, &len, 1);
    if (!sa) {
        return -EFAULT;
    }
    if (len != sizeof(*sa)) {
        cpu_physical_memory_unmap(sa, len, 1, 0);
        return -EFAULT;
    }

    if (store_arch) {
        cpu_physical_memory_write(offsetof(LowCore, ar_access_id), &ar_id, 1);
    }
    for (i = 0; i < 16; ++i) {
        sa->fprs[i] = cpu_to_be64(get_freg(&cpu->env, i)->ll);
    }
    for (i = 0; i < 16; ++i) {
        sa->grs[i] = cpu_to_be64(cpu->env.regs[i]);
    }
    sa->psw.addr = cpu_to_be64(cpu->env.psw.addr);
    sa->psw.mask = cpu_to_be64(get_psw_mask(&cpu->env));
    sa->prefix = cpu_to_be32(cpu->env.psa);
    sa->fpc = cpu_to_be32(cpu->env.fpc);
    sa->todpr = cpu_to_be32(cpu->env.todpr);
    sa->cputm = cpu_to_be64(cpu->env.cputm);
    sa->ckc = cpu_to_be64(cpu->env.ckc >> 8);
    for (i = 0; i < 16; ++i) {
        sa->ars[i] = cpu_to_be32(cpu->env.aregs[i]);
    }
    for (i = 0; i < 16; ++i) {
        sa->crs[i] = cpu_to_be64(cpu->env.cregs[i]);
    }

    cpu_physical_memory_unmap(sa, len, 1, len);

    return 0;
}

#define ADTL_GS_OFFSET   1024 /* offset of GS data in adtl save area */
#define ADTL_GS_MIN_SIZE 2048 /* minimal size of adtl save area for GS */
int s390_store_adtl_status(S390CPU *cpu, hwaddr addr, hwaddr len)
{
    hwaddr save = len;
    void *mem;

    mem = cpu_physical_memory_map(addr, &save, 1);
    if (!mem) {
        return -EFAULT;
    }
    if (save != len) {
        cpu_physical_memory_unmap(mem, save, 1, 0);
        return -EFAULT;
    }

    /* FIXME: as soon as TCG supports these features, convert cpu->be */
    if (s390_has_feat(S390_FEAT_VECTOR)) {
        memcpy(mem, &cpu->env.vregs, 512);
    }
    if (s390_has_feat(S390_FEAT_GUARDED_STORAGE) && len >= ADTL_GS_MIN_SIZE) {
        memcpy(mem + ADTL_GS_OFFSET, &cpu->env.gscb, 32);
    }

    cpu_physical_memory_unmap(mem, len, 1, len);

    return 0;
}
Example 7
/* PC hardware initialisation */
static void s390_init(MachineState *machine)
{
    ram_addr_t my_ram_size = machine->ram_size;
    MemoryRegion *sysmem = get_system_memory();
    MemoryRegion *ram = g_new(MemoryRegion, 1);
    int increment_size = 20;
    uint8_t *storage_keys;
    void *virtio_region;
    hwaddr virtio_region_len;
    hwaddr virtio_region_start;

    /*
     * The storage increment size is a multiple of 1M and is a power of 2.
     * The number of storage increments must be MAX_STORAGE_INCREMENTS or
     * fewer.
     */
    while ((my_ram_size >> increment_size) > MAX_STORAGE_INCREMENTS) {
        increment_size++;
    }
    my_ram_size = my_ram_size >> increment_size << increment_size;

    /* let's propagate the changed ram size into the global variable. */
    ram_size = my_ram_size;

    /* get a BUS */
    s390_bus = s390_virtio_bus_init(&my_ram_size);
    s390_sclp_init();
    s390_init_ipl_dev(machine->kernel_filename, machine->kernel_cmdline,
                      machine->initrd_filename, ZIPL_FILENAME);
    s390_flic_init();

    /* register hypercalls */
    s390_virtio_register_hcalls();

    /* allocate RAM */
    memory_region_init_ram(ram, NULL, "s390.ram", my_ram_size);
    vmstate_register_ram_global(ram);
    memory_region_add_subregion(sysmem, 0, ram);

    /* clear virtio region */
    virtio_region_len = my_ram_size - ram_size;
    virtio_region_start = ram_size;
    virtio_region = cpu_physical_memory_map(virtio_region_start,
                                            &virtio_region_len, true);
    memset(virtio_region, 0, virtio_region_len);
    cpu_physical_memory_unmap(virtio_region, virtio_region_len, 1,
                              virtio_region_len);

    /* allocate storage keys */
    storage_keys = g_malloc0(my_ram_size / TARGET_PAGE_SIZE);

    /* init CPUs */
    s390_init_cpus(machine->cpu_model, storage_keys);

    /* Create VirtIO network adapters */
    s390_create_virtio_net((BusState *)s390_bus, "virtio-net-s390");
}
Example 8
static void dma_bdrv_cb(void *opaque, int ret)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
    target_phys_addr_t cur_addr, cur_len;
    void *mem;

    dbs->acb = NULL;
    dbs->sector_num += dbs->iov.size / 512;
    dma_bdrv_unmap(dbs);
    qemu_iovec_reset(&dbs->iov);

    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        dbs->common.cb(dbs->common.opaque, ret);
        qemu_iovec_destroy(&dbs->iov);
        qemu_aio_release(dbs);
        return;
    }

    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = cpu_physical_memory_map(cur_addr, &cur_len, !dbs->is_write);
        if (!mem)
            break;
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

    if (dbs->iov.size == 0) {
        cpu_register_map_client(dbs, continue_after_map_failure);
        return;
    }

    if (dbs->is_write) {
        dbs->acb = bdrv_aio_writev(dbs->bs, dbs->sector_num, &dbs->iov,
                                   dbs->iov.size / 512, dma_bdrv_cb, dbs);
    } else {
        dbs->acb = bdrv_aio_readv(dbs->bs, dbs->sector_num, &dbs->iov,
                                  dbs->iov.size / 512, dma_bdrv_cb, dbs);
    }
    if (!dbs->acb) {
        dma_bdrv_unmap(dbs);
        qemu_iovec_destroy(&dbs->iov);
        return;
    }
}
Example 9
File: ahci.c Project: NormanM/qemu
static void map_page(uint8_t **ptr, uint64_t addr, uint32_t wanted)
{
    hwaddr len = wanted;

    if (*ptr) {
        cpu_physical_memory_unmap(*ptr, len, 1, len);
    }

    *ptr = cpu_physical_memory_map(addr, &len, 1);
    if (*ptr && len < wanted) {
        cpu_physical_memory_unmap(*ptr, len, 1, len);
        *ptr = NULL;
    }
}
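
map_page() releases the previous mapping with the new request's wanted length, which is only safe if every call for a given pointer uses the same size. A defensive variant that records the actual mapped length, shown as a sketch (map_page_tracked and its extra parameter are not a QEMU API; same headers as the sketch under Example 1):

static void map_page_tracked(uint8_t **ptr, hwaddr *mapped_len,
                             uint64_t addr, uint32_t wanted)
{
    hwaddr len = wanted;

    if (*ptr) {
        /* release the old mapping with its own recorded length */
        cpu_physical_memory_unmap(*ptr, *mapped_len, 1, *mapped_len);
        *ptr = NULL;
        *mapped_len = 0;
    }

    *ptr = cpu_physical_memory_map(addr, &len, 1);
    if (*ptr && len < wanted) {
        /* a partial mapping is useless here; drop it */
        cpu_physical_memory_unmap(*ptr, len, 1, len);
        *ptr = NULL;
        len = 0;
    }
    if (*ptr) {
        *mapped_len = len;
    }
}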
Example 10
File: ipl.c Project: nikunjad/qemu
static void s390_ipl_prepare_qipl(S390CPU *cpu)
{
    S390IPLState *ipl = get_ipl_device();
    uint8_t *addr;
    uint64_t len = 4096;

    addr = cpu_physical_memory_map(cpu->env.psa, &len, 1);
    if (!addr || len < QIPL_ADDRESS + sizeof(QemuIplParameters)) {
        error_report("Cannot set QEMU IPL parameters");
        return;
    }
    memcpy(addr + QIPL_ADDRESS, &ipl->qipl, sizeof(QemuIplParameters));
    cpu_physical_memory_unmap(addr, len, 1, len);
}
Example 11
LowCore *cpu_map_lowcore(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    LowCore *lowcore;
    hwaddr len = sizeof(LowCore);

    lowcore = cpu_physical_memory_map(env->psa, &len, 1);

    if (!lowcore || len < sizeof(LowCore)) {
        cpu_abort(CPU(cpu), "Could not map lowcore\n");
    }

    return lowcore;
}
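
The release side is symmetric: QEMU's s390x code pairs this with a cpu_unmap_lowcore() helper that is essentially the following sketch:

void cpu_unmap_lowcore(LowCore *lowcore)
{
    /* The whole structure may have been updated, so pass the full
     * length as access_len to flush it back and mark it dirty. */
    cpu_physical_memory_unmap(lowcore, sizeof(LowCore), 1, sizeof(LowCore));
}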
Example 12
static void rtas_nvram_fetch(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                             uint32_t token, uint32_t nargs,
                             target_ulong args,
                             uint32_t nret, target_ulong rets)
{
    sPAPRNVRAM *nvram = spapr->nvram;
    hwaddr offset, buffer, len;
    int alen;
    void *membuf;

    if ((nargs != 3) || (nret != 2)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    if (!nvram) {
        rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
        rtas_st(rets, 1, 0);
        return;
    }

    offset = rtas_ld(args, 0);
    buffer = rtas_ld(args, 1);
    len = rtas_ld(args, 2);

    if (((offset + len) < offset)
        || ((offset + len) > nvram->size)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        rtas_st(rets, 1, 0);
        return;
    }

    membuf = cpu_physical_memory_map(buffer, &len, 1);
    if (!membuf) {
        rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
        rtas_st(rets, 1, 0);
        return;
    }
    if (nvram->drive) {
        alen = bdrv_pread(nvram->drive, offset, membuf, len);
    } else {
        assert(nvram->buf);

        memcpy(membuf, nvram->buf + offset, len);
        alen = len;
    }
    cpu_physical_memory_unmap(membuf, len, 1, len);

    rtas_st(rets, 0, (alen < len) ? RTAS_OUT_HW_ERROR : RTAS_OUT_SUCCESS);
    rtas_st(rets, 1, (alen < 0) ? 0 : alen);
}
Example 13
/* PC hardware initialisation */
static void s390_init(MachineState *machine)
{
    ram_addr_t my_ram_size;
    void *virtio_region;
    hwaddr virtio_region_len;
    hwaddr virtio_region_start;

    if (machine->ram_slots) {
        error_report("Memory hotplug not supported by the selected machine.");
        exit(EXIT_FAILURE);
    }
    s390_sclp_init();
    my_ram_size = machine->ram_size;

    /* get a BUS */
    s390_bus = s390_virtio_bus_init(&my_ram_size);
    s390_init_ipl_dev(machine->kernel_filename, machine->kernel_cmdline,
                      machine->initrd_filename, ZIPL_FILENAME, false);
    s390_flic_init();

    /* register hypercalls */
    s390_virtio_register_hcalls();

    /* allocate RAM */
    s390_memory_init(my_ram_size);

    /* clear virtio region */
    virtio_region_len = my_ram_size - ram_size;
    virtio_region_start = ram_size;
    virtio_region = cpu_physical_memory_map(virtio_region_start,
                                            &virtio_region_len, true);
    memset(virtio_region, 0, virtio_region_len);
    cpu_physical_memory_unmap(virtio_region, virtio_region_len, 1,
                              virtio_region_len);

    /* init CPUs */
    s390_init_cpus(machine->cpu_model);

    /* Create VirtIO network adapters */
    s390_create_virtio_net((BusState *)s390_bus, "virtio-net-s390");

    /* Register savevm handler for guest TOD clock */
    register_savevm(NULL, "todclock", 0, 1, gtod_save, gtod_load, NULL);
}
Example 14
static void mvc_fast_memset(CPUS390XState *env, uint32_t l, uint64_t dest,
                            uint8_t byte)
{
    hwaddr dest_phys;
    hwaddr len = l;
    void *dest_p;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
    int flags;

    if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags)) {
        cpu_stb_data(env, dest, byte);
        cpu_abort(env, "should never reach here");
    }
    dest_phys |= dest & ~TARGET_PAGE_MASK;

    dest_p = cpu_physical_memory_map(dest_phys, &len, 1);

    memset(dest_p, byte, len);

    cpu_physical_memory_unmap(dest_p, len, 1, len);
}
Example 15
static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct VirtIODevice *vdev,
                                struct vhost_virtqueue *vq,
                                unsigned idx)
{
    target_phys_addr_t s, l, a;
    int r;
    struct vhost_vring_file file = {
        .index = idx,
    };
    struct vhost_vring_state state = {
        .index = idx,
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    if (!vdev->binding->set_host_notifier) {
        fprintf(stderr, "binding does not support host notifiers\n");
        return -ENOSYS;
    }

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = ioctl(dev->control, VHOST_SET_VRING_NUM, &state);
    if (r) {
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = ioctl(dev->control, VHOST_SET_VRING_BASE, &state);
    if (r) {
        return -errno;
    }

    s = l = virtio_queue_get_desc_size(vdev, idx);
    a = virtio_queue_get_desc_addr(vdev, idx);
    vq->desc = cpu_physical_memory_map(a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    s = l = virtio_queue_get_avail_size(vdev, idx);
    a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = cpu_physical_memory_map(a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = cpu_physical_memory_map(a, &l, 1);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    vq->ring_size = s = l = virtio_queue_get_ring_size(vdev, idx);
    vq->ring_phys = a = virtio_queue_get_ring_addr(vdev, idx);
    vq->ring = cpu_physical_memory_map(a, &l, 1);
    if (!vq->ring || l != s) {
        r = -ENOMEM;
        goto fail_alloc_ring;
    }

    r = vhost_virtqueue_set_addr(dev, vq, idx, dev->log_enabled);
    if (r < 0) {
        r = -errno;
        goto fail_alloc;
    }
    r = vdev->binding->set_host_notifier(vdev->binding_opaque, idx, true);
    if (r < 0) {
        fprintf(stderr, "Error binding host notifier: %d\n", -r);
        goto fail_host_notifier;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = ioctl(dev->control, VHOST_SET_VRING_KICK, &file);
    if (r) {
        r = -errno;
        goto fail_kick;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    r = ioctl(dev->control, VHOST_SET_VRING_CALL, &file);
    if (r) {
        r = -errno;
        goto fail_call;
    }

    return 0;

fail_call:
fail_kick:
    vdev->binding->set_host_notifier(vdev->binding_opaque, idx, false);
fail_host_notifier:
fail_alloc:
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, 0);
fail_alloc_ring:
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              0, 0);
fail_alloc_used:
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, 0);
fail_alloc_avail:
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, 0);
fail_alloc_desc:
    return r;
}
Example 16
File: virtio.c Project: iggy/qemu
int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
{
    unsigned int i, head, max;
    target_phys_addr_t desc_pa = vq->vring.desc;
    target_phys_addr_t len;

    if (!virtqueue_num_heads(vq, vq->last_avail_idx))
        return 0;

    /* When we start there are neither input nor output buffers. */
    elem->out_num = elem->in_num = 0;

    max = vq->vring.num;

    i = head = virtqueue_get_head(vq, vq->last_avail_idx++);

    if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_INDIRECT) {
        if (vring_desc_len(desc_pa, i) % sizeof(VRingDesc)) {
            fprintf(stderr, "Invalid size for indirect buffer table\n");
            exit(1);
        }

        /* loop over the indirect descriptor table */
        max = vring_desc_len(desc_pa, i) / sizeof(VRingDesc);
        desc_pa = vring_desc_addr(desc_pa, i);
        i = 0;
    }

    do {
        struct iovec *sg;
        int is_write = 0;

        if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_WRITE) {
            elem->in_addr[elem->in_num] = vring_desc_addr(desc_pa, i);
            sg = &elem->in_sg[elem->in_num++];
            is_write = 1;
        } else
            sg = &elem->out_sg[elem->out_num++];

        /* Grab the first descriptor, and check it's OK. */
        sg->iov_len = vring_desc_len(desc_pa, i);
        len = sg->iov_len;

        sg->iov_base = cpu_physical_memory_map(vring_desc_addr(desc_pa, i),
                                               &len, is_write);

        if (sg->iov_base == NULL || len != sg->iov_len) {
            fprintf(stderr, "virtio: trying to map MMIO memory\n");
            exit(1);
        }

        /* If we've got too many, that implies a descriptor loop. */
        if ((elem->in_num + elem->out_num) > max) {
            fprintf(stderr, "Looped descriptor");
            exit(1);
        }
    } while ((i = virtqueue_next_desc(desc_pa, i, max)) != max);

    elem->index = head;

    vq->inuse++;

    return elem->in_num + elem->out_num;
}
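
virtqueue_pop() deliberately leaves every buffer mapped; the mappings live for as long as the element is in flight. A sketch of the matching release, modeled on what virtqueue_fill() does in the same file (the helper name here is illustrative): in-buffers are unmapped with the byte count the device actually produced, out-buffers with access_len 0 because they were only read.

static void virtqueue_unmap_element(VirtQueueElement *elem, unsigned int len)
{
    unsigned int offset = 0;
    int i;

    for (i = 0; i < elem->in_num; i++) {
        /* the device wrote up to 'len' bytes total across the in-buffers */
        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);

        cpu_physical_memory_unmap(elem->in_sg[i].iov_base,
                                  elem->in_sg[i].iov_len, 1, size);
        offset += size;
    }
    for (i = 0; i < elem->out_num; i++) {
        cpu_physical_memory_unmap(elem->out_sg[i].iov_base,
                                  elem->out_sg[i].iov_len, 0, 0);
    }
}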
Example 17
/* PC hardware initialisation */
static void s390_init(ram_addr_t my_ram_size,
                      const char *boot_device,
                      const char *kernel_filename,
                      const char *kernel_cmdline,
                      const char *initrd_filename,
                      const char *cpu_model)
{
    CPUState *env = NULL;
    MemoryRegion *sysmem = get_system_memory();
    MemoryRegion *ram = g_new(MemoryRegion, 1);
    ram_addr_t kernel_size = 0;
    ram_addr_t initrd_offset;
    ram_addr_t initrd_size = 0;
    int shift = 0;
    uint8_t *storage_keys;
    void *virtio_region;
    target_phys_addr_t virtio_region_len;
    target_phys_addr_t virtio_region_start;
    int i;

    /* s390x ram size detection needs a 16bit multiplier + an increment. So
       guests > 64GB can be specified in 2MB steps etc. */
    while ((my_ram_size >> (20 + shift)) > 65535) {
        shift++;
    }
    my_ram_size = my_ram_size >> (20 + shift) << (20 + shift);

    /* let's propagate the changed ram size into the global variable. */
    ram_size = my_ram_size;

    /* get a BUS */
    s390_bus = s390_virtio_bus_init(&my_ram_size);

    /* allocate RAM */
    memory_region_init_ram(ram, "s390.ram", my_ram_size);
    vmstate_register_ram_global(ram);
    memory_region_add_subregion(sysmem, 0, ram);

    /* clear virtio region */
    virtio_region_len = my_ram_size - ram_size;
    virtio_region_start = ram_size;
    virtio_region = cpu_physical_memory_map(virtio_region_start,
                                            &virtio_region_len, true);
    memset(virtio_region, 0, virtio_region_len);
    cpu_physical_memory_unmap(virtio_region, virtio_region_len, 1,
                              virtio_region_len);

    /* allocate storage keys */
    storage_keys = g_malloc0(my_ram_size / TARGET_PAGE_SIZE);

    /* init CPUs */
    if (cpu_model == NULL) {
        cpu_model = "host";
    }

    ipi_states = g_malloc(sizeof(CPUState *) * smp_cpus);

    for (i = 0; i < smp_cpus; i++) {
        CPUState *tmp_env;

        tmp_env = cpu_init(cpu_model);
        if (!env) {
            env = tmp_env;
        }
        ipi_states[i] = tmp_env;
        tmp_env->halted = 1;
        tmp_env->exception_index = EXCP_HLT;
        tmp_env->storage_keys = storage_keys;
    }

    /* One CPU has to run */
    s390_add_running_cpu(env);

    if (kernel_filename) {

        kernel_size = load_elf(kernel_filename, NULL, NULL, NULL, NULL,
                               NULL, 1, ELF_MACHINE, 0);
        if (kernel_size == -1UL) {
            kernel_size = load_image_targphys(kernel_filename, 0, ram_size);
        }
        /*
         * we cannot rely on the ELF entry point, since up to 3.2 this
         * value was 0x800 (the SALIPL loader) and it won't work. For
         * all (Linux) cases 0x10000 (KERN_IMAGE_START) should be fine.
         */
        env->psw.addr = KERN_IMAGE_START;
        env->psw.mask = 0x0000000180000000ULL;
    } else {
        ram_addr_t bios_size = 0;
        char *bios_filename;

        /* Load zipl bootloader */
        if (bios_name == NULL) {
            bios_name = ZIPL_FILENAME;
        }

        bios_filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
        bios_size = load_image_targphys(bios_filename, ZIPL_LOAD_ADDR, 4096);
        g_free(bios_filename);

        if ((long)bios_size < 0) {
            hw_error("could not load bootloader '%s'\n", bios_name);
        }

        if (bios_size > 4096) {
            hw_error("stage1 bootloader is > 4k\n");
        }

        env->psw.addr = ZIPL_START;
        env->psw.mask = 0x0000000180000000ULL;
    }

    if (initrd_filename) {
        initrd_offset = INITRD_START;
        while (kernel_size + 0x100000 > initrd_offset) {
            initrd_offset += 0x100000;
        }
        initrd_size = load_image_targphys(initrd_filename, initrd_offset,
                                          ram_size - initrd_offset);
        /* we have to overwrite values in the kernel image, which are "rom" */
        memcpy(rom_ptr(INITRD_PARM_START), &initrd_offset, 8);
        memcpy(rom_ptr(INITRD_PARM_SIZE), &initrd_size, 8);
    }

    if (kernel_cmdline) {
        /* we have to overwrite values in the kernel image, which are "rom" */
        memcpy(rom_ptr(KERN_PARM_AREA), kernel_cmdline,
               strlen(kernel_cmdline) + 1);
    }

    /* Create VirtIO network adapters */
    for(i = 0; i < nb_nics; i++) {
        NICInfo *nd = &nd_table[i];
        DeviceState *dev;

        if (!nd->model) {
            nd->model = g_strdup("virtio");
        }

        if (strcmp(nd->model, "virtio")) {
            fprintf(stderr, "S390 only supports VirtIO nics\n");
            exit(1);
        }

        dev = qdev_create((BusState *)s390_bus, "virtio-net-s390");
        qdev_set_nic_properties(dev, nd);
        qdev_init_nofail(dev);
    }

    /* Create VirtIO disk drives */
    for(i = 0; i < MAX_BLK_DEVS; i++) {
        DriveInfo *dinfo;
        DeviceState *dev;

        dinfo = drive_get(IF_IDE, 0, i);
        if (!dinfo) {
            continue;
        }

        dev = qdev_create((BusState *)s390_bus, "virtio-blk-s390");
        qdev_prop_set_drive_nofail(dev, "drive", dinfo->bdrv);
        qdev_init_nofail(dev);
    }
}
Example 18
void yagl_compiled_transfer_prepare(struct yagl_compiled_transfer *ct)
{
    struct yagl_vector v;
    target_ulong last_page_va = YAGL_TARGET_PAGE_VA(ct->va + ct->len - 1);
    target_ulong cur_va = ct->va;
    uint32_t len = ct->len;
    int i, num_sections;

    YAGL_LOG_FUNC_ENTER(yagl_compiled_transfer_prepare,
                        "va = 0x%X, len = 0x%X, is_write = %u",
                        (uint32_t)ct->va,
                        ct->len,
                        (uint32_t)ct->is_write);

    if (ct->in_list) {
        QLIST_REMOVE(ct, entry);
        ct->in_list = false;
    }

    yagl_vector_init(&v, sizeof(struct yagl_compiled_transfer_section), 0);

    while (len) {
        target_ulong start_page_va = YAGL_TARGET_PAGE_VA(cur_va);
        hwaddr start_page_pa = yagl_pa(start_page_va);
        target_ulong end_page_va;
        struct yagl_compiled_transfer_section section;

        if (!start_page_pa) {
            YAGL_LOG_ERROR("yagl_pa of va 0x%X failed", (uint32_t)start_page_va);
            goto fail;
        }

        end_page_va = start_page_va;

        while (end_page_va < last_page_va) {
            target_ulong next_page_va = end_page_va + TARGET_PAGE_SIZE;
            hwaddr next_page_pa = yagl_pa(next_page_va);

            if (!next_page_pa) {
                YAGL_LOG_ERROR("yagl_pa of va 0x%X failed", (uint32_t)next_page_va);
                goto fail;
            }

            /*
             * If the target pages are not linearly spaced, stop.
             */

            if ((next_page_pa < start_page_pa) ||
                ((next_page_pa - start_page_pa) >
                 (next_page_va - start_page_va))) {
                break;
            }

            end_page_va = next_page_va;
        }

        section.map_len = end_page_va + TARGET_PAGE_SIZE - start_page_va;
        section.map_base = cpu_physical_memory_map(start_page_pa, &section.map_len, 0);

        if (!section.map_base || !section.map_len) {
            YAGL_LOG_ERROR("cpu_physical_memory_map(0x%X, %u) failed",
                           (uint32_t)start_page_pa,
                           (uint32_t)section.map_len);
            goto fail;
        }

        section.len = end_page_va + TARGET_PAGE_SIZE - cur_va;

        if (section.len > len) {
            section.len = len;
        }

        section.base = (char*)section.map_base + YAGL_TARGET_PAGE_OFFSET(cur_va);

        yagl_vector_push_back(&v, &section);

        len -= section.len;
        cur_va += section.len;
    }

    ct->num_sections = yagl_vector_size(&v);
    ct->sections = yagl_vector_detach(&v);

    YAGL_LOG_FUNC_EXIT("num_sections = %d", ct->num_sections);

    return;

fail:
    num_sections = yagl_vector_size(&v);

    for (i = 0; i < num_sections; ++i) {
        struct yagl_compiled_transfer_section *section =
            (struct yagl_compiled_transfer_section*)
                ((char*)yagl_vector_data(&v) +
                 (i * sizeof(struct yagl_compiled_transfer_section)));

        cpu_physical_memory_unmap(section->map_base,
                                  section->map_len,
                                  0,
                                  section->map_len);
    }

    yagl_vector_cleanup(&v);

    YAGL_LOG_FUNC_EXIT(NULL);
}
Example 19
static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
{
    int ret;
    VqInfoBlock info;
    uint8_t status;
    VirtioFeatDesc features;
    void *config;
    hwaddr indicators;
    VqConfigBlock vq_config;
    VirtioCcwDevice *dev = sch->driver_data;
    bool check_len;
    int len;
    hwaddr hw_len;

    if (!dev) {
        return -EINVAL;
    }

    trace_virtio_ccw_interpret_ccw(sch->cssid, sch->ssid, sch->schid,
                                   ccw.cmd_code);
    check_len = !((ccw.flags & CCW_FLAG_SLI) && !(ccw.flags & CCW_FLAG_DC));

    /* Look at the command. */
    switch (ccw.cmd_code) {
    case CCW_CMD_SET_VQ:
        if (check_len) {
            if (ccw.count != sizeof(info)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(info)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            info.queue = ldq_phys(ccw.cda);
            info.align = ldl_phys(ccw.cda + sizeof(info.queue));
            info.index = lduw_phys(ccw.cda + sizeof(info.queue)
                                   + sizeof(info.align));
            info.num = lduw_phys(ccw.cda + sizeof(info.queue)
                                 + sizeof(info.align)
                                 + sizeof(info.index));
            ret = virtio_ccw_set_vqs(sch, info.queue, info.align, info.index,
                                     info.num);
            sch->curr_status.scsw.count = 0;
        }
        break;
    case CCW_CMD_VDEV_RESET:
        virtio_reset(dev->vdev);
        ret = 0;
        break;
    case CCW_CMD_READ_FEAT:
        if (check_len) {
            if (ccw.count != sizeof(features)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(features)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            features.index = ldub_phys(ccw.cda + sizeof(features.features));
            if (features.index < ARRAY_SIZE(dev->host_features)) {
                features.features = dev->host_features[features.index];
            } else {
                /* Return zeroes if the guest supports more feature bits. */
                features.features = 0;
            }
            stl_le_phys(ccw.cda, features.features);
            sch->curr_status.scsw.count = ccw.count - sizeof(features);
            ret = 0;
        }
        break;
    case CCW_CMD_WRITE_FEAT:
        if (check_len) {
            if (ccw.count != sizeof(features)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(features)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            features.index = ldub_phys(ccw.cda + sizeof(features.features));
            features.features = ldl_le_phys(ccw.cda);
            if (features.index < ARRAY_SIZE(dev->host_features)) {
                if (dev->vdev->set_features) {
                    dev->vdev->set_features(dev->vdev, features.features);
                }
                dev->vdev->guest_features = features.features;
            } else {
                /*
                 * If the guest supports more feature bits, assert that it
                 * passes us zeroes for those we don't support.
                 */
                if (features.features) {
                    fprintf(stderr, "Guest bug: features[%i]=%x (expected 0)\n",
                            features.index, features.features);
                    /* XXX: do a unit check here? */
                }
            }
            sch->curr_status.scsw.count = ccw.count - sizeof(features);
            ret = 0;
        }
        break;
    case CCW_CMD_READ_CONF:
        if (check_len) {
            if (ccw.count > dev->vdev->config_len) {
                ret = -EINVAL;
                break;
            }
        }
        len = MIN(ccw.count, dev->vdev->config_len);
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            dev->vdev->get_config(dev->vdev, dev->vdev->config);
            /* XXX config space endianness */
            cpu_physical_memory_write(ccw.cda, dev->vdev->config, len);
            sch->curr_status.scsw.count = ccw.count - len;
            ret = 0;
        }
        break;
    case CCW_CMD_WRITE_CONF:
        if (check_len) {
            if (ccw.count > dev->vdev->config_len) {
                ret = -EINVAL;
                break;
            }
        }
        len = MIN(ccw.count, dev->vdev->config_len);
        hw_len = len;
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            config = cpu_physical_memory_map(ccw.cda, &hw_len, 0);
            if (!config) {
                ret = -EFAULT;
            } else {
                len = hw_len;
                /* XXX config space endianness */
                memcpy(dev->vdev->config, config, len);
                cpu_physical_memory_unmap(config, hw_len, 0, hw_len);
                if (dev->vdev->set_config) {
                    dev->vdev->set_config(dev->vdev, dev->vdev->config);
                }
                sch->curr_status.scsw.count = ccw.count - len;
                ret = 0;
            }
        }
        break;
    case CCW_CMD_WRITE_STATUS:
        if (check_len) {
            if (ccw.count != sizeof(status)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(status)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            status = ldub_phys(ccw.cda);
            virtio_set_status(dev->vdev, status);
            if (dev->vdev->status == 0) {
                virtio_reset(dev->vdev);
            }
            sch->curr_status.scsw.count = ccw.count - sizeof(status);
            ret = 0;
        }
        break;
    case CCW_CMD_SET_IND:
        if (check_len) {
            if (ccw.count != sizeof(indicators)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(indicators)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        indicators = ldq_phys(ccw.cda);
        if (!indicators) {
            ret = -EFAULT;
        } else {
            dev->indicators = indicators;
            sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
            ret = 0;
        }
        break;
    case CCW_CMD_SET_CONF_IND:
        if (check_len) {
            if (ccw.count != sizeof(indicators)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(indicators)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        indicators = ldq_phys(ccw.cda);
        if (!indicators) {
            ret = -EFAULT;
        } else {
            dev->indicators2 = indicators;
            sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
            ret = 0;
        }
        break;
    case CCW_CMD_READ_VQ_CONF:
        if (check_len) {
            if (ccw.count != sizeof(vq_config)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(vq_config)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            vq_config.index = lduw_phys(ccw.cda);
            vq_config.num_max = virtio_queue_get_num(dev->vdev,
                                                     vq_config.index);
            stw_phys(ccw.cda + sizeof(vq_config.index), vq_config.num_max);
            sch->curr_status.scsw.count = ccw.count - sizeof(vq_config);
            ret = 0;
        }
        break;
    default:
        ret = -ENOSYS;
        break;
    }
    return ret;
}
Example 20
void framebuffer_update_display(
    DisplayState *ds,
    MemoryRegion *address_space,
    target_phys_addr_t base,
    int cols, /* Width in pixels.  */
    int rows, /* Height in pixels.  */
    int src_width, /* Length of source line, in bytes.  */
    int dest_row_pitch, /* Bytes between adjacent horizontal output pixels.  */
    int dest_col_pitch, /* Bytes between adjacent vertical output pixels.  */
    int invalidate, /* nonzero to redraw the whole image.  */
    drawfn fn,
    void *opaque,
    int *first_row, /* Input and output.  */
    int *last_row /* Output only */)
{
    target_phys_addr_t src_len;
    uint8_t *dest;
    uint8_t *src;
    uint8_t *src_base;
    int first, last = 0;
    int dirty;
    int i;
    ram_addr_t addr;
    MemoryRegionSection mem_section;
    MemoryRegion *mem;

    i = *first_row;
    *first_row = -1;
    src_len = src_width * rows;

    mem_section = memory_region_find(address_space, base, src_len);
    if (mem_section.size != src_len || !memory_region_is_ram(mem_section.mr)) {
        return;
    }
    mem = mem_section.mr;
    assert(mem);
    assert(mem_section.offset_within_address_space == base);

    memory_region_sync_dirty_bitmap(mem);
    src_base = cpu_physical_memory_map(base, &src_len, 0);
    /* If we can't map the framebuffer then bail.  We could try harder,
       but it's not really worth it as dirty flag tracking will probably
       already have failed above.  */
    if (!src_base)
        return;
    if (src_len != src_width * rows) {
        cpu_physical_memory_unmap(src_base, src_len, 0, 0);
        return;
    }
    src = src_base;
    dest = ds_get_data(ds);
    if (dest_col_pitch < 0)
        dest -= dest_col_pitch * (cols - 1);
    if (dest_row_pitch < 0) {
        dest -= dest_row_pitch * (rows - 1);
    }
    first = -1;
    addr = mem_section.offset_within_region;

    addr += i * src_width;
    src += i * src_width;
    dest += i * dest_row_pitch;

    for (; i < rows; i++) {
        dirty = memory_region_get_dirty(mem, addr, src_width,
                                             DIRTY_MEMORY_VGA);
        if (dirty || invalidate) {
            fn(opaque, dest, src, cols, dest_col_pitch);
            if (first == -1)
                first = i;
            last = i;
        }
        addr += src_width;
        src += src_width;
        dest += dest_row_pitch;
    }
    cpu_physical_memory_unmap(src_base, src_len, 0, 0);
    if (first < 0) {
        return;
    }
    memory_region_reset_dirty(mem, mem_section.offset_within_region, src_len,
                              DIRTY_MEMORY_VGA);
    *first_row = first;
    *last_row = last;
    return;
}
Example 21
void framebuffer_update_display(
    DisplayState *ds,
    target_phys_addr_t base,
    int cols, /* Width in pixels.  */
    int rows, /* Height in pixels.  */
    int src_width, /* Length of source line, in bytes.  */
    int dest_row_pitch, /* Bytes between adjacent horizontal output pixels.  */
    int dest_col_pitch, /* Bytes between adjacent vertical output pixels.  */
    int invalidate, /* nonzero to redraw the whole image.  */
    drawfn fn,
    void *opaque,
    int *first_row, /* Input and output.  */
    int *last_row /* Output only */)
{
    target_phys_addr_t src_len;
    uint8_t *dest;
    uint8_t *src;
    uint8_t *src_base;
    int first, last = 0;
    int dirty;
    int i;
    ram_addr_t addr;
    ram_addr_t pd;
    ram_addr_t pd2;

    i = *first_row;
    *first_row = -1;
    src_len = src_width * rows;

    cpu_physical_sync_dirty_bitmap(base, base + src_len);
    pd = cpu_get_physical_page_desc(base);
    pd2 = cpu_get_physical_page_desc(base + src_len - 1);
    /* We should really check that this is a contiguous ram region.
       Instead we just check that the first and last pages are
       both ram, and the right distance apart.  */
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM
        || (pd2 & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
        return;
    }
    pd = (pd & TARGET_PAGE_MASK) + (base & ~TARGET_PAGE_MASK);
    if (((pd + src_len - 1) & TARGET_PAGE_MASK) != (pd2 & TARGET_PAGE_MASK)) {
        return;
    }

    src_base = cpu_physical_memory_map(base, &src_len, 0);
    /* If we can't map the framebuffer then bail.  We could try harder,
       but it's not really worth it as dirty flag tracking will probably
       already have failed above.  */
    if (!src_base)
        return;
    if (src_len != src_width * rows) {
        cpu_physical_memory_unmap(src_base, src_len, 0, 0);
        return;
    }
    src = src_base;
    dest = ds_get_data(ds);
    if (dest_col_pitch < 0)
        dest -= dest_col_pitch * (cols - 1);
    if (dest_row_pitch < 0) {
        dest -= dest_row_pitch * (rows - 1);
    }
    first = -1;
    addr = pd;

    addr += i * src_width;
    src += i * src_width;
    dest += i * dest_row_pitch;

    for (; i < rows; i++) {
        target_phys_addr_t dirty_offset;
        dirty = 0;
        dirty_offset = 0;
        while (addr + dirty_offset < TARGET_PAGE_ALIGN(addr + src_width)) {
            dirty |= cpu_physical_memory_get_dirty(addr + dirty_offset,
                                                   VGA_DIRTY_FLAG);
            dirty_offset += TARGET_PAGE_SIZE;
        }

        if (dirty || invalidate) {
            fn(opaque, dest, src, cols, dest_col_pitch);
            if (first == -1)
                first = i;
            last = i;
        }
        addr += src_width;
        src += src_width;
        dest += dest_row_pitch;
    }
    cpu_physical_memory_unmap(src_base, src_len, 0, 0);
    if (first < 0) {
        return;
    }
    cpu_physical_memory_reset_dirty(pd, pd + src_len, VGA_DIRTY_FLAG);
    *first_row = first;
    *last_row = last;
    return;
}
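
The last two examples are the same function from two QEMU generations: the first resolves the framebuffer through memory_region_find() and memory_region_get_dirty(), while the older second one walks cpu_get_physical_page_desc() and the VGA dirty flag by hand. The map/unmap discipline is identical in both: a read-only mapping, an exact-length check, and unmap with access_len 0.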