Example #1
0
static void tcx24_set_dirty(TCXState *s)
{
    unsigned int i;

    /* Walk the framebuffer page by page, marking both the 24-bit pixel
     * plane and the control plane dirty so the next refresh repaints
     * the full screen. */
    for (i = 0; i < MAXX * MAXY * 4; i += TARGET_PAGE_SIZE) {
        cpu_physical_memory_set_dirty(s->vram24_offset + i);
        cpu_physical_memory_set_dirty(s->cplane_offset + i);
    }
}
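The pattern to note is the page-granular loop. Below is a minimal standalone sketch of the same idea, with a hypothetical set_page_dirty() helper and a fixed 4 KiB page size standing in for cpu_physical_memory_set_dirty() and TARGET_PAGE_SIZE:

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Hypothetical stand-in for cpu_physical_memory_set_dirty(). */
static void set_page_dirty(unsigned long addr)
{
    printf("dirty page at 0x%lx\n", addr & ~(PAGE_SIZE - 1));
}

/* Mark every page overlapping [base, base + len) dirty, one page per
 * iteration, the same stride the TCX loop above uses. */
static void set_range_dirty(unsigned long base, unsigned long len)
{
    unsigned long i;

    for (i = 0; i < len; i += PAGE_SIZE) {
        set_page_dirty(base + i);
    }
}

int main(void)
{
    set_range_dirty(0x100000, 3 * PAGE_SIZE);
    return 0;
}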
Example #2
0
static void tcx24_invalidate_display(void *opaque)
{
    TCXState *s = opaque;
    int i;

    tcx_invalidate_display(s);
    /* tcx_invalidate_display() above covers the 8-bit plane; also dirty
     * the 24-bit and control planes. */
    for (i = 0; i < MAXX * MAXY * 4; i += TARGET_PAGE_SIZE) {
        cpu_physical_memory_set_dirty(s->vram24_offset + i);
        cpu_physical_memory_set_dirty(s->cplane_offset + i);
    }
}
Example #3
0
static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
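    /* Clip the memory region [mfirst, mlast] against the logged range
     * [rfirst, rlast]; only the overlap is scanned below. */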
    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = dev->log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = dev->log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = (start / VHOST_LOG_CHUNK) * VHOST_LOG_CHUNK;

    if (end < start) {
        return;
    }
    assert(end / VHOST_LOG_CHUNK < dev->log_size);

    for (; from < to; ++from) {
        vhost_log_chunk_t log;
        int bit;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really
         * need the barrier semantics of __sync
         * builtins, but it's easier to use them than
         * roll our own. */
        log = __sync_fetch_and_and(from, 0);
        while ((bit = sizeof(log) > sizeof(int) ?
                ffsll(log) : ffs(log))) {
            ram_addr_t ram_addr;
            bit -= 1;
            ram_addr = cpu_get_physical_page_desc(addr + bit * VHOST_LOG_PAGE);
            cpu_physical_memory_set_dirty(ram_addr);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}
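The read-and-clear idiom in the chunk loop above can be isolated. Here is a minimal standalone sketch, using a hypothetical bitmap word in place of a real vhost log, of atomically fetching a word with __sync_fetch_and_and() and visiting each set bit with ffsll():

#define _GNU_SOURCE     /* for ffsll() on glibc */
#include <inttypes.h>
#include <stdio.h>
#include <strings.h>

int main(void)
{
    /* Hypothetical dirty word: bits 0, 3 and 40 set. */
    uint64_t bitmap = (1ULL << 0) | (1ULL << 3) | (1ULL << 40);

    /* Atomically read the word and clear it, as vhost_dev_sync_region()
     * does for each log chunk. */
    uint64_t log = __sync_fetch_and_and(&bitmap, 0);

    int bit;
    while ((bit = ffsll(log))) {    /* 1-based index; 0 means empty */
        bit -= 1;
        printf("dirty bit %d\n", bit);
        log &= ~(1ULL << bit);      /* drop the bit we just handled */
    }
    return 0;
}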
Example #4
0
int ram_save_live(Monitor *mon, QEMUFile *f, int stage, void *opaque)
{
    ram_addr_t addr;
    uint64_t bytes_transferred_last;
    double bwidth = 0;
    uint64_t expected_time = 0;

    if (stage < 0) {
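        /* A negative stage means the migration was cancelled; just
         * switch dirty tracking back off. */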
        cpu_physical_memory_set_dirty_tracking(0);
        return 0;
    }

    if (cpu_physical_sync_dirty_bitmap(0, TARGET_PHYS_ADDR_MAX) != 0) {
        qemu_file_set_error(f);
        return 0;
    }

    if (stage == 1) {
        bytes_transferred = 0;

        /* Make sure all dirty bits are set */
        for (addr = 0; addr < last_ram_offset; addr += TARGET_PAGE_SIZE) {
            if (!cpu_physical_memory_get_dirty(addr, MIGRATION_DIRTY_FLAG)) {
                cpu_physical_memory_set_dirty(addr);
            }
        }

        /* Enable dirty memory tracking */
        cpu_physical_memory_set_dirty_tracking(1);

        qemu_put_be64(f, last_ram_offset | RAM_SAVE_FLAG_MEM_SIZE);
    }

    bytes_transferred_last = bytes_transferred;
    bwidth = qemu_get_clock_ns(rt_clock);    /* start timestamp, reused below */

    while (!qemu_file_rate_limit(f)) {
        int ret;

        ret = ram_save_block(f);
        bytes_transferred += ret * TARGET_PAGE_SIZE;
        if (ret == 0) { /* no more blocks */
            break;
        }
    }

    /* Turn the timestamp into elapsed ns, then into bytes per ns. */
    bwidth = qemu_get_clock_ns(rt_clock) - bwidth;
    bwidth = (bytes_transferred - bytes_transferred_last) / bwidth;

    /* if we haven't transferred anything this round, force expected_time
     * to a very high value, but without crashing */
    if (bwidth == 0) {
        bwidth = 0.000001;
    }

    /* try transferring iterative blocks of memory */
    if (stage == 3) {
        /* flush all remaining blocks regardless of rate limiting */
        while (ram_save_block(f) != 0) {
            bytes_transferred += TARGET_PAGE_SIZE;
        }
        cpu_physical_memory_set_dirty_tracking(0);
    }

    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    expected_time = ram_save_remaining() * TARGET_PAGE_SIZE / bwidth;

    return (stage == 2) && (expected_time <= migrate_max_downtime());
}
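The final convergence test boils down to one estimate: remaining dirty bytes divided by the measured bandwidth. A small sketch with hypothetical numbers, keeping the function's units (bandwidth in bytes per nanosecond):

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    /* Hypothetical state: 10,000 dirty 4 KiB pages remain, and the last
     * round measured roughly 1 byte/ns (~1 GB/s). */
    uint64_t remaining_pages = 10000;
    uint64_t page_size = 4096;
    double bwidth = 1.0;                 /* bytes per nanosecond */

    uint64_t expected_time = remaining_pages * page_size / bwidth;
    printf("expected downtime: %" PRIu64 " ns\n", expected_time);

    /* Migration completes (stage 2 -> 3) only once this estimate drops
     * below migrate_max_downtime(). */
    return 0;
}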