static void nvdimm_realize(PCDIMMDevice *dimm, Error **errp) { MemoryRegion *mr = host_memory_backend_get_memory(dimm->hostmem, errp); NVDIMMDevice *nvdimm = NVDIMM(dimm); uint64_t align, pmem_size, size = memory_region_size(mr); align = memory_region_get_alignment(mr); pmem_size = size - nvdimm->label_size; nvdimm->label_data = memory_region_get_ram_ptr(mr) + pmem_size; pmem_size = QEMU_ALIGN_DOWN(pmem_size, align); if (size <= nvdimm->label_size || !pmem_size) { HostMemoryBackend *hostmem = dimm->hostmem; char *path = object_get_canonical_path_component(OBJECT(hostmem)); error_setg(errp, "the size of memdev %s (0x%" PRIx64 ") is too " "small to contain nvdimm label (0x%" PRIx64 ") and " "aligned PMEM (0x%" PRIx64 ")", path, memory_region_size(mr), nvdimm->label_size, align); g_free(path); return; } memory_region_init_alias(&nvdimm->nvdimm_mr, OBJECT(dimm), "nvdimm-memory", mr, 0, pmem_size); nvdimm->nvdimm_mr.align = align; }
/*
 * Round offset and/or bytes to target cluster if COW is needed, and
 * return the offset of the adjusted tail against original.
 */
static int mirror_cow_align(MirrorBlockJob *s, int64_t *offset,
                            uint64_t *bytes)
{
    int64_t new_offset = *offset;
    int64_t new_bytes = *bytes;
    int max_bytes = s->granularity * s->max_iov;
    bool cow_needed;
    int tail;

    /* COW is required if either boundary chunk has not been copied yet. */
    cow_needed = !test_bit(*offset / s->granularity, s->cow_bitmap) ||
                 !test_bit((*offset + *bytes - 1) / s->granularity,
                           s->cow_bitmap);
    if (cow_needed) {
        bdrv_round_to_clusters(blk_bs(s->target), *offset, *bytes,
                               &new_offset, &new_bytes);
    }

    if (new_bytes > max_bytes) {
        new_bytes = max_bytes;
        if (cow_needed) {
            /* Keep the clamped request cluster-aligned for COW. */
            new_bytes = QEMU_ALIGN_DOWN(new_bytes, s->target_cluster_size);
        }
    }

    /* Clipping may result in new_bytes unaligned to chunk boundary, but
     * that doesn't matter because it's already the end of source image. */
    new_bytes = mirror_clip_bytes(s, new_offset, new_bytes);

    /* How far the adjusted tail moved past the caller's original tail. */
    tail = new_offset + new_bytes - (*offset + *bytes);
    *offset = new_offset;
    *bytes = new_bytes;
    assert(tail >= 0);
    return tail;
}
/*
 * Hint the kernel that the pages backing @num_tables cached tables
 * starting at index @i are no longer needed (MADV_DONTNEED), so it can
 * reclaim them. Only whole pages fully inside the range are released;
 * compiled out when the platform cannot express DONTNEED.
 */
static void qcow2_cache_table_release(BlockDriverState *bs, Qcow2Cache *c,
                                      int i, int num_tables)
{
#if QEMU_MADV_DONTNEED != QEMU_MADV_INVALID
    BDRVQcow2State *s = bs->opaque;
    void *table = qcow2_cache_get_table_addr(bs, c, i);
    int page_size = getpagesize();
    size_t total = (size_t) s->cluster_size * num_tables;
    /* Distance from the table start to the first page-aligned address. */
    size_t head = QEMU_ALIGN_UP((uintptr_t) table, page_size)
                  - (uintptr_t) table;
    /* Whole pages fully contained within [table + head, table + total). */
    size_t len = QEMU_ALIGN_DOWN(total - head, page_size);

    if (len > 0) {
        qemu_madvise((uint8_t *) table + head, len, QEMU_MADV_DONTNEED);
    }
#endif
}
/*
 * Check if the contents of the iovecs are all zero
 */
bool qemu_iovec_is_zero(QEMUIOVector *qiov)
{
    int i;

    for (i = 0; i < qiov->niov; i++) {
        const uint8_t *base = qiov->iov[i].iov_base;
        size_t len = qiov->iov[i].iov_len;
        /* Bulk-scan the prefix that is a multiple of 4 * sizeof(long). */
        size_t bulk = QEMU_ALIGN_DOWN(len, 4 * sizeof(long));
        size_t pos;

        if (bulk != 0 && !buffer_is_zero(base, bulk)) {
            return false;
        }
        /* Check whatever tail remains byte by byte. */
        for (pos = bulk; pos < len; pos++) {
            if (base[pos]) {
                return false;
            }
        }
    }
    return true;
}
/*
 * Populate the mirror job's dirty bitmap before the main copy loop.
 *
 * Phase 1 (only when base == NULL and the target does not read as
 * zeroes after creation): either mark the whole device dirty when the
 * target cannot write zeroes with unmap, or explicitly zero the entire
 * target up front, throttled and bounded by MAX_IN_FLIGHT requests.
 * Phase 2: walk the source and mark dirty every range allocated above
 * @base, so only those ranges get mirrored.
 *
 * Returns 0 on success or when the job is cancelled mid-way, or a
 * negative errno from bdrv_is_allocated_above() on failure.
 */
static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
{
    int64_t offset;
    BlockDriverState *base = s->base;
    BlockDriverState *bs = s->source;
    BlockDriverState *target_bs = blk_bs(s->target);
    int ret;
    int64_t count;

    if (base == NULL && !bdrv_has_zero_init(target_bs)) {
        if (!bdrv_can_write_zeroes_with_unmap(target_bs)) {
            /* Cheapest safe fallback: treat the whole device as dirty. */
            bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, s->bdev_length);
            return 0;
        }

        s->initial_zeroing_ongoing = true;
        for (offset = 0; offset < s->bdev_length; ) {
            /* Cap each request at INT_MAX, rounded down to granularity. */
            int bytes = MIN(s->bdev_length - offset,
                            QEMU_ALIGN_DOWN(INT_MAX, s->granularity));

            mirror_throttle(s);

            if (block_job_is_cancelled(&s->common)) {
                /* Clear the flag before bailing out on cancellation. */
                s->initial_zeroing_ongoing = false;
                return 0;
            }

            if (s->in_flight >= MAX_IN_FLIGHT) {
                /* Too many requests outstanding; wait and retry offset. */
                trace_mirror_yield(s, UINT64_MAX, s->buf_free_count,
                                   s->in_flight);
                mirror_wait_for_io(s);
                continue;
            }

            mirror_do_zero_or_discard(s, offset, bytes, false);
            offset += bytes;
        }

        mirror_wait_for_all_io(s);
        s->initial_zeroing_ongoing = false;
    }

    /* First part, loop on the sectors and initialize the dirty bitmap. */
    for (offset = 0; offset < s->bdev_length; ) {
        /* Just to make sure we are not exceeding int limit. */
        int bytes = MIN(s->bdev_length - offset,
                        QEMU_ALIGN_DOWN(INT_MAX, s->granularity));

        mirror_throttle(s);

        if (block_job_is_cancelled(&s->common)) {
            return 0;
        }

        ret = bdrv_is_allocated_above(bs, base, offset, bytes, &count);
        if (ret < 0) {
            return ret;
        }

        /* The query must report progress, or this loop would never end. */
        assert(count);
        if (ret == 1) {
            /* Range is allocated above @base: it needs copying. */
            bdrv_set_dirty_bitmap(s->dirty_bitmap, offset, count);
        }
        offset += count;
    }
    return 0;
}