/*
 * Grow a domain's memory reservation by nr_extents extents of the given
 * order.  extent_start may be NULL, in which case the hypervisor chooses
 * the machine frames; otherwise it receives the allocated frame numbers.
 *
 * Returns 0 on full success; -1 with errno = ENOMEM on a partial or
 * failed allocation; a negative hypercall error otherwise.
 */
int xc_domain_memory_increase_reservation(int xc_handle,
                                          uint32_t domid,
                                          unsigned long nr_extents,
                                          unsigned int extent_order,
                                          unsigned int mem_flags,
                                          xen_pfn_t *extent_start)
{
    struct xen_memory_reservation reservation = {
        .nr_extents   = nr_extents,
        .extent_order = extent_order,
        .mem_flags    = mem_flags,
        .domid        = domid
    };
    int rc;

    /* extent_start may be NULL */
    set_xen_guest_handle(reservation.extent_start, extent_start);

    rc = xc_memory_op(xc_handle, XENMEM_increase_reservation, &reservation);
    if ( rc == nr_extents )
        return 0;

    if ( rc >= 0 )
    {
        /* A non-negative short count means only some extents were granted. */
        DPRINTF("Failed allocation for dom %d: "
                "%ld extents of order %d, mem_flags %x\n",
                domid, nr_extents, extent_order, mem_flags);
        errno = ENOMEM;
        rc = -1;
    }

    return rc;
}
static void* xc_ia64_map_foreign_p2m(int xc_handle, uint32_t dom, struct xen_ia64_memmap_info *memmap_info, unsigned long flags, unsigned long *p2m_size_p) { unsigned long gpfn_max; unsigned long p2m_size; void *addr; privcmd_hypercall_t hypercall; int ret; int saved_errno; gpfn_max = xc_memory_op(xc_handle, XENMEM_maximum_gpfn, &dom); if (gpfn_max < 0) return NULL; p2m_size = (((gpfn_max + 1) + PTRS_PER_PTE - 1) / PTRS_PER_PTE) << PAGE_SHIFT; addr = mmap(NULL, p2m_size, PROT_READ, MAP_SHARED, xc_handle, 0); if (addr == MAP_FAILED) return NULL; hypercall.op = __HYPERVISOR_ia64_dom0vp_op; hypercall.arg[0] = IA64_DOM0VP_expose_foreign_p2m; hypercall.arg[1] = (unsigned long)addr; hypercall.arg[2] = dom; hypercall.arg[3] = (unsigned long)memmap_info; hypercall.arg[4] = flags; if (lock_pages(memmap_info, sizeof(*memmap_info) + memmap_info->efi_memmap_size) != 0) { saved_errno = errno; munmap(addr, p2m_size); errno = saved_errno; return NULL; } ret = do_xen_hypercall(xc_handle, &hypercall); saved_errno = errno; unlock_pages(memmap_info, sizeof(*memmap_info) + memmap_info->efi_memmap_size); if (ret < 0) { munmap(addr, p2m_size); errno = saved_errno; return NULL; } *p2m_size_p = p2m_size; return addr; }
int xc_domain_set_memmap_limit(int xc_handle, uint32_t domid, unsigned long map_limitkb) { int rc; struct xen_foreign_memory_map fmap = { .domid = domid, .map = { .nr_entries = 1 } }; struct e820entry e820 = { .addr = 0, .size = (uint64_t)map_limitkb << 10, .type = E820_RAM }; set_xen_guest_handle(fmap.map.buffer, &e820); if ( lock_pages(&fmap, sizeof(fmap)) || lock_pages(&e820, sizeof(e820)) ) { PERROR("Could not lock memory for Xen hypercall"); rc = -1; goto out; } rc = xc_memory_op(xc_handle, XENMEM_set_memory_map, &fmap); out: unlock_pages(&fmap, sizeof(fmap)); unlock_pages(&e820, sizeof(e820)); return rc; } #else int xc_domain_set_memmap_limit(int xc_handle, uint32_t domid, unsigned long map_limitkb) { PERROR("Function not implemented"); errno = ENOSYS; return -1; }
static int xc_domain_memory_pod_target(int xc_handle, int op, uint32_t domid, uint64_t target_pages, uint64_t *tot_pages, uint64_t *pod_cache_pages, uint64_t *pod_entries) { int err; struct xen_pod_target pod_target = { .domid = domid, .target_pages = target_pages }; err = xc_memory_op(xc_handle, op, &pod_target); if ( err < 0 ) { DPRINTF("Failed %s_memory_target dom %d\n", (op==XENMEM_set_pod_target)?"set":"get", domid); errno = -err; err = -1; } else err = 0; if ( tot_pages ) *tot_pages = pod_target.tot_pages; if ( pod_cache_pages ) *pod_cache_pages = pod_target.pod_cache_pages; if ( pod_entries ) *pod_entries = pod_target.pod_entries; return err; }
/*
 * Return nr_extents extents of the given order from the domain to the
 * hypervisor.  extent_start is mandatory here: it names the guest frames
 * to release.  Returns 0 on full success, -1 with errno set otherwise.
 */
int xc_domain_memory_decrease_reservation(int xc_handle,
                                          uint32_t domid,
                                          unsigned long nr_extents,
                                          unsigned int extent_order,
                                          xen_pfn_t *extent_start)
{
    struct xen_memory_reservation reservation = {
        .nr_extents   = nr_extents,
        .extent_order = extent_order,
        .mem_flags    = 0,
        .domid        = domid
    };
    int rc;

    /* Unlike increase_reservation, a NULL frame list is an error. */
    if ( extent_start == NULL )
    {
        DPRINTF("decrease_reservation extent_start is NULL!\n");
        errno = EINVAL;
        return -1;
    }
    set_xen_guest_handle(reservation.extent_start, extent_start);

    rc = xc_memory_op(xc_handle, XENMEM_decrease_reservation, &reservation);
    if ( rc == nr_extents )
        return 0;

    if ( rc >= 0 )
    {
        /* Short count: only some extents were released. */
        DPRINTF("Failed deallocation for dom %d: %ld extents of order %d\n",
                domid, nr_extents, extent_order);
        errno = EINVAL;
        rc = -1;
    }

    return rc;
}

/*
 * Populate the domain's physmap with nr_extents extents of the given
 * order at the guest frame numbers listed in extent_start; on success
 * the hypervisor writes the backing machine frames back into the array.
 * Returns 0 on full success, -1 with errno = EBUSY on a partial or
 * failed allocation.
 */
int xc_domain_memory_populate_physmap(int xc_handle,
                                      uint32_t domid,
                                      unsigned long nr_extents,
                                      unsigned int extent_order,
                                      unsigned int mem_flags,
                                      xen_pfn_t *extent_start)
{
    struct xen_memory_reservation reservation = {
        .nr_extents   = nr_extents,
        .extent_order = extent_order,
        .mem_flags    = mem_flags,
        .domid        = domid
    };
    int rc;

    set_xen_guest_handle(reservation.extent_start, extent_start);

    rc = xc_memory_op(xc_handle, XENMEM_populate_physmap, &reservation);
    if ( rc == nr_extents )
        return 0;

    if ( rc >= 0 )
    {
        /* Short count: only some extents were populated. */
        DPRINTF("Failed allocation for dom %d: %ld extents of order %d\n",
                domid, nr_extents, extent_order);
        errno = EBUSY;
        rc = -1;
    }

    return rc;
}
/*
 * Snapshot the guest's EFI memmap into a freshly malloc()ed buffer.
 * Retries if the guest resizes the memmap or gains memory while we copy.
 *
 * On success stores the buffer (owned by the caller, free() it) in
 * *memmap_info_p and, if non-NULL, its page count in
 * *memmap_info_num_pages_p.  Returns 0 on success, -1 on failure.
 *
 * Bug fixes versus the previous version:
 *  - gpfn_max_prev/post were unsigned long, making both `< 0` error
 *    tests always false;
 *  - the post-copy error test checked gpfn_max_prev instead of the
 *    just-fetched gpfn_max_post;
 *  - ERROR() passed an unsigned long to %x (undefined behaviour);
 *  - the bounds check used sizeof(memmap_info) (a pointer) where the
 *    header size sizeof(*memmap_info) was intended.
 */
int
xc_ia64_copy_memmap(int xc_handle, uint32_t domid, shared_info_t *live_shinfo,
                    xen_ia64_memmap_info_t **memmap_info_p,
                    unsigned long *memmap_info_num_pages_p)
{
    long gpfn_max_prev;     /* signed: xc_memory_op() returns < 0 on error */
    long gpfn_max_post;
    unsigned long num_pages;
    unsigned long num_pages_post;
    unsigned long memmap_size;
    xen_ia64_memmap_info_t *memmap_info;
    int ret;

    gpfn_max_prev = xc_memory_op(xc_handle, XENMEM_maximum_gpfn, &domid);
    if (gpfn_max_prev < 0)
        return -1;

 again:
    num_pages = live_shinfo->arch.memmap_info_num_pages;
    if (num_pages == 0) {
        ERROR("num_pages 0x%lx", num_pages);
        return -1;
    }

    memmap_size = num_pages << PAGE_SHIFT;
    memmap_info = malloc(memmap_size);
    if (memmap_info == NULL)
        return -1;
    ret = xc_ia64_get_memmap(xc_handle, domid,
                             (char*)memmap_info, memmap_size);
    if (ret != 0) {
        free(memmap_info);
        return -1;
    }
    xen_rmb();
    /* The guest may have resized the memmap while we copied: retry. */
    num_pages_post = live_shinfo->arch.memmap_info_num_pages;
    if (num_pages != num_pages_post) {
        free(memmap_info);
        num_pages = num_pages_post;
        goto again;
    }

    gpfn_max_post = xc_memory_op(xc_handle, XENMEM_maximum_gpfn, &domid);
    if (gpfn_max_post < 0) {
        free(memmap_info);
        return -1;
    }
    if (gpfn_max_post > gpfn_max_prev) {
        /* The guest gained memory during the copy: start over. */
        free(memmap_info);
        gpfn_max_prev = gpfn_max_post;
        goto again;
    }

    /* reject unknown memmap */
    if (memmap_info->efi_memdesc_size != sizeof(efi_memory_desc_t) ||
        (memmap_info->efi_memmap_size / memmap_info->efi_memdesc_size) == 0 ||
        memmap_info->efi_memmap_size >
        (num_pages << PAGE_SHIFT) - sizeof(*memmap_info) ||
        memmap_info->efi_memdesc_version != EFI_MEMORY_DESCRIPTOR_VERSION) {
        PERROR("unknown memmap header. defaulting to compat mode.");
        free(memmap_info);
        return -1;
    }

    *memmap_info_p = memmap_info;
    if (memmap_info_num_pages_p != NULL)
        *memmap_info_num_pages_p = num_pages;

    return 0;
}
/*
 * Handle a xenstore watch event on the log-dirty directory.
 *
 * On the first call, locates the SysV shared-memory segment that holds
 * the two log-dirty bitmaps (keyed via
 * /local/domain/0/device-model/<domid>/logdirty/key), maps it, and
 * caches the xenstore paths of the active/next-active entries in static
 * state.  On every call, reads next-active, switches logdirty_bitmap to
 * the requested half of the segment, and acknowledges by writing the
 * active entry back.  Exits the process on unrecoverable errors.
 *
 * NOTE(review): relies on file-level globals (domid, xsh, xc_handle,
 * logfile, logdirty_bitmap, logdirty_bitmap_size, HOST_LONG_BITS) not
 * visible in this chunk.
 */
void xenstore_process_logdirty_event(void)
{
    char *act;
    /* Static state set up on the first successful call. */
    static char *active_path = NULL;
    static char *next_active_path = NULL;
    static char *seg = NULL;          /* base of the mapped shm segment */
    unsigned int len;
    int i;

    if (!seg) {
        /* key_terminated: 16 hex digits + NUL, pre-zeroed so the
         * strncpy below always leaves it terminated. */
        char *path = NULL, *key_ascii, key_terminated[17] = {0,};
        key_t key;
        int shmid;

        /* Find and map the shared memory segment for log-dirty bitmaps */
        if (pasprintf(&path,
                      "/local/domain/0/device-model/%u/logdirty/key",
                      domid) == -1) {
            fprintf(logfile, "Log-dirty: out of memory\n");
            exit(1);
        }
        key_ascii = xs_read(xsh, XBT_NULL, path, &len);
        free(path);
        if (!key_ascii)
            /* No key yet: wait for the next watch */
            return;
        strncpy(key_terminated, key_ascii, 16);
        free(key_ascii);
        key = (key_t) strtoull(key_terminated, NULL, 16);

        /* Figure out how big the log-dirty bitmaps are */
        logdirty_bitmap_size = xc_memory_op(xc_handle,
                                            XENMEM_maximum_gpfn, &domid) + 1;
        logdirty_bitmap_size = ((logdirty_bitmap_size + HOST_LONG_BITS - 1)
                                / HOST_LONG_BITS); /* longs */
        logdirty_bitmap_size *= sizeof (unsigned long); /* bytes */

        /* Map the shared-memory segment */
        fprintf(logfile, "%s: key=%16.16llx size=%lu\n", __FUNCTION__,
                (unsigned long long)key, logdirty_bitmap_size);
        /* Segment holds two bitmaps (double-buffered), hence 2 * size. */
        shmid = shmget(key, 2 * logdirty_bitmap_size, S_IRUSR|S_IWUSR);
        if (shmid == -1) {
            fprintf(logfile, "Log-dirty: shmget failed: segment %16.16llx "
                    "(%s)\n", (unsigned long long)key, strerror(errno));
            exit(1);
        }
        seg = shmat(shmid, NULL, 0);
        if (seg == (void *)-1) {
            fprintf(logfile, "Log-dirty: shmat failed: segment %16.16llx "
                    "(%s)\n", (unsigned long long)key, strerror(errno));
            exit(1);
        }
        fprintf(logfile, "Log-dirty: mapped segment at %p\n", seg);

        /* Double-check that the bitmaps are the size we expect */
        /* The creator stored its bitmap size in the first word;
         * a mismatch means the key in xenstore is stale. */
        if (logdirty_bitmap_size != *(uint32_t *)seg) {
            fprintf(logfile, "Log-dirty: got %u, calc %lu\n",
                    *(uint32_t *)seg, logdirty_bitmap_size);
            /* Stale key: wait for next watch */
            shmdt(seg);
            seg = NULL;
            return;
        }

        /* Remember the paths for the next-active and active entries */
        if (pasprintf(&active_path,
                      "/local/domain/0/device-model/%u/logdirty/active",
                      domid) == -1) {
            fprintf(logfile, "Log-dirty: out of memory\n");
            exit(1);
        }
        if (pasprintf(&next_active_path,
                      "/local/domain/0/device-model/%u/logdirty/next-active",
                      domid) == -1) {
            fprintf(logfile, "Log-dirty: out of memory\n");
            exit(1);
        }
    }

    fprintf(logfile, "Triggered log-dirty buffer switch\n");

    /* Read the required active buffer from the store */
    act = xs_read(xsh, XBT_NULL, next_active_path, &len);
    if (!act) {
        fprintf(logfile, "Log-dirty: can't read next-active\n");
        exit(1);
    }

    /* Switch buffers */
    /* The entry is a single digit selecting bitmap 0 or 1. */
    i = act[0] - '0';
    if (i != 0 && i != 1) {
        fprintf(logfile, "Log-dirty: bad next-active entry: %s\n", act);
        exit(1);
    }
    logdirty_bitmap = (unsigned long *)(seg + i * logdirty_bitmap_size);

    /* Ack that we've switched */
    xs_write(xsh, XBT_NULL, active_path, act, len);
    free(act);
}