/*
 * Open a temporary xc interface, issue a xen_version hypercall with the
 * given subcommand and argument buffer, and close the interface again.
 */
static int __hypercall_perform(unsigned long cmd, unsigned long *arr)
{
    int ret;
    xc_interface *xch = xc_interface_open(0, 0, 0);
    DECLARE_HYPERCALL;

    if ( xch == NULL )
        return -1;

    hypercall.op     = __HYPERVISOR_xen_version;
    hypercall.arg[0] = cmd;
    hypercall.arg[1] = (unsigned long)arr;

    ret = do_xen_hypercall(xch, &hypercall);
    xc_interface_close(xch);

    /* Any nonzero result is collapsed to the single error value -1. */
    return ( ret != 0 ) ? -1 : 0;
}
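/*
 * Hedged usage sketch (not from the original source): __hypercall_perform()
 * above suits xen_version subcommands that return 0 and fill the caller's
 * buffer, e.g. XENVER_extraversion.  XENVER_version itself would not work,
 * since its nonzero return value (the version number) is collapsed to -1.
 */
static int example_get_extraversion(xen_extraversion_t *extra)
{
    return __hypercall_perform(XENVER_extraversion, (unsigned long *)extra);
}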
int xc_domain_shutdown(xc_interface *xch,
                       uint32_t domid,
                       int reason)
{
    int ret = -1;
    DECLARE_HYPERCALL;
    DECLARE_HYPERCALL_BUFFER(sched_remote_shutdown_t, arg);

    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
    if ( arg == NULL )
    {
        PERROR("Could not allocate memory for xc_domain_shutdown hypercall");
        goto out1;
    }

    hypercall.op     = __HYPERVISOR_sched_op;
    hypercall.arg[0] = (unsigned long)SCHEDOP_remote_shutdown;
    hypercall.arg[1] = HYPERCALL_BUFFER_AS_ARG(arg);
    arg->domain_id = domid;
    arg->reason = reason;

    ret = do_xen_hypercall(xch, &hypercall);

    xc_hypercall_buffer_free(xch, arg);

 out1:
    return ret;
}
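/*
 * Hedged usage sketch (not from the original source): shutting down a
 * domain via xc_domain_shutdown() above.  SHUTDOWN_poweroff comes from
 * xen/sched.h.
 */
static int example_poweroff_domain(uint32_t domid)
{
    xc_interface *xch = xc_interface_open(0, 0, 0);
    int ret;

    if ( xch == NULL )
        return -1;

    /* Ask Xen to remotely shut the domain down with a "poweroff" reason. */
    ret = xc_domain_shutdown(xch, domid, SHUTDOWN_poweroff);

    xc_interface_close(xch);
    return ret;
}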
int xc_watchdog(xc_interface *xch,
                uint32_t id,
                uint32_t timeout)
{
    int ret = -1;
    DECLARE_HYPERCALL;
    DECLARE_HYPERCALL_BUFFER(sched_watchdog_t, arg);

    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
    if ( arg == NULL )
    {
        PERROR("Could not allocate memory for xc_watchdog hypercall");
        goto out1;
    }

    hypercall.op     = __HYPERVISOR_sched_op;
    hypercall.arg[0] = (unsigned long)SCHEDOP_watchdog;
    hypercall.arg[1] = HYPERCALL_BUFFER_AS_ARG(arg);
    arg->id = id;
    arg->timeout = timeout;

    ret = do_xen_hypercall(xch, &hypercall);

    xc_hypercall_buffer_free(xch, arg);

 out1:
    return ret;
}
int xc_hvm_set_pci_link_route(
    int xc_handle, domid_t dom, uint8_t link, uint8_t isa_irq)
{
    DECLARE_HYPERCALL;
    struct xen_hvm_set_pci_link_route arg;
    int rc;

    hypercall.op     = __HYPERVISOR_hvm_op;
    hypercall.arg[0] = HVMOP_set_pci_link_route;
    hypercall.arg[1] = (unsigned long)&arg;

    arg.domid   = dom;
    arg.link    = link;
    arg.isa_irq = isa_irq;

    if ( (rc = lock_pages(&arg, sizeof(arg))) != 0 )
    {
        PERROR("Could not lock memory");
        return rc;
    }

    rc = do_xen_hypercall(xc_handle, &hypercall);

    unlock_pages(&arg, sizeof(arg));

    return rc;
}
int xc_kexec_get_range(xc_interface *xch, int range, int nr,
                       uint64_t *size, uint64_t *start)
{
    DECLARE_HYPERCALL;
    DECLARE_HYPERCALL_BUFFER(xen_kexec_range_t, get_range);
    int ret = -1;

    get_range = xc_hypercall_buffer_alloc(xch, get_range, sizeof(*get_range));
    if ( get_range == NULL )
    {
        PERROR("Could not alloc bounce buffer for kexec_get_range hypercall");
        goto out;
    }

    get_range->range = range;
    get_range->nr = nr;

    hypercall.op = __HYPERVISOR_kexec_op;
    hypercall.arg[0] = KEXEC_CMD_kexec_get_range;
    hypercall.arg[1] = HYPERCALL_BUFFER_AS_ARG(get_range);

    ret = do_xen_hypercall(xch, &hypercall);

    *size = get_range->size;
    *start = get_range->start;

 out:
    xc_hypercall_buffer_free(xch, get_range);
    return ret;
}
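/*
 * Hedged usage sketch (not from the original source): querying the crash
 * kernel region through xc_kexec_get_range() above.  KEXEC_RANGE_MA_CRASH
 * comes from xen/kexec.h; <stdio.h> and <inttypes.h> are assumed for printf.
 */
static int example_print_crash_range(xc_interface *xch)
{
    uint64_t size = 0, start = 0;

    if ( xc_kexec_get_range(xch, KEXEC_RANGE_MA_CRASH, 0, &size, &start) )
        return -1;

    printf("crash region: start=0x%" PRIx64 " size=0x%" PRIx64 "\n",
           start, size);
    return 0;
}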
int xc_kexec_exec(xc_interface *xch, int type)
{
    DECLARE_HYPERCALL;
    DECLARE_HYPERCALL_BUFFER(xen_kexec_exec_t, exec);
    int ret = -1;

    exec = xc_hypercall_buffer_alloc(xch, exec, sizeof(*exec));
    if ( exec == NULL )
    {
        PERROR("Could not alloc bounce buffer for kexec_exec hypercall");
        goto out;
    }

    exec->type = type;

    hypercall.op = __HYPERVISOR_kexec_op;
    hypercall.arg[0] = KEXEC_CMD_kexec;
    hypercall.arg[1] = HYPERCALL_BUFFER_AS_ARG(exec);

    ret = do_xen_hypercall(xch, &hypercall);

 out:
    xc_hypercall_buffer_free(xch, exec);
    return ret;
}
static int flush_mmu_updates(xc_interface *xch, struct xc_mmu *mmu)
{
    int err = 0;
    DECLARE_HYPERCALL;
    DECLARE_NAMED_HYPERCALL_BOUNCE(updates, mmu->updates,
                                   mmu->idx * sizeof(*mmu->updates),
                                   XC_HYPERCALL_BUFFER_BOUNCE_BOTH);

    if ( mmu->idx == 0 )
        return 0;

    if ( xc_hypercall_bounce_pre(xch, updates) )
    {
        PERROR("flush_mmu_updates: bounce buffer failed");
        err = 1;
        goto out;
    }

    hypercall.op     = __HYPERVISOR_mmu_update;
    hypercall.arg[0] = HYPERCALL_BUFFER_AS_ARG(updates);
    hypercall.arg[1] = (unsigned long)mmu->idx;
    hypercall.arg[2] = 0;
    hypercall.arg[3] = mmu->subject;

    if ( do_xen_hypercall(xch, &hypercall) < 0 )
    {
        ERROR("Failure when submitting mmu updates");
        err = 1;
    }

    mmu->idx = 0;

    xc_hypercall_bounce_post(xch, updates);

 out:
    return err;
}
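/*
 * Hedged sketch of the batching pattern flush_mmu_updates() serves: callers
 * queue entries into mmu->updates[] and flush when the batch fills (or
 * explicitly at the end).  This mirrors the libxc helper xc_add_mmu_update();
 * MAX_MMU_UPDATES is assumed to be the capacity of the updates[] array.
 */
int xc_add_mmu_update(xc_interface *xch, struct xc_mmu *mmu,
                      unsigned long long ptr, unsigned long long val)
{
    mmu->updates[mmu->idx].ptr = ptr;
    mmu->updates[mmu->idx].val = val;

    /* Flush automatically once the batch is full. */
    if ( ++mmu->idx == MAX_MMU_UPDATES )
        return flush_mmu_updates(xch, mmu);

    return 0;
}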
int xc_mmuext_op(
    xc_interface *xch,
    struct mmuext_op *op,
    unsigned int nr_ops,
    domid_t dom)
{
    DECLARE_HYPERCALL;
    DECLARE_HYPERCALL_BOUNCE(op, nr_ops * sizeof(*op),
                             XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
    long ret = -EINVAL;

    if ( xc_hypercall_bounce_pre(xch, op) )
    {
        PERROR("Could not bounce memory for mmuext op hypercall");
        goto out1;
    }

    hypercall.op     = __HYPERVISOR_mmuext_op;
    hypercall.arg[0] = HYPERCALL_BUFFER_AS_ARG(op);
    hypercall.arg[1] = (unsigned long)nr_ops;
    hypercall.arg[2] = (unsigned long)0;
    hypercall.arg[3] = (unsigned long)dom;

    ret = do_xen_hypercall(xch, &hypercall);

    xc_hypercall_bounce_post(xch, op);

 out1:
    return ret;
}
int xc_hvm_set_isa_irq_level(
    int xc_handle, domid_t dom,
    uint8_t isa_irq,
    unsigned int level)
{
    DECLARE_HYPERCALL;
    struct xen_hvm_set_isa_irq_level arg;
    int rc;

    hypercall.op     = __HYPERVISOR_hvm_op;
    hypercall.arg[0] = HVMOP_set_isa_irq_level;
    hypercall.arg[1] = (unsigned long)&arg;

    arg.domid   = dom;
    arg.isa_irq = isa_irq;
    arg.level   = level;

    if ( (rc = lock_pages(&arg, sizeof(arg))) != 0 )
    {
        PERROR("Could not lock memory");
        return rc;
    }

    rc = do_xen_hypercall(xc_handle, &hypercall);

    unlock_pages(&arg, sizeof(arg));

    return rc;
}
int xc_hvm_set_mem_type(
    int xc_handle, domid_t dom, hvmmem_type_t mem_type,
    uint64_t first_pfn, uint64_t nr)
{
    DECLARE_HYPERCALL;
    struct xen_hvm_set_mem_type arg;
    int rc;

    hypercall.op     = __HYPERVISOR_hvm_op;
    hypercall.arg[0] = HVMOP_set_mem_type;
    hypercall.arg[1] = (unsigned long)&arg;

    arg.domid       = dom;
    arg.hvmmem_type = mem_type;
    arg.first_pfn   = first_pfn;
    arg.nr          = nr;

    if ( (rc = lock_pages(&arg, sizeof(arg))) != 0 )
    {
        PERROR("Could not lock memory");
        return rc;
    }

    rc = do_xen_hypercall(xc_handle, &hypercall);

    unlock_pages(&arg, sizeof(arg));

    return rc;
}
int xc_hvm_set_pci_intx_level(
    int xc_handle, domid_t dom,
    uint8_t domain, uint8_t bus, uint8_t device, uint8_t intx,
    unsigned int level)
{
    DECLARE_HYPERCALL;
    struct xen_hvm_set_pci_intx_level arg;
    int rc;

    hypercall.op     = __HYPERVISOR_hvm_op;
    hypercall.arg[0] = HVMOP_set_pci_intx_level;
    hypercall.arg[1] = (unsigned long)&arg;

    arg.domid  = dom;
    arg.domain = domain;
    arg.bus    = bus;
    arg.device = device;
    arg.intx   = intx;
    arg.level  = level;

    if ( (rc = lock_pages(&arg, sizeof(arg))) != 0 )
    {
        PERROR("Could not lock memory");
        return rc;
    }

    rc = do_xen_hypercall(xc_handle, &hypercall);

    unlock_pages(&arg, sizeof(arg));

    return rc;
}
/* Legacy lock_pages() variant of xc_domain_shutdown() (pre hypercall-buffer API). */
int xc_domain_shutdown(int xc_handle,
                       uint32_t domid,
                       int reason)
{
    int ret = -1;
    sched_remote_shutdown_t arg;
    DECLARE_HYPERCALL;

    hypercall.op     = __HYPERVISOR_sched_op;
    hypercall.arg[0] = (unsigned long)SCHEDOP_remote_shutdown;
    hypercall.arg[1] = (unsigned long)&arg;
    arg.domain_id = domid;
    arg.reason = reason;

    if ( lock_pages(&arg, sizeof(arg)) != 0 )
    {
        PERROR("Could not lock memory for Xen hypercall");
        goto out1;
    }

    ret = do_xen_hypercall(xc_handle, &hypercall);

    unlock_pages(&arg, sizeof(arg));

 out1:
    return ret;
}
int xc_kexec_unload(xc_interface *xch, int type)
{
    DECLARE_HYPERCALL;
    DECLARE_HYPERCALL_BUFFER(xen_kexec_unload_t, unload);
    int ret = -1;

    unload = xc_hypercall_buffer_alloc(xch, unload, sizeof(*unload));
    if ( unload == NULL )
    {
        PERROR("Could not alloc buffer for kexec unload hypercall");
        goto out;
    }

    unload->type = type;

    hypercall.op = __HYPERVISOR_kexec_op;
    hypercall.arg[0] = KEXEC_CMD_kexec_unload;
    hypercall.arg[1] = HYPERCALL_BUFFER_AS_ARG(unload);

    ret = do_xen_hypercall(xch, &hypercall);

 out:
    xc_hypercall_buffer_free(xch, unload);
    return ret;
}
int xc_flask_op(xc_interface *xch, xen_flask_op_t *op)
{
    int ret = -1;
    DECLARE_HYPERCALL;
    DECLARE_HYPERCALL_BOUNCE(op, sizeof(*op), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);

    op->interface_version = XEN_FLASK_INTERFACE_VERSION;

    if ( xc_hypercall_bounce_pre(xch, op) )
    {
        PERROR("Could not bounce memory for flask op hypercall");
        goto out;
    }

    hypercall.op     = __HYPERVISOR_xsm_op;
    hypercall.arg[0] = HYPERCALL_BUFFER_AS_ARG(op);

    if ( (ret = do_xen_hypercall(xch, &hypercall)) < 0 )
    {
        if ( errno == EACCES )
            fprintf(stderr, "XSM operation failed!\n");
    }

    xc_hypercall_bounce_post(xch, op);

 out:
    return ret;
}
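/*
 * Hedged usage sketch (not from the original source): reading the FLASK
 * enforcing mode through xc_flask_op() above.  FLASK_GETENFORCE and the
 * u.enforce.enforcing field are assumed from xen/xsm/flask_op.h; verify the
 * interface version in use before relying on this layout.
 */
static int example_flask_getenforce(xc_interface *xch)
{
    xen_flask_op_t op;
    int rc;

    memset(&op, 0, sizeof(op));
    op.cmd = FLASK_GETENFORCE;

    rc = xc_flask_op(xch, &op);
    if ( rc )
        return rc;

    /* Nonzero means the security server is enforcing. */
    return op.u.enforce.enforcing;
}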
int xc_hvm_track_dirty_vram(
    int xc_handle, domid_t dom,
    uint64_t first_pfn, uint64_t nr,
    unsigned long *dirty_bitmap)
{
    DECLARE_HYPERCALL;
    struct xen_hvm_track_dirty_vram arg;
    int rc;

    hypercall.op     = __HYPERVISOR_hvm_op;
    hypercall.arg[0] = HVMOP_track_dirty_vram;
    hypercall.arg[1] = (unsigned long)&arg;

    arg.domid     = dom;
    arg.first_pfn = first_pfn;
    arg.nr        = nr;
    set_xen_guest_handle(arg.dirty_bitmap, (uint8_t *)dirty_bitmap);

    if ( (rc = lock_pages(&arg, sizeof(arg))) != 0 )
    {
        PERROR("Could not lock memory");
        return rc;
    }

    rc = do_xen_hypercall(xc_handle, &hypercall);

    unlock_pages(&arg, sizeof(arg));

    return rc;
}
static int flush_mmu_updates(int xc_handle, struct xc_mmu *mmu)
{
    int err = 0;
    DECLARE_HYPERCALL;

    if ( mmu->idx == 0 )
        return 0;

    hypercall.op     = __HYPERVISOR_mmu_update;
    hypercall.arg[0] = (unsigned long)mmu->updates;
    hypercall.arg[1] = (unsigned long)mmu->idx;
    hypercall.arg[2] = 0;
    hypercall.arg[3] = mmu->subject;

    if ( lock_pages(mmu->updates, sizeof(mmu->updates)) != 0 )
    {
        PERROR("flush_mmu_updates: mmu updates lock_pages failed");
        err = 1;
        goto out;
    }

    if ( do_xen_hypercall(xc_handle, &hypercall) < 0 )
    {
        ERROR("Failure when submitting mmu updates");
        err = 1;
    }

    mmu->idx = 0;

    unlock_pages(mmu->updates, sizeof(mmu->updates));

 out:
    return err;
}
int xc_mmuext_op(
    int xc_handle,
    struct mmuext_op *op,
    unsigned int nr_ops,
    domid_t dom)
{
    DECLARE_HYPERCALL;
    long ret = -EINVAL;

    hypercall.op     = __HYPERVISOR_mmuext_op;
    hypercall.arg[0] = (unsigned long)op;
    hypercall.arg[1] = (unsigned long)nr_ops;
    hypercall.arg[2] = (unsigned long)0;
    hypercall.arg[3] = (unsigned long)dom;

    if ( lock_pages(op, nr_ops * sizeof(*op)) != 0 )
    {
        PERROR("Could not lock memory for Xen hypercall");
        goto out1;
    }

    ret = do_xen_hypercall(xc_handle, &hypercall);

    unlock_pages(op, nr_ops * sizeof(*op));

 out1:
    return ret;
}
static void *xc_ia64_map_foreign_p2m(int xc_handle, uint32_t dom,
                                     struct xen_ia64_memmap_info *memmap_info,
                                     unsigned long flags,
                                     unsigned long *p2m_size_p)
{
    /* Must be signed: an unsigned gpfn_max would make the < 0 check a no-op. */
    long gpfn_max;
    unsigned long p2m_size;
    void *addr;
    privcmd_hypercall_t hypercall;
    int ret;
    int saved_errno;

    gpfn_max = xc_memory_op(xc_handle, XENMEM_maximum_gpfn, &dom);
    if (gpfn_max < 0)
        return NULL;

    p2m_size =
        (((gpfn_max + 1) + PTRS_PER_PTE - 1) / PTRS_PER_PTE) << PAGE_SHIFT;
    addr = mmap(NULL, p2m_size, PROT_READ, MAP_SHARED, xc_handle, 0);
    if (addr == MAP_FAILED)
        return NULL;

    hypercall.op = __HYPERVISOR_ia64_dom0vp_op;
    hypercall.arg[0] = IA64_DOM0VP_expose_foreign_p2m;
    hypercall.arg[1] = (unsigned long)addr;
    hypercall.arg[2] = dom;
    hypercall.arg[3] = (unsigned long)memmap_info;
    hypercall.arg[4] = flags;

    if (lock_pages(memmap_info,
                   sizeof(*memmap_info) + memmap_info->efi_memmap_size) != 0) {
        saved_errno = errno;
        munmap(addr, p2m_size);
        errno = saved_errno;
        return NULL;
    }
    ret = do_xen_hypercall(xc_handle, &hypercall);
    saved_errno = errno;
    unlock_pages(memmap_info,
                 sizeof(*memmap_info) + memmap_info->efi_memmap_size);
    if (ret < 0) {
        munmap(addr, p2m_size);
        errno = saved_errno;
        return NULL;
    }

    *p2m_size_p = p2m_size;
    return addr;
}
int xc_get_hvm_param(int handle, domid_t dom, int param, unsigned long *value)
{
    DECLARE_HYPERCALL;
    xen_hvm_param_t arg;
    int rc;

    hypercall.op     = __HYPERVISOR_hvm_op;
    hypercall.arg[0] = HVMOP_get_param;
    hypercall.arg[1] = (unsigned long)&arg;

    arg.domid = dom;
    arg.index = param;

    if ( lock_pages(&arg, sizeof(arg)) != 0 )
        return -1;

    rc = do_xen_hypercall(handle, &hypercall);

    unlock_pages(&arg, sizeof(arg));

    *value = arg.value;
    return rc;
}
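/*
 * Hedged usage sketch (not from the original source): fetching one HVM
 * parameter with xc_get_hvm_param() above.  HVM_PARAM_STORE_PFN comes from
 * xen/hvm/params.h and identifies the guest's xenstore ring page.
 */
static int example_get_store_pfn(int xc_handle, domid_t dom,
                                 unsigned long *store_pfn)
{
    return xc_get_hvm_param(xc_handle, dom, HVM_PARAM_STORE_PFN, store_pfn);
}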
/*
 * memmap_info and the memmap could be obtained by mapping the foreign
 * domain's pages directly, but that is racy; use a hypercall to avoid
 * the race.
 */
static int
xc_ia64_get_memmap(int xc_handle, uint32_t domid, char *buf,
                   unsigned long bufsize)
{
    privcmd_hypercall_t hypercall;
    int ret;

    hypercall.op = __HYPERVISOR_ia64_dom0vp_op;
    hypercall.arg[0] = IA64_DOM0VP_get_memmap;
    hypercall.arg[1] = domid;
    hypercall.arg[2] = (unsigned long)buf;
    hypercall.arg[3] = bufsize;
    hypercall.arg[4] = 0;

    if (lock_pages(buf, bufsize) != 0)
        return -1;

    ret = do_xen_hypercall(xc_handle, &hypercall);

    unlock_pages(buf, bufsize);
    return ret;
}
int xc_kexec_load(xc_interface *xch, uint8_t type, uint16_t arch,
                  uint64_t entry_maddr,
                  uint32_t nr_segments, xen_kexec_segment_t *segments)
{
    int ret = -1;
    DECLARE_HYPERCALL;
    DECLARE_HYPERCALL_BOUNCE(segments, sizeof(*segments) * nr_segments,
                             XC_HYPERCALL_BUFFER_BOUNCE_IN);
    DECLARE_HYPERCALL_BUFFER(xen_kexec_load_t, load);

    if ( xc_hypercall_bounce_pre(xch, segments) )
    {
        PERROR("Could not allocate bounce buffer for kexec load hypercall");
        goto out;
    }
    load = xc_hypercall_buffer_alloc(xch, load, sizeof(*load));
    if ( load == NULL )
    {
        PERROR("Could not allocate buffer for kexec load hypercall");
        goto out;
    }

    load->type = type;
    load->arch = arch;
    load->entry_maddr = entry_maddr;
    load->nr_segments = nr_segments;
    set_xen_guest_handle(load->segments.h, segments);

    hypercall.op = __HYPERVISOR_kexec_op;
    hypercall.arg[0] = KEXEC_CMD_kexec_load;
    hypercall.arg[1] = HYPERCALL_BUFFER_AS_ARG(load);

    ret = do_xen_hypercall(xch, &hypercall);

 out:
    xc_hypercall_buffer_free(xch, load);
    xc_hypercall_bounce_post(xch, segments);

    return ret;
}
int do_memory_op(xc_interface *xch, int cmd, void *arg, size_t len)
{
    DECLARE_HYPERCALL;
    DECLARE_HYPERCALL_BOUNCE(arg, len, XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
    long ret = -EINVAL;

    if ( xc_hypercall_bounce_pre(xch, arg) )
    {
        PERROR("Could not bounce memory for XENMEM hypercall");
        goto out1;
    }

    hypercall.op     = __HYPERVISOR_memory_op;
    hypercall.arg[0] = (unsigned long)cmd;
    hypercall.arg[1] = HYPERCALL_BUFFER_AS_ARG(arg);

    ret = do_xen_hypercall(xch, &hypercall);

    xc_hypercall_bounce_post(xch, arg);

 out1:
    return ret;
}
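/*
 * Hedged usage sketch (not from the original source): do_memory_op() above
 * is the generic bounce-based entry point for XENMEM_* commands.  For
 * XENMEM_maximum_gpfn the argument is just a domid_t and the result (the
 * highest GPFN, or a negative errno value) is the hypercall return value.
 */
static long example_max_gpfn(xc_interface *xch, domid_t domid)
{
    return do_memory_op(xch, XENMEM_maximum_gpfn, &domid, sizeof(domid));
}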
int xc_gnttab_op(xc_interface *xch, int cmd, void *op, int op_size, int count)
{
    int ret = 0;
    DECLARE_HYPERCALL;
    DECLARE_HYPERCALL_BOUNCE(op, count * op_size,
                             XC_HYPERCALL_BUFFER_BOUNCE_BOTH);

    if ( xc_hypercall_bounce_pre(xch, op) )
    {
        PERROR("Could not bounce buffer for grant table op hypercall");
        goto out1;
    }

    hypercall.op     = __HYPERVISOR_grant_table_op;
    hypercall.arg[0] = cmd;
    hypercall.arg[1] = HYPERCALL_BUFFER_AS_ARG(op);
    hypercall.arg[2] = count;

    ret = do_xen_hypercall(xch, &hypercall);

    xc_hypercall_bounce_post(xch, op);

 out1:
    return ret;
}
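/*
 * Hedged usage sketch (not from the original source): querying a domain's
 * grant table size with xc_gnttab_op() above.  GNTTABOP_query_size,
 * struct gnttab_query_size and GNTST_okay come from xen/grant_table.h;
 * a single op structure is passed, so count is 1.
 */
static int example_gnttab_query_size(xc_interface *xch, domid_t dom,
                                     uint32_t *nr, uint32_t *max)
{
    struct gnttab_query_size query;

    memset(&query, 0, sizeof(query));
    query.dom = dom;

    if ( xc_gnttab_op(xch, GNTTABOP_query_size, &query, sizeof(query), 1) ||
         query.status != GNTST_okay )
        return -1;

    *nr  = query.nr_frames;
    *max = query.max_nr_frames;
    return 0;
}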
int xc_acm_op(xc_interface *xch, int cmd, void *arg, unsigned long arg_size)
{
    int ret;
    DECLARE_HYPERCALL;
    DECLARE_HYPERCALL_BUFFER(struct xen_acmctl, acmctl);

    acmctl = xc_hypercall_buffer_alloc(xch, acmctl, sizeof(*acmctl));
    if ( acmctl == NULL )
    {
        PERROR("Could not allocate memory for ACM OP hypercall");
        return -EFAULT;
    }

    /* Copy the command-specific payload into the hypercall-safe buffer. */
    switch (cmd)
    {
        case ACMOP_setpolicy: {
            struct acm_setpolicy *setpolicy = (struct acm_setpolicy *)arg;
            memcpy(&acmctl->u.setpolicy,
                   setpolicy,
                   sizeof(struct acm_setpolicy));
        }
        break;

        case ACMOP_getpolicy: {
            struct acm_getpolicy *getpolicy = (struct acm_getpolicy *)arg;
            memcpy(&acmctl->u.getpolicy,
                   getpolicy,
                   sizeof(struct acm_getpolicy));
        }
        break;

        case ACMOP_dumpstats: {
            struct acm_dumpstats *dumpstats = (struct acm_dumpstats *)arg;
            memcpy(&acmctl->u.dumpstats,
                   dumpstats,
                   sizeof(struct acm_dumpstats));
        }
        break;

        case ACMOP_getssid: {
            struct acm_getssid *getssid = (struct acm_getssid *)arg;
            memcpy(&acmctl->u.getssid,
                   getssid,
                   sizeof(struct acm_getssid));
        }
        break;

        case ACMOP_getdecision: {
            struct acm_getdecision *getdecision = (struct acm_getdecision *)arg;
            memcpy(&acmctl->u.getdecision,
                   getdecision,
                   sizeof(struct acm_getdecision));
        }
        break;

        case ACMOP_chgpolicy: {
            struct acm_change_policy *change_policy = (struct acm_change_policy *)arg;
            memcpy(&acmctl->u.change_policy,
                   change_policy,
                   sizeof(struct acm_change_policy));
        }
        break;

        case ACMOP_relabeldoms: {
            struct acm_relabel_doms *relabel_doms = (struct acm_relabel_doms *)arg;
            memcpy(&acmctl->u.relabel_doms,
                   relabel_doms,
                   sizeof(struct acm_relabel_doms));
        }
        break;
    }

    acmctl->cmd = cmd;
    acmctl->interface_version = ACM_INTERFACE_VERSION;

    hypercall.op = __HYPERVISOR_xsm_op;
    hypercall.arg[0] = HYPERCALL_BUFFER_AS_ARG(acmctl);

    if ( (ret = do_xen_hypercall(xch, &hypercall)) < 0 )
    {
        if ( errno == EACCES )
            DPRINTF("acmctl operation failed -- need to"
                    " rebuild the user-space tool set?\n");
    }

    /* Copy results back out for commands that return data. */
    switch (cmd)
    {
        case ACMOP_getdecision: {
            struct acm_getdecision *getdecision = (struct acm_getdecision *)arg;
            memcpy(getdecision,
                   &acmctl->u.getdecision,
                   sizeof(struct acm_getdecision));
            break;
        }
    }

    xc_hypercall_buffer_free(xch, acmctl);

    return ret;
}
int xc_memory_op(int xc_handle,
                 int cmd,
                 void *arg)
{
    DECLARE_HYPERCALL;
    struct xen_memory_reservation *reservation = arg;
    struct xen_machphys_mfn_list *xmml = arg;
    xen_pfn_t *extent_start;
    long ret = -EINVAL;

    hypercall.op     = __HYPERVISOR_memory_op;
    hypercall.arg[0] = (unsigned long)cmd;
    hypercall.arg[1] = (unsigned long)arg;

    /* Lock the argument (and any embedded extent array) per command. */
    switch ( cmd )
    {
    case XENMEM_increase_reservation:
    case XENMEM_decrease_reservation:
    case XENMEM_populate_physmap:
        if ( lock_pages(reservation, sizeof(*reservation)) != 0 )
        {
            PERROR("Could not lock");
            goto out1;
        }
        get_xen_guest_handle(extent_start, reservation->extent_start);
        if ( (extent_start != NULL) &&
             (lock_pages(extent_start,
                         reservation->nr_extents * sizeof(xen_pfn_t)) != 0) )
        {
            PERROR("Could not lock");
            unlock_pages(reservation, sizeof(*reservation));
            goto out1;
        }
        break;
    case XENMEM_machphys_mfn_list:
        if ( lock_pages(xmml, sizeof(*xmml)) != 0 )
        {
            PERROR("Could not lock");
            goto out1;
        }
        get_xen_guest_handle(extent_start, xmml->extent_start);
        if ( lock_pages(extent_start,
                        xmml->max_extents * sizeof(xen_pfn_t)) != 0 )
        {
            PERROR("Could not lock");
            unlock_pages(xmml, sizeof(*xmml));
            goto out1;
        }
        break;
    case XENMEM_add_to_physmap:
        if ( lock_pages(arg, sizeof(struct xen_add_to_physmap)) )
        {
            PERROR("Could not lock");
            goto out1;
        }
        break;
    case XENMEM_current_reservation:
    case XENMEM_maximum_reservation:
    case XENMEM_maximum_gpfn:
        if ( lock_pages(arg, sizeof(domid_t)) )
        {
            PERROR("Could not lock");
            goto out1;
        }
        break;
    }

    ret = do_xen_hypercall(xc_handle, &hypercall);

    /* Undo the per-command locking done above. */
    switch ( cmd )
    {
    case XENMEM_increase_reservation:
    case XENMEM_decrease_reservation:
    case XENMEM_populate_physmap:
        unlock_pages(reservation, sizeof(*reservation));
        get_xen_guest_handle(extent_start, reservation->extent_start);
        if ( extent_start != NULL )
            unlock_pages(extent_start,
                         reservation->nr_extents * sizeof(xen_pfn_t));
        break;
    case XENMEM_machphys_mfn_list:
        unlock_pages(xmml, sizeof(*xmml));
        get_xen_guest_handle(extent_start, xmml->extent_start);
        unlock_pages(extent_start, xmml->max_extents * sizeof(xen_pfn_t));
        break;
    case XENMEM_add_to_physmap:
        unlock_pages(arg, sizeof(struct xen_add_to_physmap));
        break;
    case XENMEM_current_reservation:
    case XENMEM_maximum_reservation:
    case XENMEM_maximum_gpfn:
        unlock_pages(arg, sizeof(domid_t));
        break;
    }

 out1:
    return ret;
}