/*
 * Extract the raw pointer stored in a xen_guest_handle embedded at
 * `field' inside a hypercall argument structure.  The anonymous
 * struct overlay lets us treat any handle as a plain pointer slot.
 */
static void *
uaddr_from_handle(void *field)
{
	struct { void *p; } *handle = field;
	void *uaddr;

	/*LINTED: constant in conditional context*/
	get_xen_guest_handle(uaddr, (*handle));
	return (uaddr);
}
/*
 * Marshal a HYPERVISOR_memory_op hypercall issued through the privcmd
 * driver: import the user-supplied argument (and any guest handles it
 * embeds) into kernel buffers, issue the real hypercall, then export
 * the results back out.  Returns the hypercall result, or a negative
 * Xen error code (-X_EFAULT / -X_EINVAL) on marshalling failure.
 */
static int
privcmd_HYPERVISOR_memory_op(int cmd, void *arg)
{
	int error = 0;
	/* Import/export state for the top-level arg and embedded handles. */
	import_export_t op_ie, sub_ie, gpfn_ie, mfn_ie;
	/* Per-command views of the single argument copied from userland. */
	union {
		domid_t domid;
		struct xen_memory_reservation resv;
		struct xen_machphys_mfn_list xmml;
		struct xen_add_to_physmap xatp;
		struct xen_memory_map mm;
		struct xen_foreign_memory_map fmm;
	} op_arg;

	op_ie = sub_ie = gpfn_ie = mfn_ie = null_ie;

	switch (cmd) {
	case XENMEM_increase_reservation:
	case XENMEM_decrease_reservation:
	case XENMEM_populate_physmap: {
		ulong_t *taddr;

		if (import_buffer(&op_ie, arg, &op_arg, sizeof (op_arg.resv),
		    IE_IMPEXP) != 0)
			return (-X_EFAULT);

		/* Also import the extent array the reservation points at. */
		error = import_handle(&sub_ie, &op_arg.resv.extent_start,
		    (op_arg.resv.nr_extents * sizeof (ulong_t)), IE_IMPEXP);

		/*
		 * If the handle could not be imported, fall back to the raw
		 * guest address so the DTrace probes below still get a
		 * value; otherwise use the imported kernel copy.
		 */
		if (error == -X_EFAULT)
			/*LINTED: constant in conditional context*/
			get_xen_guest_handle(taddr, op_arg.resv.extent_start);
		else
			taddr = sub_ie.ie_kaddr;

		switch (cmd) {
		case XENMEM_increase_reservation:
			DTRACE_XPV4(increase__reservation__start,
			    domid_t, op_arg.resv.domid,
			    ulong_t, op_arg.resv.nr_extents,
			    uint_t, op_arg.resv.extent_order,
			    ulong_t *, taddr);
			break;
		case XENMEM_decrease_reservation:
			DTRACE_XPV4(decrease__reservation__start,
			    domid_t, op_arg.resv.domid,
			    ulong_t, op_arg.resv.nr_extents,
			    uint_t, op_arg.resv.extent_order,
			    ulong_t *, taddr);
			break;
		case XENMEM_populate_physmap:
			DTRACE_XPV3(populate__physmap__start,
			    domid_t, op_arg.resv.domid,
			    ulong_t, op_arg.resv.nr_extents,
			    ulong_t *, taddr);
			break;
		}
		break;
	}

	case XENMEM_maximum_ram_page:
		/* Takes no argument; nothing to import. */
		break;

	case XENMEM_current_reservation:
	case XENMEM_maximum_reservation:
	case XENMEM_maximum_gpfn:
		/* Argument is just a domid. */
		if (import_buffer(&op_ie, arg, &op_arg, sizeof (op_arg.domid),
		    IE_IMPEXP) != 0)
			return (-X_EFAULT);
		break;

	case XENMEM_machphys_mfn_list: {
		if (import_buffer(&op_ie, arg, &op_arg, sizeof (op_arg.xmml),
		    IE_IMPEXP) != 0)
			return (-X_EFAULT);
		error = import_handle(&sub_ie, &op_arg.xmml.extent_start,
		    (op_arg.xmml.max_extents * sizeof (ulong_t)), IE_IMPEXP);
		break;
	}

	case XENMEM_add_to_physmap:
		if (import_buffer(&op_ie, arg, &op_arg, sizeof (op_arg.xatp),
		    IE_IMPEXP) != 0)
			return (-X_EFAULT);
		DTRACE_XPV4(add__to__physmap__start,
		    domid_t, op_arg.xatp.domid,
		    uint_t, op_arg.xatp.space,
		    ulong_t, op_arg.xatp.idx,
		    ulong_t, op_arg.xatp.gpfn);
		break;

	case XENMEM_memory_map:
	case XENMEM_machine_memory_map: {
		/* Output-only: the hypervisor fills in the map. */
		if (import_buffer(&op_ie, arg, &op_arg, sizeof (op_arg.mm),
		    IE_EXPORT) != 0)
			return (-X_EFAULT);
		/*
		 * XXPV: ugh. e820entry is packed, but not in the kernel, since
		 * we remove all attributes; seems like this is a nice way to
		 * break mysteriously.  (The hard-coded 20 is the packed
		 * sizeof (struct e820entry).)
		 */
		error = import_handle(&sub_ie, &op_arg.mm.buffer,
		    (op_arg.mm.nr_entries * 20), IE_IMPEXP);
		break;
	}

	case XENMEM_set_memory_map: {
		struct xen_memory_map *taddr;

		/* Input-only: we hand a map to the hypervisor. */
		if (import_buffer(&op_ie, arg, &op_arg, sizeof (op_arg.fmm),
		    IE_IMPORT) != 0)
			return (-X_EFAULT);
		/*
		 * As above.
		 */
		error = import_handle(&sub_ie, &op_arg.fmm.map.buffer,
		    (op_arg.fmm.map.nr_entries * 20), IE_IMPEXP);

		/* Same fallback-for-tracing dance as the reservation cases. */
		if (error == -X_EFAULT)
			/*LINTED: constant in conditional context*/
			get_xen_guest_handle(taddr, op_arg.fmm.map.buffer);
		else
			taddr = sub_ie.ie_kaddr;

		DTRACE_XPV3(set__memory__map__start,
		    domid_t, op_arg.fmm.domid,
		    int, op_arg.fmm.map.nr_entries,
		    struct xen_memory_map *, taddr);
		break;
	}

	default:
#ifdef DEBUG
		printf("unrecognized HYPERVISOR_memory_op %d\n", cmd);
#endif
		return (-X_EINVAL);
	}

	/* Only issue the real hypercall if every import above succeeded. */
	if (error == 0)
		error = HYPERVISOR_memory_op(cmd,
		    (arg == NULL) ? NULL: &op_arg);

	/*
	 * Copy results back to the guest.  export_buffer takes &error, so
	 * an export failure presumably overrides the hypercall result —
	 * NOTE(review): confirm against export_buffer's definition.
	 */
	export_buffer(&op_ie, &error);
	export_buffer(&sub_ie, &error);
	export_buffer(&gpfn_ie, &error);
	export_buffer(&mfn_ie, &error);

	switch (cmd) {
	case XENMEM_increase_reservation:
		DTRACE_XPV1(increase__reservation__end, int, error);
		break;
	case XENMEM_decrease_reservation:
		DTRACE_XPV1(decrease__reservation__end, int, error);
		break;
	case XENMEM_populate_physmap:
		DTRACE_XPV1(populate__physmap__end, int, error);
		break;
	case XENMEM_add_to_physmap:
		DTRACE_XPV1(add__to__physmap__end, int, error);
		break;
	case XENMEM_set_memory_map:
		DTRACE_XPV1(set__memory__map__end, int, error);
		break;
	}

	return (error);
}
/*
 * User-space wrapper for HYPERVISOR_memory_op: pin (lock_pages) the
 * argument structure and any array its guest handle points at so the
 * hypervisor can safely access them, issue the hypercall, then unpin
 * everything.  Returns the hypercall result, or -EINVAL if a required
 * lock failed (details reported via PERROR).
 */
int xc_memory_op(int xc_handle, int cmd, void *arg)
{
    DECLARE_HYPERCALL;
    /* Per-command typed views of the single opaque argument. */
    struct xen_memory_reservation *reservation = arg;
    struct xen_machphys_mfn_list *xmml = arg;
    xen_pfn_t *extent_start;
    long ret = -EINVAL;

    hypercall.op = __HYPERVISOR_memory_op;
    hypercall.arg[0] = (unsigned long)cmd;
    hypercall.arg[1] = (unsigned long)arg;

    switch ( cmd )
    {
    case XENMEM_increase_reservation:
    case XENMEM_decrease_reservation:
    case XENMEM_populate_physmap:
        /* Pin the reservation struct itself... */
        if ( lock_pages(reservation, sizeof(*reservation)) != 0 )
        {
            PERROR("Could not lock");
            goto out1;
        }
        /* ...and, if present, the extent array its handle points at. */
        get_xen_guest_handle(extent_start, reservation->extent_start);
        if ( (extent_start != NULL) &&
             (lock_pages(extent_start,
                         reservation->nr_extents * sizeof(xen_pfn_t)) != 0) )
        {
            PERROR("Could not lock");
            unlock_pages(reservation, sizeof(*reservation));
            goto out1;
        }
        break;
    case XENMEM_machphys_mfn_list:
        if ( lock_pages(xmml, sizeof(*xmml)) != 0 )
        {
            PERROR("Could not lock");
            goto out1;
        }
        get_xen_guest_handle(extent_start, xmml->extent_start);
        if ( lock_pages(extent_start,
                        xmml->max_extents * sizeof(xen_pfn_t)) != 0 )
        {
            PERROR("Could not lock");
            unlock_pages(xmml, sizeof(*xmml));
            goto out1;
        }
        break;
    case XENMEM_add_to_physmap:
        if ( lock_pages(arg, sizeof(struct xen_add_to_physmap)) )
        {
            PERROR("Could not lock");
            goto out1;
        }
        break;
    case XENMEM_current_reservation:
    case XENMEM_maximum_reservation:
    case XENMEM_maximum_gpfn:
        /* Argument is just a domid. */
        if ( lock_pages(arg, sizeof(domid_t)) )
        {
            PERROR("Could not lock");
            goto out1;
        }
        break;
    }

    ret = do_xen_hypercall(xc_handle, &hypercall);

    /* Unpin everything locked above; cases mirror the pre-call switch. */
    switch ( cmd )
    {
    case XENMEM_increase_reservation:
    case XENMEM_decrease_reservation:
    case XENMEM_populate_physmap:
        unlock_pages(reservation, sizeof(*reservation));
        get_xen_guest_handle(extent_start, reservation->extent_start);
        if ( extent_start != NULL )
            unlock_pages(extent_start,
                         reservation->nr_extents * sizeof(xen_pfn_t));
        break;
    case XENMEM_machphys_mfn_list:
        unlock_pages(xmml, sizeof(*xmml));
        get_xen_guest_handle(extent_start, xmml->extent_start);
        unlock_pages(extent_start,
                     xmml->max_extents * sizeof(xen_pfn_t));
        break;
    case XENMEM_add_to_physmap:
        unlock_pages(arg, sizeof(struct xen_add_to_physmap));
        break;
    case XENMEM_current_reservation:
    case XENMEM_maximum_reservation:
    case XENMEM_maximum_gpfn:
        unlock_pages(arg, sizeof(domid_t));
        break;
    }

 out1:
    return ret;
}
static int fake_xen_sysctl(int handle, struct xen_sysctl *sysctl) { #define SYSCTLcmd "sysctl" switch (sysctl->cmd) { case XEN_SYSCTL_getdomaininfolist: { xc_domaininfo_t *info; int num, i; get_xen_guest_handle(info, sysctl->u.getdomaininfolist.buffer); marshall_command(handle, "%s,%d,%d,%d\n", SYSCTLcmd, sysctl->cmd, sysctl->u.getdomaininfolist.first_domain, sysctl->u.getdomaininfolist.max_domains); num = unmarshall_int(handle); for (i = 0; i < num; i++) { int uuid[16], j, flags; char **ret; ret = unmarshall_multiple(handle); if (!ret) return -EBADF; /* domid,uuid,flags */ info->domain = atoi(ret[0]); parse_uuid(ret[1], uuid); for (j = 0; j < 16; j++) info->handle[j] = uuid[j] & 0xff; flags = atoi(ret[2]); info->flags = 0; if (flags & 0x1) info->flags |= XEN_DOMINF_dying; if (flags & 0x2) info->flags |= XEN_DOMINF_shutdown; if (flags & 0x4) info->flags |= XEN_DOMINF_paused; if (flags & 0x8) info->flags |= XEN_DOMINF_blocked; if (flags & 0x10) info->flags |= XEN_DOMINF_running; if (flags & 0x20) info->flags |= XEN_DOMINF_hvm_guest; info->flags |= ((flags >> 8) & 0xff) << XEN_DOMINF_shutdownshift; info->nr_online_vcpus = atoi(ret[3]); info->max_vcpu_id = atoi(ret[4]); info->tot_pages = atoi(ret[5]); info->max_pages = atoi(ret[6]); info->shared_info_frame = atoi(ret[7]); info->cpu_time = atoi(ret[8]); info->ssidref = atoi(ret[9]); string_split_free(ret); info++; } sysctl->u.getdomaininfolist.num_domains = num; return unmarshall_return(handle); } case XEN_SYSCTL_readconsole: case XEN_SYSCTL_debug_keys: return 0; case XEN_SYSCTL_physinfo: { char **ret; int sockets_per_node; marshall_command(handle, "%s,%d\n", SYSCTLcmd, sysctl->cmd); ret = unmarshall_multiple(handle); if (!ret) return -EBADF; sockets_per_node = atoi(ret[2]); sysctl->u.physinfo.threads_per_core = atoi(ret[0]); sysctl->u.physinfo.cores_per_socket = atoi(ret[1]); #if XEN_SYSCTL_INTERFACE_VERSION < 6 sysctl->u.physinfo.sockets_per_node = sockets_per_node; #endif sysctl->u.physinfo.nr_nodes = atoi(ret[3]); 
#if XEN_SYSCTL_INTERFACE_VERSION >= 6 sysctl->u.physinfo.nr_cpus = sysctl->u.physinfo.threads_per_core * sysctl->u.physinfo.cores_per_socket * sockets_per_node * sysctl->u.physinfo.nr_nodes; #endif sysctl->u.physinfo.cpu_khz = atoi(ret[4]); sysctl->u.physinfo.total_pages = atoi(ret[5]); sysctl->u.physinfo.free_pages = atoi(ret[6]); sysctl->u.physinfo.scrub_pages = 0; string_split_free(ret); return unmarshall_return(handle); } case XEN_SYSCTL_getcpuinfo: { uint64_t *info; int num, i; get_xen_guest_handle(info, sysctl->u.getcpuinfo.info); marshall_command(handle, "%s,%d,%d\n", SYSCTLcmd, sysctl->cmd, sysctl->u.getcpuinfo.max_cpus); num = unmarshall_int(handle); for (i = 0; i < num; i++) { info[i] = unmarshall_int64(handle); } return unmarshall_return(handle); } case XEN_SYSCTL_sched_id: return 0; default: return -EINVAL; } return 0; }