/*
 * Translate the guest-virtual pointers embedded in a grant table op
 * into xencomm descriptors built in the caller's pre-allocated
 * xencomm_mini areas, consuming one area per descriptor.
 */
static int
xencommize_grant_table_op(struct xencomm_mini **xc_area,
			  unsigned int cmd, void *op, unsigned int count,
			  struct xencomm_handle **desc)
{
	struct xencomm_handle *desc1;
	unsigned int argsize;

	switch (cmd) {
	case GNTTABOP_map_grant_ref:
		argsize = sizeof(struct gnttab_map_grant_ref);
		break;
	case GNTTABOP_unmap_grant_ref:
		argsize = sizeof(struct gnttab_unmap_grant_ref);
		break;
	case GNTTABOP_setup_table:
	{
		struct gnttab_setup_table *setup = op;

		argsize = sizeof(*setup);

		if (count != 1)
			return -EINVAL;
		desc1 = __xencomm_map_no_alloc
			(xen_guest_handle(setup->frame_list),
			 setup->nr_frames *
			 sizeof(*xen_guest_handle(setup->frame_list)),
			 *xc_area);
		if (desc1 == NULL)
			return -EINVAL;
		(*xc_area)++;
		set_xen_guest_handle(setup->frame_list, (void *)desc1);
		break;
	}
	case GNTTABOP_dump_table:
		argsize = sizeof(struct gnttab_dump_table);
		break;
	case GNTTABOP_transfer:
		argsize = sizeof(struct gnttab_transfer);
		break;
	case GNTTABOP_copy:
		argsize = sizeof(struct gnttab_copy);
		break;
	case GNTTABOP_query_size:
		argsize = sizeof(struct gnttab_query_size);
		break;
	default:
		printk(KERN_DEBUG "%s: unknown hypercall grant table op %d\n",
		       __func__, cmd);
		BUG();
	}

	*desc = __xencomm_map_no_alloc(op, count * argsize, *xc_area);
	if (*desc == NULL)
		return -EINVAL;
	(*xc_area)++;

	return 0;
}
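/*
 * A minimal sketch (not part of this section) of how a hypercall
 * wrapper might drive xencommize_grant_table_op(): the on-stack
 * xencomm_mini areas are consumed by the helper, and the resulting
 * descriptor is handed to the architecture hypercall.  The wrapper
 * name and the xencomm_arch_hypercall_grant_table_op() signature are
 * assumptions based on the surrounding code, not definitions taken
 * from this section.
 */
int
xencomm_hypercall_grant_table_op(unsigned int cmd, void *op,
				 unsigned int count)
{
	int rc;
	struct xencomm_handle *desc;
	XENCOMM_MINI_ALIGNED(xc_area, 2);	/* op + one nested buffer */

	rc = xencommize_grant_table_op(&xc_area, cmd, op, count, &desc);
	if (rc)
		return rc;

	return xencomm_arch_hypercall_grant_table_op(cmd, desc, count);
}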
int
xencomm_hypercall_memory_op(unsigned int cmd, void *arg)
{
	GUEST_HANDLE(xen_pfn_t) extent_start_va[2] = { {NULL}, {NULL} };
	struct xen_memory_reservation *xmr = NULL;
	int rc;
	struct xencomm_handle *desc;
	unsigned int argsize;
	XENCOMM_MINI_ALIGNED(xc_area, 2);

	switch (cmd) {
	case XENMEM_increase_reservation:
	case XENMEM_decrease_reservation:
	case XENMEM_populate_physmap:
		xmr = (struct xen_memory_reservation *)arg;
		/* Save the guest-virtual extent_start; it is replaced
		 * by a xencomm descriptor below and must be restored
		 * for the caller after the hypercall. */
		set_xen_guest_handle(extent_start_va[0],
				     xen_guest_handle(xmr->extent_start));

		argsize = sizeof(*xmr);
		rc = xencommize_memory_reservation(xc_area, xmr);
		if (rc)
			return rc;
		xc_area++;
		break;

	case XENMEM_maximum_ram_page:
		argsize = 0;
		break;

	case XENMEM_add_to_physmap:
		argsize = sizeof(struct xen_add_to_physmap);
		break;

	default:
		printk(KERN_DEBUG "%s: unknown memory op %d\n", __func__, cmd);
		return -ENOSYS;
	}

	desc = xencomm_map_no_alloc(arg, argsize);
	if (desc == NULL)
		return -EINVAL;

	rc = xencomm_arch_hypercall_memory_op(cmd, desc);

	switch (cmd) {
	case XENMEM_increase_reservation:
	case XENMEM_decrease_reservation:
	case XENMEM_populate_physmap:
		/* Restore the original guest handle. */
		set_xen_guest_handle(xmr->extent_start,
				     xen_guest_handle(extent_start_va[0]));
		break;
	}

	return rc;
}
/*
 * Replace the guest-virtual extent_start pointer in a memory
 * reservation with a xencomm descriptor built in the caller-supplied
 * xencomm_mini area.
 */
static int
xencommize_memory_reservation(struct xencomm_mini *xc_area,
			      struct xen_memory_reservation *mop)
{
	struct xencomm_handle *desc;

	desc = __xencomm_map_no_alloc(xen_guest_handle(mop->extent_start),
				      mop->nr_extents *
				      sizeof(*xen_guest_handle(mop->extent_start)),
				      xc_area);
	if (desc == NULL)
		return -EINVAL;

	set_xen_guest_handle(mop->extent_start, (void *)desc);
	return 0;
}
int
xencomm_hypercall_sched_op(int cmd, void *arg)
{
	struct xencomm_handle *desc;
	unsigned int argsize;

	switch (cmd) {
	case SCHEDOP_yield:
	case SCHEDOP_block:
		argsize = 0;
		break;
	case SCHEDOP_shutdown:
		argsize = sizeof(struct sched_shutdown);
		break;
	case SCHEDOP_poll:
	{
		struct sched_poll *poll = arg;
		struct xencomm_handle *ports;

		argsize = sizeof(struct sched_poll);
		ports = xencomm_map_no_alloc(xen_guest_handle(poll->ports),
				sizeof(*xen_guest_handle(poll->ports)));

		set_xen_guest_handle(poll->ports, (void *)ports);
		break;
	}
	default:
		printk(KERN_DEBUG "%s: unknown sched op %d\n", __func__, cmd);
		return -ENOSYS;
	}

	desc = xencomm_map_no_alloc(arg, argsize);
	if (desc == NULL)
		return -EINVAL;

	return xencomm_arch_hypercall_sched_op(cmd, desc);
}
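/*
 * Example (hypothetical caller; assumes the sched_shutdown layout from
 * the public Xen sched.h interface): issuing a shutdown request
 * through xencomm_hypercall_sched_op().  The function name here is
 * illustrative only; it shows the intended calling convention.
 */
static int example_sched_shutdown(unsigned int reason)
{
	struct sched_shutdown arg = {
		.reason = reason,	/* e.g. SHUTDOWN_poweroff */
	};

	return xencomm_hypercall_sched_op(SCHEDOP_shutdown, &arg);
}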
static int
xencomm_privcmd_acm_op(privcmd_hypercall_t *hypercall)
{
	int cmd = hypercall->arg[0];
	void __user *arg = (void __user *)hypercall->arg[1];
	struct xencomm_handle *op_desc;
	struct xencomm_handle *desc = NULL;
	int ret;

	switch (cmd) {
	case ACMOP_getssid:
	{
		struct acm_getssid kern_arg;

		if (copy_from_user(&kern_arg, arg, sizeof(kern_arg)))
			return -EFAULT;

		op_desc = xencomm_create_inline(&kern_arg);

		ret = xencomm_create(xen_guest_handle(kern_arg.ssidbuf),
				     kern_arg.ssidbuf_size, &desc, GFP_KERNEL);
		if (ret)
			return ret;

		set_xen_guest_handle(kern_arg.ssidbuf, (void *)desc);

		ret = xencomm_arch_hypercall_acm_op(cmd, op_desc);

		xencomm_free(desc);

		if (copy_to_user(arg, &kern_arg, sizeof(kern_arg)))
			return -EFAULT;

		return ret;
	}
	default:
		printk("%s: unknown acm_op cmd %d\n", __func__, cmd);
		return -ENOSYS;
	}
}
int
HYPERVISOR_grant_table_op(unsigned int cmd, void *op, unsigned int count)
{
	void *desc;
	void *frame_list = NULL;
	int argsize;
	int ret = -ENOMEM;

	switch (cmd) {
	case GNTTABOP_map_grant_ref:
		argsize = sizeof(struct gnttab_map_grant_ref);
		break;
	case GNTTABOP_unmap_grant_ref:
		gnttab_pre_unmap_grant_ref(op, count);
		argsize = sizeof(struct gnttab_unmap_grant_ref);
		break;
	case GNTTABOP_setup_table:
	{
		struct gnttab_setup_table setup;

		memcpy(&setup, op, sizeof(setup));
		argsize = sizeof(setup);

		frame_list = xencomm_map(
			xen_guest_handle(setup.frame_list),
			(sizeof(*xen_guest_handle(setup.frame_list))
			 * setup.nr_frames));
		if (frame_list == NULL)
			return -ENOMEM;

		set_xen_guest_handle(setup.frame_list, frame_list);
		memcpy(op, &setup, sizeof(setup));
	}
		break;
	case GNTTABOP_dump_table:
		argsize = sizeof(struct gnttab_dump_table);
		break;
	case GNTTABOP_transfer:
		BUG();
		argsize = sizeof(struct gnttab_transfer);
		break;
	case GNTTABOP_copy:
		argsize = sizeof(struct gnttab_copy);
		break;
	case GNTTABOP_query_size:
		argsize = sizeof(struct gnttab_query_size);
		break;
	default:
		printk(KERN_EMERG "%s: unknown grant table op %d\n",
		       __func__, cmd);
		return -ENOSYS;
	}

	desc = xencomm_map_no_alloc(op, argsize);
	if (desc) {
		ret = plpar_hcall_norets(XEN_MARK(__HYPERVISOR_grant_table_op),
					 cmd, desc, count);
		if (!ret && cmd == GNTTABOP_map_grant_ref)
			gnttab_post_map_grant_ref(op, count);
		xencomm_free(desc);
	}
	xencomm_free(frame_list);

	return ret;
}
static int
xencomm_privcmd_memory_op(privcmd_hypercall_t *hypercall)
{
	const unsigned long cmd = hypercall->arg[0];
	int ret = 0;

	switch (cmd) {
	case XENMEM_increase_reservation:
	case XENMEM_decrease_reservation:
	case XENMEM_populate_physmap:
		return xencomm_privcmd_memory_reservation_op(hypercall);
	case XENMEM_translate_gpfn_list:
	{
		xen_translate_gpfn_list_t kern_op;
		xen_translate_gpfn_list_t __user *user_op;
		struct xencomm_handle *desc_gpfn = NULL;
		struct xencomm_handle *desc_mfn = NULL;
		struct xencomm_handle *desc_op;
		void *addr;

		user_op = (xen_translate_gpfn_list_t __user *)
			hypercall->arg[1];
		if (copy_from_user(&kern_op, user_op,
				   sizeof(xen_translate_gpfn_list_t)))
			return -EFAULT;
		desc_op = xencomm_create_inline(&kern_op);

		if (kern_op.nr_gpfns) {
			/* gpfn_list. */
			addr = xen_guest_handle(kern_op.gpfn_list);
			ret = xencomm_create(addr, kern_op.nr_gpfns *
					     sizeof(*xen_guest_handle
						    (kern_op.gpfn_list)),
					     &desc_gpfn, GFP_KERNEL);
			if (ret)
				return ret;
			set_xen_guest_handle(kern_op.gpfn_list,
					     (void *)desc_gpfn);

			/* mfn_list. */
			addr = xen_guest_handle(kern_op.mfn_list);
			ret = xencomm_create(addr, kern_op.nr_gpfns *
					     sizeof(*xen_guest_handle
						    (kern_op.mfn_list)),
					     &desc_mfn, GFP_KERNEL);
			if (ret) {
				xencomm_free(desc_gpfn);
				return ret;
			}
			set_xen_guest_handle(kern_op.mfn_list,
					     (void *)desc_mfn);
		}

		ret = xencomm_arch_hypercall_memory_op(cmd, desc_op);

		if (desc_gpfn)
			xencomm_free(desc_gpfn);
		if (desc_mfn)
			xencomm_free(desc_mfn);
		return ret;
	}
	default:
		printk("%s: unknown memory op %lu\n", __func__, cmd);
		ret = -ENOSYS;
	}
	return ret;
}
static int
xencomm_privcmd_memory_reservation_op(privcmd_hypercall_t *hypercall)
{
	const unsigned long cmd = hypercall->arg[0];
	int ret = 0;
	xen_memory_reservation_t kern_op;
	xen_memory_reservation_t __user *user_op;
	struct xencomm_handle *desc = NULL;
	struct xencomm_handle *desc_op;

	user_op = (xen_memory_reservation_t __user *)hypercall->arg[1];
	if (copy_from_user(&kern_op, user_op,
			   sizeof(xen_memory_reservation_t)))
		return -EFAULT;
	desc_op = xencomm_create_inline(&kern_op);

	if (!xen_guest_handle(kern_op.extent_start)) {
		ret = xencomm_arch_hypercall_memory_op(cmd, desc_op);
		if (ret < 0)
			return ret;
	} else {
		xen_ulong_t nr_done = 0;
		xen_ulong_t nr_extents = kern_op.nr_extents;
		void *addr = xen_guest_handle(kern_op.extent_start);

		/*
		 * Work around.
		 * Xencomm has a single-page size limit caused by
		 * xencomm_alloc()/xencomm_free(), so we have to repeat
		 * the hypercall.  This limitation can be removed.
		 */
#define MEMORYOP_XENCOMM_LIMIT						\
		(((((PAGE_SIZE - sizeof(struct xencomm_desc)) /		\
		    sizeof(uint64_t)) - 2) * PAGE_SIZE) /		\
		 sizeof(*xen_guest_handle(kern_op.extent_start)))

		/*
		 * Work around.
		 * Even if the above limitation is removed, a hypercall
		 * with a large number of extents may trigger the soft
		 * lockup warning.  To avoid the warning, we limit the
		 * number of extents and repeat the hypercall.
		 * The following value was determined by evaluation:
		 * the time for one hypercall should be smaller than a
		 * vcpu time slice, and with the current
		 * MEMORYOP_MAX_EXTENTS it is around 5 msec.  If this
		 * limit causes issues, we should decrease the value.
		 *
		 * An alternative would be to start with a small value
		 * and increase it adaptively while measuring hypercall
		 * time.  That is probably overkill.
		 */
#define MEMORYOP_MAX_EXTENTS	(MEMORYOP_XENCOMM_LIMIT / 512)

		while (nr_extents > 0) {
			xen_ulong_t nr_tmp = nr_extents;

			if (nr_tmp > MEMORYOP_MAX_EXTENTS)
				nr_tmp = MEMORYOP_MAX_EXTENTS;

			kern_op.nr_extents = nr_tmp;
			ret = xencomm_create
				(addr + nr_done * sizeof(*xen_guest_handle(kern_op.extent_start)),
				 nr_tmp * sizeof(*xen_guest_handle(kern_op.extent_start)),
				 &desc, GFP_KERNEL);
			if (addr != NULL && nr_tmp > 0 && desc == NULL)
				return nr_done > 0 ? nr_done : -ENOMEM;

			set_xen_guest_handle(kern_op.extent_start,
					     (void *)desc);

			ret = xencomm_arch_hypercall_memory_op(cmd, desc_op);
			xencomm_free(desc);
			if (ret < 0)
				return nr_done > 0 ? nr_done : ret;

			nr_done += ret;
			nr_extents -= ret;
			if (ret < nr_tmp)
				break;

			/*
			 * Prevent the soft lockup message: give the cpu
			 * to the soft lockup kernel thread.
			 */
			if (nr_extents > 0)
				schedule();
		}
		ret = nr_done;
		set_xen_guest_handle(kern_op.extent_start, addr);
	}

	if (copy_to_user(user_op, &kern_op, sizeof(xen_memory_reservation_t)))
		return -EFAULT;

	return ret;
}
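/*
 * Worked example of the MEMORYOP_* limits in
 * xencomm_privcmd_memory_reservation_op() above.  The numbers assume a
 * 16 KiB ia64 page size, a 16-byte struct xencomm_desc header and
 * 8-byte extent entries; the exact values depend on the configuration:
 *
 *   addresses per xencomm page: (16384 - 16) / 8 - 2 = 2044
 *   MEMORYOP_XENCOMM_LIMIT:     2044 * 16384 / 8     = 4,186,112 extents
 *   MEMORYOP_MAX_EXTENTS:       4,186,112 / 512      = 8,176 extents
 *
 * so each iteration of the chunking loop handles at most ~8K extents,
 * keeping a single hypercall well under a vcpu time slice.
 */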
static int
xencomm_privcmd_domctl(privcmd_hypercall_t *hypercall)
{
	xen_domctl_t kern_op;
	xen_domctl_t __user *user_op;
	struct xencomm_handle *op_desc;
	struct xencomm_handle *desc = NULL;
	int ret = 0;

	user_op = (xen_domctl_t __user *)hypercall->arg[0];

	if (copy_from_user(&kern_op, user_op, sizeof(xen_domctl_t)))
		return -EFAULT;

	if (kern_op.interface_version != XEN_DOMCTL_INTERFACE_VERSION)
		return -EACCES;

	op_desc = xencomm_create_inline(&kern_op);

	switch (kern_op.cmd) {
	case XEN_DOMCTL_createdomain:
	case XEN_DOMCTL_destroydomain:
	case XEN_DOMCTL_pausedomain:
	case XEN_DOMCTL_unpausedomain:
	case XEN_DOMCTL_getdomaininfo:
		break;
	case XEN_DOMCTL_getmemlist:
	{
		unsigned long nr_pages = kern_op.u.getmemlist.max_pfns;

		ret = xencomm_create(
			xen_guest_handle(kern_op.u.getmemlist.buffer),
			nr_pages * sizeof(unsigned long),
			&desc, GFP_KERNEL);
		set_xen_guest_handle(kern_op.u.getmemlist.buffer,
				     (void *)desc);
		break;
	}
	case XEN_DOMCTL_getpageframeinfo:
		break;
	case XEN_DOMCTL_getpageframeinfo2:
		ret = xencomm_create(
			xen_guest_handle(kern_op.u.getpageframeinfo2.array),
			kern_op.u.getpageframeinfo2.num,
			&desc, GFP_KERNEL);
		set_xen_guest_handle(kern_op.u.getpageframeinfo2.array,
				     (void *)desc);
		break;
	case XEN_DOMCTL_shadow_op:
		ret = xencomm_create(
			xen_guest_handle(kern_op.u.shadow_op.dirty_bitmap),
			ROUND_DIV(kern_op.u.shadow_op.pages, 8),
			&desc, GFP_KERNEL);
		set_xen_guest_handle(kern_op.u.shadow_op.dirty_bitmap,
				     (void *)desc);
		break;
	case XEN_DOMCTL_max_mem:
		break;
	case XEN_DOMCTL_setvcpucontext:
	case XEN_DOMCTL_getvcpucontext:
		ret = xencomm_create(
			xen_guest_handle(kern_op.u.vcpucontext.ctxt),
			sizeof(vcpu_guest_context_t),
			&desc, GFP_KERNEL);
		set_xen_guest_handle(kern_op.u.vcpucontext.ctxt,
				     (void *)desc);
		break;
	case XEN_DOMCTL_getvcpuinfo:
		break;
	case XEN_DOMCTL_setvcpuaffinity:
	case XEN_DOMCTL_getvcpuaffinity:
		ret = xencomm_create(
			xen_guest_handle(kern_op.u.vcpuaffinity.cpumap.bitmap),
			ROUND_DIV(kern_op.u.vcpuaffinity.cpumap.nr_cpus, 8),
			&desc, GFP_KERNEL);
		set_xen_guest_handle(kern_op.u.vcpuaffinity.cpumap.bitmap,
				     (void *)desc);
		break;
	case XEN_DOMCTL_max_vcpus:
	case XEN_DOMCTL_scheduler_op:
	case XEN_DOMCTL_setdomainhandle:
	case XEN_DOMCTL_setdebugging:
	case XEN_DOMCTL_irq_permission:
	case XEN_DOMCTL_iomem_permission:
	case XEN_DOMCTL_ioport_permission:
	case XEN_DOMCTL_hypercall_init:
	case XEN_DOMCTL_arch_setup:
	case XEN_DOMCTL_settimeoffset:
	case XEN_DOMCTL_sendtrigger:
	case XEN_DOMCTL_set_address_size:
	case XEN_DOMCTL_get_address_size:
		break;
	default:
		printk("%s: unknown domctl cmd %d\n", __func__, kern_op.cmd);
		return -ENOSYS;
	}

	if (ret) {
		/* error mapping the nested pointer */
		return ret;
	}

	ret = xencomm_arch_hypercall_domctl(op_desc);

	if (kern_op.cmd == XEN_DOMCTL_destroydomain) {
		while (ret == -EAGAIN) {
			schedule();	/* prevent soft lockup message */
			ret = xencomm_arch_hypercall_domctl(op_desc);
		}
	}

	/* FIXME: should we restore the handle? */
	if (copy_to_user(user_op, &kern_op, sizeof(xen_domctl_t)))
		ret = -EFAULT;

	if (desc)
		xencomm_free(desc);
	return ret;
}
static int
xencomm_privcmd_sysctl(privcmd_hypercall_t *hypercall)
{
	xen_sysctl_t kern_op;
	xen_sysctl_t __user *user_op;
	struct xencomm_handle *op_desc;
	struct xencomm_handle *desc = NULL;
	struct xencomm_handle *desc1 = NULL;
	int ret = 0;

	user_op = (xen_sysctl_t __user *)hypercall->arg[0];

	if (copy_from_user(&kern_op, user_op, sizeof(xen_sysctl_t)))
		return -EFAULT;

	if (kern_op.interface_version != XEN_SYSCTL_INTERFACE_VERSION) {
		/*
		 * RHEL5 ABI compat: Allow through physinfo calls with
		 * newer versions for NUMA extensions
		 */
		if (kern_op.cmd == XEN_SYSCTL_physinfo &&
		    kern_op.interface_version ==
		    (XEN_SYSCTL_INTERFACE_VERSION + 1))
			printk(KERN_DEBUG "Allowing physinfo call with newer ABI version\n");
		else
			return -EACCES;
	}

	op_desc = xencomm_create_inline(&kern_op);

	switch (kern_op.cmd) {
	case XEN_SYSCTL_readconsole:
		ret = xencomm_create(
			xen_guest_handle(kern_op.u.readconsole.buffer),
			kern_op.u.readconsole.count,
			&desc, GFP_KERNEL);
		set_xen_guest_handle(kern_op.u.readconsole.buffer,
				     (void *)desc);
		break;
	case XEN_SYSCTL_tbuf_op:
	case XEN_SYSCTL_sched_id:
		break;
	case XEN_SYSCTL_perfc_op:
	{
		struct xencomm_handle *tmp_desc;
		xen_sysctl_t tmp_op = {
			.cmd = XEN_SYSCTL_perfc_op,
			.interface_version = XEN_SYSCTL_INTERFACE_VERSION,
			.u.perfc_op = {
				.cmd = XEN_SYSCTL_PERFCOP_query,
				/* .desc.p = NULL, */
				/* .val.p = NULL, */
			},
		};

		if (xen_guest_handle(kern_op.u.perfc_op.desc) == NULL) {
			if (xen_guest_handle(kern_op.u.perfc_op.val) != NULL)
				return -EINVAL;
			break;
		}

		/* query the buffer size for xencomm */
		tmp_desc = xencomm_create_inline(&tmp_op);
		ret = xencomm_arch_hypercall_sysctl(tmp_desc);
		if (ret)
			return ret;

		ret = xencomm_create(xen_guest_handle(kern_op.u.perfc_op.desc),
				     tmp_op.u.perfc_op.nr_counters *
				     sizeof(xen_sysctl_perfc_desc_t),
				     &desc, GFP_KERNEL);
		if (ret)
			return ret;
		set_xen_guest_handle(kern_op.u.perfc_op.desc, (void *)desc);

		ret = xencomm_create(xen_guest_handle(kern_op.u.perfc_op.val),
				     tmp_op.u.perfc_op.nr_vals *
				     sizeof(xen_sysctl_perfc_val_t),
				     &desc1, GFP_KERNEL);
		if (ret)
			xencomm_free(desc);
		set_xen_guest_handle(kern_op.u.perfc_op.val, (void *)desc1);
		break;
	}
	case XEN_SYSCTL_getdomaininfolist:
		ret = xencomm_create(
			xen_guest_handle(kern_op.u.getdomaininfolist.buffer),
			kern_op.u.getdomaininfolist.max_domains *
			sizeof(xen_domctl_getdomaininfo_t),
			&desc, GFP_KERNEL);
		set_xen_guest_handle(kern_op.u.getdomaininfolist.buffer,
				     (void *)desc);
		break;
	case XEN_SYSCTL_physinfo:
		ret = xencomm_create(
			xen_guest_handle(kern_op.u.physinfo.cpu_to_node),
			kern_op.u.physinfo.max_cpu_id * sizeof(uint32_t),
			&desc, GFP_KERNEL);
		if (ret)
			return ret;
		set_xen_guest_handle(kern_op.u.physinfo.cpu_to_node,
				     (void *)desc);
		break;
	default:
		printk("%s: unknown sysctl cmd %d\n", __func__, kern_op.cmd);
		return -ENOSYS;
	}

	if (ret) {
		/* error mapping the nested pointer */
		return ret;
	}

	ret = xencomm_arch_hypercall_sysctl(op_desc);

	/* FIXME: should we restore the handles? */
	if (copy_to_user(user_op, &kern_op, sizeof(xen_sysctl_t)))
		ret = -EFAULT;

	if (desc)
		xencomm_free(desc);
	if (desc1)
		xencomm_free(desc1);
	return ret;
}
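/*
 * A minimal sketch (assumed dispatcher, not from this section) of how
 * the xencomm_privcmd_* handlers above might be selected from a
 * privcmd ioctl: the hypercall number in privcmd_hypercall_t's op
 * field chooses the translation routine.  The function name and the
 * exact set of cases are illustrative only.
 */
static int example_privcmd_hypercall(privcmd_hypercall_t *hypercall)
{
	switch (hypercall->op) {
	case __HYPERVISOR_domctl:
		return xencomm_privcmd_domctl(hypercall);
	case __HYPERVISOR_sysctl:
		return xencomm_privcmd_sysctl(hypercall);
	case __HYPERVISOR_memory_op:
		return xencomm_privcmd_memory_op(hypercall);
	case __HYPERVISOR_acm_op:
		return xencomm_privcmd_acm_op(hypercall);
	default:
		return -ENOSYS;
	}
}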