#include <xen/config.h>
#include <xen/types.h>
#include <xen/lib.h>
#include <xen/sched.h>
#include <xen/domain.h>
#include <xen/guest_access.h>
#include <xen/acpi.h>
#include <public/platform.h>
#include <acpi/cpufreq/processor_perf.h>

DEFINE_SPINLOCK(xenpf_lock);

extern int set_px_pminfo(uint32_t cpu,
                         struct xen_processor_performance *perf);
extern long set_cx_pminfo(uint32_t cpu, struct xen_processor_power *power);

long do_platform_op(XEN_GUEST_HANDLE(xen_platform_op_t) u_xenpf_op)
{
    long ret = 0;
    struct xen_platform_op curop, *op = &curop;

    /* Platform ops are restricted to the privileged (control) domain. */
    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( copy_from_guest(op, u_xenpf_op, 1) )
        return -EFAULT;

    /* The caller must match the hypervisor's platform-op ABI version. */
    if ( op->interface_version != XENPF_INTERFACE_VERSION )
        return -EACCES;

    switch ( op->cmd )
    {
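/*
 * Illustrative sketch, not part of the original file: each XENPF_* case
 * in the switch above acts on the copied-in 'op', copies any results
 * back to the guest, and breaks out with 'ret' set.  XENPF_example_query
 * and its payload field are hypothetical names used only to show the
 * shape of a handler:
 *
 *     case XENPF_example_query:
 *         spin_lock(&xenpf_lock);
 *         op->u.example_query.value = 42;      // fill in the result
 *         spin_unlock(&xenpf_lock);
 *         if ( copy_to_guest(u_xenpf_op, op, 1) )
 *             ret = -EFAULT;
 *         break;
 */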
#ifdef CONFIG_COMPAT

#include <xen/event.h>
#include <xen/multicall.h>
#include <compat/memory.h>
#include <compat/xen.h>

int compat_set_gdt(XEN_GUEST_HANDLE(uint) frame_list, unsigned int entries)
{
    unsigned int i, nr_pages = (entries + 511) / 512;
    unsigned long frames[16];
    long ret;

    /* Rechecked in set_gdt, but ensures a sane limit for copy_from_user(). */
    if ( entries > FIRST_RESERVED_GDT_ENTRY )
        return -EINVAL;

    if ( !guest_handle_okay(frame_list, nr_pages) )
        return -EFAULT;

    for ( i = 0; i < nr_pages; ++i )
    {
        unsigned int frame;

        if ( __copy_from_guest(&frame, frame_list, 1) )
            return -EFAULT;

        /* Widen each 32-bit compat frame number to the native word size. */
        frames[i] = frame;
        guest_handle_add_offset(frame_list, 1);
    }

    LOCK_BIGLOCK(current->domain);
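/*
 * Hedged sketch of the truncated continuation (assumed, not confirmed by
 * this fragment): having widened the frame list, the compat wrapper would
 * hand it to the native handler and release the per-domain lock, along
 * these lines:
 *
 *     ret = set_gdt(current, frames, entries);
 *     UNLOCK_BIGLOCK(current->domain);
 *     return ret;
 */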
        /* See if populate-on-demand wants to handle this */
        if ( is_hvm_domain(a->domain)
             && p2m_pod_decrease_reservation(a->domain, gmfn, a->extent_order) )
            continue;

        for ( j = 0; j < (1 << a->extent_order); j++ )
            if ( !guest_remove_page(a->domain, gmfn + j) )
                goto out;
    }

 out:
    a->nr_done = i;
    mcd_mem_upt_trap(a->domain);
}

static long memory_exchange(XEN_GUEST_HANDLE(xen_memory_exchange_t) arg)
{
    struct xen_memory_exchange exch;
    PAGE_LIST_HEAD(in_chunk_list);
    PAGE_LIST_HEAD(out_chunk_list);
    unsigned long in_chunk_order, out_chunk_order;
    xen_pfn_t     gpfn, gmfn, mfn;
    unsigned long i, j, k;
    unsigned int  memflags = 0;
    long          rc = 0;
    struct domain *d;
    struct page_info *page;

    if ( copy_from_guest(&exch, arg, 1) )
        return -EFAULT;
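/*
 * Illustrative sketch, not from the original file: a guest reaches this
 * path via XENMEM_exchange with a struct xen_memory_exchange whose 'in'
 * and 'out' reservations describe the pages to trade, e.g. swapping
 * sixteen order-0 pages for one contiguous order-4 extent.  Field names
 * follow the public interface; the extent_start handle setup is elided.
 *
 *     struct xen_memory_exchange exch = {
 *         .in  = { .nr_extents = 16, .extent_order = 0,
 *                  .domid = DOMID_SELF },
 *         .out = { .nr_extents = 1,  .extent_order = 4,
 *                  .domid = DOMID_SELF },
 *     };
 *     rc = HYPERVISOR_memory_op(XENMEM_exchange, &exch);
 */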
#include <public/event_channel.h>
#include <asm/vmmu.h>
#include <asm/tlb.h>
#include <asm/regionreg.h>
#include <asm/page.h>
#include <xen/mm.h>
#include <xen/multicall.h>
#include <xen/hypercall.h>
#include <public/version.h>
#include <asm/dom_fw.h>
#include <xen/domain.h>
#include <asm/vmx.h>
#include <asm/viosapic.h>

static int hvmop_set_isa_irq_level(
    XEN_GUEST_HANDLE(xen_hvm_set_isa_irq_level_t) uop)
{
    struct xen_hvm_set_isa_irq_level op;
    struct domain *d;
    int rc;

    if ( copy_from_guest(&op, uop, 1) )
        return -EFAULT;

    /* Legacy ISA IRQs are limited to 0-15. */
    if ( op.isa_irq > 15 )
        return -EINVAL;

    rc = rcu_lock_target_domain_by_id(op.domid, &d);
    if ( rc != 0 )
        return rc;
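/*
 * Hedged sketch of the truncated remainder (assumed): once the target
 * domain is locked, the handler would verify it is an HVM domain and
 * forward the level change to the emulated interrupt controller (the
 * vIOSAPIC on ia64, matching the include above); viosapic_set_irq is an
 * assumed helper here:
 *
 *     rc = -EINVAL;
 *     if ( is_hvm_domain(d) )
 *     {
 *         viosapic_set_irq(d, op.isa_irq, op.level);
 *         rc = 0;
 *     }
 *     rcu_unlock_domain(d);
 *     return rc;
 */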
         !(act->pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) )
        gnttab_clear_flag(_GTF_writing, &sha->flags);

    if ( !act->pin )
        gnttab_clear_flag(_GTF_reading, &sha->flags);

 unlock_out:
    spin_unlock(&rd->grant_table->lock);
    op->status = rc;
    put_maptrack_handle(ld->grant_table, handle);
    rcu_unlock_domain(rd);
}

static long
gnttab_map_grant_ref(
    XEN_GUEST_HANDLE(gnttab_map_grant_ref_t) uop, unsigned int count)
{
    int i;
    struct gnttab_map_grant_ref op;

    for ( i = 0; i < count; i++ )
    {
        /* Copy one request in, process it, and copy the result back out. */
        if ( unlikely(__copy_from_guest_offset(&op, uop, i, 1)) )
            return -EFAULT;

        __gnttab_map_grant_ref(&op);

        if ( unlikely(__copy_to_guest_offset(uop, i, &op, 1)) )
            return -EFAULT;
    }

    return 0;
}
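/*
 * Illustrative guest-side usage, not part of this file: a domain that
 * has been granted access maps a foreign frame by filling the public
 * gnttab_map_grant_ref structure and issuing GNTTABOP_map_grant_ref.
 * The status and handle come back in place, which is why the loop above
 * copies each element back out to the guest.
 *
 *     struct gnttab_map_grant_ref map = {
 *         .host_addr = map_vaddr,        // guest-chosen mapping address
 *         .flags     = GNTMAP_host_map,  // map into host address space
 *         .ref       = gref,             // grant reference from the peer
 *         .dom       = peer_domid,       // granting domain
 *     };
 *     rc = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &map, 1);
 *     // on success: map.status == GNTST_okay, map.handle used for unmap
 */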
#include <xen/keyhandler.h>
#include <asm/current.h>
#include <xen/hypercall.h>
#include <public/sysctl.h>
#include <asm/numa.h>
#include <xen/nodemask.h>
#include <xsm/xsm.h>
#include <xen/pmstat.h>

extern long arch_do_sysctl(
    struct xen_sysctl *op, XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl);
#ifdef LOCK_PROFILE
extern int spinlock_profile_control(xen_sysctl_lockprof_op_t *pc);
#endif

long do_sysctl(XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl)
{
    long ret = 0;
    struct xen_sysctl curop, *op = &curop;
    static DEFINE_SPINLOCK(sysctl_lock);

    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( copy_from_guest(op, u_sysctl, 1) )
        return -EFAULT;

    if ( op->interface_version != XEN_SYSCTL_INTERFACE_VERSION )
        return -EACCES;

    /*
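/*
 * Hedged sketch, not the original continuation: sub-ops in the switch
 * that follows are serialized on the static sysctl_lock, so concurrent
 * toolstack requests cannot interleave.  XEN_SYSCTL_example and its
 * payload are hypothetical names showing the pattern:
 *
 *     case XEN_SYSCTL_example:
 *         spin_lock(&sysctl_lock);
 *         op->u.example.value = current_value;   // act under the lock
 *         spin_unlock(&sysctl_lock);
 *         if ( copy_to_guest(u_sysctl, op, 1) )
 *             ret = -EFAULT;
 *         break;
 */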
#include <asm/vmx.h>
#include <asm/dom_fw.h>
#include <asm/vhpt.h>
#include <xen/iocap.h>
#include <xen/errno.h>
#include <xen/nodemask.h>
#include <asm/dom_fw_utils.h>
#include <asm/hvm/support.h>
#include <xsm/xsm.h>
#include <public/hvm/save.h>

#define get_xen_guest_handle(val, hnd)  do { val = (hnd).p; } while (0)

extern unsigned long total_pages;

long arch_do_domctl(xen_domctl_t *op, XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
{
    long ret = 0;

    switch ( op->cmd )
    {
    case XEN_DOMCTL_getmemlist:
    {
        unsigned long i;
        struct domain *d = rcu_lock_domain_by_id(op->domain);
        unsigned long start_page = op->u.getmemlist.start_pfn;
        unsigned long nr_pages = op->u.getmemlist.max_pfns;
        uint64_t mfn;

        if ( d == NULL )
        {
            ret = -EINVAL;
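/*
 * Hedged sketch of how XEN_DOMCTL_getmemlist typically proceeds (the
 * fragment is truncated at the error path): walk the domain's pages from
 * start_page, copying one MFN per slot into the caller-supplied buffer,
 * and report how many were written.  Details are assumed, not taken from
 * this fragment:
 *
 *     for ( i = 0; i < nr_pages; i++ )
 *     {
 *         mfn = next_mfn_of(d, start_page + i);   // hypothetical lookup
 *         if ( copy_to_guest_offset(op->u.getmemlist.buffer, i, &mfn, 1) )
 *         {
 *             ret = -EFAULT;
 *             break;
 *         }
 *     }
 *     op->u.getmemlist.num_pfns = i;
 *     rcu_unlock_domain(d);
 */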
static int dummy_lockprof(void)
{
    return 0;
}

static int dummy_cpupool_op(void)
{
    return 0;
}

static int dummy_sched_op(void)
{
    return 0;
}

static long dummy___do_xsm_op(XEN_GUEST_HANDLE(xsm_op_t) op)
{
    return -ENOSYS;
}

static char *dummy_show_irq_sid(int irq)
{
    return NULL;
}

static int dummy_map_domain_pirq(struct domain *d, int irq, void *data)
{
    return 0;
}

static int dummy_irq_permission(struct domain *d, int pirq, uint8_t allow)
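/*
 * Illustrative sketch, not from this fragment: these stubs give every
 * XSM hook a permissive default when no security module is loaded.  They
 * are installed into the XSM operations table for any hook a module left
 * NULL, roughly:
 *
 *     void xsm_fixup_ops(struct xsm_operations *ops)
 *     {
 *         set_to_dummy_if_null(ops, lockprof);
 *         set_to_dummy_if_null(ops, cpupool_op);
 *         set_to_dummy_if_null(ops, sched_op);
 *         // ...one line per hook defined above
 *     }
 */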
        }

        if ( unlikely(__copy_from_guest_offset(&gmfn, a->extent_list, i, 1)) )
            goto out;

        for ( j = 0; j < (1 << a->extent_order); j++ )
            if ( !guest_remove_page(a->domain, gmfn + j) )
                goto out;
    }

 out:
    a->nr_done = i;
}

static long translate_gpfn_list(
    XEN_GUEST_HANDLE(xen_translate_gpfn_list_t) uop, unsigned long *progress)
{
    struct xen_translate_gpfn_list op;
    unsigned long i;
    xen_pfn_t gpfn;
    xen_pfn_t mfn;
    struct domain *d;
    int rc;

    if ( copy_from_guest(&op, uop, 1) )
        return -EFAULT;

    /* Is size too large for us to encode a continuation? */
    if ( op.nr_gpfns > (ULONG_MAX >> MEMOP_EXTENT_SHIFT) )
        return -EINVAL;
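/*
 * Hedged sketch, for illustration: the nr_gpfns bound above exists
 * because long-running memory ops that preempt themselves encode their
 * progress in the high bits of the hypercall's command word, so the
 * extent count must fit below MEMOP_EXTENT_SHIFT.  The caller would
 * restart the operation along these lines:
 *
 *     if ( hypercall_preempt_check() )
 *         return hypercall_create_continuation(
 *             __HYPERVISOR_memory_op, "lh",
 *             XENMEM_translate_gpfn_list | (i << MEMOP_EXTENT_SHIFT), uop);
 */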