/*
 * NOTE(review): this chunk begins mid-function.  The lines down to the first
 * closing brace are the tail of do_microcode_update(), a continuation handler
 * driven by continue_hypercall_on_cpu(): it applies the blob on the current
 * CPU, then re-schedules itself on the next online CPU until all CPUs are
 * done, at which point it frees the request and returns the recorded error.
 */
/* Must be running on the CPU this update step was scheduled for. */
BUG_ON(info->cpu != smp_processor_id());

error = microcode_update_cpu(info->buffer, info->buffer_size);
if ( error )
    info->error = error;   /* remember first/last failure; keep going */

/* Advance to the next online CPU and chain the hypercall onto it. */
info->cpu = cpumask_next(info->cpu, &cpu_online_map);
if ( info->cpu < nr_cpu_ids )
    return continue_hypercall_on_cpu(info->cpu, do_microcode_update, info);

/* All CPUs processed: report any recorded error and release the buffer. */
error = info->error;
xfree(info);
return error;
}

/*
 * Entry point for the microcode-update hypercall: copies the guest-supplied
 * blob into a freshly allocated buffer and (below, beyond this chunk)
 * presumably kicks off the per-CPU update chain above — TODO confirm against
 * the remainder of the function.
 *
 * @buf: guest handle to the microcode image.
 * @len: image size in bytes; must fit in 32 bits.
 * Returns 0 on success or a -errno value.
 */
int microcode_update(XEN_GUEST_HANDLE_PARAM(const_void) buf, unsigned long len)
{
    int ret;
    struct microcode_info *info;

    /* Reject sizes that do not fit the 32-bit length used internally. */
    if ( len != (uint32_t)len )
        return -E2BIG;

    /* No vendor-specific microcode driver registered on this platform. */
    if ( microcode_ops == NULL )
        return -EINVAL;

    /* Header plus trailing image bytes in one allocation. */
    info = xmalloc_bytes(sizeof(*info) + len);
    if ( info == NULL )
        return -ENOMEM;

    ret = copy_from_guest(info->buffer, buf, len);
/*
 * NOTE(review): this chunk begins mid-loop inside decrease_reservation():
 * for each guest extent, PoD gets first refusal, otherwise each page of the
 * (1 << extent_order)-sized extent is removed individually.
 */
/* See if populate-on-demand wants to handle this */
if ( is_hvm_domain(a->domain)
     && p2m_pod_decrease_reservation(a->domain, gmfn, a->extent_order) )
    continue;

/* PoD declined: release every page of this extent; bail out on failure. */
for ( j = 0; j < (1 << a->extent_order); j++ )
    if ( !guest_remove_page(a->domain, gmfn + j) )
        goto out;
}

out:
/* Record how many extents were completed (for preemption/restart). */
a->nr_done = i;
}

/*
 * XENMEM_exchange: trade a set of guest pages for replacement pages with
 * different properties (e.g. contiguity/address width).  Only the argument
 * copy-in is visible in this chunk; the exchange proper follows elsewhere.
 *
 * @arg: guest handle to a xen_memory_exchange_t descriptor.
 * Returns 0 on success or a negative errno-style value.
 */
static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
{
    struct xen_memory_exchange exch;
    PAGE_LIST_HEAD(in_chunk_list);     /* pages surrendered by the guest */
    PAGE_LIST_HEAD(out_chunk_list);    /* replacement pages to hand back */
    unsigned long in_chunk_order, out_chunk_order;
    xen_pfn_t gpfn, gmfn, mfn;
    unsigned long i, j, k;
    unsigned int memflags = 0;
    long rc = 0;
    struct domain *d;
    struct page_info *page;

    if ( copy_from_guest(&exch, arg, 1) )
        return -EFAULT;
#include <xen/trace.h>
#include <xen/console.h>
#include <xen/iocap.h>
#include <xen/guest_access.h>
#include <xen/keyhandler.h>
#include <asm/current.h>
#include <xen/hypercall.h>
#include <public/sysctl.h>
#include <asm/numa.h>
#include <xen/nodemask.h>
#include <xsm/xsm.h>
#include <xen/pmstat.h>
#include <xen/livepatch.h>
#include <xen/gcov.h>

/*
 * Top-level sysctl hypercall dispatcher (head only in this chunk; the
 * per-command switch follows beyond the visible lines).  Copies the guest's
 * request, validates the interface version, and asks XSM for permission
 * before any command-specific work.
 *
 * @u_sysctl: guest handle to a xen_sysctl_t request/response structure.
 * Returns 0 on success or a negative errno-style value.
 */
long do_sysctl(XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl)
{
    long ret = 0;
    int copyback = -1;                 /* tri-state: decided per command */
    struct xen_sysctl curop, *op = &curop;
    /* Serialises all sysctl operations system-wide. */
    static DEFINE_SPINLOCK(sysctl_lock);

    if ( copy_from_guest(op, u_sysctl, 1) )
        return -EFAULT;

    /* Toolstack and hypervisor must agree on the ABI revision. */
    if ( op->interface_version != XEN_SYSCTL_INTERFACE_VERSION )
        return -EACCES;

    /* Security check before touching any command payload. */
    ret = xsm_sysctl(XSM_PRIV, op->cmd);
    if ( ret )
        return ret;
/*
 * NOTE(review): this chunk begins mid-call inside a FLASK relabel/transition
 * helper — the argument below closes a security_transition_sid() style call
 * started on earlier (unseen) lines.
 */
                        &dsec->self_sid);

/* If the domain has a device-model target, derive a target SID as well. */
if ( d->target )
{
    struct domain_security_struct *tsec = d->target->ssid;

    security_transition_sid(tsec->sid, dsec->sid, SECCLASS_DOMAIN,
                            &dsec->target_sid);
}

out:
rcu_unlock_domain(d);
return rc;
}

#endif /* !COMPAT */

/*
 * FLASK (XSM) hypercall dispatcher (head only; the switch continues past
 * this chunk).  Copies the request, checks the FLASK ABI version, then
 * dispatches on op.cmd.
 *
 * @u_flask_op: guest handle to a xen_flask_op_t request.
 * Returns the sub-command's result or a negative errno-style value.
 */
ret_t do_flask_op(XEN_GUEST_HANDLE_PARAM(xsm_op_t) u_flask_op)
{
    xen_flask_op_t op;
    int rv;

    if ( copy_from_guest(&op, u_flask_op, 1) )
        return -EFAULT;

    /* Distinct from the sysctl/platform versions: FLASK has its own ABI. */
    if ( op.interface_version != XEN_FLASK_INTERFACE_VERSION )
        return -ENOSYS;

    switch ( op.cmd )
    {
    case FLASK_LOAD:
        /* Load a new security policy blob. */
        rv = flask_security_load(&op.u.load);
        break;
/* Per-CPU target frequency, consumed by the helper below. */
static DEFINE_PER_CPU(uint64_t, freq);

/*
 * Continuation helper: applies the frequency recorded in this CPU's
 * per-CPU 'freq' slot.  @data is unused; signature matches
 * continue_hypercall_on_cpu() — presumably how it is invoked (TODO confirm
 * at the call site).
 */
static long cpu_frequency_change_helper(void *data)
{
    return cpu_frequency_change(this_cpu(freq));
}

/* from sysctl.c */
long cpu_up_helper(void *data);
long cpu_down_helper(void *data);

/* from core_parking.c */
long core_parking_helper(void *data);
uint32_t get_cur_idle_nums(void);

/*
 * Platform-op hypercall dispatcher (head only in this chunk).  Copies the
 * guest request, validates the ABI version, and performs the XSM permission
 * check before command-specific handling.
 *
 * @u_xenpf_op: guest handle to a xen_platform_op_t request/response.
 * Returns 0 on success or a negative errno-style value.
 */
ret_t do_platform_op(XEN_GUEST_HANDLE_PARAM(xen_platform_op_t) u_xenpf_op)
{
    ret_t ret = 0;
    struct xen_platform_op curop, *op = &curop;

    if ( copy_from_guest(op, u_xenpf_op, 1) )
        return -EFAULT;

    if ( op->interface_version != XENPF_INTERFACE_VERSION )
        return -EACCES;

    ret = xsm_platform_op(XSM_PRIV, op->cmd);
    if ( ret )
        return ret;

    /*
#include <xen/event.h>
#include <xen/mem_access.h>
#include <xen/multicall.h>
#include <compat/memory.h>
#include <compat/xen.h>
#include <asm/mem_paging.h>
#include <asm/mem_sharing.h>

/*
 * 32-bit-compat wrapper for set_gdt (head only; the actual set_gdt call and
 * return are beyond this chunk).  Widens the guest's 32-bit frame numbers
 * into an unsigned long array before handing off.
 *
 * @frame_list: guest handle to an array of 32-bit GDT frame numbers.
 * @entries:    number of GDT entries; 512 descriptors fit per frame.
 * Returns 0 on success or a negative errno-style value.
 */
int compat_set_gdt(XEN_GUEST_HANDLE_PARAM(uint) frame_list, unsigned int entries)
{
    /* 512 eight-byte descriptors per 4k frame; round up. */
    unsigned int i, nr_pages = (entries + 511) / 512;
    /* Fixed-size: the entries bound below keeps nr_pages within 16. */
    unsigned long frames[16];
    long ret;

    /* Rechecked in set_gdt, but ensures a sane limit for copy_from_user(). */
    if ( entries > FIRST_RESERVED_GDT_ENTRY )
        return -EINVAL;

    if ( !guest_handle_okay(frame_list, nr_pages) )
        return -EFAULT;

    /* Copy and widen each 32-bit frame number individually. */
    for ( i = 0; i < nr_pages; ++i )
    {
        unsigned int frame;

        if ( __copy_from_guest(&frame, frame_list, 1) )
            return -EFAULT;
        frames[i] = frame;
        guest_handle_add_offset(frame_list, 1);
    }
{ __trace_hypercall(TRC_PV_HYPERCALL_SUBCALL, call->op, call->args); } #endif static void trace_multicall_call(multicall_entry_t *call) { if ( !tb_init_done ) return; __trace_multicall_call(call); } ret_t do_multicall( XEN_GUEST_HANDLE_PARAM(multicall_entry_t) call_list, uint32_t nr_calls) { struct mc_state *mcs = ¤t->mc_state; uint32_t i; int rc = 0; if ( unlikely(__test_and_set_bit(_MCSF_in_multicall, &mcs->flags)) ) { gdprintk(XENLOG_INFO, "Multicall reentry is disallowed.\n"); return -EINVAL; } if ( unlikely(!guest_handle_okay(call_list, nr_calls)) ) rc = -EFAULT; for ( i = 0; !rc && i < nr_calls; i++ )