int xc_pm_get_cxstat(xc_interface *xch, int cpuid, struct xc_cx_stat *cxpt)
{
    DECLARE_SYSCTL;
    DECLARE_NAMED_HYPERCALL_BOUNCE(triggers, cxpt->triggers,
                                   cxpt->nr * sizeof(*cxpt->triggers),
                                   XC_HYPERCALL_BUFFER_BOUNCE_OUT);
    DECLARE_NAMED_HYPERCALL_BOUNCE(residencies, cxpt->residencies,
                                   cxpt->nr * sizeof(*cxpt->residencies),
                                   XC_HYPERCALL_BUFFER_BOUNCE_OUT);
    DECLARE_NAMED_HYPERCALL_BOUNCE(pc, cxpt->pc,
                                   cxpt->nr_pc * sizeof(*cxpt->pc),
                                   XC_HYPERCALL_BUFFER_BOUNCE_OUT);
    DECLARE_NAMED_HYPERCALL_BOUNCE(cc, cxpt->cc,
                                   cxpt->nr_cc * sizeof(*cxpt->cc),
                                   XC_HYPERCALL_BUFFER_BOUNCE_OUT);
    int ret = -1;

    if ( xc_hypercall_bounce_pre(xch, triggers) )
        goto unlock_0;
    if ( xc_hypercall_bounce_pre(xch, residencies) )
        goto unlock_1;
    if ( xc_hypercall_bounce_pre(xch, pc) )
        goto unlock_2;
    if ( xc_hypercall_bounce_pre(xch, cc) )
        goto unlock_3;

    sysctl.cmd = XEN_SYSCTL_get_pmstat;
    sysctl.u.get_pmstat.type = PMSTAT_get_cxstat;
    sysctl.u.get_pmstat.cpuid = cpuid;
    sysctl.u.get_pmstat.u.getcx.nr = cxpt->nr;
    sysctl.u.get_pmstat.u.getcx.nr_pc = cxpt->nr_pc;
    sysctl.u.get_pmstat.u.getcx.nr_cc = cxpt->nr_cc;
    set_xen_guest_handle(sysctl.u.get_pmstat.u.getcx.triggers, triggers);
    set_xen_guest_handle(sysctl.u.get_pmstat.u.getcx.residencies, residencies);
    set_xen_guest_handle(sysctl.u.get_pmstat.u.getcx.pc, pc);
    set_xen_guest_handle(sysctl.u.get_pmstat.u.getcx.cc, cc);

    if ( (ret = xc_sysctl(xch, &sysctl)) )
        goto unlock_4;

    cxpt->nr = sysctl.u.get_pmstat.u.getcx.nr;
    cxpt->last = sysctl.u.get_pmstat.u.getcx.last;
    cxpt->idle_time = sysctl.u.get_pmstat.u.getcx.idle_time;
    cxpt->nr_pc = sysctl.u.get_pmstat.u.getcx.nr_pc;
    cxpt->nr_cc = sysctl.u.get_pmstat.u.getcx.nr_cc;

unlock_4:
    xc_hypercall_bounce_post(xch, cc);
unlock_3:
    xc_hypercall_bounce_post(xch, pc);
unlock_2:
    xc_hypercall_bounce_post(xch, residencies);
unlock_1:
    xc_hypercall_bounce_post(xch, triggers);
unlock_0:
    return ret;
}
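/*
 * Caller-side sketch (an assumption for illustration, not part of the code
 * above): the output arrays must be allocated and their counts filled in
 * before calling xc_pm_get_cxstat(); the hypercall then writes back the
 * actual counts.  xc_pm_get_max_cx(), the pc/cc array length of 8, and the
 * helper name are assumptions; assumes <stdio.h>, <stdlib.h>, <inttypes.h>
 * and xenctrl.h.
 */
static int show_cxstat_example(xc_interface *xch, int cpuid)
{
    struct xc_cx_stat cxstat = { 0 };
    unsigned int i;
    int max_cx, rc = -1;

    if ( xc_pm_get_max_cx(xch, cpuid, &max_cx) )   /* assumed helper */
        return -1;

    cxstat.nr = max_cx;
    cxstat.nr_pc = 8;                              /* illustrative size */
    cxstat.nr_cc = 8;                              /* illustrative size */
    cxstat.triggers = calloc(max_cx, sizeof(*cxstat.triggers));
    cxstat.residencies = calloc(max_cx, sizeof(*cxstat.residencies));
    cxstat.pc = calloc(cxstat.nr_pc, sizeof(*cxstat.pc));
    cxstat.cc = calloc(cxstat.nr_cc, sizeof(*cxstat.cc));
    if ( !cxstat.triggers || !cxstat.residencies || !cxstat.pc || !cxstat.cc )
        goto out;

    rc = xc_pm_get_cxstat(xch, cpuid, &cxstat);
    if ( rc == 0 )
        for ( i = 0; i < cxstat.nr; i++ )
            printf("C%u: %"PRIu64" triggers, %"PRIu64" residency\n",
                   i, cxstat.triggers[i], cxstat.residencies[i]);

out:
    free(cxstat.triggers);
    free(cxstat.residencies);
    free(cxstat.pc);
    free(cxstat.cc);
    return rc;
}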
int xc_pm_get_pxstat(xc_interface *xch, int cpuid, struct xc_px_stat *pxpt)
{
    DECLARE_SYSCTL;
    /* Sizes unknown until xc_pm_get_max_px */
    DECLARE_NAMED_HYPERCALL_BOUNCE(trans, pxpt->trans_pt, 0,
                                   XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
    DECLARE_NAMED_HYPERCALL_BOUNCE(pt, pxpt->pt, 0,
                                   XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
    int max_px, ret;

    if ( !pxpt->trans_pt || !pxpt->pt )
    {
        errno = EINVAL;
        return -1;
    }

    if ( (ret = xc_pm_get_max_px(xch, cpuid, &max_px)) != 0 )
        return ret;

    HYPERCALL_BOUNCE_SET_SIZE(trans, max_px * max_px * sizeof(uint64_t));
    HYPERCALL_BOUNCE_SET_SIZE(pt, max_px * sizeof(struct xc_px_val));

    if ( xc_hypercall_bounce_pre(xch, trans) )
        return ret;
    if ( xc_hypercall_bounce_pre(xch, pt) )
    {
        xc_hypercall_bounce_post(xch, trans);
        return ret;
    }

    sysctl.cmd = XEN_SYSCTL_get_pmstat;
    sysctl.u.get_pmstat.type = PMSTAT_get_pxstat;
    sysctl.u.get_pmstat.cpuid = cpuid;
    sysctl.u.get_pmstat.u.getpx.total = max_px;
    set_xen_guest_handle(sysctl.u.get_pmstat.u.getpx.trans_pt, trans);
    set_xen_guest_handle(sysctl.u.get_pmstat.u.getpx.pt, pt);

    ret = xc_sysctl(xch, &sysctl);
    if ( ret )
    {
        xc_hypercall_bounce_post(xch, trans);
        xc_hypercall_bounce_post(xch, pt);
        return ret;
    }

    pxpt->total = sysctl.u.get_pmstat.u.getpx.total;
    pxpt->usable = sysctl.u.get_pmstat.u.getpx.usable;
    pxpt->last = sysctl.u.get_pmstat.u.getpx.last;
    pxpt->cur = sysctl.u.get_pmstat.u.getpx.cur;

    xc_hypercall_bounce_post(xch, trans);
    xc_hypercall_bounce_post(xch, pt);

    return ret;
}
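/*
 * Caller-side sketch (an assumption for illustration): the caller has to
 * allocate trans_pt and pt with exactly the sizes that the bounce buffers
 * above are set to, i.e. max_px * max_px transition counters and max_px
 * struct xc_px_val entries.  The helper name is hypothetical; assumes
 * <stdio.h>, <stdlib.h> and xenctrl.h.
 */
static int show_pxstat_example(xc_interface *xch, int cpuid)
{
    struct xc_px_stat pxstat = { 0 };
    int max_px, rc = -1;

    if ( xc_pm_get_max_px(xch, cpuid, &max_px) )
        return -1;

    /* Sizes must match the HYPERCALL_BOUNCE_SET_SIZE() calls above. */
    pxstat.trans_pt = calloc((size_t)max_px * max_px,
                             sizeof(*pxstat.trans_pt));
    pxstat.pt = calloc(max_px, sizeof(*pxstat.pt));
    if ( !pxstat.trans_pt || !pxstat.pt )
        goto out;

    rc = xc_pm_get_pxstat(xch, cpuid, &pxstat);
    if ( rc == 0 )
        printf("cpu%d: %u P-states (%u usable), current P%u\n",
               cpuid, (unsigned)pxstat.total, (unsigned)pxstat.usable,
               (unsigned)pxstat.cur);

out:
    free(pxstat.trans_pt);
    free(pxstat.pt);
    return rc;
}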
static int flush_mmu_updates(xc_interface *xch, struct xc_mmu *mmu)
{
    int rc, err = 0;
    DECLARE_NAMED_HYPERCALL_BOUNCE(updates, mmu->updates,
                                   mmu->idx * sizeof(*mmu->updates),
                                   XC_HYPERCALL_BUFFER_BOUNCE_BOTH);

    if ( mmu->idx == 0 )
        return 0;

    if ( xc_hypercall_bounce_pre(xch, updates) )
    {
        PERROR("flush_mmu_updates: bounce buffer failed");
        err = 1;
        goto out;
    }

    rc = xencall4(xch->xcall, __HYPERVISOR_mmu_update,
                  HYPERCALL_BUFFER_AS_ARG(updates),
                  mmu->idx, 0, mmu->subject);
    if ( rc < 0 )
    {
        ERROR("Failure when submitting mmu updates");
        err = 1;
    }

    mmu->idx = 0;

    xc_hypercall_bounce_post(xch, updates);

out:
    return err;
}
/*
 * Older variant of flush_mmu_updates(), issuing the hypercall directly via
 * DECLARE_HYPERCALL / do_xen_hypercall() instead of xencall4().
 */
static int flush_mmu_updates(xc_interface *xch, struct xc_mmu *mmu)
{
    int err = 0;
    DECLARE_HYPERCALL;
    DECLARE_NAMED_HYPERCALL_BOUNCE(updates, mmu->updates,
                                   mmu->idx * sizeof(*mmu->updates),
                                   XC_HYPERCALL_BUFFER_BOUNCE_BOTH);

    if ( mmu->idx == 0 )
        return 0;

    if ( xc_hypercall_bounce_pre(xch, updates) )
    {
        PERROR("flush_mmu_updates: bounce buffer failed");
        err = 1;
        goto out;
    }

    hypercall.op = __HYPERVISOR_mmu_update;
    hypercall.arg[0] = HYPERCALL_BUFFER_AS_ARG(updates);
    hypercall.arg[1] = (unsigned long)mmu->idx;
    hypercall.arg[2] = 0;
    hypercall.arg[3] = mmu->subject;

    if ( do_xen_hypercall(xch, &hypercall) < 0 )
    {
        ERROR("Failure when submitting mmu updates");
        err = 1;
    }

    mmu->idx = 0;

    xc_hypercall_bounce_post(xch, updates);

out:
    return err;
}
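/*
 * Caller-side sketch (an assumption for illustration): flush_mmu_updates()
 * is the static flush path behind libxc's MMU batching helpers.  The helper
 * names xc_alloc_mmu_updates()/xc_add_mmu_update()/xc_flush_mmu_updates(),
 * the MMU_MACHPHYS_UPDATE encoding and XC_PAGE_SHIFT are assumptions taken
 * from the wider libxc/Xen public headers, not from the code above.
 */
static int remap_m2p_example(xc_interface *xch, uint32_t domid,
                             xen_pfn_t mfn, xen_pfn_t pfn)
{
    struct xc_mmu *mmu = xc_alloc_mmu_updates(xch, domid); /* assumed helper */
    int rc = -1;

    if ( !mmu )
        return -1;

    /* Queue one machine-to-phys update; entries accumulate in mmu->updates. */
    if ( xc_add_mmu_update(xch, mmu,
                           ((unsigned long long)mfn << XC_PAGE_SHIFT) |
                           MMU_MACHPHYS_UPDATE, pfn) )
        goto out;

    /* Push the queued batch out through flush_mmu_updates(). */
    rc = xc_flush_mmu_updates(xch, mmu);

out:
    free(mmu);
    return rc;
}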
static int xc_resource_op_one(xc_interface *xch, xc_resource_op_t *op)
{
    int rc;
    DECLARE_PLATFORM_OP;
    DECLARE_NAMED_HYPERCALL_BOUNCE(entries, op->entries,
                                   op->nr_entries * sizeof(*op->entries),
                                   XC_HYPERCALL_BUFFER_BOUNCE_BOTH);

    if ( xc_hypercall_bounce_pre(xch, entries) )
        return -1;

    platform_op.cmd = XENPF_resource_op;
    platform_op.u.resource_op.nr_entries = op->nr_entries;
    platform_op.u.resource_op.cpu = op->cpu;
    set_xen_guest_handle(platform_op.u.resource_op.entries, entries);

    rc = do_platform_op(xch, &platform_op);
    op->result = rc;

    xc_hypercall_bounce_post(xch, entries);

    return rc;
}
/*
 * 1. Get PM parameter
 * 2. Provide user PM control
 */
int xc_get_cpufreq_para(xc_interface *xch, int cpuid,
                        struct xc_get_cpufreq_para *user_para)
{
    DECLARE_SYSCTL;
    int ret = 0;
    struct xen_get_cpufreq_para *sys_para = &sysctl.u.pm_op.u.get_para;
    DECLARE_NAMED_HYPERCALL_BOUNCE(affected_cpus,
                                   user_para->affected_cpus,
                                   user_para->cpu_num * sizeof(uint32_t),
                                   XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
    DECLARE_NAMED_HYPERCALL_BOUNCE(scaling_available_frequencies,
                                   user_para->scaling_available_frequencies,
                                   user_para->freq_num * sizeof(uint32_t),
                                   XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
    DECLARE_NAMED_HYPERCALL_BOUNCE(scaling_available_governors,
                                   user_para->scaling_available_governors,
                                   user_para->gov_num * CPUFREQ_NAME_LEN * sizeof(char),
                                   XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
    bool has_num = user_para->cpu_num &&
                   user_para->freq_num &&
                   user_para->gov_num;

    if ( has_num )
    {
        if ( (!user_para->affected_cpus) ||
             (!user_para->scaling_available_frequencies) ||
             (!user_para->scaling_available_governors) )
        {
            errno = EINVAL;
            return -1;
        }
        if ( xc_hypercall_bounce_pre(xch, affected_cpus) )
            goto unlock_1;
        if ( xc_hypercall_bounce_pre(xch, scaling_available_frequencies) )
            goto unlock_2;
        if ( xc_hypercall_bounce_pre(xch, scaling_available_governors) )
            goto unlock_3;

        set_xen_guest_handle(sys_para->affected_cpus, affected_cpus);
        set_xen_guest_handle(sys_para->scaling_available_frequencies,
                             scaling_available_frequencies);
        set_xen_guest_handle(sys_para->scaling_available_governors,
                             scaling_available_governors);
    }

    sysctl.cmd = XEN_SYSCTL_pm_op;
    sysctl.u.pm_op.cmd = GET_CPUFREQ_PARA;
    sysctl.u.pm_op.cpuid = cpuid;
    sys_para->cpu_num = user_para->cpu_num;
    sys_para->freq_num = user_para->freq_num;
    sys_para->gov_num = user_para->gov_num;

    ret = xc_sysctl(xch, &sysctl);
    if ( ret )
    {
        if ( errno == EAGAIN )
        {
            user_para->cpu_num = sys_para->cpu_num;
            user_para->freq_num = sys_para->freq_num;
            user_para->gov_num = sys_para->gov_num;
            ret = -errno;
        }

        if ( has_num )
            goto unlock_4;
        goto unlock_1;
    }
    else
    {
        user_para->cpuinfo_cur_freq = sys_para->cpuinfo_cur_freq;
        user_para->cpuinfo_max_freq = sys_para->cpuinfo_max_freq;
        user_para->cpuinfo_min_freq = sys_para->cpuinfo_min_freq;
        user_para->scaling_cur_freq = sys_para->scaling_cur_freq;
        user_para->scaling_max_freq = sys_para->scaling_max_freq;
        user_para->scaling_min_freq = sys_para->scaling_min_freq;
        user_para->turbo_enabled = sys_para->turbo_enabled;

        memcpy(user_para->scaling_driver,
               sys_para->scaling_driver, CPUFREQ_NAME_LEN);

        memcpy(user_para->scaling_governor,
               sys_para->scaling_governor, CPUFREQ_NAME_LEN);

        /* copy to user_para no matter what cpufreq governor */
        XC_BUILD_BUG_ON(sizeof(((struct xc_get_cpufreq_para *)0)->u) !=
                        sizeof(((struct xen_get_cpufreq_para *)0)->u));

        memcpy(&user_para->u, &sys_para->u, sizeof(sys_para->u));
    }

unlock_4:
    xc_hypercall_bounce_post(xch, scaling_available_governors);
unlock_3:
    xc_hypercall_bounce_post(xch, scaling_available_frequencies);
unlock_2:
    xc_hypercall_bounce_post(xch, affected_cpus);
unlock_1:
    return ret;
}
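/*
 * Caller-side sketch (an assumption for illustration) of the two-pass
 * pattern the EAGAIN handling above is designed for: call once with unknown
 * (zero) counts, let the hypervisor report the required cpu_num/freq_num/
 * gov_num back, then allocate the arrays and retry.  The helper name is
 * hypothetical; assumes <stdio.h>, <stdlib.h>, <errno.h> and xenctrl.h.
 */
static int get_cpufreq_para_example(xc_interface *xch, int cpuid)
{
    struct xc_get_cpufreq_para para = { 0 };
    int rc;

    do {
        free(para.affected_cpus);
        free(para.scaling_available_frequencies);
        free(para.scaling_available_governors);
        para.affected_cpus = NULL;
        para.scaling_available_frequencies = NULL;
        para.scaling_available_governors = NULL;

        /* Once the counts are known, size the arrays as the bounces expect. */
        if ( para.cpu_num && para.freq_num && para.gov_num )
        {
            para.affected_cpus = calloc(para.cpu_num, sizeof(uint32_t));
            para.scaling_available_frequencies =
                calloc(para.freq_num, sizeof(uint32_t));
            para.scaling_available_governors =
                calloc(para.gov_num, CPUFREQ_NAME_LEN);
            if ( !para.affected_cpus ||
                 !para.scaling_available_frequencies ||
                 !para.scaling_available_governors )
            {
                rc = -1;
                break;
            }
        }

        rc = xc_get_cpufreq_para(xch, cpuid, &para);
    } while ( rc == -EAGAIN );

    if ( rc == 0 )
        printf("cpu%d: %u kHz (governor %.*s)\n",
               cpuid, para.scaling_cur_freq,
               (int)CPUFREQ_NAME_LEN, para.scaling_governor);

    free(para.affected_cpus);
    free(para.scaling_available_frequencies);
    free(para.scaling_available_governors);
    return rc;
}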