/*
 * Probe for EFI when running as the initial (privileged) Xen domain.
 *
 * Issues a XENPF_firmware_info/XEN_FW_EFI_INFO platform op; success means
 * the hypervisor booted from EFI.  In that case the global efi runtime
 * service pointers are redirected to the Xen hypercall-backed
 * implementations and a synthetic system table (efi_systab_xen) is filled
 * in from further firmware-info queries.
 *
 * Returns a pointer to efi_systab_xen, or NULL when not dom0 or not EFI.
 */
static efi_system_table_t __init *xen_efi_probe(void)
{
	struct xen_platform_op op = {
		.cmd = XENPF_firmware_info,
		.u.firmware_info = {
			.type = XEN_FW_EFI_INFO,
			.index = XEN_FW_EFI_CONFIG_TABLE
		}
	};
	union xenpf_efi_info *info = &op.u.firmware_info.u.efi_info;

	/* Only dom0 may issue this op; failure also means "no EFI". */
	if (!xen_initial_domain() || HYPERVISOR_platform_op(&op) < 0)
		return NULL;

	/* Here we know that Xen runs on EFI platform. */

	/* Route all EFI runtime services through the hypervisor. */
	efi.get_time = xen_efi_get_time;
	efi.set_time = xen_efi_set_time;
	efi.get_wakeup_time = xen_efi_get_wakeup_time;
	efi.set_wakeup_time = xen_efi_set_wakeup_time;
	efi.get_variable = xen_efi_get_variable;
	efi.get_next_variable = xen_efi_get_next_variable;
	efi.set_variable = xen_efi_set_variable;
	efi.query_variable_info = xen_efi_query_variable_info;
	efi.update_capsule = xen_efi_update_capsule;
	efi.query_capsule_caps = xen_efi_query_capsule_caps;
	efi.get_next_high_mono_count = xen_efi_get_next_high_mono_count;
	efi.reset_system = xen_efi_reset_system;

	/* Config-table location/count came back from the probe above. */
	efi_systab_xen.tables = info->cfg.addr;
	efi_systab_xen.nr_tables = info->cfg.nent;

	/* Firmware vendor string (reuses the same op structure). */
	op.cmd = XENPF_firmware_info;
	op.u.firmware_info.type = XEN_FW_EFI_INFO;
	op.u.firmware_info.index = XEN_FW_EFI_VENDOR;
	info->vendor.bufsz = sizeof(vendor);
	set_xen_guest_handle(info->vendor.name, vendor);

	if (HYPERVISOR_platform_op(&op) == 0) {
		efi_systab_xen.fw_vendor = __pa_symbol(vendor);
		efi_systab_xen.fw_revision = info->vendor.revision;
	} else
		efi_systab_xen.fw_vendor = __pa_symbol(L"UNKNOWN");

	/* EFI system table revision. */
	op.cmd = XENPF_firmware_info;
	op.u.firmware_info.type = XEN_FW_EFI_INFO;
	op.u.firmware_info.index = XEN_FW_EFI_VERSION;

	if (HYPERVISOR_platform_op(&op) == 0)
		efi_systab_xen.hdr.revision = info->version;

	/* Runtime services revision. */
	op.cmd = XENPF_firmware_info;
	op.u.firmware_info.type = XEN_FW_EFI_INFO;
	op.u.firmware_info.index = XEN_FW_EFI_RT_VERSION;

	if (HYPERVISOR_platform_op(&op) == 0)
		efi.runtime_version = info->version;

	return &efi_systab_xen;
}
/*
 * Returns the effective MTRR type for the region
 * Error returns:
 * - 0xFE - when the range is "not entirely covered" by _any_ var range MTRR
 * - 0xFF - when MTRR is not enabled
 *
 * NOTE(review): only the fixed-range prefix of this function is visible in
 * this excerpt; the variable-range walk (which uses base_mfn/top_mfn and
 * prev_match/curr_match) continues beyond it.
 */
u8 mtrr_type_lookup(u64 start, u64 end)
{
	int i, error;
	u64 start_mfn, end_mfn, base_mfn, top_mfn;
	u8 prev_match, curr_match;
	struct xen_platform_op op;

	/* Unprivileged domains cannot read MTRRs; assume write-back. */
	if (!is_initial_xendomain())
		return MTRR_TYPE_WRBACK;

	if (!num_var_ranges)
		return 0xFF;

	start_mfn = start >> PAGE_SHIFT;
	/* Make end inclusive end, instead of exclusive */
	end_mfn = --end >> PAGE_SHIFT;

	/* Look in fixed ranges. Just return the type as per start */
	/* Frames below 0x100 (the first 1MB) fall in the fixed-range MTRRs,
	 * which this port cannot query yet -- hence the conservative
	 * MTRR_TYPE_UNCACHABLE answer below. */
	if (start_mfn < 0x100) {
#if 0//todo
		op.cmd = XENPF_read_memtype;
		op.u.read_memtype.reg = ???;
		error = HYPERVISOR_platform_op(&op);
		if (!error)
			return op.u.read_memtype.type;
#endif
		return MTRR_TYPE_UNCACHABLE;
	}
/*
 * Register an MTRR region with the hypervisor via XENPF_add_memtype.
 *
 * @base:      first frame number of the region
 * @size:      number of frames
 * @type:      memory type for the region
 * @increment: when true, bump the usage count for the assigned register
 *
 * Returns the hypervisor-assigned MTRR register index on success, or a
 * negative error code.  Serialized by mtrr_mutex.
 */
int mtrr_add_page(unsigned long base, unsigned long size,
		  unsigned int type, bool increment)
{
	struct xen_platform_op op;
	int rc;

	mutex_lock(&mtrr_mutex);

	op.cmd = XENPF_add_memtype;
	op.u.add_memtype.mfn = base;
	op.u.add_memtype.nr_mfns = size;
	op.u.add_memtype.type = type;

	rc = HYPERVISOR_platform_op(&op);
	if (rc) {
		mutex_unlock(&mtrr_mutex);
		/* The hypervisor only reports negative errors here. */
		BUG_ON(rc > 0);
		return rc;
	}

	if (increment)
		++mtrr_usage_table[op.u.add_memtype.reg];

	mutex_unlock(&mtrr_mutex);

	return op.u.add_memtype.reg;
}
/*
 * Populate the global 'edd' structure (BIOS Enhanced Disk Drive info and
 * MBR signatures) by querying the hypervisor's firmware info, one index
 * at a time until a query fails or the arrays are full.  Dom0 only.
 */
void __init copy_edd(void)
{
	int ret;
	struct xen_platform_op op;

	if (!is_initial_xendomain())
		return;

	op.cmd = XENPF_firmware_info;

	/* EDD info: one XEN_FW_DISK_INFO query per BIOS device index. */
	op.u.firmware_info.type = XEN_FW_DISK_INFO;
	for (op.u.firmware_info.index = 0;
	     edd.edd_info_nr < EDDMAXNR;
	     op.u.firmware_info.index++) {
		struct edd_info *info = edd.edd_info + edd.edd_info_nr;

		/* The first field doubles as the (input) buffer length. */
		info->params.length = sizeof(info->params);
		set_xen_guest_handle(op.u.firmware_info.u.disk_info.edd_params,
				     &info->params);
		ret = HYPERVISOR_platform_op(&op);
		if (ret)
			break;

#define C(x) info->x = op.u.firmware_info.u.disk_info.x
		C(device);
		C(version);
		C(interface_support);
		C(legacy_max_cylinder);
		C(legacy_max_head);
		C(legacy_sectors_per_track);
#undef C

		edd.edd_info_nr++;
	}

	/* MBR signatures: same pattern with XEN_FW_DISK_MBR_SIGNATURE. */
	op.u.firmware_info.type = XEN_FW_DISK_MBR_SIGNATURE;
	for (op.u.firmware_info.index = 0;
	     edd.mbr_signature_nr < EDD_MBR_SIG_MAX;
	     op.u.firmware_info.index++) {
		ret = HYPERVISOR_platform_op(&op);
		if (ret)
			break;
		edd.mbr_signature[edd.mbr_signature_nr++] =
			op.u.firmware_info.u.disk_mbr_signature.mbr_signature;
	}
}
/*
 * pvclock GTOD notifier: keep the hypervisor's wallclock in sync with
 * dom0's timekeeper.  Pushes the time down via XENPF_settime64, falling
 * back (once, permanently) to XENPF_settime32 on hypervisors that do not
 * implement the 64-bit op.  Rate-limited to once per 11 minutes unless
 * the clock was explicitly set (was_set != 0).
 */
static int xen_pvclock_gtod_notify(struct notifier_block *nb,
				   unsigned long was_set, void *priv)
{
	/* Protected by the calling core code serialization */
	static struct timespec64 next_sync;

	struct xen_platform_op op;
	struct timespec64 now;
	struct timekeeper *tk = priv;
	static bool settime64_supported = true;
	int ret;

	now.tv_sec = tk->xtime_sec;
	now.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);

	/*
	 * We only take the expensive HV call when the clock was set
	 * or when the 11 minutes RTC synchronization time elapsed.
	 */
	if (!was_set && timespec64_compare(&now, &next_sync) < 0)
		return NOTIFY_OK;

again:
	if (settime64_supported) {
		op.cmd = XENPF_settime64;
		op.u.settime64.mbz = 0;
		op.u.settime64.secs = now.tv_sec;
		op.u.settime64.nsecs = now.tv_nsec;
		op.u.settime64.system_time = xen_clocksource_read();
	} else {
		op.cmd = XENPF_settime32;
		op.u.settime32.secs = now.tv_sec;
		op.u.settime32.nsecs = now.tv_nsec;
		op.u.settime32.system_time = xen_clocksource_read();
	}

	ret = HYPERVISOR_platform_op(&op);

	/* Old hypervisor: remember that and retry with the 32-bit op. */
	if (ret == -ENOSYS && settime64_supported) {
		settime64_supported = false;
		goto again;
	}
	if (ret < 0)
		return NOTIFY_BAD;

	/*
	 * Move the next drift compensation time 11 minutes
	 * ahead. That's emulating the sync_cmos_clock() update for
	 * the hardware RTC.
	 */
	next_sync = now;
	next_sync.tv_sec += 11 * 60;

	return NOTIFY_OK;
}
/*
 * GetNextHighMonotonicCount() EFI runtime service, proxied through Xen.
 * Stores the new high count in *count and returns the EFI status.
 */
efi_status_t xen_efi_get_next_high_mono_count(u32 *count)
{
	struct xen_platform_op op = INIT_EFI_OP(get_next_high_monotonic_count);
	int rc;

	rc = HYPERVISOR_platform_op(&op);
	if (rc < 0)
		return EFI_UNSUPPORTED;

	/* The hypervisor hands the count back in the misc field. */
	*count = efi_data(op).misc;

	return efi_data(op).status;
}
/*
 * Count the hypervisor's variable-range MTRRs by reading register indices
 * upwards until XENPF_read_memtype fails; the result lands in the global
 * num_var_ranges.
 */
static void __init set_num_var_ranges(void)
{
	struct xen_platform_op op;

	num_var_ranges = 0;
	for (;;) {
		op.cmd = XENPF_read_memtype;
		op.u.read_memtype.reg = num_var_ranges;
		if (HYPERVISOR_platform_op(&op) != 0)
			break;
		num_var_ranges++;
	}
}
/*
 * SetTime() EFI runtime service, proxied through Xen.
 * Copies the caller's efi_time_t into the hypercall payload verbatim.
 */
efi_status_t xen_efi_set_time(efi_time_t *tm)
{
	struct xen_platform_op op = INIT_EFI_OP(set_time);
	int rc;

	/* The Xen and EFI time layouts must match for the raw copy below. */
	BUILD_BUG_ON(sizeof(*tm) != sizeof(efi_data(op).u.set_time));
	memcpy(&efi_data(op).u.set_time, tm, sizeof(*tm));

	rc = HYPERVISOR_platform_op(&op);
	if (rc < 0)
		return EFI_UNSUPPORTED;

	return efi_data(op).status;
}
/*
 * Read one variable-range MTRR from the hypervisor.
 *
 * @reg:  MTRR register index
 * @base: out - first frame number of the range
 * @size: out - number of frames
 * @type: out - memory type
 *
 * On hypercall failure all three outputs are reported as zero (an
 * empty/disabled range) rather than left as stack garbage.
 */
void generic_get_mtrr(unsigned int reg, unsigned long *base,
		      unsigned long *size, mtrr_type * type)
{
	struct xen_platform_op op;

	op.cmd = XENPF_read_memtype;
	op.u.read_memtype.reg = reg;
	if (unlikely(HYPERVISOR_platform_op(&op)))
		memset(&op.u.read_memtype, 0, sizeof(op.u.read_memtype));

	*size = op.u.read_memtype.nr_mfns;
	*base = op.u.read_memtype.mfn;
	*type = op.u.read_memtype.type;
}
/*
 * SetWakeupTime() EFI runtime service, proxied through Xen.
 *
 * The misc field carries the enable flag; when no time is supplied the
 * ENABLE_ONLY flag tells the hypervisor to change only the enable state.
 */
efi_status_t xen_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
{
	struct xen_platform_op op = INIT_EFI_OP(set_wakeup_time);
	int rc;

	BUILD_BUG_ON(sizeof(*tm) != sizeof(efi_data(op).u.set_wakeup_time));

	if (enabled)
		efi_data(op).misc = XEN_EFI_SET_WAKEUP_TIME_ENABLE;

	if (!tm)
		efi_data(op).misc |= XEN_EFI_SET_WAKEUP_TIME_ENABLE_ONLY;
	else
		memcpy(&efi_data(op).u.set_wakeup_time, tm, sizeof(*tm));

	rc = HYPERVISOR_platform_op(&op);
	if (rc < 0)
		return EFI_UNSUPPORTED;

	return efi_data(op).status;
}
/*
 * Fetch the display's EDID block from the hypervisor (XEN_FW_VBEDDC_INFO)
 * into the global edid_info.  Dom0 only; compiled out unless both
 * CONFIG_FIRMWARE_EDID and CONFIG_X86 are set.
 */
void __init copy_edid(void)
{
#if defined(CONFIG_FIRMWARE_EDID) && defined(CONFIG_X86)
	struct xen_platform_op op;

	if (!is_initial_xendomain())
		return;

	op.cmd = XENPF_firmware_info;
	op.u.firmware_info.type = XEN_FW_VBEDDC_INFO;
	op.u.firmware_info.index = 0;
	set_xen_guest_handle(op.u.firmware_info.u.vbeddc_info.edid,
			     edid_info.dummy);
	/* On failure, fill the buffer with 0x13 (an invalid EDID marker). */
	if (HYPERVISOR_platform_op(&op) != 0)
		memset(edid_info.dummy, 0x13, sizeof(edid_info.dummy));
#endif
}
/*
 * On dom0, invoke the underlying driver to update the physical RTC,
 * and tell the hypervisor to update its idea of global time.
 *
 * On domU, we don't have permission to update the machine's physical RTC,
 * so quietly ignore the attempt.
 */
static void todxen_set(tod_ops_t *top, timestruc_t ts)
{
	xen_platform_op_t op;

	if (!DOMAIN_IS_INITDOMAIN(xen_info))
		return;

	ASSERT(MUTEX_HELD(&tod_lock));

	/* Update the physical RTC through the next driver in the chain. */
	TODOP_SET(top->tod_next, ts);

	op.cmd = XENPF_settime;
	op.interface_version = XENPF_INTERFACE_VERSION;
	/* Hypervisor wallclock is UTC; subtract the lag adjustment. */
	op.u.settime.secs = ts.tv_sec - ggmtl();
	op.u.settime.nsecs = ts.tv_nsec;
	op.u.settime.system_time = xpv_getsystime();
	(void) HYPERVISOR_platform_op(&op);
}
/*
 * UpdateCapsule() EFI runtime service, proxied through Xen.  The service
 * only exists from EFI 2.00 onwards; older firmware revisions get
 * EFI_UNSUPPORTED without a hypercall.
 */
efi_status_t xen_efi_update_capsule(efi_capsule_header_t **capsules,
				    unsigned long count, unsigned long sg_list)
{
	struct xen_platform_op op = INIT_EFI_OP(update_capsule);
	int rc;

	if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
		return EFI_UNSUPPORTED;

	efi_data(op).u.update_capsule.capsule_count = count;
	efi_data(op).u.update_capsule.sg_list = sg_list;
	set_xen_guest_handle(efi_data(op).u.update_capsule.capsule_header_array,
			     capsules);

	rc = HYPERVISOR_platform_op(&op);
	if (rc < 0)
		return EFI_UNSUPPORTED;

	return efi_data(op).status;
}
/*
 * Drop a usage reference on an MTRR region and, when the count hits zero,
 * ask the hypervisor to delete it (XENPF_del_memtype).
 *
 * @reg:  register index, or negative to look the register up by base/size
 * @base: first frame number (used for lookup when reg < 0)
 * @size: number of frames (used for lookup when reg < 0)
 *
 * Returns the register index on success or a negative error code.
 * Serialized by mtrr_mutex.
 */
int mtrr_del_page(int reg, unsigned long base, unsigned long size)
{
	unsigned i;
	mtrr_type ltype;
	unsigned long lbase, lsize;
	int error = -EINVAL;
	struct xen_platform_op op;

	mutex_lock(&mtrr_mutex);

	if (reg < 0) {
		/* Search for existing MTRR */
		for (i = 0; i < num_var_ranges; ++i) {
			mtrr_if->get(i, &lbase, &lsize, &ltype);
			if (lbase == base && lsize == size) {
				reg = i;
				break;
			}
		}
		if (reg < 0) {
			pr_debug("mtrr: no MTRR for %lx000,%lx000 found\n",
				 base, size);
			goto out;
		}
	}
	if (mtrr_usage_table[reg] < 1) {
		pr_warning("mtrr: reg: %d has count=0\n", reg);
		goto out;
	}
	/* Only the final release actually removes the hypervisor entry. */
	if (--mtrr_usage_table[reg] < 1) {
		op.cmd = XENPF_del_memtype;
		op.u.del_memtype.handle = 0;
		op.u.del_memtype.reg = reg;

		error = HYPERVISOR_platform_op(&op);
		if (error) {
			BUG_ON(error > 0);
			goto out;
		}
	}
	error = reg;
 out:
	mutex_unlock(&mtrr_mutex);
	return error;
}
/*
 * SetVariable() EFI runtime service, proxied through Xen.  The attribute
 * bits travel in the misc field; name and data are passed by guest handle.
 */
efi_status_t xen_efi_set_variable(efi_char16_t *name, efi_guid_t *vendor,
				  u32 attr, unsigned long data_size,
				  void *data)
{
	struct xen_platform_op op = INIT_EFI_OP(set_variable);
	int rc;

	BUILD_BUG_ON(sizeof(*vendor) !=
		     sizeof(efi_data(op).u.set_variable.vendor_guid));

	efi_data(op).misc = attr;
	efi_data(op).u.set_variable.size = data_size;
	memcpy(&efi_data(op).u.set_variable.vendor_guid, vendor,
	       sizeof(*vendor));
	set_xen_guest_handle(efi_data(op).u.set_variable.name, name);
	set_xen_guest_handle(efi_data(op).u.set_variable.data, data);

	rc = HYPERVISOR_platform_op(&op);
	if (rc < 0)
		return EFI_UNSUPPORTED;

	return efi_data(op).status;
}
/*
 * QueryVariableInfo() EFI runtime service, proxied through Xen.  Requires
 * EFI 2.00+; fills the three out-parameters with the variable store sizes
 * reported by the hypervisor.
 */
efi_status_t xen_efi_query_variable_info(u32 attr, u64 *storage_space,
					 u64 *remaining_space,
					 u64 *max_variable_size)
{
	struct xen_platform_op op = INIT_EFI_OP(query_variable_info);
	int rc;

	if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
		return EFI_UNSUPPORTED;

	efi_data(op).u.query_variable_info.attr = attr;

	rc = HYPERVISOR_platform_op(&op);
	if (rc < 0)
		return EFI_UNSUPPORTED;

	*storage_space = efi_data(op).u.query_variable_info.max_store_size;
	*remaining_space = efi_data(op).u.query_variable_info.remain_store_size;
	*max_variable_size = efi_data(op).u.query_variable_info.max_size;

	return efi_data(op).status;
}
/*
 * GetWakeupTime() EFI runtime service, proxied through Xen.  The enabled
 * and pending flags come back packed into the misc field; any of the
 * out-parameters may be NULL and is then skipped.
 */
efi_status_t xen_efi_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
				     efi_time_t *tm)
{
	struct xen_platform_op op = INIT_EFI_OP(get_wakeup_time);
	int rc;

	rc = HYPERVISOR_platform_op(&op);
	if (rc < 0)
		return EFI_UNSUPPORTED;

	if (tm) {
		BUILD_BUG_ON(sizeof(*tm) !=
			     sizeof(efi_data(op).u.get_wakeup_time));
		memcpy(tm, &efi_data(op).u.get_wakeup_time, sizeof(*tm));
	}

	if (enabled)
		*enabled = !!(efi_data(op).misc &
			      XEN_EFI_GET_WAKEUP_TIME_ENABLED);

	if (pending)
		*pending = !!(efi_data(op).misc &
			      XEN_EFI_GET_WAKEUP_TIME_PENDING);

	return efi_data(op).status;
}
/*
 * GetTime() EFI runtime service, proxied through Xen.  Either out-parameter
 * may be NULL; the "sets to zero" capability bit arrives in misc.
 */
efi_status_t xen_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
{
	struct xen_platform_op op = INIT_EFI_OP(get_time);
	int rc;

	rc = HYPERVISOR_platform_op(&op);
	if (rc < 0)
		return EFI_UNSUPPORTED;

	if (tm) {
		BUILD_BUG_ON(sizeof(*tm) !=
			     sizeof(efi_data(op).u.get_time.time));
		memcpy(tm, &efi_data(op).u.get_time.time, sizeof(*tm));
	}

	if (tc) {
		tc->resolution = efi_data(op).u.get_time.resolution;
		tc->accuracy = efi_data(op).u.get_time.accuracy;
		tc->sets_to_zero = !!(efi_data(op).misc &
				      XEN_EFI_GET_TIME_SET_CLEARS_NS);
	}

	return efi_data(op).status;
}
/*
 * QueryCapsuleCapabilities() EFI runtime service, proxied through Xen.
 * Requires EFI 2.00+; reports the maximum capsule size and required reset
 * type for the given capsule array.
 */
efi_status_t xen_efi_query_capsule_caps(efi_capsule_header_t **capsules,
					unsigned long count, u64 *max_size,
					int *reset_type)
{
	struct xen_platform_op op = INIT_EFI_OP(query_capsule_capabilities);
	int rc;

	if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
		return EFI_UNSUPPORTED;

	efi_data(op).u.query_capsule_capabilities.capsule_count = count;
	set_xen_guest_handle(
		efi_data(op).u.query_capsule_capabilities.capsule_header_array,
		capsules);

	rc = HYPERVISOR_platform_op(&op);
	if (rc < 0)
		return EFI_UNSUPPORTED;

	*max_size = efi_data(op).u.query_capsule_capabilities.max_capsule_size;
	*reset_type = efi_data(op).u.query_capsule_capabilities.reset_type;

	return efi_data(op).status;
}
/*
 * GetNextVariableName() EFI runtime service, proxied through Xen.
 * name_size and vendor are in/out: the hypervisor updates both to describe
 * the next variable in the enumeration.
 */
efi_status_t xen_efi_get_next_variable(unsigned long *name_size,
				       efi_char16_t *name, efi_guid_t *vendor)
{
	struct xen_platform_op op = INIT_EFI_OP(get_next_variable_name);
	int rc;

	BUILD_BUG_ON(sizeof(*vendor) !=
		     sizeof(efi_data(op).u.get_next_variable_name.vendor_guid));

	efi_data(op).u.get_next_variable_name.size = *name_size;
	memcpy(&efi_data(op).u.get_next_variable_name.vendor_guid, vendor,
	       sizeof(*vendor));
	set_xen_guest_handle(efi_data(op).u.get_next_variable_name.name, name);

	rc = HYPERVISOR_platform_op(&op);
	if (rc < 0)
		return EFI_UNSUPPORTED;

	/* Propagate the updated size and GUID back to the caller. */
	*name_size = efi_data(op).u.get_next_variable_name.size;
	memcpy(vendor, &efi_data(op).u.get_next_variable_name.vendor_guid,
	       sizeof(*vendor));

	return efi_data(op).status;
}
/*
 * Copy a CPU microcode image from user space and hand it to the hypervisor
 * (XENPF_microcode_update).  Returns 0 or a negative error code.
 */
static int do_microcode_update (const void __user *ubuf, size_t len)
{
	void *kbuf;
	int err;

	kbuf = vmalloc(len);
	if (!kbuf)
		return -ENOMEM;

	if (copy_from_user(kbuf, ubuf, len) != 0) {
		err = -EFAULT;
	} else {
		struct xen_platform_op op;

		op.cmd = XENPF_microcode_update;
		set_xen_guest_handle(op.u.microcode.data, kbuf);
		op.u.microcode.length = len;
		err = HYPERVISOR_platform_op(&op);
	}

	vfree(kbuf);

	return err;
}
/*
 * Dom0 privcmd wrapper for HYPERVISOR_platform_op: import the op from user
 * space, pin any embedded user buffers the hypervisor will read or write,
 * issue the hypercall, then export results (and the op itself) back out.
 *
 * Returns 0 or a negative Xen error code.
 */
static int privcmd_HYPERVISOR_platform_op(xen_platform_op_t *opp)
{
	import_export_t op_ie, sub_ie, sub2_ie;
	xen_platform_op_t op;
	/*
	 * BUGFIX: error must start at 0.  Commands with no embedded pointers
	 * (the first case group below), as well as XEN_PM_TX, the XEN_PM_PX
	 * path without XEN_PX_PSS, and XEN_FW_DISK_MBR_SIGNATURE, reach the
	 * "if (error == 0)" test without ever assigning error; previously
	 * that read an uninitialized stack value.
	 */
	int error = 0;

	if (import_buffer(&op_ie, opp, &op, sizeof (op), IE_IMPEXP) != 0)
		return (-X_EFAULT);

	sub_ie = null_ie;
	sub2_ie = null_ie;

	/*
	 * Check this first because our wrapper will forcibly overwrite it.
	 */
	if (op.interface_version != XENPF_INTERFACE_VERSION) {
		error = -X_EACCES;
		export_buffer(&op_ie, &error);
		return (error);
	}

	/*
	 * Now handle any platform ops with embedded pointers elsewhere
	 * in the user address space that also need to be tacked down
	 * while the hypervisor futzes with them.
	 */
	switch (op.cmd) {
	case XENPF_settime:
	case XENPF_add_memtype:
	case XENPF_del_memtype:
	case XENPF_read_memtype:
	case XENPF_platform_quirk:
	case XENPF_enter_acpi_sleep:
	case XENPF_change_freq:
	case XENPF_panic_init:
		/* No embedded pointers; nothing to pin. */
		break;

	case XENPF_microcode_update:
		error = import_handle(&sub_ie, &op.u.microcode.data,
		    op.u.microcode.length, IE_IMPORT);
		break;

	case XENPF_getidletime:
		error = import_handle(&sub_ie,
		    &op.u.getidletime.cpumap_bitmap,
		    op.u.getidletime.cpumap_nr_cpus, IE_IMPEXP);
		if (error != 0)
			break;

		error = import_handle(&sub2_ie, &op.u.getidletime.idletime,
		    op.u.getidletime.cpumap_nr_cpus * sizeof (uint64_t),
		    IE_EXPORT);
		break;

	case XENPF_set_processor_pminfo: {
		size_t s;

		switch (op.u.set_pminfo.type) {
		case XEN_PM_PX:
			s = op.u.set_pminfo.u.perf.state_count *
			    sizeof (xen_processor_px_t);
			if (op.u.set_pminfo.u.perf.flags & XEN_PX_PSS) {
				error = import_handle(&sub_ie,
				    &op.u.set_pminfo.u.perf.states, s,
				    IE_IMPORT);
			}
			break;
		case XEN_PM_CX:
			s = op.u.set_pminfo.u.power.count *
			    sizeof (xen_processor_cx_t);
			error = import_handle(&sub_ie,
			    &op.u.set_pminfo.u.power.states, s, IE_IMPORT);
			break;
		case XEN_PM_TX:
			break;
		default:
			error = -X_EINVAL;
			break;
		}
		break;
	}

	case XENPF_firmware_info: {
		uint16_t len;
		void *uaddr;

		switch (op.u.firmware_info.type) {
		case XEN_FW_DISK_INFO:
			/*
			 * Ugh.. another hokey interface.  The first 16 bits
			 * of the buffer are also used as the (input) length.
			 */
			uaddr = uaddr_from_handle(
			    &op.u.firmware_info.u.disk_info.edd_params);
			error = ddi_copyin(uaddr, &len, sizeof (len), 0);
			if (error != 0)
				break;

			error = import_handle(&sub_ie,
			    &op.u.firmware_info.u.disk_info.edd_params, len,
			    IE_IMPEXP);
			break;

		case XEN_FW_VBEDDC_INFO:
			error = import_handle(&sub_ie,
			    &op.u.firmware_info.u.vbeddc_info.edid,
			    128, IE_EXPORT);
			break;

		case XEN_FW_DISK_MBR_SIGNATURE:
		default:
			break;
		}
		break;
	}

	default:
		/* FIXME: observed with nonexistent cmd ID 38 -- why? */
		/*
		 * NOTE(review): this path returns without calling
		 * export_buffer(&op_ie, ...); confirm import_buffer holds no
		 * state that needs releasing on this early return.
		 */
#ifdef DEBUG
		printf("unrecognized HYPERVISOR_platform_op %d pid %d\n",
		    op.cmd, curthread->t_procp->p_pid);
#endif
		return (-X_EINVAL);
	}

	if (error == 0)
		error = HYPERVISOR_platform_op(&op);

	export_buffer(&op_ie, &error);
	export_buffer(&sub_ie, &error);
	export_buffer(&sub2_ie, &error);

	return (error);
}
static int xen_cx_notifier(struct acpi_processor *pr, int action) { int ret, count = 0, i; xen_platform_op_t op = { .cmd = XENPF_set_processor_pminfo, .interface_version = XENPF_INTERFACE_VERSION, .u.set_pminfo.id = pr->acpi_id, .u.set_pminfo.type = XEN_PM_CX, }; struct xen_processor_cx *data, *buf; struct acpi_processor_cx *cx; /* Convert to Xen defined structure and hypercall */ buf = kzalloc(pr->power.count * sizeof(struct xen_processor_cx), GFP_KERNEL); if (!buf) return -ENOMEM; data = buf; for (i = 1; i <= pr->power.count; i++) { cx = &pr->power.states[i]; /* Skip invalid cstate entry */ if (!cx->valid) continue; data->type = cx->type; data->latency = cx->latency; data->power = cx->power; data->reg.space_id = cx->reg.space_id; data->reg.bit_width = cx->reg.bit_width; data->reg.bit_offset = cx->reg.bit_offset; data->reg.access_size = cx->reg.reserved; data->reg.address = cx->reg.address; /* Get dependency relationships */ if (cx->csd_count) { printk("Wow! _CSD is found. Not support for now!\n"); kfree(buf); return -EINVAL; } else { data->dpcnt = 0; set_xen_guest_handle(data->dp, NULL); } data++; count++; } if (!count) { printk("No available Cx info for cpu %d\n", pr->acpi_id); kfree(buf); return -EINVAL; } op.u.set_pminfo.u.power.count = count; op.u.set_pminfo.u.power.flags.bm_control = pr->flags.bm_control; op.u.set_pminfo.u.power.flags.bm_check = pr->flags.bm_check; op.u.set_pminfo.u.power.flags.has_cst = pr->flags.has_cst; op.u.set_pminfo.u.power.flags.power_setup_done = pr->flags.power_setup_done; set_xen_guest_handle(op.u.set_pminfo.u.power.states, buf); ret = HYPERVISOR_platform_op(&op); kfree(buf); return ret; } static int xen_px_notifier(struct acpi_processor *pr, int action) { int ret = -EINVAL; xen_platform_op_t op = { .cmd = XENPF_set_processor_pminfo, .interface_version = XENPF_INTERFACE_VERSION, .u.set_pminfo.id = pr->acpi_id, .u.set_pminfo.type = XEN_PM_PX, }; struct xen_processor_performance *perf; struct xen_processor_px *states = NULL; struct 
acpi_processor_performance *px; struct acpi_psd_package *pdomain; if (!pr) return -EINVAL; perf = &op.u.set_pminfo.u.perf; px = pr->performance; if (!px) return -EINVAL; switch(action) { case PROCESSOR_PM_CHANGE: /* ppc dynamic handle */ perf->flags = XEN_PX_PPC; perf->platform_limit = pr->performance_platform_limit; ret = HYPERVISOR_platform_op(&op); break; case PROCESSOR_PM_INIT: /* px normal init */ perf->flags = XEN_PX_PPC | XEN_PX_PCT | XEN_PX_PSS | XEN_PX_PSD; /* ppc */ perf->platform_limit = pr->performance_platform_limit; /* pct */ xen_convert_pct_reg(&perf->control_register, &px->control_register); xen_convert_pct_reg(&perf->status_register, &px->status_register); /* pss */ perf->state_count = px->state_count; states = kzalloc(px->state_count*sizeof(xen_processor_px_t),GFP_KERNEL); if (!states) return -ENOMEM; xen_convert_pss_states(states, px->states, px->state_count); set_xen_guest_handle(perf->states, states); /* psd */ pdomain = &px->domain_info; xen_convert_psd_pack(&perf->domain_info, pdomain); if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL) perf->shared_type = CPUFREQ_SHARED_TYPE_ALL; else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY) perf->shared_type = CPUFREQ_SHARED_TYPE_ANY; else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL) perf->shared_type = CPUFREQ_SHARED_TYPE_HW; else { ret = -ENODEV; kfree(states); break; } ret = HYPERVISOR_platform_op(&op); kfree(states); break; default: break; } return ret; } static int xen_tx_notifier(struct acpi_processor *pr, int action) { return -EINVAL; } static int xen_hotplug_notifier(struct acpi_processor *pr, int event) { return -EINVAL; } static struct processor_extcntl_ops xen_extcntl_ops = { .hotplug = xen_hotplug_notifier, }; void arch_acpi_processor_init_extcntl(const struct processor_extcntl_ops **ops) { unsigned int pmbits = (xen_start_info->flags & SIF_PM_MASK) >> 8; if (!pmbits) return; if (pmbits & XEN_PROCESSOR_PM_CX) xen_extcntl_ops.pm_ops[PM_TYPE_IDLE] = xen_cx_notifier; if 
(pmbits & XEN_PROCESSOR_PM_PX) xen_extcntl_ops.pm_ops[PM_TYPE_PERF] = xen_px_notifier; if (pmbits & XEN_PROCESSOR_PM_TX) xen_extcntl_ops.pm_ops[PM_TYPE_THR] = xen_tx_notifier; *ops = &xen_extcntl_ops; }