static unsigned int __init get_max_acpi_id(void)
{
	struct xenpf_pcpuinfo *info;
	struct xen_platform_op op = {
		.cmd = XENPF_get_cpuinfo,
		.interface_version = XENPF_INTERFACE_VERSION,
	};
	int ret = 0;
	unsigned int i, last_cpu, max_acpi_id = 0;

	info = &op.u.pcpu_info;
	info->xen_cpuid = 0;

	ret = HYPERVISOR_dom0_op(&op);
	if (ret)
		return NR_CPUS;

	/* The max_present is the same regardless of the xen_cpuid */
	last_cpu = op.u.pcpu_info.max_present;
	for (i = 0; i <= last_cpu; i++) {
		info->xen_cpuid = i;
		ret = HYPERVISOR_dom0_op(&op);
		if (ret)
			continue;
		max_acpi_id = max(info->acpi_id, max_acpi_id);
	}
	max_acpi_id *= 2; /* Slack for CPU hotplug support. */
	pr_debug(DRV_NAME "Max ACPI ID: %u\n", max_acpi_id);
	return max_acpi_id;
}
Example #2
static unsigned int __init get_max_acpi_id(void)
{
	struct xenpf_pcpuinfo *info;
	struct xen_platform_op op = {
		.cmd = XENPF_get_cpuinfo,
		.interface_version = XENPF_INTERFACE_VERSION,
	};
	int ret = 0;
	unsigned int i, last_cpu, max_acpi_id = 0;

	info = &op.u.pcpu_info;
	info->xen_cpuid = 0;

	ret = HYPERVISOR_dom0_op(&op);
	if (ret)
		return NR_CPUS;

	/* The max_present is the same regardless of the xen_cpuid */
	last_cpu = op.u.pcpu_info.max_present;
	for (i = 0; i <= last_cpu; i++) {
		info->xen_cpuid = i;
		ret = HYPERVISOR_dom0_op(&op);
		if (ret)
			continue;
		max_acpi_id = max(info->acpi_id, max_acpi_id);
	}
	max_acpi_id *= 2; /* Slack for CPU hotplug support. */
	pr_debug("Max ACPI ID: %u\n", max_acpi_id);
	return max_acpi_id;
}
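The value computed above is typically used to size per-ACPI-ID bookkeeping. A minimal sketch under that assumption (nr_acpi_bits, acpi_ids_done and size_acpi_id_map are illustrative names, not part of the example):

static unsigned int nr_acpi_bits;	/* hypothetical */
static unsigned long *acpi_ids_done;	/* hypothetical */

static int __init size_acpi_id_map(void)
{
	/* One bit per possible ACPI ID, including the hotplug slack above. */
	nr_acpi_bits = get_max_acpi_id() + 1;
	acpi_ids_done = kcalloc(BITS_TO_LONGS(nr_acpi_bits),
				sizeof(unsigned long), GFP_KERNEL);
	return acpi_ids_done ? 0 : -ENOMEM;
}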
Example #3
File: efi.c Project: 3null/linux
efi_system_table_t __init *xen_efi_probe(void)
{
	struct xen_platform_op op = {
		.cmd = XENPF_firmware_info,
		.u.firmware_info = {
			.type = XEN_FW_EFI_INFO,
			.index = XEN_FW_EFI_CONFIG_TABLE
		}
	};
	union xenpf_efi_info *info = &op.u.firmware_info.u.efi_info;

	if (!xen_initial_domain() || HYPERVISOR_dom0_op(&op) < 0)
		return NULL;

	/* Here we know that Xen runs on EFI platform. */

	efi = efi_xen;

	efi_systab_xen.tables = info->cfg.addr;
	efi_systab_xen.nr_tables = info->cfg.nent;

	op.cmd = XENPF_firmware_info;
	op.u.firmware_info.type = XEN_FW_EFI_INFO;
	op.u.firmware_info.index = XEN_FW_EFI_VENDOR;
	info->vendor.bufsz = sizeof(vendor);
	set_xen_guest_handle(info->vendor.name, vendor);

	if (HYPERVISOR_dom0_op(&op) == 0) {
		efi_systab_xen.fw_vendor = __pa_symbol(vendor);
		efi_systab_xen.fw_revision = info->vendor.revision;
	} else
		efi_systab_xen.fw_vendor = __pa_symbol(L"UNKNOWN");

	op.cmd = XENPF_firmware_info;
	op.u.firmware_info.type = XEN_FW_EFI_INFO;
	op.u.firmware_info.index = XEN_FW_EFI_VERSION;

	if (HYPERVISOR_dom0_op(&op) == 0)
		efi_systab_xen.hdr.revision = info->version;

	op.cmd = XENPF_firmware_info;
	op.u.firmware_info.type = XEN_FW_EFI_INFO;
	op.u.firmware_info.index = XEN_FW_EFI_RT_VERSION;

	if (HYPERVISOR_dom0_op(&op) == 0)
		efi.runtime_version = info->version;

	return &efi_systab_xen;
}
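A minimal, hypothetical caller for the probe above; nothing in this sketch comes from the example itself, it only illustrates that a NULL return means the caller is not dom0 or Xen was not booted from EFI.

void __init xen_efi_init(void)
{
	efi_system_table_t *systab = xen_efi_probe();

	if (!systab)
		return;		/* not dom0, or Xen not started from EFI */

	/* ... hand the table to the generic EFI setup code ... */
	pr_info("Xen: EFI system table at %p\n", systab);
}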
Example #4
static int xen_hotadd_cpu(struct acpi_processor *pr)
{
	int cpu_id, apic_id, pxm;
	struct xen_platform_op op;

	apic_id = xen_apic_id(pr->handle);
	if (apic_id < 0) {
		pr_err(PREFIX "Failed to get apic_id for acpi_id %d\n",
				pr->acpi_id);
		return -ENODEV;
	}

	pxm = xen_acpi_get_pxm(pr->handle);
	if (pxm < 0) {
		pr_err(PREFIX "Failed to get _PXM for acpi_id %d\n",
				pr->acpi_id);
		return pxm;
	}

	op.cmd = XENPF_cpu_hotadd;
	op.u.cpu_add.apic_id = apic_id;
	op.u.cpu_add.acpi_id = pr->acpi_id;
	op.u.cpu_add.pxm = pxm;

	cpu_id = HYPERVISOR_dom0_op(&op);
	if (cpu_id < 0)
		pr_err(PREFIX "Failed to hotadd CPU for acpi_id %d\n",
				pr->acpi_id);

	return cpu_id;
}
Example #5
int mtrr_add_page(unsigned long base, unsigned long size, 
		  unsigned int type, char increment)
{
	int error;
	dom0_op_t op;

	mutex_lock(&mtrr_mutex);

	op.cmd = DOM0_ADD_MEMTYPE;
	op.u.add_memtype.mfn     = base;
	op.u.add_memtype.nr_mfns = size;
	op.u.add_memtype.type    = type;
	error = HYPERVISOR_dom0_op(&op);
	if (error) {
		mutex_unlock(&mtrr_mutex);
		BUG_ON(error > 0);
		return error;
	}

	if (increment)
		++usage_table[op.u.add_memtype.reg];

	mutex_unlock(&mtrr_mutex);

	return op.u.add_memtype.reg;
}
Example #6
static u32 xen_apic_read(u32 reg)
{
	struct xen_platform_op op = {
		.cmd = XENPF_get_cpuinfo,
		.interface_version = XENPF_INTERFACE_VERSION,
		.u.pcpu_info.xen_cpuid = 0,
	};
	int ret = 0;

	if (!xen_initial_domain() || smp_processor_id())
		return 0;

	if (reg == APIC_LVR)
		return 0x10;

	if (reg != APIC_ID)
		return 0;

	ret = HYPERVISOR_dom0_op(&op);
	if (ret)
		return 0;

	return op.u.pcpu_info.apic_id << 24;
}

static void xen_apic_write(u32 reg, u32 val)
{
	/* Warn to see if there's any stray references */
	WARN_ON(1);
}
Example #7
static u32 xen_apic_read(u32 reg)
{
	struct xen_platform_op op = {
		.cmd = XENPF_get_cpuinfo,
		.interface_version = XENPF_INTERFACE_VERSION,
		.u.pcpu_info.xen_cpuid = 0,
	};
	int ret = 0;

	/* Shouldn't need this as APIC is turned off for PV, and we only
	 * get called on the bootup processor. But just in case. */
	if (!xen_initial_domain() || smp_processor_id())
		return 0;

	if (reg == APIC_LVR)
		return 0x10;

	if (reg != APIC_ID)
		return 0;

	ret = HYPERVISOR_dom0_op(&op);
	if (ret)
		return 0;

	return op.u.pcpu_info.apic_id << 24;
}

static void xen_apic_write(u32 reg, u32 val)
{
	/* Warn to see if there's any stray references */
	WARN_ON(1);
}
Example #8
File: time.c Project: 0-T-0/ps4-linux
static int xen_pvclock_gtod_notify(struct notifier_block *nb,
				   unsigned long was_set, void *priv)
{
	/* Protected by the calling core code serialization */
	static struct timespec next_sync;

	struct xen_platform_op op;
	struct timespec now;

	now = __current_kernel_time();

	/*
	 * We only take the expensive HV call when the clock was set
	 * or when the 11 minutes RTC synchronization time elapsed.
	 */
	if (!was_set && timespec_compare(&now, &next_sync) < 0)
		return NOTIFY_OK;

	op.cmd = XENPF_settime;
	op.u.settime.secs = now.tv_sec;
	op.u.settime.nsecs = now.tv_nsec;
	op.u.settime.system_time = xen_clocksource_read();

	(void)HYPERVISOR_dom0_op(&op);

	/*
	 * Move the next drift compensation time 11 minutes
	 * ahead. That's emulating the sync_cmos_clock() update for
	 * the hardware RTC.
	 */
	next_sync = now;
	next_sync.tv_sec += 11 * 60;

	return NOTIFY_OK;
}
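For the notifier above to fire, it has to be registered with the timekeeping core; a sketch of the usual registration, assuming the variable and function names (pvclock_gtod_register_notifier() itself is the standard kernel API):

static struct notifier_block xen_pvclock_gtod_notifier = {
	.notifier_call = xen_pvclock_gtod_notify,
};

static void __init xen_setup_wallclock_sync(void)
{
	/* Only dom0 is allowed to set the wallclock via XENPF_settime. */
	if (xen_initial_domain())
		pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier);
}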
Example #9
File: acpi.c Project: 19Dan01/linux
static int xen_acpi_notify_hypervisor_state(u8 sleep_state,
					    u32 val_a, u32 val_b,
					    bool extended)
{
	unsigned int bits = extended ? 8 : 16;

	struct xen_platform_op op = {
		.cmd = XENPF_enter_acpi_sleep,
		.interface_version = XENPF_INTERFACE_VERSION,
		.u.enter_acpi_sleep = {
			.val_a = (u16)val_a,
			.val_b = (u16)val_b,
			.sleep_state = sleep_state,
			.flags = extended ? XENPF_ACPI_SLEEP_EXTENDED : 0,
		},
	};

	if (WARN((val_a & (~0 << bits)) || (val_b & (~0 << bits)),
		 "Using more than %u bits of sleep control values %#x/%#x!"
		 "Email [email protected] - Thank you.\n", \
		 bits, val_a, val_b))
		return -1;

	HYPERVISOR_dom0_op(&op);
	return 1;
}
Example #10
static int xen_acpi_pad_idle_cpus(unsigned int idle_nums)
{
	struct xen_platform_op op;

	op.cmd = XENPF_core_parking;
	op.u.core_parking.type = XEN_CORE_PARKING_SET;
	op.u.core_parking.idle_nums = idle_nums;

	return HYPERVISOR_dom0_op(&op);
}
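The SET operation above has a read-back counterpart; a sketch of the companion query, assuming XEN_CORE_PARKING_GET reports the current count through the same idle_nums field:

static int xen_acpi_pad_idle_cpus_num(void)
{
	struct xen_platform_op op;

	op.cmd = XENPF_core_parking;
	op.u.core_parking.type = XEN_CORE_PARKING_GET;

	/* On success, report how many cores the hypervisor is parking. */
	return HYPERVISOR_dom0_op(&op)
	       ?: op.u.core_parking.idle_nums;
}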
Example #11
File: efi.c Project: 3null/linux
static efi_status_t xen_efi_get_next_high_mono_count(u32 *count)
{
	struct xen_platform_op op = INIT_EFI_OP(get_next_high_monotonic_count);

	if (HYPERVISOR_dom0_op(&op) < 0)
		return EFI_UNSUPPORTED;

	*count = efi_data(op).misc;

	return efi_data(op).status;
}
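This and the following efi.c examples lean on two local helpers, INIT_EFI_OP() and efi_data(), whose definitions are not shown. They are assumed to look roughly like this (a sketch, not the authoritative definition):

/* Assumed shape of the helpers: build a XENPF_efi_runtime_call platform op
 * for a given EFI runtime service and access its payload. */
#define INIT_EFI_OP(name) \
	{ .cmd = XENPF_efi_runtime_call, \
	  .u.efi_runtime_call.function = XEN_EFI_##name, \
	  .u.efi_runtime_call.misc = 0 }

#define efi_data(op)	((op).u.efi_runtime_call)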
Example #12
static void __cpuinit set_num_var_ranges(void)
{
	dom0_op_t op;

	for (num_var_ranges = 0; ; num_var_ranges++) {
		op.cmd = DOM0_READ_MEMTYPE;
		op.u.read_memtype.reg = num_var_ranges;
		if (HYPERVISOR_dom0_op(&op) != 0)
			break;
	}
}
Example #13
File: efi.c Project: 3null/linux
static efi_status_t xen_efi_set_time(efi_time_t *tm)
{
	struct xen_platform_op op = INIT_EFI_OP(set_time);

	BUILD_BUG_ON(sizeof(*tm) != sizeof(efi_data(op).u.set_time));
	memcpy(&efi_data(op).u.set_time, tm, sizeof(*tm));

	if (HYPERVISOR_dom0_op(&op) < 0)
		return EFI_UNSUPPORTED;

	return efi_data(op).status;
}
Example #14
void generic_get_mtrr(unsigned int reg, unsigned long *base,
		      unsigned int *size, mtrr_type * type)
{
	dom0_op_t op;

	op.cmd = DOM0_READ_MEMTYPE;
	op.u.read_memtype.reg = reg;
	(void)HYPERVISOR_dom0_op(&op);

	*size = op.u.read_memtype.nr_mfns;
	*base = op.u.read_memtype.mfn;
	*type = op.u.read_memtype.type;
}
Example #15
static u32 xen_apic_read(u32 reg)
{
	struct xen_platform_op op = {
		.cmd = XENPF_get_cpuinfo,
		.interface_version = XENPF_INTERFACE_VERSION,
		.u.pcpu_info.xen_cpuid = 0,
	};
	int ret = 0;

	/* Shouldn't need this as APIC is turned off for PV, and we only
	 * get called on the bootup processor. But just in case. */
	if (!xen_initial_domain() || smp_processor_id())
		return 0;

	if (reg == APIC_LVR)
		return 0x10;
#ifdef CONFIG_X86_32
	if (reg == APIC_LDR)
		return SET_APIC_LOGICAL_ID(1UL << smp_processor_id());
#endif
	if (reg != APIC_ID)
		return 0;

	ret = HYPERVISOR_dom0_op(&op);
	if (ret)
		op.u.pcpu_info.apic_id = BAD_APICID;

	return op.u.pcpu_info.apic_id << 24;
}

static void xen_apic_write(u32 reg, u32 val)
{
	if (reg == APIC_LVTPC) {
		(void)pmu_apic_update(reg);
		return;
	}

	/* Warn to see if there's any stray references */
	WARN(1,"register: %x, value: %x\n", reg, val);
}

static u64 xen_apic_icr_read(void)
{
	return 0;
}

static void xen_apic_icr_write(u32 low, u32 id)
{
	/* Warn to see if there's any stray references */
	WARN_ON(1);
}
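These callbacks are normally wired into the generic x86 APIC driver; a partial, illustrative initializer (struct apic has many more mandatory fields than shown, and the instance name is an assumption):

static struct apic xen_pv_apic = {
	.name		= "Xen PV",
	.read		= xen_apic_read,
	.write		= xen_apic_write,
	.icr_read	= xen_apic_icr_read,
	.icr_write	= xen_apic_icr_write,
	/* ... remaining struct apic callbacks omitted in this sketch ... */
};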
Example #16
File: efi.c Project: 3null/linux
static efi_status_t xen_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
{
	struct xen_platform_op op = INIT_EFI_OP(set_wakeup_time);

	BUILD_BUG_ON(sizeof(*tm) != sizeof(efi_data(op).u.set_wakeup_time));
	if (enabled)
		efi_data(op).misc = XEN_EFI_SET_WAKEUP_TIME_ENABLE;
	if (tm)
		memcpy(&efi_data(op).u.set_wakeup_time, tm, sizeof(*tm));
	else
		efi_data(op).misc |= XEN_EFI_SET_WAKEUP_TIME_ENABLE_ONLY;

	if (HYPERVISOR_dom0_op(&op) < 0)
		return EFI_UNSUPPORTED;

	return efi_data(op).status;
}
Example #17
int mtrr_del_page(int reg, unsigned long base, unsigned long size)
{
	unsigned i;
	mtrr_type ltype;
	unsigned long lbase;
	unsigned int lsize;
	int error = -EINVAL;
	dom0_op_t op;

	mutex_lock(&mtrr_mutex);

	if (reg < 0) {
		/*  Search for existing MTRR  */
		for (i = 0; i < num_var_ranges; ++i) {
			mtrr_if->get(i, &lbase, &lsize, &ltype);
			if (lbase == base && lsize == size) {
				reg = i;
				break;
			}
		}
		if (reg < 0) {
			printk(KERN_DEBUG "mtrr: no MTRR for %lx000,%lx000 found\n", base,
			       size);
			goto out;
		}
	}
	if (usage_table[reg] < 1) {
		printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg);
		goto out;
	}
	if (--usage_table[reg] < 1) {
		op.cmd = DOM0_DEL_MEMTYPE;
		op.u.del_memtype.handle = 0;
		op.u.del_memtype.reg    = reg;
		error = HYPERVISOR_dom0_op(&op);
		if (error) {
			BUG_ON(error > 0);
			goto out;
		}
	}
	error = reg;
 out:
	mutex_unlock(&mtrr_mutex);
	return error;
}
Example #18
static int xen_set_wallclock(unsigned long now)
{
	struct xen_platform_op op;
	int rc;

	/* do nothing for domU */
	if (!xen_initial_domain())
		return -1;

	op.cmd = XENPF_settime;
	op.u.settime.secs = now;
	op.u.settime.nsecs = 0;
	op.u.settime.system_time = xen_clocksource_read();

	rc = HYPERVISOR_dom0_op(&op);
	WARN(rc != 0, "XENPF_settime failed: now=%ld\n", now);

	return rc;
}
Example #19
File: efi.c Project: 3null/linux
static efi_status_t xen_efi_update_capsule(efi_capsule_header_t **capsules,
					   unsigned long count,
					   unsigned long sg_list)
{
	struct xen_platform_op op = INIT_EFI_OP(update_capsule);

	if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
		return EFI_UNSUPPORTED;

	set_xen_guest_handle(efi_data(op).u.update_capsule.capsule_header_array,
			     capsules);
	efi_data(op).u.update_capsule.capsule_count = count;
	efi_data(op).u.update_capsule.sg_list = sg_list;

	if (HYPERVISOR_dom0_op(&op) < 0)
		return EFI_UNSUPPORTED;

	return efi_data(op).status;
}
Example #20
File: efi.c Project: 3null/linux
static efi_status_t xen_efi_query_variable_info(u32 attr,
						u64 *storage_space,
						u64 *remaining_space,
						u64 *max_variable_size)
{
	struct xen_platform_op op = INIT_EFI_OP(query_variable_info);

	if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
		return EFI_UNSUPPORTED;

	efi_data(op).u.query_variable_info.attr = attr;

	if (HYPERVISOR_dom0_op(&op) < 0)
		return EFI_UNSUPPORTED;

	*storage_space = efi_data(op).u.query_variable_info.max_store_size;
	*remaining_space = efi_data(op).u.query_variable_info.remain_store_size;
	*max_variable_size = efi_data(op).u.query_variable_info.max_size;

	return efi_data(op).status;
}
Example #21
File: efi.c Project: 3null/linux
static efi_status_t xen_efi_set_variable(efi_char16_t *name,
					 efi_guid_t *vendor,
					 u32 attr,
					 unsigned long data_size,
					 void *data)
{
	struct xen_platform_op op = INIT_EFI_OP(set_variable);

	set_xen_guest_handle(efi_data(op).u.set_variable.name, name);
	efi_data(op).misc = attr;
	BUILD_BUG_ON(sizeof(*vendor) !=
		     sizeof(efi_data(op).u.set_variable.vendor_guid));
	memcpy(&efi_data(op).u.set_variable.vendor_guid, vendor, sizeof(*vendor));
	efi_data(op).u.set_variable.size = data_size;
	set_xen_guest_handle(efi_data(op).u.set_variable.data, data);

	if (HYPERVISOR_dom0_op(&op) < 0)
		return EFI_UNSUPPORTED;

	return efi_data(op).status;
}
Example #22
File: efi.c Project: 3null/linux
static efi_status_t xen_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
{
	struct xen_platform_op op = INIT_EFI_OP(get_time);

	if (HYPERVISOR_dom0_op(&op) < 0)
		return EFI_UNSUPPORTED;

	if (tm) {
		BUILD_BUG_ON(sizeof(*tm) != sizeof(efi_data(op).u.get_time.time));
		memcpy(tm, &efi_data(op).u.get_time.time, sizeof(*tm));
	}

	if (tc) {
		tc->resolution = efi_data(op).u.get_time.resolution;
		tc->accuracy = efi_data(op).u.get_time.accuracy;
		tc->sets_to_zero = !!(efi_data(op).misc &
				      XEN_EFI_GET_TIME_SET_CLEARS_NS);
	}

	return efi_data(op).status;
}
Example #23
File: efi.c Project: 3null/linux
static efi_status_t xen_efi_query_capsule_caps(efi_capsule_header_t **capsules,
					       unsigned long count,
					       u64 *max_size,
					       int *reset_type)
{
	struct xen_platform_op op = INIT_EFI_OP(query_capsule_capabilities);

	if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
		return EFI_UNSUPPORTED;

	set_xen_guest_handle(efi_data(op).u.query_capsule_capabilities.capsule_header_array,
					capsules);
	efi_data(op).u.query_capsule_capabilities.capsule_count = count;

	if (HYPERVISOR_dom0_op(&op) < 0)
		return EFI_UNSUPPORTED;

	*max_size = efi_data(op).u.query_capsule_capabilities.max_capsule_size;
	*reset_type = efi_data(op).u.query_capsule_capabilities.reset_type;

	return efi_data(op).status;
}
Example #24
File: efi.c Project: 3null/linux
static efi_status_t xen_efi_get_wakeup_time(efi_bool_t *enabled,
					    efi_bool_t *pending,
					    efi_time_t *tm)
{
	struct xen_platform_op op = INIT_EFI_OP(get_wakeup_time);

	if (HYPERVISOR_dom0_op(&op) < 0)
		return EFI_UNSUPPORTED;

	if (tm) {
		BUILD_BUG_ON(sizeof(*tm) != sizeof(efi_data(op).u.get_wakeup_time));
		memcpy(tm, &efi_data(op).u.get_wakeup_time, sizeof(*tm));
	}

	if (enabled)
		*enabled = !!(efi_data(op).misc & XEN_EFI_GET_WAKEUP_TIME_ENABLED);

	if (pending)
		*pending = !!(efi_data(op).misc & XEN_EFI_GET_WAKEUP_TIME_PENDING);

	return efi_data(op).status;
}
Example #25
File: efi.c Project: 3null/linux
static efi_status_t xen_efi_get_next_variable(unsigned long *name_size,
					      efi_char16_t *name,
					      efi_guid_t *vendor)
{
	struct xen_platform_op op = INIT_EFI_OP(get_next_variable_name);

	efi_data(op).u.get_next_variable_name.size = *name_size;
	set_xen_guest_handle(efi_data(op).u.get_next_variable_name.name, name);
	BUILD_BUG_ON(sizeof(*vendor) !=
		     sizeof(efi_data(op).u.get_next_variable_name.vendor_guid));
	memcpy(&efi_data(op).u.get_next_variable_name.vendor_guid, vendor,
	       sizeof(*vendor));

	if (HYPERVISOR_dom0_op(&op) < 0)
		return EFI_UNSUPPORTED;

	*name_size = efi_data(op).u.get_next_variable_name.size;
	memcpy(vendor, &efi_data(op).u.get_next_variable_name.vendor_guid,
	       sizeof(*vendor));

	return efi_data(op).status;
}
Example #26
File: clock.c Project: MarginC/kame
void
resettodr()
{
#ifdef DOM0OPS
	dom0_op_t op;
	int s;
#endif
#ifdef DEBUG_CLOCK
	struct clock_ymdhms dt;
#endif

	/*
	 * We might have been called by boot() due to a crash early
	 * on.  Don't reset the clock chip in this case.
	 */
	if (!timeset)
		return;

#ifdef DEBUG_CLOCK
	clock_secs_to_ymdhms(time.tv_sec - rtc_offset * 60, &dt);

	printf("setclock: %d/%d/%d %02d:%02d:%02d\n", dt.dt_year,
	    dt.dt_mon, dt.dt_day, dt.dt_hour, dt.dt_min, dt.dt_sec);
#endif
#ifdef DOM0OPS
	if (xen_start_info.dom_id == 0) {
		s = splclock();

		op.cmd = DOM0_SETTIME;
		op.u.settime.secs	 = time.tv_sec - rtc_offset * 60;
		op.u.settime.usecs	 = time.tv_usec;
		op.u.settime.system_time = shadow_system_time;
		HYPERVISOR_dom0_op(&op);

		splx(s);
	}
#endif
}
Example #27
static bool __init xen_check_mwait(void)
{
#if defined(CONFIG_ACPI) && !defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR) && \
	!defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR_MODULE)
	struct xen_platform_op op = {
		.cmd			= XENPF_set_processor_pminfo,
		.u.set_pminfo.id	= -1,
		.u.set_pminfo.type	= XEN_PM_PDC,
	};
	uint32_t buf[3];
	unsigned int ax, bx, cx, dx;
	unsigned int mwait_mask;

	/* We need to determine whether it is OK to expose the MWAIT
	 * capability to the kernel to harvest deeper than C3 states from ACPI
	 * _CST using the processor_harvest_xen.c module. For this to work, we
	 * need to gather the MWAIT_LEAF values (which the cstate.c code
	 * checks against). The hypervisor won't expose the MWAIT flag because
	 * it would break backwards compatibility; so we will find out directly
	 * from the hardware and hypercall.
	 */
	if (!xen_initial_domain())
		return false;

	ax = 1;
	cx = 0;

	native_cpuid(&ax, &bx, &cx, &dx);

	mwait_mask = (1 << (X86_FEATURE_EST % 32)) |
		     (1 << (X86_FEATURE_MWAIT % 32));

	if ((cx & mwait_mask) != mwait_mask)
		return false;

	/* We need to emulate the MWAIT_LEAF and for that we need both
	 * ecx and edx. The hypercall provides only partial information.
	 */

	ax = CPUID_MWAIT_LEAF;
	bx = 0;
	cx = 0;
	dx = 0;

	native_cpuid(&ax, &bx, &cx, &dx);

	/* Ask the Hypervisor whether to clear ACPI_PDC_C_C2C3_FFH. If so,
	 * don't expose MWAIT_LEAF and let ACPI pick the IOPORT version of C3.
	 */
	buf[0] = ACPI_PDC_REVISION_ID;
	buf[1] = 1;
	buf[2] = (ACPI_PDC_C_CAPABILITY_SMP | ACPI_PDC_EST_CAPABILITY_SWSMP);

	set_xen_guest_handle(op.u.set_pminfo.pdc, buf);

	if ((HYPERVISOR_dom0_op(&op) == 0) &&
	    (buf[2] & (ACPI_PDC_C_C1_FFH | ACPI_PDC_C_C2C3_FFH))) {
		cpuid_leaf5_ecx_val = cx;
		cpuid_leaf5_edx_val = dx;
	}
	return true;
#else
	return false;
#endif
}
static void __init xen_init_cpuid_mask(void)
{
	unsigned int ax, bx, cx, dx;
	unsigned int xsave_mask;

	cpuid_leaf1_edx_mask =
		~((1 << X86_FEATURE_MCE)  |  /* disable MCE */
		  (1 << X86_FEATURE_MCA)  |  /* disable MCA */
		  (1 << X86_FEATURE_MTRR) |  /* disable MTRR */
		  (1 << X86_FEATURE_ACC));   /* thermal monitoring */

	if (!xen_initial_domain())
		cpuid_leaf1_edx_mask &=
			~((1 << X86_FEATURE_APIC) |  /* disable local APIC */
			  (1 << X86_FEATURE_ACPI));  /* disable ACPI */
	ax = 1;
	cx = 0;
	xen_cpuid(&ax, &bx, &cx, &dx);

	xsave_mask =
		(1 << (X86_FEATURE_XSAVE % 32)) |
		(1 << (X86_FEATURE_OSXSAVE % 32));

	/* Xen will set CR4.OSXSAVE if supported and not disabled by force */
	if ((cx & xsave_mask) != xsave_mask)
		cpuid_leaf1_ecx_mask &= ~xsave_mask; /* disable XSAVE & OSXSAVE */
	if (xen_check_mwait())
		cpuid_leaf1_ecx_set_mask = (1 << (X86_FEATURE_MWAIT % 32));
}
static int push_pxx_to_hypervisor(struct acpi_processor *_pr)
{
	int ret = 0;
	struct xen_platform_op op = {
		.cmd			= XENPF_set_processor_pminfo,
		.interface_version	= XENPF_INTERFACE_VERSION,
		.u.set_pminfo.id	= _pr->acpi_id,
		.u.set_pminfo.type	= XEN_PM_PX,
	};
	struct xen_processor_performance *dst_perf;
	struct xen_processor_px *dst_states = NULL;

	dst_perf = &op.u.set_pminfo.perf;

	dst_perf->platform_limit = _pr->performance_platform_limit;
	dst_perf->flags |= XEN_PX_PPC;
	xen_copy_pct_data(&(_pr->performance->control_register),
			  &dst_perf->control_register);
	xen_copy_pct_data(&(_pr->performance->status_register),
			  &dst_perf->status_register);
	dst_perf->flags |= XEN_PX_PCT;
	dst_states = xen_copy_pss_data(_pr, dst_perf);
	if (!IS_ERR_OR_NULL(dst_states)) {
		set_xen_guest_handle(dst_perf->states, dst_states);
		dst_perf->flags |= XEN_PX_PSS;
	}
	if (!xen_copy_psd_data(_pr, dst_perf))
		dst_perf->flags |= XEN_PX_PSD;

	if (dst_perf->flags != (XEN_PX_PSD | XEN_PX_PSS | XEN_PX_PCT | XEN_PX_PPC)) {
		pr_warn(DRV_NAME "ACPI CPU%u missing some P-state data (%x), skipping.\n",
			_pr->acpi_id, dst_perf->flags);
		ret = -ENODEV;
		goto err_free;
	}

	if (!no_hypercall)
		ret = HYPERVISOR_dom0_op(&op);

	if (!ret) {
		struct acpi_processor_performance *perf;
		unsigned int i;

		perf = _pr->performance;
		pr_debug("ACPI CPU%u - P-states uploaded.\n", _pr->acpi_id);
		for (i = 0; i < perf->state_count; i++) {
			pr_debug("     %cP%d: %d MHz, %d mW, %d uS\n",
			(i == perf->state ? '*' : ' '), i,
			(u32) perf->states[i].core_frequency,
			(u32) perf->states[i].power,
			(u32) perf->states[i].transition_latency);
		}
	} else if (ret != -EINVAL)
		pr_warn(DRV_NAME "(_PXX): Hypervisor error (%d) for ACPI CPU%u\n",
		       ret, _pr->acpi_id);
err_free:
	if (!IS_ERR_OR_NULL(dst_states))
		kfree(dst_states);

	return ret;
}
static int upload_pm_data(struct acpi_processor *_pr)
{
	int err = 0;

	mutex_lock(&acpi_ids_mutex);
	if (__test_and_set_bit(_pr->acpi_id, acpi_ids_done)) {
		mutex_unlock(&acpi_ids_mutex);
		return -EBUSY;
	}
	if (_pr->flags.power)
		err = push_cxx_to_hypervisor(_pr);

	if (_pr->performance && _pr->performance->states)
		err |= push_pxx_to_hypervisor(_pr);

	mutex_unlock(&acpi_ids_mutex);
	return err;
}
static int push_cxx_to_hypervisor(struct acpi_processor *_pr)
{
	struct xen_platform_op op = {
		.cmd			= XENPF_set_processor_pminfo,
		.interface_version	= XENPF_INTERFACE_VERSION,
		.u.set_pminfo.id	= _pr->acpi_id,
		.u.set_pminfo.type	= XEN_PM_CX,
	};
	struct xen_processor_cx *dst_cx, *dst_cx_states = NULL;
	struct acpi_processor_cx *cx;
	unsigned int i, ok;
	int ret = 0;

	dst_cx_states = kcalloc(_pr->power.count,
				sizeof(struct xen_processor_cx), GFP_KERNEL);
	if (!dst_cx_states)
		return -ENOMEM;

	for (ok = 0, i = 1; i <= _pr->power.count; i++) {
		cx = &_pr->power.states[i];
		if (!cx->valid)
			continue;

		dst_cx = &(dst_cx_states[ok++]);

		dst_cx->reg.space_id = ACPI_ADR_SPACE_SYSTEM_IO;
		if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
			dst_cx->reg.bit_width = 8;
			dst_cx->reg.bit_offset = 0;
			dst_cx->reg.access_size = 1;
		} else {
			dst_cx->reg.space_id = ACPI_ADR_SPACE_FIXED_HARDWARE;
			if (cx->entry_method == ACPI_CSTATE_FFH) {
				/* FFH (MWAIT) entry method: fixed encoding
				 * expected by the hypervisor. */
				dst_cx->reg.bit_offset = 2;
				dst_cx->reg.bit_width = 1;
			}
			}
			dst_cx->reg.access_size = 0;
		}
		dst_cx->reg.address = cx->address;

		dst_cx->type = cx->type;
		dst_cx->latency = cx->latency;
		dst_cx->power = cx->power;

		dst_cx->dpcnt = 0;
		set_xen_guest_handle(dst_cx->dp, NULL);
	}
	if (!ok) {
		pr_debug(DRV_NAME "No _Cx for ACPI CPU %u\n", _pr->acpi_id);
		kfree(dst_cx_states);
		return -EINVAL;
	}
	op.u.set_pminfo.power.count = ok;
	op.u.set_pminfo.power.flags.bm_control = _pr->flags.bm_control;
	op.u.set_pminfo.power.flags.bm_check = _pr->flags.bm_check;
	op.u.set_pminfo.power.flags.has_cst = _pr->flags.has_cst;
	op.u.set_pminfo.power.flags.power_setup_done =
		_pr->flags.power_setup_done;

	set_xen_guest_handle(op.u.set_pminfo.power.states, dst_cx_states);

	if (!no_hypercall)
		ret = HYPERVISOR_dom0_op(&op);

	if (!ret) {
		pr_debug("ACPI CPU%u - C-states uploaded.\n", _pr->acpi_id);
		for (i = 1; i <= _pr->power.count; i++) {
			cx = &_pr->power.states[i];
			if (!cx->valid)
				continue;
			pr_debug("     C%d: %s %d uS\n",
				 cx->type, cx->desc, (u32)cx->latency);
		}
	} else if (ret != -EINVAL)
		pr_err(DRV_NAME "(CX): Hypervisor error (%d) for ACPI CPU%u\n",
		       ret, _pr->acpi_id);

	kfree(dst_cx_states);

	return ret;
}
static struct xen_processor_px *
xen_copy_pss_data(struct acpi_processor *_pr,
		  struct xen_processor_performance *dst_perf)
{
	struct xen_processor_px *dst_states = NULL;
	unsigned int i;

	BUILD_BUG_ON(sizeof(struct xen_processor_px) !=
		     sizeof(struct acpi_processor_px));

	dst_states = kcalloc(_pr->performance->state_count,
			     sizeof(struct xen_processor_px), GFP_KERNEL);
	if (!dst_states)
		return ERR_PTR(-ENOMEM);

	dst_perf->state_count = _pr->performance->state_count;
	for (i = 0; i < _pr->performance->state_count; i++) {
		/* Same layout on both sides (see BUILD_BUG_ON above),
		 * so a straight copy is safe. */
		memcpy(&(dst_states[i]), &(_pr->performance->states[i]),
		       sizeof(struct acpi_processor_px));
	}
	return dst_states;
}
static int xen_copy_psd_data(struct acpi_processor *_pr,
			     struct xen_processor_performance *dst)
{
	struct acpi_psd_package *pdomain;

	BUILD_BUG_ON(sizeof(struct xen_psd_package) !=
		     sizeof(struct acpi_psd_package));

	dst->shared_type = _pr->performance->shared_type;

	pdomain = &(_pr->performance->domain_info);

	if (pdomain->num_processors <= 1) {
		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
			dst->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
			dst->shared_type = CPUFREQ_SHARED_TYPE_HW;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
			dst->shared_type = CPUFREQ_SHARED_TYPE_ANY;

	}
	memcpy(&(dst->domain_info), pdomain, sizeof(struct acpi_psd_package));
	return 0;
}
static int xen_copy_pct_data(struct acpi_pct_register *pct,
			     struct xen_pct_register *dst_pct)
{
	dst_pct->descriptor = pct->descriptor;
	dst_pct->length = pct->length;
	dst_pct->space_id = pct->space_id;
	dst_pct->bit_width = pct->bit_width;
	dst_pct->bit_offset = pct->bit_offset;
	dst_pct->reserved = pct->reserved;
	dst_pct->address = pct->address;
	return 0;
}