Example #1
/*
 * Update the xenver data. We maintain two copies, boot and
 * current. If we are setting the boot, then also set current.
 */
static void
xen_set_version(xen_version_t idx)
{
	ulong_t ver;

	bzero(&xenver[idx], sizeof (xenver[idx]));

	ver = HYPERVISOR_xen_version(XENVER_version, 0);

	xenver[idx].xv_major = BITX(ver, 31, 16);
	xenver[idx].xv_minor = BITX(ver, 15, 0);

	(void) HYPERVISOR_xen_version(XENVER_extraversion, &xenver[idx].xv_ver);

	/*
	 * The revision is buried in the extraversion information that is
	 * maintained by the hypervisor. For our purposes we expect that
	 * the revision number is:
	 *	- the second character in the extraversion information
	 *	- one character long
	 *	- a numeric digit
	 * If it isn't, then we can't extract the revision and we leave it
	 * set to 0.
	 */
	if (strlen(xenver[idx].xv_ver) > 1 && isdigit(xenver[idx].xv_ver[1]))
		xenver[idx].xv_revision = xenver[idx].xv_ver[1] - '0';
	else
		cmn_err(CE_WARN, "Cannot extract revision on this hypervisor "
		    "version: v%s, unexpected version format",
		    xenver[idx].xv_ver);

	xenver[idx].xv_is_xvm = 0;

	if (strstr(xenver[idx].xv_ver, "-xvm") != NULL)
		xenver[idx].xv_is_xvm = 1;

	(void) HYPERVISOR_xen_version(XENVER_changeset,
	    &xenver[idx].xv_chgset);

	(void) HYPERVISOR_xen_version(XENVER_compile_info,
	    &xenver[idx].xv_build);
	/*
	 * Capabilities are a set of space-separated ASCII strings,
	 * e.g. 'xen-3.1-x86_32p' or 'hvm-3.2-x86_64'.
	 */
	(void) HYPERVISOR_xen_version(XENVER_capabilities,
	    &xenver[idx].xv_caps);

	cmn_err(CE_CONT, "?v%lu.%lu%s chgset '%s'\n", xenver[idx].xv_major,
	    xenver[idx].xv_minor, xenver[idx].xv_ver, xenver[idx].xv_chgset);

	if (idx == XENVER_BOOT_IDX)
		bcopy(&xenver[XENVER_BOOT_IDX], &xenver[XENVER_CURRENT_IDX],
		    sizeof (xenver[XENVER_BOOT_IDX]));
}
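For reference, XENVER_version packs the major number into bits 31..16 and the minor into bits 15..0 of the returned word, which is exactly what the BITX() calls above extract. A minimal standalone sketch of the same decoding (the helper name is illustrative, not part of the original source):

static void
decode_xen_version(ulong_t ver, uint_t *major, uint_t *minor)
{
	/* XENVER_version returns (major << 16) | minor. */
	*major = (uint_t)((ver >> 16) & 0xffff);
	*minor = (uint_t)(ver & 0xffff);
}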
Example #2
static void __init xen_banner(void)
{
	unsigned version = HYPERVISOR_xen_version(XENVER_version, NULL);
	struct xen_extraversion extra;
	HYPERVISOR_xen_version(XENVER_extraversion, &extra);

	printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
	       pv_info.name);
	printk(KERN_INFO "Xen version: %d.%d%s%s\n",
	       version >> 16, version & 0xffff, extra.extraversion,
	       xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : "");
}
Example #3
static ssize_t minor_show(struct hyp_sysfs_attr *attr, char *buffer)
{
	int version = HYPERVISOR_xen_version(XENVER_version, NULL);
	/* The minor version is the low 16 bits of the packed version word. */
	if (version)
		return sprintf(buffer, "%d\n", version & 0xffff);
	return -ENODEV;
}
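In the Linux sysfs driver these show routines are wired up through a small macro; a sketch of the usual pattern (names taken from drivers/xen/sys-hypervisor.c as best recalled, treat as illustrative):

/* Declares minor_attr with .show = minor_show, read-only. */
HYPERVISOR_ATTR_RO(minor);

static struct attribute *version_attrs[] = {
	&minor_attr.attr,
	NULL
};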
Example #4
static ssize_t pagesize_show(struct hyp_sysfs_attr *attr, char *buffer)
{
	int ret;

	ret = HYPERVISOR_xen_version(XENVER_pagesize, NULL);
	if (ret > 0)
		ret = sprintf(buffer, "%x\n", ret);

	return ret;
}
Example #5
static ssize_t uuid_show(struct hyp_sysfs_attr *attr, char *buffer)
{
	xen_domain_handle_t uuid;
	int ret;
	ret = HYPERVISOR_xen_version(XENVER_guest_handle, uuid);
	if (ret)
		return uuid_show_fallback(attr, buffer);
	ret = sprintf(buffer, "%pU\n", uuid);
	return ret;
}
Example #6
static ssize_t xen_feature_show(int index, char *buffer)
{
	ssize_t ret;
	struct xen_feature_info info;

	info.submap_idx = index;
	ret = HYPERVISOR_xen_version(XENVER_get_features, &info);
	if (!ret)
		ret = sprintf(buffer, "%08x", info.submap);

	return ret;
}
Example #7
void xen_setup_features(void)
{
	struct xen_feature_info fi;
	int i, j;

	for (i = 0; i < XENFEAT_NR_SUBMAPS; i++) {
		fi.submap_idx = i;
		if (HYPERVISOR_xen_version(XENVER_get_features, &fi) < 0)
			break;
		for (j = 0; j < 32; j++)
			xen_features[i * 32 + j] = !!(fi.submap & (1U << j));
	}
}
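The submaps filled in here back the xen_feature() test used in Examples 2 and 14. In Linux it reduces to a plain array lookup, roughly as declared in include/xen/features.h:

/* One byte per feature bit, indexed directly by the XENFEAT_* number. */
extern u8 xen_features[XENFEAT_NR_SUBMAPS * 32];
#define xen_feature(flag)	(xen_features[flag])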
Example #8
static int
privcmd_HYPERVISOR_xen_version(int cmd, void *arg)
{
	int error;
	int size = 0;
	import_export_t op_ie;
	uint32_t flags = IE_EXPORT;

	switch (cmd) {
	case XENVER_version:
		break;
	case XENVER_extraversion:
		size = sizeof (xen_extraversion_t);
		break;
	case XENVER_compile_info:
		size = sizeof (xen_compile_info_t);
		break;
	case XENVER_capabilities:
		size = sizeof (xen_capabilities_info_t);
		break;
	case XENVER_changeset:
		size = sizeof (xen_changeset_info_t);
		break;
	case XENVER_platform_parameters:
		size = sizeof (xen_platform_parameters_t);
		break;
	case XENVER_get_features:
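		/* The only two-way op: submap_idx goes in, submap comes back. */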
		flags = IE_IMPEXP;
		size = sizeof (xen_feature_info_t);
		break;
	case XENVER_pagesize:
		break;
	case XENVER_guest_handle:
		size = sizeof (xen_domain_handle_t);
		break;

	default:
#ifdef DEBUG
		printf("unrecognized HYPERVISOR_xen_version op %d\n", cmd);
#endif
		return (-X_EINVAL);
	}

	error = import_buffer(&op_ie, arg, NULL, size, flags);
	if (error == 0)
		error = HYPERVISOR_xen_version(cmd, op_ie.ie_kaddr);
	export_buffer(&op_ie, &error);

	return (error);
}
Example #9
static ssize_t compile_date_show(struct hyp_sysfs_attr *attr, char *buffer)
{
	int ret = -ENOMEM;
	struct xen_compile_info *info;

	info = kmalloc(sizeof(struct xen_compile_info), GFP_KERNEL);
	if (info) {
		ret = HYPERVISOR_xen_version(XENVER_compile_info, info);
		if (!ret)
			ret = sprintf(buffer, "%s\n", info->compile_date);
		kfree(info);
	}

	return ret;
}
Example #10
static ssize_t changeset_show(struct hyp_sysfs_attr *attr, char *buffer)
{
	int ret = -ENOMEM;
	char *cset;

	cset = kmalloc(XEN_CHANGESET_INFO_LEN, GFP_KERNEL);
	if (cset) {
		ret = HYPERVISOR_xen_version(XENVER_changeset, cset);
		if (!ret)
			ret = sprintf(buffer, "%s\n", cset);
		kfree(cset);
	}

	return ret;
}
Example #11
static ssize_t capabilities_show(struct hyp_sysfs_attr *attr, char *buffer)
{
	int ret = -ENOMEM;
	char *caps;

	caps = kmalloc(XEN_CAPABILITIES_INFO_LEN, GFP_KERNEL);
	if (caps) {
		ret = HYPERVISOR_xen_version(XENVER_capabilities, caps);
		if (!ret)
			ret = sprintf(buffer, "%s\n", caps);
		kfree(caps);
	}

	return ret;
}
Example #12
static ssize_t extra_show(struct hyp_sysfs_attr *attr, char *buffer)
{
	int ret = -ENOMEM;
	char *extra;

	extra = kmalloc(XEN_EXTRAVERSION_LEN, GFP_KERNEL);
	if (extra) {
		ret = HYPERVISOR_xen_version(XENVER_extraversion, extra);
		if (!ret)
			ret = sprintf(buffer, "%s\n", extra);
		kfree(extra);
	}

	return ret;
}
Example #13
static ssize_t virtual_start_show(struct hyp_sysfs_attr *attr, char *buffer)
{
	int ret = -ENOMEM;
	struct xen_platform_parameters *parms;

	parms = kmalloc(sizeof(struct xen_platform_parameters), GFP_KERNEL);
	if (parms) {
		ret = HYPERVISOR_xen_version(XENVER_platform_parameters,
					     parms);
		if (!ret)
			ret = sprintf(buffer, "%lx\n", parms->virt_start);
		kfree(parms);
	}

	return ret;
}
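Examples 9 through 13 share one pattern: the XENVER_* payload types are fixed-size character arrays, too large to place comfortably on a kernel stack, so each show routine kmalloc()s a scratch buffer, issues the hypercall, formats the result, and frees the buffer. The sizes come from the Xen public headers, along these lines (values quoted from memory, treat as illustrative):

/* Sketch of xen/include/public/version.h: */
typedef char xen_extraversion_t[16];		/* XEN_EXTRAVERSION_LEN */
typedef char xen_changeset_info_t[64];		/* XEN_CHANGESET_INFO_LEN */
typedef char xen_capabilities_info_t[1024];	/* XEN_CAPABILITIES_INFO_LEN */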
Example #14
/*
 * Attach the hypervisor.
 */
void
hypervisor_attach(device_t parent, device_t self, void *aux)
{

#if NPCI > 0
#ifdef PCI_BUS_FIXUP
	int pci_maxbus = 0;
#endif
#endif /* NPCI */
	union hypervisor_attach_cookie hac;
	char xen_extra_version[XEN_EXTRAVERSION_LEN];
	static char xen_version_string[20];
	int rc;
	const struct sysctlnode *node = NULL;

	xenkernfs_init();

	xen_version = HYPERVISOR_xen_version(XENVER_version, NULL);
	memset(xen_extra_version, 0, sizeof(xen_extra_version));
	HYPERVISOR_xen_version(XENVER_extraversion, xen_extra_version);
	rc = snprintf(xen_version_string, sizeof(xen_version_string), "%d.%d%s",
		XEN_MAJOR(xen_version), XEN_MINOR(xen_version), xen_extra_version);
	aprint_normal(": Xen version %s\n", xen_version_string);
	if (rc >= (int)sizeof(xen_version_string))
		aprint_debug(": xen_version_string truncated\n");

	sysctl_createv(NULL, 0, NULL, &node, 0,
	    CTLTYPE_NODE, "xen",
	    SYSCTL_DESCR("Xen top level node"),
	    NULL, 0, NULL, 0, CTL_MACHDEP, CTL_CREATE, CTL_EOL);

	if (node != NULL) {
		sysctl_createv(NULL, 0, &node, NULL, CTLFLAG_READONLY,
		    CTLTYPE_STRING, "version",
		    SYSCTL_DESCR("Xen hypervisor version"),
		    NULL, 0, xen_version_string, 0, CTL_CREATE, CTL_EOL);
	}

	aprint_verbose_dev(self, "features: ");
#define XEN_TST_F(n) \
	if (xen_feature(XENFEAT_##n)) \
		aprint_verbose(" %s", #n);

	XEN_TST_F(writable_page_tables);
	XEN_TST_F(writable_descriptor_tables);
	XEN_TST_F(auto_translated_physmap);
	XEN_TST_F(supervisor_mode_kernel);
	XEN_TST_F(pae_pgdir_above_4gb);
	XEN_TST_F(mmu_pt_update_preserve_ad);
	XEN_TST_F(highmem_assist);
	XEN_TST_F(gnttab_map_avail_bits);
	XEN_TST_F(hvm_callback_vector);
	XEN_TST_F(hvm_safe_pvclock);
	XEN_TST_F(hvm_pirqs);
#undef XEN_TST_F
	aprint_verbose("\n");

	xengnt_init();
	events_init();

	memset(&hac, 0, sizeof(hac));
	hac.hac_vcaa.vcaa_name = "vcpu";
	hac.hac_vcaa.vcaa_caa.cpu_number = 0;
	hac.hac_vcaa.vcaa_caa.cpu_role = CPU_ROLE_BP;
	hac.hac_vcaa.vcaa_caa.cpu_func = NULL; /* See xen/x86/cpu.c:vcpu_attach() */
	config_found_ia(self, "xendevbus", &hac.hac_vcaa, hypervisor_print);

#ifdef MULTIPROCESSOR

	/*
	 * The xenstore contains the configured number of vcpus. The
	 * xenstore, however, is not accessible until much later in the
	 * boot sequence, so we brute-force check for allocated vcpus
	 * (see cpu.c:vcpu_match()) by iterating up to the maximum
	 * number of cpus supported by NetBSD MP.
	 */
	cpuid_t vcpuid;

	for (vcpuid = 1; vcpuid < maxcpus; vcpuid++) {
		memset(&hac, 0, sizeof(hac));
		hac.hac_vcaa.vcaa_name = "vcpu";
		hac.hac_vcaa.vcaa_caa.cpu_number = vcpuid;
		hac.hac_vcaa.vcaa_caa.cpu_role = CPU_ROLE_AP;
		hac.hac_vcaa.vcaa_caa.cpu_func = NULL; /* See xen/x86/cpu.c:vcpu_attach() */
		if (NULL == config_found_ia(self, "xendevbus", &hac.hac_vcaa,
			hypervisor_vcpu_print)) {
			break;
		}
	}

#endif /* MULTIPROCESSOR */

#if NXENBUS > 0
	memset(&hac, 0, sizeof(hac));
	hac.hac_xenbus.xa_device = "xenbus";
	config_found_ia(self, "xendevbus", &hac.hac_xenbus, hypervisor_print);
#endif
#if NXENCONS > 0
	memset(&hac, 0, sizeof(hac));
	hac.hac_xencons.xa_device = "xencons";
	config_found_ia(self, "xendevbus", &hac.hac_xencons, hypervisor_print);
#endif
#ifdef DOM0OPS
#if NPCI > 0
#if NACPICA > 0
	if (acpi_present) {
		memset(&hac, 0, sizeof(hac));
		hac.hac_acpi.aa_iot = x86_bus_space_io;
		hac.hac_acpi.aa_memt = x86_bus_space_mem;
		hac.hac_acpi.aa_pc = NULL;
		hac.hac_acpi.aa_pciflags =
			PCI_FLAGS_IO_OKAY | PCI_FLAGS_MEM_OKAY |
			PCI_FLAGS_MRL_OKAY | PCI_FLAGS_MRM_OKAY |
			PCI_FLAGS_MWI_OKAY;
		hac.hac_acpi.aa_ic = &x86_isa_chipset;
		hac.hac_acpi.aa_dmat = &pci_bus_dma_tag;
#ifdef _LP64
		hac.hac_acpi.aa_dmat64 = &pci_bus_dma64_tag;
#else
		hac.hac_acpi.aa_dmat64 = NULL;
#endif /* _LP64 */
		config_found_ia(self, "acpibus", &hac.hac_acpi, 0);
	}
#endif /* NACPICA */
	memset(&hac, 0, sizeof(hac));
	hac.hac_pba.pba_iot = x86_bus_space_io;
	hac.hac_pba.pba_memt = x86_bus_space_mem;
	hac.hac_pba.pba_dmat = &pci_bus_dma_tag;
#ifdef _LP64
	hac.hac_pba.pba_dmat64 = &pci_bus_dma64_tag;
#else
	hac.hac_pba.pba_dmat64 = NULL;
#endif /* _LP64 */
	hac.hac_pba.pba_flags = PCI_FLAGS_MEM_OKAY | PCI_FLAGS_IO_OKAY;
	hac.hac_pba.pba_bridgetag = NULL;
	hac.hac_pba.pba_bus = 0;
#if NACPICA > 0 && defined(ACPI_SCANPCI)
	if (mpacpi_active)
		mp_pci_scan(self, &hac.hac_pba, pcibusprint);
	else
#endif
#if defined(MPBIOS) && defined(MPBIOS_SCANPCI)
	if (mpbios_scanned != 0)
		mp_pci_scan(self, &hac.hac_pba, pcibusprint);
	else
#endif
	config_found_ia(self, "pcibus", &hac.hac_pba, pcibusprint);
#if NACPICA > 0
	if (mp_verbose)
		acpi_pci_link_state();
#endif
#if NISA > 0
	if (isa_has_been_seen == 0) {
		memset(&hac, 0, sizeof(hac));
		hac.hac_iba._iba_busname = "isa";
		hac.hac_iba.iba_iot = x86_bus_space_io;
		hac.hac_iba.iba_memt = x86_bus_space_mem;
		hac.hac_iba.iba_dmat = &isa_bus_dma_tag;
		hac.hac_iba.iba_ic = NULL; /* No isa DMA yet */
		config_found_ia(self, "isabus", &hac.hac_iba, isabusprint);
	}
#endif /* NISA */
#endif /* NPCI */

	if (xendomain_is_privileged()) {
		xenprivcmd_init();
		xen_shm_init();
	}
#endif /* DOM0OPS */

	hypervisor_machdep_attach();

	if (!pmf_device_register(self, hypervisor_suspend, hypervisor_resume))
		aprint_error_dev(self, "couldn't establish power handler\n");

}
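XEN_MAJOR() and XEN_MINOR() above unpack the same 16/16 split seen in the other examples; in NetBSD they are plain shift-and-mask macros along these lines (a sketch, not checked against the tree):

#define XEN_MAJOR(x)	((x) >> 16)
#define XEN_MINOR(x)	((x) & 0xFFFF)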
Example #15
/*
 * Force a proper event-channel callback from Xen after clearing the
 * callback mask. We do this in a very simple manner, by making a call
 * down into Xen. The pending flag will be checked by Xen on return.
 */
void force_evtchn_callback(void)
{
	(void)HYPERVISOR_xen_version(0, NULL);	/* 0 == XENVER_version */
}
Example #16
/*ARGSUSED*/
void
startup_kernel(void)
{
	char *cmdline;
	uintptr_t addr;
#if defined(__xpv)
	physdev_set_iopl_t set_iopl;
#endif /* __xpv */

	/*
	 * At this point we are executing in 32-bit mode.
	 */
#if defined(__xpv)
	cmdline = (char *)xen_info->cmd_line;
#else /* __xpv */
	cmdline = (char *)mb_info->cmdline;
#endif /* __xpv */

	prom_debug = (strstr(cmdline, "prom_debug") != NULL);
	map_debug = (strstr(cmdline, "map_debug") != NULL);

#if defined(__xpv)
	/*
	 * For dom0, before we initialize the console subsystem we'll
	 * need to enable I/O operations, so set the I/O privilege
	 * level to 1.
	 */
	if (DOMAIN_IS_INITDOMAIN(xen_info)) {
		set_iopl.iopl = 1;
		(void) HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
	}
#endif /* __xpv */

	bcons_init(cmdline);
	DBG_MSG("\n\nSolaris prekernel set: ");
	DBG_MSG(cmdline);
	DBG_MSG("\n");

	if (strstr(cmdline, "multiboot") != NULL) {
		dboot_panic(NO_MULTIBOOT);
	}

	/*
	 * The boot info must be 16-byte aligned for the 64-bit kernel ABI.
	 */
	addr = (uintptr_t)boot_info;
	addr = (addr + 0xf) & ~0xf;
	bi = (struct xboot_info *)addr;
	DBG((uintptr_t)bi);
	bi->bi_cmdline = (native_ptr_t)(uintptr_t)cmdline;

	/*
	 * Need correct target_kernel_text value
	 */
#if defined(_BOOT_TARGET_amd64)
	target_kernel_text = KERNEL_TEXT_amd64;
#elif defined(__xpv)
	target_kernel_text = KERNEL_TEXT_i386_xpv;
#else
	target_kernel_text = KERNEL_TEXT_i386;
#endif
	DBG(target_kernel_text);

#if defined(__xpv)

	/*
	 * XXPV	Derive this stuff from CPUID / what the hypervisor has enabled
	 */

#if defined(_BOOT_TARGET_amd64)
	/*
	 * 64-bit hypervisor.
	 */
	amd64_support = 1;
	pae_support = 1;

#else	/* _BOOT_TARGET_amd64 */

	/*
	 * See if we are running on a PAE Hypervisor
	 */
	{
		xen_capabilities_info_t caps;

		if (HYPERVISOR_xen_version(XENVER_capabilities, &caps) != 0)
			dboot_panic("HYPERVISOR_xen_version(caps) failed");
		caps[sizeof (caps) - 1] = 0;
		if (prom_debug)
			dboot_printf("xen capabilities %s\n", caps);
		if (strstr(caps, "x86_32p") != NULL)
			pae_support = 1;
	}

#endif	/* _BOOT_TARGET_amd64 */
	{
		xen_platform_parameters_t p;

		if (HYPERVISOR_xen_version(XENVER_platform_parameters, &p) != 0)
			dboot_panic("HYPERVISOR_xen_version(parms) failed");
		DBG(p.virt_start);
		mfn_to_pfn_mapping = (pfn_t *)(xen_virt_start = p.virt_start);
	}

	/*
	 * The hypervisor loads stuff starting at 1Gig
	 */
	mfn_base = ONE_GIG;
	DBG(mfn_base);

	/*
	 * enable writable page table mode for the hypervisor
	 */
	if (HYPERVISOR_vm_assist(VMASST_CMD_enable,
	    VMASST_TYPE_writable_pagetables) < 0)
		dboot_panic("HYPERVISOR_vm_assist(writable_pagetables) failed");

	/*
	 * check for NX support
	 */
	if (pae_support) {
		uint32_t eax = 0x80000000;
		uint32_t edx = get_cpuid_edx(&eax);

		if (eax >= 0x80000001) {
			eax = 0x80000001;
			edx = get_cpuid_edx(&eax);
			if (edx & CPUID_AMD_EDX_NX)
				NX_support = 1;
		}
	}

#if !defined(_BOOT_TARGET_amd64)

	/*
	 * The 32-bit hypervisor uses segmentation to protect itself from
	 * guests. This means when a guest attempts to install a flat 4GB
	 * code or data descriptor the 32-bit hypervisor will protect itself
	 * by silently shrinking the segment such that if the guest attempts
	 * any access where the hypervisor lives a #gp fault is generated.
	 * The problem is that some applications expect a full 4GB flat
	 * segment for their current thread pointer and will use negative
	 * offset segment wrap around to access data. TLS support in linux
	 * brand is one example of this.
	 *
	 * The 32-bit hypervisor can catch the #gp fault in these cases
	 * and emulate the access without passing the #gp fault to the guest
	 * but only if VMASST_TYPE_4gb_segments is explicitly turned on.
	 * Seems like this should have been the default.
	 * Either way, we want the hypervisor -- and not Solaris -- to
	 * deal with emulating these accesses.
	 */
	if (HYPERVISOR_vm_assist(VMASST_CMD_enable,
	    VMASST_TYPE_4gb_segments) < 0)
		dboot_panic("HYPERVISOR_vm_assist(4gb_segments) failed");
#endif	/* !_BOOT_TARGET_amd64 */

#else	/* __xpv */

	/*
	 * use cpuid to enable MMU features
	 */
	if (have_cpuid()) {
		uint32_t eax, edx;

		eax = 1;
		edx = get_cpuid_edx(&eax);
		if (edx & CPUID_INTC_EDX_PSE)
			largepage_support = 1;
		if (edx & CPUID_INTC_EDX_PGE)
			pge_support = 1;
		if (edx & CPUID_INTC_EDX_PAE)
			pae_support = 1;

		eax = 0x80000000;
		edx = get_cpuid_edx(&eax);
		if (eax >= 0x80000001) {
			eax = 0x80000001;
			edx = get_cpuid_edx(&eax);
			if (edx & CPUID_AMD_EDX_LM)
				amd64_support = 1;
			if (edx & CPUID_AMD_EDX_NX)
				NX_support = 1;
		}
	} else {
		dboot_printf("cpuid not supported\n");
	}
#endif /* __xpv */


#if defined(_BOOT_TARGET_amd64)
	if (amd64_support == 0)
		dboot_panic("long mode not supported, rebooting");
	else if (pae_support == 0)
		dboot_panic("long mode, but no PAE; rebooting");
#else
	/*
	 * Allow the command line to override the use of PAE for 32-bit.
	 */
	if (strstr(cmdline, "disablePAE=true") != NULL) {
		pae_support = 0;
		NX_support = 0;
		amd64_support = 0;
	}
#endif

	/*
	 * initialize the simple memory allocator
	 */
	init_mem_alloc();

#if !defined(__xpv) && !defined(_BOOT_TARGET_amd64)
	/*
	 * disable PAE on 32-bit h/w without NX and less than 4 GB of memory
	 */
	if (max_mem < FOUR_GIG && NX_support == 0)
		pae_support = 0;
#endif

	/*
	 * configure mmu information
	 */
	if (pae_support) {
		shift_amt = shift_amt_pae;
		ptes_per_table = 512;
		pte_size = 8;
		lpagesize = TWO_MEG;
#if defined(_BOOT_TARGET_amd64)
		top_level = 3;
#else
		top_level = 2;
#endif
	} else {
		pae_support = 0;
		NX_support = 0;
		shift_amt = shift_amt_nopae;
		ptes_per_table = 1024;
		pte_size = 4;
		lpagesize = FOUR_MEG;
		top_level = 1;
	}

	DBG(pge_support);
	DBG(NX_support);
	DBG(largepage_support);
	DBG(amd64_support);
	DBG(top_level);
	DBG(pte_size);
	DBG(ptes_per_table);
	DBG(lpagesize);

#if defined(__xpv)
	ktext_phys = ONE_GIG;		/* from UNIX Mapfile */
#else
	ktext_phys = FOUR_MEG;		/* from UNIX Mapfile */
#endif

#if !defined(__xpv) && defined(_BOOT_TARGET_amd64)
	/*
	 * For grub, copy kernel bits from the ELF64 file to final place.
	 */
	DBG_MSG("\nAllocating nucleus pages.\n");
	ktext_phys = (uintptr_t)do_mem_alloc(ksize, FOUR_MEG);
	if (ktext_phys == 0)
		dboot_panic("failed to allocate aligned kernel memory");
	if (dboot_elfload64(mb_header.load_addr) != 0)
		dboot_panic("failed to parse kernel ELF image, rebooting");
#endif

	DBG(ktext_phys);

	/*
	 * Allocate page tables.
	 */
	build_page_tables();

	/*
	 * return to assembly code to switch to running kernel
	 */
	entry_addr_low = (uint32_t)target_kernel_text;
	DBG(entry_addr_low);
	bi->bi_use_largepage = largepage_support;
	bi->bi_use_pae = pae_support;
	bi->bi_use_pge = pge_support;
	bi->bi_use_nx = NX_support;

#if defined(__xpv)

	bi->bi_next_paddr = next_avail_addr - mfn_base;
	DBG(bi->bi_next_paddr);
	bi->bi_next_vaddr = (native_ptr_t)next_avail_addr;
	DBG(bi->bi_next_vaddr);

	/*
	 * unmap unused pages in start area to make them available for DMA
	 */
	while (next_avail_addr < scratch_end) {
		(void) HYPERVISOR_update_va_mapping(next_avail_addr,
		    0, UVMF_INVLPG | UVMF_LOCAL);
		next_avail_addr += MMU_PAGESIZE;
	}

	bi->bi_xen_start_info = (uintptr_t)xen_info;
	DBG((uintptr_t)HYPERVISOR_shared_info);
	bi->bi_shared_info = (native_ptr_t)HYPERVISOR_shared_info;
	bi->bi_top_page_table = (uintptr_t)top_page_table - mfn_base;

#else /* __xpv */

	bi->bi_next_paddr = next_avail_addr;
	DBG(bi->bi_next_paddr);
	bi->bi_next_vaddr = (uintptr_t)next_avail_addr;
	DBG(bi->bi_next_vaddr);
	bi->bi_mb_info = (uintptr_t)mb_info;
	bi->bi_top_page_table = (uintptr_t)top_page_table;

#endif /* __xpv */

	bi->bi_kseg_size = FOUR_MEG;
	DBG(bi->bi_kseg_size);

#ifndef __xpv
	if (map_debug)
		dump_tables();
#endif

	DBG_MSG("\n\n*** DBOOT DONE -- back to asm to jump to kernel\n\n");
}
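get_cpuid_edx() is a small dboot helper whose contract can be read off the call sites above: it executes cpuid with *eax as the leaf, writes the resulting %eax back through the pointer, and returns %edx. A hedged sketch of an equivalent (an assumption, not the actual illumos implementation):

static uint32_t
get_cpuid_edx(uint32_t *eax)
{
	uint32_t a = *eax, b, c, d;

	/* cpuid takes the leaf in %eax and clobbers all four registers. */
	__asm__ volatile("cpuid"
	    : "=a" (a), "=b" (b), "=c" (c), "=d" (d)
	    : "a" (a));
	*eax = a;
	return (d);
}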
Example #17
void
xen_hvm_init(void)
{
	struct cpuid_regs cp;
	uint32_t xen_signature[4], base;
	char *xen_str;
	struct xen_add_to_physmap xatp;
	xen_capabilities_info_t caps;
	pfn_t pfn;
	uint64_t msrval, val;
	extern int apix_enable;

	if (xen_hvm_inited != 0)
		return;

	xen_hvm_inited = 1;

	/*
	 * Xen's pseudo-cpuid function returns a string representing
	 * the Xen signature in %ebx, %ecx, and %edx.
	 * Loop over the candidate base values, since the base may differ
	 * if the hypervisor has Hyper-V emulation switched on.
	 *
	 * %eax contains the maximum supported cpuid function.
	 */
	for (base = 0x40000000; base < 0x40010000; base += 0x100) {
		cp.cp_eax = base;
		(void) __cpuid_insn(&cp);
		xen_signature[0] = cp.cp_ebx;
		xen_signature[1] = cp.cp_ecx;
		xen_signature[2] = cp.cp_edx;
		xen_signature[3] = 0;
		xen_str = (char *)xen_signature;
		if (strcmp("XenVMMXenVMM", xen_str) == 0 &&
		    cp.cp_eax >= (base + 2))
			break;
	}
	if (base >= 0x40010000)
		return;

	/*
	 * cpuid function at base + 1 returns the Xen version in %eax.  The
	 * top 16 bits are the major version, the bottom 16 are the minor
	 * version.
	 */
	cp.cp_eax = base + 1;
	(void) __cpuid_insn(&cp);
	xen_major = cp.cp_eax >> 16;
	xen_minor = cp.cp_eax & 0xffff;

	/*
	 * Below version 3.1 we can't do anything special as a HVM domain;
	 * the PV drivers don't work, many hypercalls are not available,
	 * etc.
	 */
	if (xen_major < 3 || (xen_major == 3 && xen_minor < 1))
		return;

	/*
	 * cpuid function at base + 2 returns information about the
	 * hypercall page.  %eax nominally contains the number of pages
	 * with hypercall code, but according to the Xen guys, "I'll
	 * guarantee that remains one forever more, so you can just
	 * allocate a single page and get quite upset if you ever see CPUID
	 * return more than one page."  %ebx contains an MSR we use to ask
	 * Xen to remap each page at a specific pfn.
	 */
	cp.cp_eax = base + 2;
	(void) __cpuid_insn(&cp);

	/*
	 * Let Xen know where we want the hypercall page mapped.  We
	 * already have a page allocated in the .text section to simplify
	 * the wrapper code.
	 */
	pfn = va_to_pfn(&hypercall_page);
	msrval = mmu_ptob(pfn);
	wrmsr(cp.cp_ebx, msrval);

	/* Fill in the xen_info data */
	xen_info = &__xen_info;
	(void) sprintf(xen_info->magic, "xen-%d.%d", xen_major, xen_minor);

	if (hvm_get_param(HVM_PARAM_STORE_PFN, &val) < 0)
		return;
	/*
	 * The first hypercall worked, so mark hypercalls as working.
	 */
	xen_hvm_features |= XEN_HVM_HYPERCALLS;

	xen_info->store_mfn = (mfn_t)val;
	if (hvm_get_param(HVM_PARAM_STORE_EVTCHN, &val) < 0)
		return;
	xen_info->store_evtchn = (mfn_t)val;

	/* Figure out whether the hypervisor is 32-bit or 64-bit.  */
	if (HYPERVISOR_xen_version(XENVER_capabilities, &caps) == 0) {
		((char *)(caps))[sizeof (caps) - 1] = '\0';
		if (strstr(caps, "x86_64") != NULL)
			xen_bits = 64;
		else if (strstr(caps, "x86_32") != NULL)
			xen_bits = 32;
	}

	if (xen_bits < 0)
		return;
#ifdef __amd64
	ASSERT(xen_bits == 64);
#endif

	/*
	 * Allocate space for the shared_info page and tell Xen where it
	 * is.
	 */
	xen_shared_info_frame = va_to_pfn(&hypercall_shared_info_page);
	xatp.domid = DOMID_SELF;
	xatp.idx = 0;
	xatp.space = XENMAPSPACE_shared_info;
	xatp.gpfn = xen_shared_info_frame;
	if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp) != 0)
		return;

	HYPERVISOR_shared_info = (void *)&hypercall_shared_info_page;

	/*
	 * A working HVM tlb flush hypercall was introduced in Xen 3.3.
	 */
	if (xen_major > 3 || (xen_major == 3 && xen_minor >= 3))
		xen_hvm_features |= XEN_HVM_TLBFLUSH;

	/* FIXME Disable apix for the time being */
	apix_enable = 0;
}
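hvm_get_param() is not shown in this example; the usual implementation wraps the HVMOP_get_param hypercall, roughly like this sketch (assuming the standard xen_hvm_param interface from the Xen public headers):

static int
hvm_get_param(int index, uint64_t *val)
{
	struct xen_hvm_param xhv;
	int err;

	xhv.domid = DOMID_SELF;
	xhv.index = index;
	err = HYPERVISOR_hvm_op(HVMOP_get_param, &xhv);
	if (err == 0)
		*val = xhv.value;
	return (err);
}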
Example #18
void __init xen_start_kernel(void)
{
	unsigned int i;
	struct xen_machphys_mapping mapping;
	unsigned long machine_to_phys_nr_ents;
#ifdef CONFIG_X86_32
	struct xen_platform_parameters pp;
	extern pte_t swapper_pg_fixmap[PTRS_PER_PTE];
	unsigned long addr;
#endif

	xen_setup_features();

	if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
		machine_to_phys_mapping = (unsigned long *)mapping.v_start;
		machine_to_phys_nr_ents = mapping.max_mfn + 1;
	} else
		machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES;
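	/* Raise the order until 2^order covers machine_to_phys_nr_ents. */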
	while ((1UL << machine_to_phys_order) < machine_to_phys_nr_ents)
		machine_to_phys_order++;

	if (!xen_feature(XENFEAT_auto_translated_physmap))
		phys_to_machine_mapping =
			(unsigned long *)xen_start_info->mfn_list;

	WARN_ON(HYPERVISOR_vm_assist(VMASST_CMD_enable,
				     VMASST_TYPE_writable_pagetables));

	reserve_early(ALIGN(__pa_symbol(&_end), PAGE_SIZE),
		      __pa(xen_start_info->pt_base)
		      + (xen_start_info->nr_pt_frames << PAGE_SHIFT),
		      "Xen provided");

#ifdef CONFIG_X86_32
	WARN_ON(HYPERVISOR_vm_assist(VMASST_CMD_enable,
				     VMASST_TYPE_4gb_segments));

	init_mm.pgd = swapper_pg_dir = (pgd_t *)xen_start_info->pt_base;

	if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0) {
		hypervisor_virt_start = pp.virt_start;
		reserve_top_address(0UL - pp.virt_start);
	}

	BUG_ON(pte_index(hypervisor_virt_start));

	/* Do an early initialization of the fixmap area */
	make_lowmem_page_readonly(swapper_pg_fixmap, XENFEAT_writable_page_tables);
	addr = __fix_to_virt(FIX_EARLYCON_MEM_BASE);
	set_pmd(pmd_offset(pud_offset(swapper_pg_dir + pgd_index(addr),
				      addr),
			   addr),
		__pmd(__pa_symbol(swapper_pg_fixmap) | _PAGE_TABLE));
#else
	check_efer();
	xen_init_pt();
#endif

#define __FIXADDR_TOP (-PAGE_SIZE)
#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
#define FIX_BUG_ON(fix) BUILD_BUG_ON(pmd_index(__fix_to_virt(FIX_##fix)) \
			!= pmd_index(__fix_to_virt(FIX_EARLYCON_MEM_BASE)))
	FIX_BUG_ON(SHARED_INFO);
	FIX_BUG_ON(ISAMAP_BEGIN);
	FIX_BUG_ON(ISAMAP_END);
#undef pmd_index
#undef __FIXADDR_TOP

	/* Switch to the real shared_info page, and clear the dummy page. */
	set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
	HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
	memset(empty_zero_page, 0, sizeof(empty_zero_page));

	setup_vcpu_info(0);

	/* Set up mapping of lowest 1MB of physical memory. */
	for (i = 0; i < NR_FIX_ISAMAPS; i++)
		if (is_initial_xendomain())
			set_fixmap(FIX_ISAMAP_BEGIN - i, i * PAGE_SIZE);
		else
			__set_fixmap(FIX_ISAMAP_BEGIN - i,
				     virt_to_machine(empty_zero_page),
				     PAGE_KERNEL_RO);

}