Example #1
0
/*
 * Build the zeropage-style boot_params for a PVH guest from the data the
 * hypervisor supplied in pvh_start_info.
 *
 * Steps: fetch the e820 memory map via the XENMEM_memory_map hypercall,
 * reserve the legacy ISA range, wire up the kernel command line and the
 * first boot module (by convention the initial ramdisk), and advertise
 * boot protocol 2.12 with the Xen boot-loader type.  BUGs on hypercall
 * failure since boot cannot proceed without a memory map.
 */
static void __init init_pvh_bootparams(void)
{
	struct xen_memory_map memmap;
	int rc;

	memset(&pvh_bootparams, 0, sizeof(pvh_bootparams));

	/* Have the hypervisor fill the e820 table in boot_params directly. */
	memmap.nr_entries = ARRAY_SIZE(pvh_bootparams.e820_table);
	set_xen_guest_handle(memmap.buffer, pvh_bootparams.e820_table);
	rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
	if (rc) {
		xen_raw_printk("XENMEM_memory_map failed (%d)\n", rc);
		BUG();
	}
	pvh_bootparams.e820_entries = memmap.nr_entries;

	/*
	 * Mark the ISA range reserved so it is never treated as usable RAM.
	 * Needs a free slot in the e820 table; otherwise just warn.
	 */
	if (pvh_bootparams.e820_entries < E820_MAX_ENTRIES_ZEROPAGE - 1) {
		pvh_bootparams.e820_table[pvh_bootparams.e820_entries].addr =
			ISA_START_ADDRESS;
		pvh_bootparams.e820_table[pvh_bootparams.e820_entries].size =
			ISA_END_ADDRESS - ISA_START_ADDRESS;
		pvh_bootparams.e820_table[pvh_bootparams.e820_entries].type =
			E820_TYPE_RESERVED;
		pvh_bootparams.e820_entries++;
	} else
		xen_raw_printk("Warning: Cannot fit ISA range into e820\n");

	pvh_bootparams.hdr.cmd_line_ptr =
		pvh_start_info.cmdline_paddr;

	/* The first module is always ramdisk. */
	if (pvh_start_info.nr_modules) {
		struct hvm_modlist_entry *modaddr =
			__va(pvh_start_info.modlist_paddr);
		pvh_bootparams.hdr.ramdisk_image = modaddr->paddr;
		pvh_bootparams.hdr.ramdisk_size = modaddr->size;
	}

	/*
	 * See Documentation/x86/boot.txt.
	 *
	 * Version 2.12 supports Xen entry point but we will use default x86/PC
	 * environment (i.e. hardware_subarch 0).
	 */
	pvh_bootparams.hdr.version = (2 << 8) | 12;
	pvh_bootparams.hdr.type_of_loader = (9 << 4) | 0; /* Xen loader */

	x86_init.acpi.get_root_pointer = pvh_get_root_pointer;
}
Example #2
0
/*
 * Prepare secondary CPUs for bringup under Xen.
 *
 * Refuses to run without an IO-APIC setup (nosmp/noapic are incompatible
 * with Xen), initializes the boot CPU's lock/topology state, allocates the
 * per-cpu sibling/core/LLC masks, trims the possible-CPU map down to
 * max_cpus, and forks an idle task for every remaining non-boot CPU,
 * marking it present.
 */
static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	if (skip_ioapic_setup) {
		const char *msg;

		if (max_cpus == 0)
			msg = "The nosmp parameter is incompatible with Xen; "
			      "use Xen dom0_max_vcpus=1 parameter";
		else
			msg = "The noapic parameter is incompatible with Xen";

		xen_raw_printk(msg);
		panic(msg);
	}
	xen_init_lock_cpu(0);

	smp_store_cpu_info(0);
	cpu_data(0).x86_max_cores = 1;

	/* Topology masks for every possible CPU, then boot CPU siblings. */
	for_each_possible_cpu(cpu) {
		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, cpu), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_core_map, cpu), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, cpu), GFP_KERNEL);
	}
	set_cpu_sibling_map(0);

	if (xen_smp_intr_init(0))
		BUG();

	if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
		panic("could not allocate xen_cpu_initialized_map\n");

	cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));

	/*
	 * Restrict the possible_map according to max_cpus: drop the
	 * highest-numbered possible CPU until the count fits.
	 */
	while (num_possible_cpus() > 1 && num_possible_cpus() > max_cpus) {
		for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
			continue;
		set_cpu_possible(cpu, false);
	}

	/* Fork an idle task for each remaining secondary CPU. */
	for_each_possible_cpu(cpu) {
		struct task_struct *idle;

		if (cpu == 0)
			continue;

		idle = fork_idle(cpu);
		if (IS_ERR(idle))
			panic("failed fork for CPU %d", cpu);

		set_cpu_present(cpu, true);
	}
}
Example #3
0
/*
 * This routine (and those that it might call) should not use
 * anything that lives in .bss since that segment will be cleared later.
 */
void __init xen_prepare_pvh(void)
{
	u32 msr;
	u64 pfn;

	/* Sanity-check the start-of-day structure handed over by Xen. */
	if (pvh_start_info.magic != XEN_HVM_START_MAGIC_VALUE) {
		xen_raw_printk("Error: Unexpected magic value (0x%08x)\n",
				pvh_start_info.magic);
		BUG();
	}

	xen_pvh = 1;

	/*
	 * CPUID leaf base+2 returns (in EBX) the MSR used to register the
	 * hypercall page; write its physical frame so hypercalls work.
	 * wrmsr_safe() result is intentionally ignored — there is no
	 * meaningful recovery this early in boot.
	 */
	msr = cpuid_ebx(xen_cpuid_base() + 2);
	pfn = __pa(hypercall_page);
	wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));

	init_pvh_bootparams();
}