Example #1
void mm_pin_all(void)
{
	struct page *page;
	if (xen_feature(XENFEAT_writable_page_tables))
		return;
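	/* Walk the global pgd_list and pin every page directory not already marked PG_pinned. */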
	for (page = pgd_list; page; page = (struct page *)page->index) {
		if (!test_bit(PG_pinned, &page->flags))
			__pgd_pin((pgd_t *)page_address(page));
	}
}
Example #2
void make_pages_writable(void *va, unsigned int nr, unsigned int feature)
{
	if (xen_feature(feature))
		return;

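	/* Restore write access one page at a time across the nr-page range. */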
	while (nr-- != 0) {
		make_page_writable(va, feature);
		va = (void *)((unsigned long)va + PAGE_SIZE);
	}
}
Example #3
void make_pages_readonly(void *va, unsigned nr, unsigned int feature)
{
	if (xen_feature(feature))
		return;

	while (nr-- != 0) {
		__make_page_readonly(va);
		va = (void*)((unsigned long)va + PAGE_SIZE);
	}
}
Example #4
/*
 * Tell the hypervisor how to contact us for event channel callbacks.
 */
void
xen_hvm_set_callback(device_t dev)
{
	struct xen_hvm_param xhp;
	int irq;

	if (xen_vector_callback_enabled)
		return;

	xhp.domid = DOMID_SELF;
	xhp.index = HVM_PARAM_CALLBACK_IRQ;
	if (xen_feature(XENFEAT_hvm_callback_vector) != 0) {
		int error;

		error = set_percpu_callback(0);
		if (error == 0) {
			xen_evtchn_needs_ack = true;
			/* Trick toolstack to think we are enlightened */
			xhp.value = 1;
		} else
			xhp.value = HVM_CALLBACK_VECTOR(IDT_EVTCHN);
		error = HYPERVISOR_hvm_op(HVMOP_set_param, &xhp);
		if (error == 0) {
			xen_vector_callback_enabled = 1;
			return;
		} else if (xen_evtchn_needs_ack)
			panic("Unable to setup fake HVM param: %d", error);

		printf("Xen HVM callback vector registration failed (%d). "
		    "Falling back to emulated device interrupt\n", error);
	}
	xen_vector_callback_enabled = 0;
	if (dev == NULL) {
		/*
		 * Called from early boot or resume.
		 * xenpci will invoke us again later.
		 */
		return;
	}

	irq = pci_get_irq(dev);
	if (irq < 16) {
		xhp.value = HVM_CALLBACK_GSI(irq);
	} else {
		u_int slot;
		u_int pin;

		slot = pci_get_slot(dev);
		pin = pci_get_intpin(dev) - 1;
		xhp.value = HVM_CALLBACK_PCI_INTX(slot, pin);
	}

	if (HYPERVISOR_hvm_op(HVMOP_set_param, &xhp) != 0)
		panic("Can't set evtchn callback");
}
Example #5
void xen_setup_mfn_list_list(void)
{
    if (xen_feature(XENFEAT_auto_translated_physmap))
        return;

    BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);

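    /* Publish the location of the p2m frame-list-list and the maximum pfn to the hypervisor. */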
    HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
        virt_to_mfn(p2m_top_mfn);
    HYPERVISOR_shared_info->arch.max_pfn = xen_max_p2m_pfn;
}
Example #6
void xen_machphys_update(unsigned long mfn, unsigned long pfn)
{
	mmu_update_t u;
	if (xen_feature(XENFEAT_auto_translated_physmap)) {
		BUG_ON(pfn != mfn);
		return;
	}
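	/* Queue a single machine-to-physical (M2P) table update mapping this mfn to pfn. */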
	u.ptr = ((unsigned long long)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
	u.val = pfn;
	BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
}
Example #7
bool xen_hvm_need_lapic(void)
{
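	/*
	 * The emulated local APIC is only required for Xen HVM guests that
	 * cannot use PV event delivery (HVM PIRQs plus the vector callback).
	 */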
	if (xen_nopv)
		return false;
	if (xen_pv_domain())
		return false;
	if (!xen_hvm_domain())
		return false;
	if (xen_feature(XENFEAT_hvm_pirqs) && xen_have_vector_callback)
		return false;
	return true;
}
Example #8
File: setup.c Project: mbgg/linux
static unsigned long __init xen_set_identity_and_release(
	const struct e820entry *list, size_t map_size, unsigned long nr_pages)
{
	phys_addr_t start = 0;
	unsigned long released = 0;
	unsigned long identity = 0;
	const struct e820entry *entry;
	int i;
	int xlated_phys = xen_feature(XENFEAT_auto_translated_physmap);

	/*
	 * Combine non-RAM regions and gaps until a RAM region (or the
	 * end of the map) is reached, then set the 1:1 map and
	 * release the pages (if available) in those non-RAM regions.
	 *
	 * The combined non-RAM regions are rounded to a whole number
	 * of pages so any partial pages are accessible via the 1:1
	 * mapping.  This is needed for some BIOSes that put (for
	 * example) the DMI tables in a reserved region that begins on
	 * a non-page boundary.
	 */
	for (i = 0, entry = list; i < map_size; i++, entry++) {
		phys_addr_t end = entry->addr + entry->size;
		if (entry->type == E820_RAM || i == map_size - 1) {
			unsigned long start_pfn = PFN_DOWN(start);
			unsigned long end_pfn = PFN_UP(end);

			if (entry->type == E820_RAM)
				end_pfn = PFN_UP(entry->addr);

			if (start_pfn < end_pfn) {
				if (xlated_phys) {
					xen_pvh_adjust_stats(start_pfn,
						end_pfn, &released, &identity,
						nr_pages);
				} else {
					xen_set_identity_and_release_chunk(
						start_pfn, end_pfn, nr_pages,
						&released, &identity);
				}
			}
			start = end;
		}
	}

	if (released)
		printk(KERN_INFO "Released %lu pages of unused memory\n", released);
	if (identity)
		printk(KERN_INFO "Set %ld page(s) to 1-1 mapping\n", identity);

	return released;
}
Example #9
File: smp.c Project: mbgg/linux
static void __init xen_smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);
	native_smp_prepare_boot_cpu();

	if (!xen_feature(XENFEAT_writable_page_tables)) {
		/* We've switched to the "real" per-cpu gdt, so make sure the
		 * old memory can be recycled */
		make_lowmem_page_readwrite(xen_initial_gdt);
	}
	xen_filter_cpu_maps();
	xen_setup_vcpu_info_placement();
}
Example #10
void make_lowmem_page_writable(void *va, unsigned int feature)
{
	pte_t *pte;
	int rc;

	if (xen_feature(feature))
		return;

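	/* Look up the PTE for this low-memory VA and ask the hypervisor to remap it writable. */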
	pte = virt_to_ptep(va);
	rc = HYPERVISOR_update_va_mapping(
		(unsigned long)va, pte_mkwrite(*pte), 0);
	BUG_ON(rc);
}
Example #11
int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
                    struct gnttab_map_grant_ref *kmap_ops,
                    struct page **pages, unsigned int count)
{
    int i, ret;
    bool lazy = false;
    pte_t *pte;
    unsigned long mfn;

    ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
    if (ret)
        return ret;

    /* Retry eagain maps */
    for (i = 0; i < count; i++)
        if (map_ops[i].status == GNTST_eagain)
            gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, map_ops + i,
                                    &map_ops[i].status, __func__);

    if (xen_feature(XENFEAT_auto_translated_physmap))
        return ret;

    if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
        arch_enter_lazy_mmu_mode();
        lazy = true;
    }

    for (i = 0; i < count; i++) {
        /* Do not add to override if the map failed. */
        if (map_ops[i].status)
            continue;

        if (map_ops[i].flags & GNTMAP_contains_pte) {
            pte = (pte_t *) (mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
                             (map_ops[i].host_addr & ~PAGE_MASK));
            mfn = pte_mfn(*pte);
        } else {
            mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
        }
        ret = m2p_add_override(mfn, pages[i], kmap_ops ?
                               &kmap_ops[i] : NULL);
        if (ret)
            goto out;
    }

out:
    if (lazy)
        arch_leave_lazy_mmu_mode();

    return ret;
}
Example #12
void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
		BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
		return;
	}

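	/* Update the p2m entry; if the boot-time table cannot hold it, allocate a p2m page and retry. */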
	if (unlikely(!__set_phys_to_machine(pfn, mfn)))  {
		alloc_p2m(pfn);

		if (!__set_phys_to_machine(pfn, mfn))
			BUG();
	}
}
Example #13
int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
		    struct page **pages, unsigned int count)
{
	int i, ret;
	pte_t *pte;
	unsigned long mfn;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
	if (ret)
		return ret;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return ret;

	for (i = 0; i < count; i++) {
		/* Do not add to override if the map failed. */
		if (map_ops[i].status)
			continue;

		if (map_ops[i].flags & GNTMAP_contains_pte) {
			pte = (pte_t *) (mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
				(map_ops[i].host_addr & ~PAGE_MASK));
			mfn = pte_mfn(*pte);
		} else {
			/* If you really wanted to do this:
			 * mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
			 *
			 * The reason we do not implement it is because on the
			 * unmap path (gnttab_unmap_refs) we have no means of
			 * checking whether the page is !GNTMAP_contains_pte.
			 *
			 * That is, without some extra data structure to carry
			 * the struct page, bool clear_pte, and list_head next
			 * tuples and deal with allocation/deallocation, etc.
			 *
			 * The users of this API set the GNTMAP_contains_pte
			 * flag, so let's just return not supported until it
			 * becomes necessary to implement.
			 */
			return -EOPNOTSUPP;
		}
		ret = m2p_add_override(mfn, pages[i],
				       map_ops[i].flags & GNTMAP_contains_pte);
		if (ret)
			return ret;
	}

	return ret;
}
Example #14
File: p2m.c Project: DenisLug/mptcp
/* Set up p2m_top to point to the domain-builder provided p2m pages */
void __init xen_build_dynamic_phys_to_machine(void)
{
	unsigned long pfn;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return;

	xen_p2m_addr = (unsigned long *)xen_start_info->mfn_list;
	xen_p2m_size = ALIGN(xen_start_info->nr_pages, P2M_PER_PAGE);

	for (pfn = xen_start_info->nr_pages; pfn < xen_p2m_size; pfn++)
		xen_p2m_addr[pfn] = INVALID_P2M_ENTRY;

	xen_max_p2m_pfn = xen_p2m_size;
}
Example #15
void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
{
	unsigned long flags; /* can be called from interrupt context */

	if (PTRS_PER_PMD > 1) {
		if (!xen_feature(XENFEAT_pae_pgdir_above_4gb))
			xen_destroy_contiguous_region((unsigned long)pgd, 0);
	} else {
		spin_lock_irqsave(&pgd_lock, flags);
		pgd_list_del(pgd);
		spin_unlock_irqrestore(&pgd_lock, flags);

		pgd_test_and_unpin(pgd);
	}
}
Example #16
void __init xen_hvm_init_time_ops(void)
{
	if (!xen_have_vector_callback)
		return;
	if (!xen_feature(XENFEAT_hvm_safe_pvclock)) {
		printk(KERN_INFO "Xen doesn't support pvclock on HVM,"
				"disable pv timer\n");
		return;
	}

	pv_time_ops = xen_time_ops;
	x86_init.timers.setup_percpu_clockev = xen_time_init;
	x86_cpuinit.setup_percpu_clockev = xen_hvm_setup_cpu_clockevents;

	x86_platform.calibrate_tsc = xen_tsc_khz;
	x86_platform.get_wallclock = xen_get_wallclock;
	x86_platform.set_wallclock = xen_set_wallclock;
}
Example #17
static int gnttab_setup(void)
{
	unsigned int max_nr_gframes;

	max_nr_gframes = gnttab_max_grant_frames();
	if (max_nr_gframes < nr_grant_frames)
		return -ENOSYS;

	if (xen_feature(XENFEAT_auto_translated_physmap) && gnttab_shared.addr == NULL) {
		gnttab_shared.addr = xen_auto_xlat_grant_frames.vaddr;
		if (gnttab_shared.addr == NULL) {
			pr_warn("gnttab share frames (addr=0x%08lx) is not mapped!\n",
				(unsigned long)xen_auto_xlat_grant_frames.vaddr);
			return -ENOMEM;
		}
	}
	return gnttab_map(0, nr_grant_frames - 1);
}
Example #18
File: p2m.c Project: DenisLug/mptcp
void xen_setup_mfn_list_list(void)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return;

	BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);

	if (xen_start_info->flags & SIF_VIRT_P2M_4TOOLS)
		HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list = ~0UL;
	else
		HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
			virt_to_mfn(p2m_top_mfn);
	HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn;
	HYPERVISOR_shared_info->arch.p2m_generation = 0;
	HYPERVISOR_shared_info->arch.p2m_vaddr = (unsigned long)xen_p2m_addr;
	HYPERVISOR_shared_info->arch.p2m_cr3 =
		xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));
}
Example #19
void clear_highpage(struct page *page)
{
	void *kaddr;

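	/*
	 * Prefer the hypervisor's MMUEXT_CLEAR_PAGE operation for highmem
	 * pages; otherwise fall back to a temporary atomic mapping.
	 */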
	if (likely(xen_feature(XENFEAT_highmem_assist))
	    && PageHighMem(page)) {
		struct mmuext_op meo;

		meo.cmd = MMUEXT_CLEAR_PAGE;
		meo.arg1.mfn = pfn_to_mfn(page_to_pfn(page));
		if (HYPERVISOR_mmuext_op(&meo, 1, NULL, DOMID_SELF) == 0)
			return;
	}

	kaddr = kmap_atomic(page, KM_USER0);
	clear_page(kaddr);
	kunmap_atomic(kaddr, KM_USER0);
}
Example #20
void
init_gnttab(void)
{
    struct gnttab_setup_table setup;
    unsigned long frames[NR_GRANT_FRAMES];
    int i;

#ifdef GNT_DEBUG
    memset(inuse, 1, sizeof(inuse));
#endif
    for (i = NR_RESERVED_ENTRIES; i < NR_GRANT_ENTRIES; i++)
        put_free_entry(i);

    if (!xen_feature(XENFEAT_auto_translated_physmap)) {
        setup.dom = DOMID_SELF;
        setup.nr_frames = NR_GRANT_FRAMES;
        set_xen_guest_handle(setup.frame_list, frames);

        HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
        gnttab_table = map_frames(frames, NR_GRANT_FRAMES);
    }
    else {
        struct xen_add_to_physmap xatp;
        i = NR_GRANT_FRAMES - 1;

        /* map_frames() works differently when the p2m is auto-translated:
         * mini-os itself maps gnttab_table at vaddrs equal to the paddrs,
         * and the frames are then provided to Xen for the p2m mapping. */
        gnttab_table = map_frames(NULL, NR_GRANT_FRAMES);

        for(i = NR_GRANT_FRAMES - 1; i >= 0; i--) {
            xatp.domid = DOMID_SELF;
            xatp.idx = i;
            xatp.space = XENMAPSPACE_grant_table;
            xatp.gpfn = 
                virt_to_pfn((unsigned long)gnttab_table + (i << PAGE_SHIFT));
            if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp) != 0) {
                printk("grant table add_to_physmap failed\n");
                break;
            }
        }
    }
    printk("gnttab_table mapped at %p.\n", gnttab_table);
}
Example #21
File: p2m.c Project: DenisLug/mptcp
int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
			    struct gnttab_map_grant_ref *kmap_ops,
			    struct page **pages, unsigned int count)
{
	int i, ret = 0;
	pte_t *pte;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	if (kmap_ops) {
		ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
						kmap_ops, count);
		if (ret)
			goto out;
	}

	for (i = 0; i < count; i++) {
		unsigned long mfn, pfn;

		/* Do not add to override if the map failed. */
		if (map_ops[i].status)
			continue;

		if (map_ops[i].flags & GNTMAP_contains_pte) {
			pte = (pte_t *)(mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
				(map_ops[i].host_addr & ~PAGE_MASK));
			mfn = pte_mfn(*pte);
		} else {
			mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
		}
		pfn = page_to_pfn(pages[i]);

		WARN(pfn_to_mfn(pfn) != INVALID_P2M_ENTRY, "page must be ballooned");

		if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) {
			ret = -ENOMEM;
			goto out;
		}
	}

out:
	return ret;
}
Example #22
void xen_setup_shared_info(void)
{
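	/*
	 * PV guests map the shared_info MFN through a fixmap;
	 * auto-translated guests can address it directly via __va().
	 */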
	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		set_fixmap(FIX_PARAVIRT_BOOTMAP,
			   xen_start_info->shared_info);

		HYPERVISOR_shared_info =
			(struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);
	} else
		HYPERVISOR_shared_info =
			(struct shared_info *)__va(xen_start_info->shared_info);

#ifndef CONFIG_SMP
	/* In UP this is as good a place as any to set up shared info */
	xen_setup_vcpu_info_placement();
#endif

	xen_setup_mfn_list_list();
}
Example #23
void __init xen_arch_setup(void)
{
	struct physdev_set_iopl set_iopl;
	int rc;

	xen_panic_handler_init();

	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);

	if (!xen_feature(XENFEAT_auto_translated_physmap))
		HYPERVISOR_vm_assist(VMASST_CMD_enable,
				     VMASST_TYPE_pae_extended_cr3);

	if (register_callback(CALLBACKTYPE_event, xen_hypervisor_callback) ||
	    register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
		BUG();

	xen_enable_sysenter();
	xen_enable_syscall();

	set_iopl.iopl = 1;
	rc = HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
	if (rc != 0)
		printk(KERN_INFO "physdev_op failed %d\n", rc);

#ifdef CONFIG_ACPI
	if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
		printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
		disable_acpi();
	}
#endif

	memcpy(boot_command_line, xen_start_info->cmd_line,
	       MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
	       COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);

	pm_idle = xen_idle;

	paravirt_disable_iospace();

	fiddle_vdso();
}
Example #24
int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
			      struct gnttab_map_grant_ref *kmap_ops,
			      struct page **pages, unsigned int count)
{
	int i, ret = 0;
	bool lazy = false;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	if (kmap_ops &&
	    !in_interrupt() &&
	    paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
		arch_enter_lazy_mmu_mode();
		lazy = true;
	}

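	/*
	 * Restore each page's p2m entry from page->index and drop any
	 * m2p override installed for the kernel mapping.
	 */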
	for (i = 0; i < count; i++) {
		unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i]));
		unsigned long pfn = page_to_pfn(pages[i]);

		if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT)) {
			ret = -EINVAL;
			goto out;
		}

		set_page_private(pages[i], INVALID_P2M_ENTRY);
		WARN_ON(!PagePrivate(pages[i]));
		ClearPagePrivate(pages[i]);
		set_phys_to_machine(pfn, pages[i]->index);

		if (kmap_ops)
			ret = m2p_remove_override(pages[i], &kmap_ops[i], mfn);
		if (ret)
			goto out;
	}

out:
	if (lazy)
		arch_leave_lazy_mmu_mode();
	return ret;
}
Example #25
/* This is a twin to netbk_gop_skb.  Assume that netbk_gop_skb was
   used to set up the operations on the top of
   netrx_pending_operations, which have since been done.  Check that
   they didn't give any errors and advance over them. */
static int netbk_check_gop(int nr_frags, domid_t domid,
			   struct netrx_pending_operations *npo)
{
	multicall_entry_t *mcl;
	gnttab_transfer_t *gop;
	gnttab_copy_t     *copy_op;
	int status = NETIF_RSP_OKAY;
	int i;

	for (i = 0; i <= nr_frags; i++) {
		if (npo->meta[npo->meta_cons + i].copy) {
			copy_op = npo->copy + npo->copy_cons++;
			if (copy_op->status != GNTST_okay) {
				DPRINTK("Bad status %d from copy to DOM%d.\n",
					gop->status, domid);
				status = NETIF_RSP_ERROR;
			}
		} else {
			if (!xen_feature(XENFEAT_auto_translated_physmap)) {
				mcl = npo->mcl + npo->mcl_cons++;
				/* The update_va_mapping() must not fail. */
				BUG_ON(mcl->result != 0);
			}

			gop = npo->trans + npo->trans_cons++;
			/* Check the reassignment error code. */
			if (gop->status != 0) {
				DPRINTK("Bad status %d from grant transfer to DOM%u\n",
					gop->status, domid);
				/*
				 * Page no longer belongs to us unless
				 * GNTST_bad_page, but that should be
				 * a fatal error anyway.
				 */
				BUG_ON(gop->status == GNTST_bad_page);
				status = NETIF_RSP_ERROR;
			}
		}
	}

	return status;
}
Example #26
__init void xen_hvm_init_time_ops(void)
{
	/* vector callback is needed otherwise we cannot receive interrupts
	 * on cpu > 0 */
	if (!xen_have_vector_callback && num_present_cpus() > 1)
		return;
	if (!xen_feature(XENFEAT_hvm_safe_pvclock)) {
		printk(KERN_INFO "Xen doesn't support pvclock on HVM,"
				"disable pv timer\n");
		return;
	}

	pv_time_ops = xen_time_ops;
	x86_init.timers.setup_percpu_clockev = xen_time_init;
	x86_cpuinit.setup_percpu_clockev = xen_hvm_setup_cpu_clockevents;

	x86_platform.calibrate_tsc = xen_tsc_khz;
	x86_platform.get_wallclock = xen_get_wallclock;
	x86_platform.set_wallclock = xen_set_wallclock;
}
Example #27
int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
		struct page **pages, unsigned int count)
{
	int i, ret;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
	if (ret)
		return ret;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return ret;

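	/* Drop the m2p overrides installed when these grants were mapped. */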
	for (i = 0; i < count; i++) {
		ret = m2p_remove_override(pages[i], true /* clear the PTE */);
		if (ret)
			return ret;
	}

	return ret;
}
Example #28
void __init xen_arch_setup(void)
{
	xen_panic_handler_init();

	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);

	if (!xen_feature(XENFEAT_auto_translated_physmap))
		HYPERVISOR_vm_assist(VMASST_CMD_enable,
				     VMASST_TYPE_pae_extended_cr3);

	if (register_callback(CALLBACKTYPE_event, xen_hypervisor_callback) ||
	    register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
		BUG();

	xen_enable_sysenter();
	xen_enable_syscall();

#ifdef CONFIG_ACPI
	if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
		printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
		disable_acpi();
	}
#endif

	memcpy(boot_command_line, xen_start_info->cmd_line,
	       MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
	       COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);

	/* Set up idle, making sure it calls safe_halt() pvop */
#ifdef CONFIG_X86_32
	boot_cpu_data.hlt_works_ok = 1;
#endif
	pm_idle = default_idle;
	boot_option_idle_override = IDLE_HALT;

	fiddle_vdso();
#ifdef CONFIG_NUMA
	numa_off = 1;
#endif
}
Example #29
void __init xen_arch_setup(void)
{
	xen_panic_handler_init();

	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);

	if (!xen_feature(XENFEAT_auto_translated_physmap))
		HYPERVISOR_vm_assist(VMASST_CMD_enable,
				     VMASST_TYPE_pae_extended_cr3);

	if (register_callback(CALLBACKTYPE_event, xen_hypervisor_callback) ||
	    register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
		BUG();

	xen_enable_sysenter();
	xen_enable_syscall();
	xen_enable_nmi();
#ifdef CONFIG_ACPI
	if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
		printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
		disable_acpi();
	}
#endif

	memcpy(boot_command_line, xen_start_info->cmd_line,
	       MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
	       COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);

	/* Set up idle, making sure it calls safe_halt() pvop */
	disable_cpuidle();
	disable_cpufreq();
	WARN_ON(xen_set_default_idle());
	fiddle_vdso();
#ifdef CONFIG_NUMA
	if (!xen_initial_domain() && xen_vnuma_supported())
		numa_off = 0;
	else
		numa_off = 1;
#endif
}
Example #30
void __init xen_arch_setup(void)
{
	struct physdev_set_iopl set_iopl;
	int rc;

	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);

	if (!xen_feature(XENFEAT_auto_translated_physmap))
		HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_pae_extended_cr3);

	HYPERVISOR_set_callbacks(__KERNEL_CS, (unsigned long)xen_hypervisor_callback,
				 __KERNEL_CS, (unsigned long)xen_failsafe_callback);

	set_iopl.iopl = 1;
	rc = HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
	if (rc != 0)
		printk(KERN_INFO "physdev_op failed %d\n", rc);

#ifdef CONFIG_ACPI
	if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
		printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
		disable_acpi();
	}
#endif

	memcpy(boot_command_line, xen_start_info->cmd_line,
	       MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
	       COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);

	pm_idle = xen_idle;

#ifdef CONFIG_SMP
	/* fill cpus_possible with all available cpus */
	xen_fill_possible_map();
#endif

	paravirt_disable_iospace();

	fiddle_vdso();
}