Example #1
/*
 * Temporarily use the virtual area starting from FIX_IO_APIC_BASE_END,
 * to map the target physical address. The problem is that set_fixmap()
 * provides a single page, and it is possible that the page is not
 * sufficient.
 * By using this area, we can map up to MAX_IO_APICS pages temporarily,
 * i.e. until the next __va_range() call.
 *
 * Important Safety Note:  The fixed I/O APIC page numbers are *subtracted*
 * from the fixed base.  That's why we start at FIX_IO_APIC_BASE_END and
 * count idx down while incrementing the phys address.
 */
char *__init __acpi_map_table(unsigned long phys, unsigned long size)
{
	unsigned long base, offset, mapped_size;
	int idx;

	if (!phys || !size)
		return NULL;

	if (phys+size <= (max_low_pfn_mapped << PAGE_SHIFT))
		return __va(phys);

	offset = phys & (PAGE_SIZE - 1);
	mapped_size = PAGE_SIZE - offset;
	clear_fixmap(FIX_ACPI_END);
	set_fixmap(FIX_ACPI_END, phys);
	base = fix_to_virt(FIX_ACPI_END);

	/*
	 * Most cases can be covered by the below.
	 */
	idx = FIX_ACPI_END;
	while (mapped_size < size) {
		if (--idx < FIX_ACPI_BEGIN)
			return NULL;	/* cannot handle this */
		phys += PAGE_SIZE;
		clear_fixmap(idx);
		set_fixmap(idx, phys);
		mapped_size += PAGE_SIZE;
	}

	return ((char *)base + offset);
}
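A hedged caller sketch (not taken from the surrounding source; the helper name, struct acpi_table_header local, and the "parse" placeholder are purely illustrative): early table parsing typically maps only the fixed-size header first, then remaps with the full length the header reports.

/* Hypothetical caller sketch, for illustration only. */
static void __init map_one_table(unsigned long phys)
{
	struct acpi_table_header *header;

	/* Map just the header to learn the table's full length. */
	header = (struct acpi_table_header *)__acpi_map_table(phys, sizeof(*header));
	if (!header)
		return;

	/* Remap with the real length before walking the table body. */
	header = (struct acpi_table_header *)__acpi_map_table(phys, header->length);
	if (!header)
		return;
	/* ... parse the table through the fixmap window ... */
}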
Example #2
/*
 * Temporarily use the virtual area starting from FIX_IO_APIC_BASE_END,
 * to map the target physical address. The problem is that set_fixmap()
 * provides a single page, and it is possible that the page is not
 * sufficient.
 * By using this area, we can map up to MAX_IO_APICS pages temporarily,
 * i.e. until the next __va_range() call.
 *
 * Important Safety Note:  The fixed I/O APIC page numbers are *subtracted*
 * from the fixed base.  That's why we start at FIX_IO_APIC_BASE_END and
 * count idx down while incrementing the phys address.
 */
static __init char *
__va_range(unsigned long phys, unsigned long size)
{
    unsigned long base, offset, mapped_size;
    int idx;

    offset = phys & (PAGE_SIZE - 1);
    mapped_size = PAGE_SIZE - offset;
    set_fixmap(FIX_IO_APIC_BASE_END, phys);
    base = fix_to_virt(FIX_IO_APIC_BASE_END);
    dprintk("__va_range(0x%lx, 0x%lx): idx=%d mapped at %lx\n", phys, size,
            FIX_IO_APIC_BASE_END, base);

    /*
     * Most cases can be covered by the below.
     */
    idx = FIX_IO_APIC_BASE_END;
    while (mapped_size < size) {
        if (--idx < FIX_IO_APIC_BASE_0)
            return 0;	/* cannot handle this */
        phys += PAGE_SIZE;
        set_fixmap(idx, phys);
        mapped_size += PAGE_SIZE;
    }

    return ((char *) base + offset);
}
Example #3
/*
 * Temporarily use the virtual area starting from FIX_IO_APIC_BASE_END,
 * to map the target physical address. The problem is that set_fixmap()
 * provides a single page, and it is possible that the page is not
 * sufficient.
 * By using this area, we can map up to MAX_IO_APICS pages temporarily,
 * i.e. until the next __va_range() call.
 *
 * Important Safety Note:  The fixed I/O APIC page numbers are *subtracted*
 * from the fixed base.  That's why we start at FIX_IO_APIC_BASE_END and
 * count idx down while incrementing the phys address.
 */
char *__acpi_map_table(unsigned long phys, unsigned long size)
{
	unsigned long base, offset, mapped_size;
	int idx;

	if (phys + size < 8*1024*1024) 
		return __va(phys); 

	offset = phys & (PAGE_SIZE - 1);
	mapped_size = PAGE_SIZE - offset;
	set_fixmap(FIX_ACPI_END, phys);
	base = fix_to_virt(FIX_ACPI_END);

	/*
	 * Most cases can be covered by the below.
	 */
	idx = FIX_ACPI_END;
	while (mapped_size < size) {
		if (--idx < FIX_ACPI_BEGIN)
			return 0;	/* cannot handle this */
		phys += PAGE_SIZE;
		set_fixmap(idx, phys);
		mapped_size += PAGE_SIZE;
	}

	return ((char *) base + offset);
}
Example #4
/*
 * Temporarily use the virtual area starting from FIX_IO_APIC_BASE_0,
 * to map the target physical address. The problem is that set_fixmap()
 * provides a single page, and it is possible that the page is not
 * sufficient.
 * By using this area, we can map up to MAX_IO_APICS pages temporarily,
 * i.e. until the next __va_range() call.
 */
static __inline__ char *
__va_range(unsigned long phys, unsigned long size)
{
    unsigned long base, offset, mapped_size, mapped_phys = phys;
    int idx = FIX_IO_APIC_BASE_0;

    offset = phys & (PAGE_SIZE - 1);
    mapped_size = PAGE_SIZE - offset;
    set_fixmap(idx, mapped_phys);
    base = fix_to_virt(FIX_IO_APIC_BASE_0);

    /*
     * Most cases can be covered by the below.
     */
    if (mapped_size >= size)
        return ((unsigned char *) base + offset);

    dprintk("__va_range: mapping more than a single page, size = 0x%lx\n",
            size);

    do {
        if (idx++ == FIX_IO_APIC_BASE_END)
            return 0;	/* cannot handle this */
        mapped_phys = mapped_phys + PAGE_SIZE;
        set_fixmap(idx, mapped_phys);
        mapped_size = mapped_size + PAGE_SIZE;
    } while (mapped_size < size);

    return ((char *) base + offset);
}
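Note: unlike the variants in Examples #1-#3 and #5, this older version walks idx upward from FIX_IO_APIC_BASE_0. Fixmap virtual addresses decrease as the index grows, so the extra pages mapped in the do/while loop are not virtually contiguous with base when size spans more than one page; the "Important Safety Note" in the other variants explains why they instead start at FIX_IO_APIC_BASE_END and count idx down while incrementing phys.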
Example #5
/*
 * Temporarily use the virtual area starting from FIX_IO_APIC_BASE_END,
 * to map the target physical address. The problem is that set_fixmap()
 * provides a single page, and it is possible that the page is not
 * sufficient.
 * By using this area, we can map up to MAX_IO_APICS pages temporarily,
 * i.e. until the next __va_range() call.
 *
 * Important Safety Note:  The fixed I/O APIC page numbers are *subtracted*
 * from the fixed base.  That's why we start at FIX_IO_APIC_BASE_END and
 * count idx down while incrementing the phys address.
 */
char *__acpi_map_table(unsigned long phys, unsigned long size)
{
	unsigned long base, offset, mapped_size;
	int idx;

	/* XEN: RAM holes above 1MB are not permanently mapped. */
	if ((phys + size) <= (1 * 1024 * 1024))
		return __va(phys);

	offset = phys & (PAGE_SIZE - 1);
	mapped_size = PAGE_SIZE - offset;
	set_fixmap(FIX_ACPI_END, phys);
	base = fix_to_virt(FIX_ACPI_END);

	/*
	 * Most cases can be covered by the below.
	 */
	idx = FIX_ACPI_END;
	while (mapped_size < size) {
		if (--idx < FIX_ACPI_BEGIN)
			return NULL;	/* cannot handle this */
		phys += PAGE_SIZE;
		set_fixmap(idx, phys);
		mapped_size += PAGE_SIZE;
	}

	return ((char *) base + offset);
}
Example #6
static void post_suspend(void)
{
	int i, j, k, fpp;
	extern unsigned long max_pfn;
	extern unsigned long *pfn_to_mfn_frame_list_list;
	extern unsigned long *pfn_to_mfn_frame_list[];

#ifdef CONFIG_SMP
	cpu_initialized_map = cpu_online_map;
#endif

	set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);

	HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);

	memset(empty_zero_page, 0, PAGE_SIZE);

	HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
		virt_to_mfn(pfn_to_mfn_frame_list_list);

	fpp = PAGE_SIZE/sizeof(unsigned long);
	for (i = 0, j = 0, k = -1; i < max_pfn; i += fpp, j++) {
		if ((j % fpp) == 0) {
			k++;
			pfn_to_mfn_frame_list_list[k] =
				virt_to_mfn(pfn_to_mfn_frame_list[k]);
			j = 0;
		}
		pfn_to_mfn_frame_list[k][j] =
			virt_to_mfn(&phys_to_machine_mapping[i]);
	}
	HYPERVISOR_shared_info->arch.max_pfn = max_pfn;
}
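A rough sizing sketch (illustrative only, assuming the x86-32 sizes this code was written for; the helper name and locals are not part of the original, DIV_ROUND_UP is the usual kernel macro): each pfn_to_mfn_frame_list[] page holds fpp entries and each entry covers one page of the P2M array, so one frame-list page describes fpp * fpp pfns.

/* Illustrative arithmetic only, not from the original function. */
static unsigned long frame_list_pages(unsigned long max_pfn)
{
	unsigned long fpp = PAGE_SIZE / sizeof(unsigned long);	/* 1024 on x86-32 */
	unsigned long p2m_pages = DIV_ROUND_UP(max_pfn, fpp);	/* pages of P2M entries */

	return DIV_ROUND_UP(p2m_pages, fpp);	/* frame-list pages needed */
}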
Example #7
File: tboot.c Project: amodj/Utopia
void __init tboot_probe(void)
{
    tboot_shared_t *tboot_shared;
    unsigned long p_tboot_shared;
    uint32_t map_base, map_size;
    unsigned long map_addr;

    /* Look for valid page-aligned address for shared page. */
    p_tboot_shared = simple_strtoul(opt_tboot, NULL, 0);
    if ( (p_tboot_shared == 0) || ((p_tboot_shared & ~PAGE_MASK) != 0) )
        return;

    /* Map and check for tboot UUID. */
    set_fixmap(FIX_TBOOT_SHARED_BASE, p_tboot_shared);
    tboot_shared = (tboot_shared_t *)fix_to_virt(FIX_TBOOT_SHARED_BASE);
    if ( tboot_shared == NULL )
        return;
    if ( memcmp(&tboot_shared_uuid, (uuid_t *)tboot_shared, sizeof(uuid_t)) )
        return;

    /* new tboot_shared (w/ GAS support, integrity, etc.) is not backwards
       compatible */
    if ( tboot_shared->version < 4 ) {
        printk("unsupported version of tboot (%u)\n", tboot_shared->version);
        return;
    }

    g_tboot_shared = tboot_shared;
    printk("TBOOT: found shared page at phys addr %lx:\n", p_tboot_shared);
    printk("  version: %d\n", tboot_shared->version);
    printk("  log_addr: 0x%08x\n", tboot_shared->log_addr);
    printk("  shutdown_entry: 0x%08x\n", tboot_shared->shutdown_entry);
    printk("  tboot_base: 0x%08x\n", tboot_shared->tboot_base);
    printk("  tboot_size: 0x%x\n", tboot_shared->tboot_size);

    /* these will be needed by tboot_protect_mem_regions() and/or
       tboot_parse_dmar_table(), so get them now */

    map_base = PFN_DOWN(TXT_PUB_CONFIG_REGS_BASE);
    map_size = PFN_UP(NR_TXT_CONFIG_PAGES * PAGE_SIZE);
    map_addr = (unsigned long)__va(map_base << PAGE_SHIFT);
    if ( map_pages_to_xen(map_addr, map_base, map_size, __PAGE_HYPERVISOR) )
        return;

    /* TXT Heap */
    txt_heap_base =
        *(uint64_t *)__va(TXT_PUB_CONFIG_REGS_BASE + TXTCR_HEAP_BASE);
    txt_heap_size =
        *(uint64_t *)__va(TXT_PUB_CONFIG_REGS_BASE + TXTCR_HEAP_SIZE);

    /* SINIT */
    sinit_base =
        *(uint64_t *)__va(TXT_PUB_CONFIG_REGS_BASE + TXTCR_SINIT_BASE);
    sinit_size =
        *(uint64_t *)__va(TXT_PUB_CONFIG_REGS_BASE + TXTCR_SINIT_SIZE);

    destroy_xen_mappings((unsigned long)__va(map_base << PAGE_SHIFT),
                         (unsigned long)__va((map_base + map_size) << PAGE_SHIFT));
}
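Example #8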
static __init void lithium_init(void)
{
	set_fixmap(FIX_LI_PCIA, LI_PCI_A_PHYS);
	set_fixmap(FIX_LI_PCIB, LI_PCI_B_PHYS);

	if ((li_pcia_read16(PCI_VENDOR_ID) != PCI_VENDOR_ID_SGI) ||
	    (li_pcia_read16(PCI_DEVICE_ID) != PCI_DEVICE_ID_SGI_LITHIUM)) {
		printk(KERN_EMERG "Lithium hostbridge %c not found\n", 'A');
/*		panic("This machine is not SGI Visual Workstation 320/540"); */
	}

	if ((li_pcib_read16(PCI_VENDOR_ID) != PCI_VENDOR_ID_SGI) ||
	    (li_pcib_read16(PCI_DEVICE_ID) != PCI_DEVICE_ID_SGI_LITHIUM)) {
		printk(KERN_EMERG "Lithium hostbridge %c not found\n", 'B');
/*		panic("This machine is not SGI Visual Workstation 320/540"); */
	}

	li_pcia_write16(LI_PCI_INTEN, ALLDEVS);
	li_pcib_write16(LI_PCI_INTEN, ALLDEVS);
}
Example #9
static __init void cobalt_init(void)
{
	/*
	 * On normal SMP PC this is used only with SMP, but we have to
	 * use it and set it up here to start the Cobalt clock
	 */
	set_fixmap(FIX_APIC_BASE, APIC_DEFAULT_PHYS_BASE);
	setup_local_APIC();
	printk(KERN_INFO "Local APIC Version %#lx, ID %#lx\n",
		apic_read(APIC_LVR), apic_read(APIC_ID));

	set_fixmap(FIX_CO_CPU, CO_CPU_PHYS);
	set_fixmap(FIX_CO_APIC, CO_APIC_PHYS);
	printk(KERN_INFO "Cobalt Revision %#lx, APIC ID %#lx\n",
		co_cpu_read(CO_CPU_REV), co_apic_read(CO_APIC_ID));

	/* Enable Cobalt APIC being careful to NOT change the ID! */
	co_apic_write(CO_APIC_ID, co_apic_read(CO_APIC_ID) | CO_APIC_ENABLE);

	printk(KERN_INFO "Cobalt APIC enabled: ID reg %#lx\n",
		co_apic_read(CO_APIC_ID));
}
Example #10
void __init tboot_probe(void)
{
    tboot_shared_t *tboot_shared;

    /* Look for valid page-aligned address for shared page. */
    if ( !opt_tboot_pa || (opt_tboot_pa & ~PAGE_MASK) )
        return;

    /* Map and check for tboot UUID. */
    set_fixmap(FIX_TBOOT_SHARED_BASE, opt_tboot_pa);
    tboot_shared = (tboot_shared_t *)fix_to_virt(FIX_TBOOT_SHARED_BASE);
    if ( tboot_shared == NULL )
        return;
    if ( memcmp(&tboot_shared_uuid, (uuid_t *)tboot_shared, sizeof(uuid_t)) )
        return;

    /* new tboot_shared (w/ GAS support, integrity, etc.) is not backwards
       compatible */
    if ( tboot_shared->version < 4 )
    {
        printk("unsupported version of tboot (%u)\n", tboot_shared->version);
        return;
    }

    g_tboot_shared = tboot_shared;
    printk("TBOOT: found shared page at phys addr %#lx:\n", opt_tboot_pa);
    printk("  version: %d\n", tboot_shared->version);
    printk("  log_addr: %#x\n", tboot_shared->log_addr);
    printk("  shutdown_entry: %#x\n", tboot_shared->shutdown_entry);
    printk("  tboot_base: %#x\n", tboot_shared->tboot_base);
    printk("  tboot_size: %#x\n", tboot_shared->tboot_size);
    if ( tboot_shared->version >= 6 )
        printk("  flags: %#x\n", tboot_shared->flags);

    /* these will be needed by tboot_protect_mem_regions() and/or
       tboot_parse_dmar_table(), so get them now */

    txt_heap_base = txt_heap_size = sinit_base = sinit_size = 0;
    /* TXT Heap */
    tboot_copy_memory((unsigned char *)&txt_heap_base, sizeof(txt_heap_base),
                      TXT_PUB_CONFIG_REGS_BASE + TXTCR_HEAP_BASE);
    tboot_copy_memory((unsigned char *)&txt_heap_size, sizeof(txt_heap_size),
                      TXT_PUB_CONFIG_REGS_BASE + TXTCR_HEAP_SIZE);
    /* SINIT */
    tboot_copy_memory((unsigned char *)&sinit_base, sizeof(sinit_base),
                      TXT_PUB_CONFIG_REGS_BASE + TXTCR_SINIT_BASE);
    tboot_copy_memory((unsigned char *)&sinit_size, sizeof(sinit_size),
                      TXT_PUB_CONFIG_REGS_BASE + TXTCR_SINIT_SIZE);
    clear_fixmap(FIX_TBOOT_MAP_ADDRESS);
}
Example #11
static void __init tboot_copy_memory(unsigned char *va, uint32_t size,
                                     unsigned long pa)
{
    unsigned long map_base = 0;
    unsigned char *map_addr = NULL;
    unsigned int i;

    for ( i = 0; i < size; i++ )
    {
        if ( map_base != PFN_DOWN(pa + i) )
        {
            map_base = PFN_DOWN(pa + i);
            set_fixmap(FIX_TBOOT_MAP_ADDRESS, map_base << PAGE_SHIFT);
            map_addr = (unsigned char *)fix_to_virt(FIX_TBOOT_MAP_ADDRESS);
        }
        va[i] = map_addr[pa + i - (map_base << PAGE_SHIFT)];
    }
}
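This is the helper called by the tboot_probe() variant in Example #10 above: it copies size bytes from physical address pa into va, remapping FIX_TBOOT_MAP_ADDRESS whenever the source crosses into a new page frame, so the TXT configuration registers can be read without keeping a permanent mapping.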
Example #12
void __init tboot_probe(void)
{
	/* Look for valid page-aligned address for shared page. */
	if (!boot_params.tboot_addr)
		return;
	/*
	 * also verify that it is mapped as we expect it before calling
	 * set_fixmap(), to reduce chance of garbage value causing crash
	 */
	if (!e820_any_mapped(boot_params.tboot_addr,
			     boot_params.tboot_addr, E820_RESERVED)) {
		pr_warning("non-0 tboot_addr but it is not of type E820_RESERVED\n");
		return;
	}

	/* only a natively booted kernel should be using TXT */
	if (paravirt_enabled()) {
		pr_warning("non-0 tboot_addr but pv_ops is enabled\n");
		return;
	}

	/* Map and check for tboot UUID. */
	set_fixmap(FIX_TBOOT_BASE, boot_params.tboot_addr);
	tboot = (struct tboot *)fix_to_virt(FIX_TBOOT_BASE);
	if (memcmp(&tboot_uuid, &tboot->uuid, sizeof(tboot->uuid))) {
		pr_warning("tboot at 0x%llx is invalid\n",
			   boot_params.tboot_addr);
		tboot = NULL;
		return;
	}
	if (tboot->version < 5) {
		pr_warning("tboot version is invalid: %u\n", tboot->version);
		tboot = NULL;
		return;
	}

	pr_info("found shared page at phys addr 0x%llx:\n",
		boot_params.tboot_addr);
	pr_debug("version: %d\n", tboot->version);
	pr_debug("log_addr: 0x%08x\n", tboot->log_addr);
	pr_debug("shutdown_entry: 0x%x\n", tboot->shutdown_entry);
	pr_debug("tboot_base: 0x%08x\n", tboot->tboot_base);
	pr_debug("tboot_size: 0x%x\n", tboot->tboot_size);
}
Example #13
void xen_setup_shared_info(void)
{
	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		set_fixmap(FIX_PARAVIRT_BOOTMAP,
			   xen_start_info->shared_info);

		HYPERVISOR_shared_info =
			(struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);
	} else
		HYPERVISOR_shared_info =
			(struct shared_info *)__va(xen_start_info->shared_info);

#ifndef CONFIG_SMP
	/* In UP this is as good a place as any to set up shared info */
	xen_setup_vcpu_info_placement();
#endif

	xen_setup_mfn_list_list();
}
Example #14
static int __do_suspend(void *ignore)
{
	int i, j, k, fpp, err;

	extern unsigned long max_pfn;
	extern unsigned long *pfn_to_mfn_frame_list_list;
	extern unsigned long *pfn_to_mfn_frame_list[];

	extern void time_resume(void);

	BUG_ON(smp_processor_id() != 0);
	BUG_ON(in_interrupt());

	if (xen_feature(XENFEAT_auto_translated_physmap)) {
		printk(KERN_WARNING "Cannot suspend in "
		       "auto_translated_physmap mode.\n");
		return -EOPNOTSUPP;
	}

	err = smp_suspend();
	if (err)
		return err;

	xenbus_suspend();

	preempt_disable();

#ifdef __i386__
	kmem_cache_shrink(pgd_cache);
#endif
	mm_pin_all();

	__cli();
	preempt_enable();

	gnttab_suspend();

	HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
	clear_fixmap(FIX_SHARED_INFO);

	xen_start_info->store_mfn = mfn_to_pfn(xen_start_info->store_mfn);
	xen_start_info->console_mfn = mfn_to_pfn(xen_start_info->console_mfn);

	/*
	 * We'll stop somewhere inside this hypercall. When it returns,
	 * we'll start resuming after the restore.
	 */
	HYPERVISOR_suspend(virt_to_mfn(xen_start_info));

	shutting_down = SHUTDOWN_INVALID;

	set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);

	HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);

	memset(empty_zero_page, 0, PAGE_SIZE);

	HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
		virt_to_mfn(pfn_to_mfn_frame_list_list);

	fpp = PAGE_SIZE/sizeof(unsigned long);
	for (i = 0, j = 0, k = -1; i < max_pfn; i += fpp, j++) {
		if ((j % fpp) == 0) {
			k++;
			pfn_to_mfn_frame_list_list[k] =
				virt_to_mfn(pfn_to_mfn_frame_list[k]);
			j = 0;
		}
		pfn_to_mfn_frame_list[k][j] =
			virt_to_mfn(&phys_to_machine_mapping[i]);
	}
	HYPERVISOR_shared_info->arch.max_pfn = max_pfn;

	gnttab_resume();

	irq_resume();

	time_resume();

	switch_idle_mm();

	__sti();

	xencons_resume();

	xenbus_resume();

	smp_resume();

	return err;
}
Example #15
void __init xen_start_kernel(void)
{
	unsigned int i;
	struct xen_machphys_mapping mapping;
	unsigned long machine_to_phys_nr_ents;
#ifdef CONFIG_X86_32
	struct xen_platform_parameters pp;
	extern pte_t swapper_pg_fixmap[PTRS_PER_PTE];
	unsigned long addr;
#endif

	xen_setup_features();

	if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
		machine_to_phys_mapping = (unsigned long *)mapping.v_start;
		machine_to_phys_nr_ents = mapping.max_mfn + 1;
	} else
		machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES;
	while ((1UL << machine_to_phys_order) < machine_to_phys_nr_ents )
		machine_to_phys_order++;

	if (!xen_feature(XENFEAT_auto_translated_physmap))
		phys_to_machine_mapping =
			(unsigned long *)xen_start_info->mfn_list;

	WARN_ON(HYPERVISOR_vm_assist(VMASST_CMD_enable,
				     VMASST_TYPE_writable_pagetables));

	reserve_early(ALIGN(__pa_symbol(&_end), PAGE_SIZE),
		      __pa(xen_start_info->pt_base)
		      + (xen_start_info->nr_pt_frames << PAGE_SHIFT),
		      "Xen provided");

#ifdef CONFIG_X86_32
	WARN_ON(HYPERVISOR_vm_assist(VMASST_CMD_enable,
				     VMASST_TYPE_4gb_segments));

	init_mm.pgd = swapper_pg_dir = (pgd_t *)xen_start_info->pt_base;

	if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0) {
		hypervisor_virt_start = pp.virt_start;
		reserve_top_address(0UL - pp.virt_start);
	}

	BUG_ON(pte_index(hypervisor_virt_start));

	/* Do an early initialization of the fixmap area */
	make_lowmem_page_readonly(swapper_pg_fixmap, XENFEAT_writable_page_tables);
	addr = __fix_to_virt(FIX_EARLYCON_MEM_BASE);
	set_pmd(pmd_offset(pud_offset(swapper_pg_dir + pgd_index(addr),
				      addr),
			   addr),
		__pmd(__pa_symbol(swapper_pg_fixmap) | _PAGE_TABLE));
#else
	check_efer();
	xen_init_pt();
#endif

#define __FIXADDR_TOP (-PAGE_SIZE)
#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
#define FIX_BUG_ON(fix) BUILD_BUG_ON(pmd_index(__fix_to_virt(FIX_##fix)) \
			!= pmd_index(__fix_to_virt(FIX_EARLYCON_MEM_BASE)))
	FIX_BUG_ON(SHARED_INFO);
	FIX_BUG_ON(ISAMAP_BEGIN);
	FIX_BUG_ON(ISAMAP_END);
#undef pmd_index
#undef __FIXADDR_TOP

	/* Switch to the real shared_info page, and clear the dummy page. */
	set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
	HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
	memset(empty_zero_page, 0, sizeof(empty_zero_page));

	setup_vcpu_info(0);

	/* Set up mapping of lowest 1MB of physical memory. */
	for (i = 0; i < NR_FIX_ISAMAPS; i++)
		if (is_initial_xendomain())
			set_fixmap(FIX_ISAMAP_BEGIN - i, i * PAGE_SIZE);
		else
			__set_fixmap(FIX_ISAMAP_BEGIN - i,
				     virt_to_machine(empty_zero_page),
				     PAGE_KERNEL_RO);

}