Example #1
/*
 * Temporarily use the virtual area starting from FIX_ACPI_END,
 * to map the target physical address. The problem is that set_fixmap()
 * provides a single page, and it is possible that the page is not
 * sufficient.
 * By using this area, we can map up to (FIX_ACPI_END - FIX_ACPI_BEGIN + 1)
 * pages temporarily, i.e. until the next __acpi_map_table() call.
 *
 * Important Safety Note:  The fixed ACPI page indices are *subtracted*
 * from the fixed base.  That's why we start at FIX_ACPI_END and
 * count idx down while incrementing the phys address.
 */
char *__init __acpi_map_table(unsigned long phys, unsigned long size)
{
	unsigned long base, offset, mapped_size;
	int idx;

	if (!phys || !size)
		return NULL;

	if (phys+size <= (max_low_pfn_mapped << PAGE_SHIFT))
		return __va(phys);

	offset = phys & (PAGE_SIZE - 1);
	mapped_size = PAGE_SIZE - offset;
	clear_fixmap(FIX_ACPI_END);
	set_fixmap(FIX_ACPI_END, phys);
	base = fix_to_virt(FIX_ACPI_END);

	/*
	 * Most cases can be covered by the below.
	 */
	idx = FIX_ACPI_END;
	while (mapped_size < size) {
		if (--idx < FIX_ACPI_BEGIN)
			return NULL;	/* cannot handle this */
		phys += PAGE_SIZE;
		clear_fixmap(idx);
		set_fixmap(idx, phys);
		mapped_size += PAGE_SIZE;
	}

	return ((unsigned char *)base + offset);
}
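A typical caller maps only the fixed-size table header first, reads the table's true length from it, and then calls the mapper again. A minimal sketch, assuming the standard struct acpi_table_header layout with a length field:

	struct acpi_table_header *hdr;

	/* map one page's worth first: enough for the header */
	hdr = (struct acpi_table_header *)__acpi_map_table(phys, sizeof(*hdr));
	if (!hdr)
		return NULL;

	/* remap with the real length; valid until the next call */
	return __acpi_map_table(phys, hdr->length);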
Example #2
/*
 * Temporarily use the virtual area starting from FIX_IO_APIC_BASE_END,
 * to map the target physical address. The problem is that set_fixmap()
 * provides a single page, and it is possible that the page is not
 * sufficient.
 * By using this area, we can map up to MAX_IO_APICS pages temporarily,
 * i.e. until the next __va_range() call.
 *
 * Important Safety Note:  The fixed I/O APIC page numbers are *subtracted*
 * from the fixed base.  That's why we start at FIX_IO_APIC_BASE_END and
 * count idx down while incrementing the phys address.
 */
static __init char *
__va_range(unsigned long phys, unsigned long size)
{
    unsigned long base, offset, mapped_size;
    int idx;

    offset = phys & (PAGE_SIZE - 1);
    mapped_size = PAGE_SIZE - offset;
    set_fixmap(FIX_IO_APIC_BASE_END, phys);
    base = fix_to_virt(FIX_IO_APIC_BASE_END);
    dprintk("__va_range(0x%lx, 0x%lx): idx=%d mapped at %lx\n", phys, size,
            FIX_IO_APIC_BASE_END, base);

    /*
     * Most cases can be covered by the below.
     */
    idx = FIX_IO_APIC_BASE_END;
    while (mapped_size < size) {
        if (--idx < FIX_IO_APIC_BASE_0)
            return 0;	/* cannot handle this */
        phys += PAGE_SIZE;
        set_fixmap(idx, phys);
        mapped_size += PAGE_SIZE;
    }

    return ((unsigned char *) base + offset);
}
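The "Important Safety Note" follows directly from how a fixmap index is turned into a virtual address: slots are laid out downward from FIXADDR_TOP, so a larger index means a smaller address. A sketch of the usual definition (Linux's real macro is equivalent):

#define fix_to_virt(idx)	(FIXADDR_TOP - ((idx) << PAGE_SHIFT))

/*
 * Hence consecutive physical pages need *decreasing* indices:
 * fix_to_virt(idx - 1) == fix_to_virt(idx) + PAGE_SIZE
 */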
Example #3
/*
 * Map the IGD MMIO+0x2000 page to allow Xen to access the IGD 3D registers.
 */
static void *map_igd_reg(void)
{
    u64 igd_mmio, igd_reg;

    if ( !is_cantiga_b3 && !is_snb_gfx )
        return NULL;

    if ( igd_reg_va )
        return igd_reg_va;

    /* get IGD mmio address in PCI BAR */
    igd_mmio = ((u64)pci_conf_read32(0, IGD_DEV, 0, 0x14) << 32) +
                     pci_conf_read32(0, IGD_DEV, 0, 0x10);

    /* offset of the IGD register we want to access is in the 0x2000 range */
    igd_reg = (igd_mmio & IGD_BAR_MASK) + 0x2000;

    /* ioremap this physical page */
#if defined(CONFIG_X86)
    set_fixmap_nocache(FIX_IGD_MMIO, igd_reg);
    igd_reg_va = (u8 *)fix_to_virt(FIX_IGD_MMIO);
#else
    igd_reg_va = ioremap_nocache(igd_reg, 0x100);
#endif
    return igd_reg_va;
}
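Once mapped, callers read the 3D registers through the cached pointer; a hedged usage sketch (the 0x58 offset is purely illustrative, not a documented register):

    u8 *base = map_igd_reg();
    u32 val;

    if ( base != NULL )
        val = readl(base + 0x58);   /* register at IGD MMIO + 0x2000 + 0x58 */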
Example #4
static force_inline void do_vgettimeofday(struct timeval * tv)
{
	long sequence, t;
	unsigned long sec, usec;

	do {
		sequence = __vxtime_sequence[1];
		rmb();

		sec = __xtime.tv_sec;
		usec = __xtime.tv_usec + (__jiffies - __wall_jiffies) * (1000000 / HZ);

		switch (__vxtime.mode) {

			case VXTIME_TSC:
				sync_core();
				rdtscll(t);
				usec += (((t  - __vxtime.last_tsc) * __vxtime.tsc_quot) >> 32);
				break;

			case VXTIME_HPET:
				usec += ((readl(fix_to_virt(VSYSCALL_HPET) + 0xf0) - __vxtime.last) * __vxtime.quot) >> 32;
				break;

		}

		rmb();
	} while (sequence != __vxtime_sequence[0]);

	tv->tv_sec = sec + usec / 1000000;
	tv->tv_usec = usec % 1000000;
}
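The two-element __vxtime_sequence array is a hand-rolled sequence lock: the reader above snapshots element [1], reads the time variables, and retries if element [0] has moved on. For that to be race-free, the timer-interrupt writer must bracket its update with the two increments; one workable sketch of the write side:

	/* writer side (timer interrupt), sketched */
	__vxtime_sequence[0]++;
	wmb();

	/* ... update __xtime, __wall_jiffies, __vxtime.last_tsc ... */

	wmb();
	__vxtime_sequence[1]++;		/* counters match again: readers succeed */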
Example #5
/*
 * Temporarily use the virtual area starting from FIX_IO_APIC_BASE_0,
 * to map the target physical address. The problem is that set_fixmap()
 * provides a single page, and it is possible that the page is not
 * sufficient.
 * By using this area, we can map up to MAX_IO_APICS pages temporarily,
 * i.e. until the next __va_range() call.
 */
static __inline__ char *
__va_range(unsigned long phys, unsigned long size)
{
    unsigned long base, offset, mapped_size, mapped_phys = phys;
    int idx = FIX_IO_APIC_BASE_0;

    offset = phys & (PAGE_SIZE - 1);
    mapped_size = PAGE_SIZE - offset;
    set_fixmap(idx, mapped_phys);
    base = fix_to_virt(FIX_IO_APIC_BASE_0);

    /*
     * Most cases can be covered by the below.
     */
    if (mapped_size >= size)
        return ((unsigned char *) base + offset);

    dprintk("__va_range: mapping more than a single page, size = 0x%lx\n",
            size);

    do {
        if (idx++ == FIX_IO_APIC_BASE_END)
            return 0;	/* cannot handle this */
        mapped_phys = mapped_phys + PAGE_SIZE;
        set_fixmap(idx, mapped_phys);
        mapped_size = mapped_size + PAGE_SIZE;
    } while (mapped_size < size);

    return ((unsigned char *) base + offset);
}
Example #6
/*
 * Temporarily use the virtual area starting from FIX_ACPI_END,
 * to map the target physical address. The problem is that set_fixmap()
 * provides a single page, and it is possible that the page is not
 * sufficient.
 * By using this area, we can map up to (FIX_ACPI_END - FIX_ACPI_BEGIN + 1)
 * pages temporarily, i.e. until the next __acpi_map_table() call.
 *
 * Important Safety Note:  The fixed ACPI page indices are *subtracted*
 * from the fixed base.  That's why we start at FIX_ACPI_END and
 * count idx down while incrementing the phys address.
 */
char *__acpi_map_table(unsigned long phys, unsigned long size)
{
	unsigned long base, offset, mapped_size;
	int idx;

	/* XEN: RAM holes above 1MB are not permanently mapped. */
	if ((phys + size) <= (1 * 1024 * 1024))
		return __va(phys);

	offset = phys & (PAGE_SIZE - 1);
	mapped_size = PAGE_SIZE - offset;
	set_fixmap(FIX_ACPI_END, phys);
	base = fix_to_virt(FIX_ACPI_END);

	/*
	 * Most cases can be covered by the below.
	 */
	idx = FIX_ACPI_END;
	while (mapped_size < size) {
		if (--idx < FIX_ACPI_BEGIN)
			return NULL;	/* cannot handle this */
		phys += PAGE_SIZE;
		set_fixmap(idx, phys);
		mapped_size += PAGE_SIZE;
	}

	return ((char *) base + offset);
}
Example #7
/*
 * Temporarily use the virtual area starting from FIX_ACPI_END,
 * to map the target physical address. The problem is that set_fixmap()
 * provides a single page, and it is possible that the page is not
 * sufficient.
 * By using this area, we can map up to (FIX_ACPI_END - FIX_ACPI_BEGIN + 1)
 * pages temporarily, i.e. until the next __acpi_map_table() call.
 *
 * Important Safety Note:  The fixed ACPI page indices are *subtracted*
 * from the fixed base.  That's why we start at FIX_ACPI_END and
 * count idx down while incrementing the phys address.
 */
char *__acpi_map_table(unsigned long phys, unsigned long size)
{
	unsigned long base, offset, mapped_size;
	int idx;

	if (phys + size < 8*1024*1024) 
		return __va(phys); 

	offset = phys & (PAGE_SIZE - 1);
	mapped_size = PAGE_SIZE - offset;
	set_fixmap(FIX_ACPI_END, phys);
	base = fix_to_virt(FIX_ACPI_END);

	/*
	 * Most cases can be covered by the below.
	 */
	idx = FIX_ACPI_END;
	while (mapped_size < size) {
		if (--idx < FIX_ACPI_BEGIN)
			return 0;	/* cannot handle this */
		phys += PAGE_SIZE;
		set_fixmap(idx, phys);
		mapped_size += PAGE_SIZE;
	}

	return ((unsigned char *) base + offset);
}
Example #8
static void post_suspend(void)
{
	int i, j, k, fpp;
	extern unsigned long max_pfn;
	extern unsigned long *pfn_to_mfn_frame_list_list;
	extern unsigned long *pfn_to_mfn_frame_list[];

#ifdef CONFIG_SMP
	cpu_initialized_map = cpu_online_map;
#endif

	set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);

	HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);

	memset(empty_zero_page, 0, PAGE_SIZE);

	HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
		virt_to_mfn(pfn_to_mfn_frame_list_list);

	fpp = PAGE_SIZE/sizeof(unsigned long);
	for (i = 0, j = 0, k = -1; i < max_pfn; i += fpp, j++) {
		if ((j % fpp) == 0) {
			k++;
			pfn_to_mfn_frame_list_list[k] =
				virt_to_mfn(pfn_to_mfn_frame_list[k]);
			j = 0;
		}
		pfn_to_mfn_frame_list[k][j] =
			virt_to_mfn(&phys_to_machine_mapping[i]);
	}
	HYPERVISOR_shared_info->arch.max_pfn = max_pfn;
}
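The nested lists form a small radix tree over the P2M table: each page of phys_to_machine_mapping covers fpp pfns, each page of pfn_to_mfn_frame_list[] holds the MFNs of fpp such P2M pages, and pfn_to_mfn_frame_list_list holds the MFNs of the frame-list pages themselves. A sketch of the capacity arithmetic, assuming 4 KiB pages and 4-byte entries:

	unsigned long fpp = PAGE_SIZE / sizeof(unsigned long);  /* 1024 on x86-32 */
	unsigned long p2m_pages  = (max_pfn + fpp - 1) / fpp;   /* leaf pages */
	unsigned long list_pages = (p2m_pages + fpp - 1) / fpp; /* frame-list pages */

	/* one list-of-lists page can thus describe fpp * fpp * fpp pfns */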
Example #9
/*
 * paging_init() sets up the page tables - note that all of lowmem is
 * already mapped by head.S.
 */
void __init paging_init(void)
{
#ifdef __tilegx__
	pud_t *pud;
#endif
	pgd_t *pgd_base = swapper_pg_dir;

	kernel_physical_mapping_init(pgd_base);

	/* Fixed mappings, only the page table structure has to be created. */
	page_table_range_init(fix_to_virt(__end_of_fixed_addresses - 1),
			      FIXADDR_TOP, pgd_base);

#ifdef CONFIG_HIGHMEM
	permanent_kmaps_init(pgd_base);
#endif

#ifdef __tilegx__
	/*
	 * Since GX allocates just one pmd_t array worth of vmalloc space,
	 * we go ahead and allocate it statically here, then share it
	 * globally.  As a result we don't have to worry about any task
	 * changing init_mm once we get up and running, and there's no
	 * need for e.g. vmalloc_sync_all().
	 */
	BUILD_BUG_ON(pgd_index(VMALLOC_START) != pgd_index(VMALLOC_END - 1));
	pud = pud_offset(pgd_base + pgd_index(VMALLOC_START), VMALLOC_START);
	assign_pmd(pud, alloc_pmd());
#endif
}
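page_table_range_init() needs only the boundaries of the fixmap area, which follow from the downward index-to-address scheme; a sketch matching the usual Linux constants:

#define FIXADDR_SIZE	(__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START	(FIXADDR_TOP - FIXADDR_SIZE)

/*
 * fix_to_virt(__end_of_fixed_addresses - 1) is the lowest fixmap slot,
 * so the call above pre-builds page tables for [lowest slot, FIXADDR_TOP).
 */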
Example #10
void __init tboot_probe(void)
{
    tboot_shared_t *tboot_shared;
    unsigned long p_tboot_shared;
    uint32_t map_base, map_size;
    unsigned long map_addr;

    /* Look for valid page-aligned address for shared page. */
    p_tboot_shared = simple_strtoul(opt_tboot, NULL, 0);
    if ( (p_tboot_shared == 0) || ((p_tboot_shared & ~PAGE_MASK) != 0) )
        return;

    /* Map and check for tboot UUID. */
    set_fixmap(FIX_TBOOT_SHARED_BASE, p_tboot_shared);
    tboot_shared = (tboot_shared_t *)fix_to_virt(FIX_TBOOT_SHARED_BASE);
    if ( tboot_shared == NULL )
        return;
    if ( memcmp(&tboot_shared_uuid, (uuid_t *)tboot_shared, sizeof(uuid_t)) )
        return;

    /* new tboot_shared (w/ GAS support, integrity, etc.) is not backwards
       compatible */
    if ( tboot_shared->version < 4 ) {
        printk("unsupported version of tboot (%u)\n", tboot_shared->version);
        return;
    }

    g_tboot_shared = tboot_shared;
    printk("TBOOT: found shared page at phys addr %lx:\n", p_tboot_shared);
    printk("  version: %d\n", tboot_shared->version);
    printk("  log_addr: 0x%08x\n", tboot_shared->log_addr);
    printk("  shutdown_entry: 0x%08x\n", tboot_shared->shutdown_entry);
    printk("  tboot_base: 0x%08x\n", tboot_shared->tboot_base);
    printk("  tboot_size: 0x%x\n", tboot_shared->tboot_size);

    /* these will be needed by tboot_protect_mem_regions() and/or
       tboot_parse_dmar_table(), so get them now */

    map_base = PFN_DOWN(TXT_PUB_CONFIG_REGS_BASE);
    map_size = PFN_UP(NR_TXT_CONFIG_PAGES * PAGE_SIZE);
    map_addr = (unsigned long)__va(map_base << PAGE_SHIFT);
    if ( map_pages_to_xen(map_addr, map_base, map_size, __PAGE_HYPERVISOR) )
        return;

    /* TXT Heap */
    txt_heap_base =
        *(uint64_t *)__va(TXT_PUB_CONFIG_REGS_BASE + TXTCR_HEAP_BASE);
    txt_heap_size =
        *(uint64_t *)__va(TXT_PUB_CONFIG_REGS_BASE + TXTCR_HEAP_SIZE);

    /* SINIT */
    sinit_base =
        *(uint64_t *)__va(TXT_PUB_CONFIG_REGS_BASE + TXTCR_SINIT_BASE);
    sinit_size =
        *(uint64_t *)__va(TXT_PUB_CONFIG_REGS_BASE + TXTCR_SINIT_SIZE);

    destroy_xen_mappings((unsigned long)__va(map_base << PAGE_SHIFT),
                         (unsigned long)__va((map_base + map_size) << PAGE_SHIFT));
}
Example #11
static __init int late_hpet_init(void)
{
	struct hpet_data	hd;
	unsigned int 		ntimer;

	if (!hpet_address)
        	return 0;

	memset(&hd, 0, sizeof(hd));

	ntimer = hpet_readl(HPET_ID);
	ntimer = (ntimer & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT;
	ntimer++;

	/*
	 * Register with driver.
	 * Timer0 and Timer1 are used by the platform.
	 */
	hd.hd_phys_address = hpet_address;
	hd.hd_address = (void __iomem *)fix_to_virt(FIX_HPET_BASE);
	hd.hd_nirqs = ntimer;
	hd.hd_flags = HPET_DATA_PLATFORM;
	hpet_reserve_timer(&hd, 0);
#ifdef	CONFIG_HPET_EMULATE_RTC
	hpet_reserve_timer(&hd, 1);
#endif
	hd.hd_irq[0] = HPET_LEGACY_8254;
	hd.hd_irq[1] = HPET_LEGACY_RTC;
	if (ntimer > 2) {
		struct hpet		*hpet;
		struct hpet_timer	*timer;
		int			i;

		hpet = (struct hpet *) fix_to_virt(FIX_HPET_BASE);
		timer = &hpet->hpet_timers[2];
		for (i = 2; i < ntimer; timer++, i++)
			hd.hd_irq[i] = (timer->hpet_config &
					Tn_INT_ROUTE_CNF_MASK) >>
				Tn_INT_ROUTE_CNF_SHIFT;

	}

	hpet_alloc(&hd);
	return 0;
}
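Both fix_to_virt(FIX_HPET_BASE) uses above rely on the HPET page having been mapped at boot; the accessor itself is a one-liner over the fixmap. A hedged sketch in the spirit of the old x86-64 definition, with 0xf0 being the main-counter offset that the vsyscall examples in this list read directly:

#define hpet_readl(offset) \
	readl((void __iomem *)fix_to_virt(FIX_HPET_BASE) + (offset))

/* HPET_ID sits at offset 0x0; the main counter at offset 0xf0 */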
Example #12
static void __cpuinit trap_init_f00f_bug(void)
{
	__set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);

	/*
	 * Update the IDT descriptor and reload the IDT so that
	 * it uses the read-only mapped virtual address.
	 */
	idt_descr.address = fix_to_virt(FIX_F00F_IDT);
	load_idt(&idt_descr);
}
Example #13
void xen_arch_pre_suspend(void)
{
	xen_start_info->store_mfn = mfn_to_pfn(xen_start_info->store_mfn);
	xen_start_info->console.domU.mfn =
		mfn_to_pfn(xen_start_info->console.domU.mfn);

	BUG_ON(!irqs_disabled());

	HYPERVISOR_shared_info = &xen_dummy_shared_info;
	if (HYPERVISOR_update_va_mapping(fix_to_virt(FIX_PARAVIRT_BOOTMAP),
					 __pte_ma(0), 0))
		BUG();
}
Example #14
void __init tboot_probe(void)
{
    tboot_shared_t *tboot_shared;

    /* Look for valid page-aligned address for shared page. */
    if ( !opt_tboot_pa || (opt_tboot_pa & ~PAGE_MASK) )
        return;

    /* Map and check for tboot UUID. */
    set_fixmap(FIX_TBOOT_SHARED_BASE, opt_tboot_pa);
    tboot_shared = (tboot_shared_t *)fix_to_virt(FIX_TBOOT_SHARED_BASE);
    if ( tboot_shared == NULL )
        return;
    if ( memcmp(&tboot_shared_uuid, (uuid_t *)tboot_shared, sizeof(uuid_t)) )
        return;

    /* new tboot_shared (w/ GAS support, integrity, etc.) is not backwards
       compatible */
    if ( tboot_shared->version < 4 )
    {
        printk("unsupported version of tboot (%u)\n", tboot_shared->version);
        return;
    }

    g_tboot_shared = tboot_shared;
    printk("TBOOT: found shared page at phys addr %#lx:\n", opt_tboot_pa);
    printk("  version: %d\n", tboot_shared->version);
    printk("  log_addr: %#x\n", tboot_shared->log_addr);
    printk("  shutdown_entry: %#x\n", tboot_shared->shutdown_entry);
    printk("  tboot_base: %#x\n", tboot_shared->tboot_base);
    printk("  tboot_size: %#x\n", tboot_shared->tboot_size);
    if ( tboot_shared->version >= 6 )
        printk("  flags: %#x\n", tboot_shared->flags);

    /* these will be needed by tboot_protect_mem_regions() and/or
       tboot_parse_dmar_table(), so get them now */

    txt_heap_base = txt_heap_size = sinit_base = sinit_size = 0;
    /* TXT Heap */
    tboot_copy_memory((unsigned char *)&txt_heap_base, sizeof(txt_heap_base),
                      TXT_PUB_CONFIG_REGS_BASE + TXTCR_HEAP_BASE);
    tboot_copy_memory((unsigned char *)&txt_heap_size, sizeof(txt_heap_size),
                      TXT_PUB_CONFIG_REGS_BASE + TXTCR_HEAP_SIZE);
    /* SINIT */
    tboot_copy_memory((unsigned char *)&sinit_base, sizeof(sinit_base),
                      TXT_PUB_CONFIG_REGS_BASE + TXTCR_SINIT_BASE);
    tboot_copy_memory((unsigned char *)&sinit_size, sizeof(sinit_size),
                      TXT_PUB_CONFIG_REGS_BASE + TXTCR_SINIT_SIZE);
    clear_fixmap(FIX_TBOOT_MAP_ADDRESS);
}
Example #15
/**
 * Creates a kernel mapping for the local APIC.
 *
 * The hardware/platform/BIOS maps each CPU's local APIC at the same location
 * in physical memory. This function uses the 'fixmap' to map the local APIC
 * into the kernel's virtual memory space at a fixed virtual address that is
 * known at compile time. Since the local APIC's virtual address is known
 * at compile time, local APIC registers can be accessed directly, without
 * any pointer dereferencing.
 */
void __init
lapic_map(void)
{
	if (!cpu_has_apic)
		panic("No local APIC.");

	/* Reserve physical memory used by the local APIC */
	lapic_resource.start = lapic_phys_addr;
	lapic_resource.end   = lapic_phys_addr + 4096 - 1;
	request_resource(&iomem_resource, &lapic_resource);

	/* Map local APIC into the kernel */ 
	set_fixmap_nocache(FIX_APIC_BASE, lapic_phys_addr);

	printk(KERN_DEBUG "Local APIC mapped to virtual address 0x%016lx\n",
	                  fix_to_virt(FIX_APIC_BASE));
}
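This is what the docstring means by register access "without any pointer dereferencing": because fix_to_virt(FIX_APIC_BASE) folds to a compile-time constant, accessors compile to loads and stores at immediate addresses. A sketch in the style of the classic Linux apic.h:

#define APIC_BASE	fix_to_virt(FIX_APIC_BASE)

static inline unsigned int apic_read(unsigned long reg)
{
	return *((volatile unsigned int *)(APIC_BASE + reg));
}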
Example #16
static void __init tboot_copy_memory(unsigned char *va, uint32_t size,
                                     unsigned long pa)
{
    unsigned long map_base = 0;
    unsigned char *map_addr = NULL;
    unsigned int i;

    for ( i = 0; i < size; i++ )
    {
        if ( map_base != PFN_DOWN(pa + i) )
        {
            map_base = PFN_DOWN(pa + i);
            set_fixmap(FIX_TBOOT_MAP_ADDRESS, map_base << PAGE_SHIFT);
            map_addr = (unsigned char *)fix_to_virt(FIX_TBOOT_MAP_ADDRESS);
        }
        va[i] = map_addr[pa + i - (map_base << PAGE_SHIFT)];
    }
}
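Example #14 above shows the intended usage: pulling small TXT configuration registers out of a physical range that is not permanently mapped, one fixmap page at a time, then dropping the temporary mapping:

    uint64_t txt_heap_base = 0;

    tboot_copy_memory((unsigned char *)&txt_heap_base, sizeof(txt_heap_base),
                      TXT_PUB_CONFIG_REGS_BASE + TXTCR_HEAP_BASE);
    clear_fixmap(FIX_TBOOT_MAP_ADDRESS);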
Example #17
void __init tboot_probe(void)
{
	/* Look for valid page-aligned address for shared page. */
	if (!boot_params.tboot_addr)
		return;
	/*
	 * also verify that it is mapped as we expect it before calling
	 * set_fixmap(), to reduce chance of garbage value causing crash
	 */
	if (!e820_any_mapped(boot_params.tboot_addr,
			     boot_params.tboot_addr, E820_RESERVED)) {
		pr_warning("non-0 tboot_addr but it is not of type E820_RESERVED\n");
		return;
	}

	/* only a natively booted kernel should be using TXT */
	if (paravirt_enabled()) {
		pr_warning("non-0 tboot_addr but pv_ops is enabled\n");
		return;
	}

	/* Map and check for tboot UUID. */
	set_fixmap(FIX_TBOOT_BASE, boot_params.tboot_addr);
	tboot = (struct tboot *)fix_to_virt(FIX_TBOOT_BASE);
	if (memcmp(&tboot_uuid, &tboot->uuid, sizeof(tboot->uuid))) {
		pr_warning("tboot at 0x%llx is invalid\n",
			   boot_params.tboot_addr);
		tboot = NULL;
		return;
	}
	if (tboot->version < 5) {
		pr_warning("tboot version is invalid: %u\n", tboot->version);
		tboot = NULL;
		return;
	}

	pr_info("found shared page at phys addr 0x%llx:\n",
		boot_params.tboot_addr);
	pr_debug("version: %d\n", tboot->version);
	pr_debug("log_addr: 0x%08x\n", tboot->log_addr);
	pr_debug("shutdown_entry: 0x%x\n", tboot->shutdown_entry);
	pr_debug("tboot_base: 0x%08x\n", tboot->tboot_base);
	pr_debug("tboot_size: 0x%x\n", tboot->tboot_size);
}
Example #18
void xen_setup_shared_info(void)
{
	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		set_fixmap(FIX_PARAVIRT_BOOTMAP,
			   xen_start_info->shared_info);

		HYPERVISOR_shared_info =
			(struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);
	} else
		HYPERVISOR_shared_info =
			(struct shared_info *)__va(xen_start_info->shared_info);

#ifndef CONFIG_SMP
	/* In UP this is as good a place as any to set up shared info */
	xen_setup_vcpu_info_placement();
#endif

	xen_setup_mfn_list_list();
}
Example #19
void __init init_entry_mappings(void)
{
#if CONFIG_X86_HIGH_ENTRY

	void *tramp;

	/*
	 * We need a high IDT and GDT for the 4G/4G split:
	 */
	trap_init_virtual_IDT();

	__set_fixmap(FIX_ENTRY_TRAMPOLINE, __pa((unsigned long)&entry_tramp_start), PAGE_KERNEL_EXEC);
	tramp = (void *)fix_to_virt(FIX_ENTRY_TRAMPOLINE);

	printk("mapped 4G/4G trampoline to %p.\n", tramp);
	/*
	 * Virtual kernel stack:
	 */
	BUG_ON(__kmap_atomic_vaddr(KM_VSTACK0) & 8191);
	BUG_ON(sizeof(struct desc_struct)*NR_CPUS*GDT_ENTRIES > 2*PAGE_SIZE);
	BUG_ON((unsigned int)&entry_tramp_end - (unsigned int)&entry_tramp_start > PAGE_SIZE);

	/*
	 * set up the initial thread's virtual stack related
	 * fields:
	 */
	current->thread.stack_page0 = virt_to_page((char *)current);
	current->thread.stack_page1 = virt_to_page((char *)current + PAGE_SIZE);
	current->virtual_stack = (void *)__kmap_atomic_vaddr(KM_VSTACK0);

	__kunmap_atomic_type(KM_VSTACK0);
	__kunmap_atomic_type(KM_VSTACK1);
	__kmap_atomic(current->thread.stack_page0, KM_VSTACK0);
	__kmap_atomic(current->thread.stack_page1, KM_VSTACK1);

	return_path_start = ENTRY_TRAMP_ADDR(&return_path_start_marker);
	return_path_end = ENTRY_TRAMP_ADDR(&return_path_end_marker);
#endif
	current->real_stack = (void *)current;
	current->user_pgd = NULL;
	current->thread.esp0 = (unsigned long)current->real_stack + THREAD_SIZE;

}
Example #20
/*
 * This function cannot be __init, since exceptions don't work in that
 * section.  Put this after the callers, so that it cannot be inlined.
 */
static int noinline do_test_wp_bit(void)
{
	char tmp_reg;
	int flag;

	__asm__ __volatile__(
		"	movb %0,%1	\n"
		"1:	movb %1,%0	\n"
		"	xorl %2,%2	\n"
		"2:			\n"
		".section __ex_table,\"a\"\n"
		"	.align 4	\n"
		"	.long 1b,2b	\n"
		".previous		\n"
		:"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
		 "=q" (tmp_reg),
		 "=r" (flag)
		:"2" (1)
		:"memory");
	
	return flag;
}
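In outline: flag is preset to 1, the asm writes a byte back to the read-only FIX_WP_TEST page, and the __ex_table entry makes a fault at label 1 resume at label 2, skipping the xorl that clears flag. So a fault (WP honored in ring 0) returns 1, a silent write returns 0. A C-level paraphrase of the control flow (the fault fixup itself cannot be expressed in plain C):

	int flag = 1;			/* assume WP works */
	char *p = (char *)fix_to_virt(FIX_WP_TEST);

	*p = *p;			/* faults iff the WP bit is honored */
	flag = 0;			/* skipped via exception fixup on a fault */
	return flag;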
Example #21
static __always_inline void do_vgettimeofday(struct timeval * tv)
{
	long sequence, t;
	unsigned long sec, usec;

	do {
		sequence = read_seqbegin(&__xtime_lock);
		
		sec = __xtime.tv_sec;
		usec = (__xtime.tv_nsec / 1000) +
			(__jiffies - __wall_jiffies) * (1000000 / HZ);

		if (__vxtime.mode != VXTIME_HPET) {
			t = get_cycles_sync();
			if (t < __vxtime.last_tsc)
				t = __vxtime.last_tsc;
			usec += ((t - __vxtime.last_tsc) *
				 __vxtime.tsc_quot) >> 32;
			/* See comment in x86_64 do_gettimeofday. */
		} else {
			usec += ((readl((void *)fix_to_virt(VSYSCALL_HPET) + 0xf0) -
				  __vxtime.last) * __vxtime.quot) >> 32;
		}
	} while (read_seqretry(&__xtime_lock, sequence));

	tv->tv_sec = sec + usec / 1000000;
	tv->tv_usec = usec % 1000000;
}
Example #22
static cycle_t __vsyscall_fn vread_hpet(void)
{
	return readl((void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
}
Example #23
void __dummy__(void)
{
    OFFSET(UREGS_eax, struct cpu_user_regs, eax);
    OFFSET(UREGS_ebx, struct cpu_user_regs, ebx);
    OFFSET(UREGS_ecx, struct cpu_user_regs, ecx);
    OFFSET(UREGS_edx, struct cpu_user_regs, edx);
    OFFSET(UREGS_esi, struct cpu_user_regs, esi);
    OFFSET(UREGS_edi, struct cpu_user_regs, edi);
    OFFSET(UREGS_esp, struct cpu_user_regs, esp);
    OFFSET(UREGS_ebp, struct cpu_user_regs, ebp);
    OFFSET(UREGS_eip, struct cpu_user_regs, eip);
    OFFSET(UREGS_cs, struct cpu_user_regs, cs);
    OFFSET(UREGS_ds, struct cpu_user_regs, ds);
    OFFSET(UREGS_es, struct cpu_user_regs, es);
    OFFSET(UREGS_fs, struct cpu_user_regs, fs);
    OFFSET(UREGS_gs, struct cpu_user_regs, gs);
    OFFSET(UREGS_ss, struct cpu_user_regs, ss);
    OFFSET(UREGS_eflags, struct cpu_user_regs, eflags);
    OFFSET(UREGS_error_code, struct cpu_user_regs, error_code);
    OFFSET(UREGS_entry_vector, struct cpu_user_regs, entry_vector);
    OFFSET(UREGS_saved_upcall_mask, struct cpu_user_regs, saved_upcall_mask);
    OFFSET(UREGS_kernel_sizeof, struct cpu_user_regs, esp);
    DEFINE(UREGS_user_sizeof, sizeof(struct cpu_user_regs));
    BLANK();

    OFFSET(VCPU_processor, struct vcpu, processor);
    OFFSET(VCPU_vcpu_info, struct vcpu, vcpu_info);
    OFFSET(VCPU_trap_bounce, struct vcpu, arch.trap_bounce);
    OFFSET(VCPU_thread_flags, struct vcpu, arch.flags);
    OFFSET(VCPU_event_sel, struct vcpu,
           arch.guest_context.event_callback_cs);
    OFFSET(VCPU_event_addr, struct vcpu, 
           arch.guest_context.event_callback_eip);
    OFFSET(VCPU_failsafe_sel, struct vcpu,
           arch.guest_context.failsafe_callback_cs);
    OFFSET(VCPU_failsafe_addr, struct vcpu,
           arch.guest_context.failsafe_callback_eip);
    OFFSET(VCPU_kernel_ss, struct vcpu,
           arch.guest_context.kernel_ss);
    OFFSET(VCPU_kernel_sp, struct vcpu,
           arch.guest_context.kernel_sp);
    OFFSET(VCPU_guest_context_flags, struct vcpu, arch.guest_context.flags);
    OFFSET(VCPU_nmi_pending, struct vcpu, nmi_pending);
    OFFSET(VCPU_nmi_masked, struct vcpu, nmi_masked);
    DEFINE(_VGCF_failsafe_disables_events, _VGCF_failsafe_disables_events);
    BLANK();

    OFFSET(TSS_ss0, struct tss_struct, ss0);
    OFFSET(TSS_esp0, struct tss_struct, esp0);
    OFFSET(TSS_ss1, struct tss_struct, ss1);
    OFFSET(TSS_esp1, struct tss_struct, esp1);
    DEFINE(TSS_sizeof, sizeof(struct tss_struct));
    BLANK();

    OFFSET(VCPU_svm_vmcb_pa, struct vcpu, arch.hvm_svm.vmcb_pa);
    OFFSET(VCPU_svm_vmcb, struct vcpu, arch.hvm_svm.vmcb);
    OFFSET(VCPU_svm_vmcb_in_sync, struct vcpu, arch.hvm_svm.vmcb_in_sync);
    BLANK();

    OFFSET(VCPU_vmx_launched, struct vcpu, arch.hvm_vmx.launched);
#ifndef VMXASSIST
    OFFSET(VCPU_vmx_emul, struct vcpu, arch.hvm_vmx.vmxemul);
#endif
    OFFSET(VCPU_hvm_guest_cr2, struct vcpu, arch.hvm_vcpu.guest_cr[2]);
    BLANK();

    OFFSET(VMCB_rax, struct vmcb_struct, rax);
    OFFSET(VMCB_rip, struct vmcb_struct, rip);
    OFFSET(VMCB_rsp, struct vmcb_struct, rsp);
    OFFSET(VMCB_rflags, struct vmcb_struct, rflags);
    BLANK();

    OFFSET(VCPUINFO_upcall_pending, vcpu_info_t, evtchn_upcall_pending);
    OFFSET(VCPUINFO_upcall_mask, vcpu_info_t, evtchn_upcall_mask);
    BLANK();

    DEFINE(CPUINFO_sizeof, sizeof(struct cpu_info));
    BLANK();

    OFFSET(TRAPBOUNCE_error_code, struct trap_bounce, error_code);
    OFFSET(TRAPBOUNCE_flags, struct trap_bounce, flags);
    OFFSET(TRAPBOUNCE_cs, struct trap_bounce, cs);
    OFFSET(TRAPBOUNCE_eip, struct trap_bounce, eip);
    BLANK();

#if PERF_COUNTERS
    DEFINE(PERFC_hypercalls, PERFC_hypercalls);
    DEFINE(PERFC_exceptions, PERFC_exceptions);
    BLANK();
#endif

    DEFINE(FIXMAP_apic_base, fix_to_virt(FIX_APIC_BASE));
    BLANK();

    DEFINE(IRQSTAT_shift, LOG_2(sizeof(irq_cpustat_t)));
    BLANK();

    OFFSET(CPUINFO_ext_features, struct cpuinfo_x86, x86_capability[1]);
}
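__dummy__() is never called: it exists so the compiler emits these structure offsets as assembler-visible constants, which the build then scrapes into a header for hand-written assembly. A sketch of the conventional macros behind DEFINE/OFFSET/BLANK (details vary between trees):

#define DEFINE(sym, val) \
    asm volatile ( "\n->" #sym " %0 " #val : : "i" (val) )
#define BLANK() \
    asm volatile ( "\n->" : : )
#define OFFSET(sym, str, mem) \
    DEFINE(sym, offsetof(str, mem))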
Example #24
static int __do_suspend(void *ignore)
{
	int i, j, k, fpp, err;

	extern unsigned long max_pfn;
	extern unsigned long *pfn_to_mfn_frame_list_list;
	extern unsigned long *pfn_to_mfn_frame_list[];

	extern void time_resume(void);

	BUG_ON(smp_processor_id() != 0);
	BUG_ON(in_interrupt());

	if (xen_feature(XENFEAT_auto_translated_physmap)) {
		printk(KERN_WARNING "Cannot suspend in "
		       "auto_translated_physmap mode.\n");
		return -EOPNOTSUPP;
	}

	err = smp_suspend();
	if (err)
		return err;

	xenbus_suspend();

	preempt_disable();

#ifdef __i386__
	kmem_cache_shrink(pgd_cache);
#endif
	mm_pin_all();

	__cli();
	preempt_enable();

	gnttab_suspend();

	HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
	clear_fixmap(FIX_SHARED_INFO);

	xen_start_info->store_mfn = mfn_to_pfn(xen_start_info->store_mfn);
	xen_start_info->console_mfn = mfn_to_pfn(xen_start_info->console_mfn);

	/*
	 * We'll stop somewhere inside this hypercall. When it returns,
	 * we'll start resuming after the restore.
	 */
	HYPERVISOR_suspend(virt_to_mfn(xen_start_info));

	shutting_down = SHUTDOWN_INVALID;

	set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);

	HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);

	memset(empty_zero_page, 0, PAGE_SIZE);

	HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
		virt_to_mfn(pfn_to_mfn_frame_list_list);

	fpp = PAGE_SIZE/sizeof(unsigned long);
	for (i = 0, j = 0, k = -1; i < max_pfn; i += fpp, j++) {
		if ((j % fpp) == 0) {
			k++;
			pfn_to_mfn_frame_list_list[k] =
				virt_to_mfn(pfn_to_mfn_frame_list[k]);
			j = 0;
		}
		pfn_to_mfn_frame_list[k][j] =
			virt_to_mfn(&phys_to_machine_mapping[i]);
	}
	HYPERVISOR_shared_info->arch.max_pfn = max_pfn;

	gnttab_resume();

	irq_resume();

	time_resume();

	switch_idle_mm();

	__sti();

	xencons_resume();

	xenbus_resume();

	smp_resume();

	return err;
}
Example #25
void __init xen_start_kernel(void)
{
	unsigned int i;
	struct xen_machphys_mapping mapping;
	unsigned long machine_to_phys_nr_ents;
#ifdef CONFIG_X86_32
	struct xen_platform_parameters pp;
	extern pte_t swapper_pg_fixmap[PTRS_PER_PTE];
	unsigned long addr;
#endif

	xen_setup_features();

	if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
		machine_to_phys_mapping = (unsigned long *)mapping.v_start;
		machine_to_phys_nr_ents = mapping.max_mfn + 1;
	} else
		machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES;
	while ((1UL << machine_to_phys_order) < machine_to_phys_nr_ents )
		machine_to_phys_order++;

	if (!xen_feature(XENFEAT_auto_translated_physmap))
		phys_to_machine_mapping =
			(unsigned long *)xen_start_info->mfn_list;

	WARN_ON(HYPERVISOR_vm_assist(VMASST_CMD_enable,
				     VMASST_TYPE_writable_pagetables));

	reserve_early(ALIGN(__pa_symbol(&_end), PAGE_SIZE),
		      __pa(xen_start_info->pt_base)
		      + (xen_start_info->nr_pt_frames << PAGE_SHIFT),
		      "Xen provided");

#ifdef CONFIG_X86_32
	WARN_ON(HYPERVISOR_vm_assist(VMASST_CMD_enable,
				     VMASST_TYPE_4gb_segments));

	init_mm.pgd = swapper_pg_dir = (pgd_t *)xen_start_info->pt_base;

	if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0) {
		hypervisor_virt_start = pp.virt_start;
		reserve_top_address(0UL - pp.virt_start);
	}

	BUG_ON(pte_index(hypervisor_virt_start));

	/* Do an early initialization of the fixmap area */
	make_lowmem_page_readonly(swapper_pg_fixmap, XENFEAT_writable_page_tables);
	addr = __fix_to_virt(FIX_EARLYCON_MEM_BASE);
	set_pmd(pmd_offset(pud_offset(swapper_pg_dir + pgd_index(addr),
				      addr),
			   addr),
		__pmd(__pa_symbol(swapper_pg_fixmap) | _PAGE_TABLE));
#else
	check_efer();
	xen_init_pt();
#endif

#define __FIXADDR_TOP (-PAGE_SIZE)
#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
#define FIX_BUG_ON(fix) BUILD_BUG_ON(pmd_index(__fix_to_virt(FIX_##fix)) \
			!= pmd_index(__fix_to_virt(FIX_EARLYCON_MEM_BASE)))
	FIX_BUG_ON(SHARED_INFO);
	FIX_BUG_ON(ISAMAP_BEGIN);
	FIX_BUG_ON(ISAMAP_END);
#undef pmd_index
#undef __FIXADDR_TOP

	/* Switch to the real shared_info page, and clear the dummy page. */
	set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
	HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
	memset(empty_zero_page, 0, sizeof(empty_zero_page));

	setup_vcpu_info(0);

	/* Set up mapping of lowest 1MB of physical memory. */
	for (i = 0; i < NR_FIX_ISAMAPS; i++)
		if (is_initial_xendomain())
			set_fixmap(FIX_ISAMAP_BEGIN - i, i * PAGE_SIZE);
		else
			__set_fixmap(FIX_ISAMAP_BEGIN - i,
				     virt_to_machine(empty_zero_page),
				     PAGE_KERNEL_RO);

}
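The FIX_BUG_ON checks are compile-time assertions that FIX_SHARED_INFO and the ISA map slots live in the same pmd as FIX_EARLYCON_MEM_BASE, so the single set_pmd() above covers every fixmap used here. BUILD_BUG_ON is the classic negative-array-size trick; a sketch of its usual definition:

#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2 * !!(condition)]))

/* compiles iff condition is 0; a true condition yields a negative array size */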