Example #1
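/*
 * Map the vsyscall code page at its fixed address, and the HPET
 * registers as well if an HPET was detected at boot.
 */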
static void __init map_vsyscall(void)
{
	unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0);
	__set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);
	if (hpet_address)
		__set_fixmap(VSYSCALL_HPET, hpet_address, PAGE_KERNEL_VSYSCALL);
}
Example #2
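/*
 * Map the vsyscall and VVAR pages. In NATIVE mode user space executes
 * the vsyscall page directly, so it needs an executable protection;
 * when vsyscalls are emulated a read-only VVAR mapping is enough.
 */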
void __init map_vsyscall(void)
{
	extern char __vsyscall_page;
	unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
	unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);

	__set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
		     vsyscall_mode == NATIVE
		     ? PAGE_KERNEL_VSYSCALL
		     : PAGE_KERNEL_VVAR);
	BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
		     (unsigned long)VSYSCALL_START);

	__set_fixmap(VVAR_PAGE, physaddr_vvar_page, PAGE_KERNEL_VVAR);
	BUILD_BUG_ON((unsigned long)__fix_to_virt(VVAR_PAGE) !=
		     (unsigned long)VVAR_ADDRESS);
}
Example #3
static void __init map_vsyscall(void)
{
	unsigned long physaddr_page0 = (unsigned long) &__vsyscall_0 - PAGE_OFFSET;

	/* Initially we map the VSYSCALL page w/ PAGE_KERNEL permissions to
	 * keep the alternate_instruction code from bombing out when it
	 * changes the seq_lock memory barriers in vgettimeofday()
	 */
	__set_fixmap(FIX_VSYSCALL_GTOD_FIRST_PAGE, physaddr_page0, PAGE_KERNEL);
}
Example #4
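/*
 * Work around the Pentium F00F erratum: with the IDT aliased through a
 * read-only fixmap entry, the lockup turns into a page fault that the
 * kernel can recognize and handle.
 */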
static void __cpuinit trap_init_f00f_bug(void)
{
	__set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);

	/*
	 * Update the IDT descriptor and reload the IDT so that
	 * it uses the read-only mapped virtual address.
	 */
	idt_descr.address = fix_to_virt(FIX_F00F_IDT);
	load_idt(&idt_descr);
}
Example #5
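/*
 * Map the vsyscall page, then mark the page-table entries covering it
 * as user-accessible; the fixmap range is normally kernel-only.
 */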
void __init map_vsyscall(void)
{
	extern char __vsyscall_page;
	unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);

	if (vsyscall_mode != NONE) {
		__set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
			     PAGE_KERNEL_VVAR);
		set_vsyscall_pgtable_user_bits(swapper_pg_dir);
	}

	BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
		     (unsigned long)VSYSCALL_ADDR);
}
Example #6
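/*
 * Map the vsyscall page unless vsyscalls were disabled outright:
 * executable in NATIVE mode, read-only when calls are emulated.
 */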
void __init map_vsyscall(void)
{
	extern char __vsyscall_page;
	unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);

	if (vsyscall_mode != NONE)
		__set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
			     vsyscall_mode == NATIVE
			     ? PAGE_KERNEL_VSYSCALL
			     : PAGE_KERNEL_VVAR);

	BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
		     (unsigned long)VSYSCALL_ADDR);
}
Example #7
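/*
 * Toggle the fixed-address compat vDSO mapping: PAGE_NONE unmaps it,
 * and the TLB flush drops any stale translations.
 */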
static void map_compat_vdso(int map)
{
    static int vdso_mapped;

    if (map == vdso_mapped)
        return;

    vdso_mapped = map;

    __set_fixmap(FIX_VDSO, page_to_pfn(vdso32_pages[0]) << PAGE_SHIFT,
                 map ? PAGE_READONLY_EXEC : PAGE_NONE);

    /* flush stray tlbs */
    flush_tlb_all();
}
Example #8
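/*
 * Expose the pvclock time info pages through the fixmap so the vDSO
 * clock code can read them from user space.
 */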
int __init pvclock_init_vsyscall(struct pvclock_vsyscall_time_info *i,
				 int size)
{
	int idx;

	WARN_ON (size != PVCLOCK_VSYSCALL_NR_PAGES*PAGE_SIZE);

	for (idx = 0; idx <= (PVCLOCK_FIXMAP_END-PVCLOCK_FIXMAP_BEGIN); idx++) {
		__set_fixmap(PVCLOCK_FIXMAP_BEGIN + idx,
			     __pa(i) + (idx*PAGE_SIZE),
			     PAGE_KERNEL_VVAR);
	}

	return 0;
}
Example #9
static int __init remap_vsyscall(void)
{
	unsigned long physaddr_page0 = (unsigned long) &__vsyscall_0 - PAGE_OFFSET;

	if (!vsyscall_mapped)
		return 0;

	/* Remap the VSYSCALL page w/ PAGE_KERNEL_VSYSCALL permissions
	 * after the alternate_instruction code has run
	 */
	clear_fixmap(FIX_VSYSCALL_GTOD_FIRST_PAGE);
	__set_fixmap(FIX_VSYSCALL_GTOD_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);

	return 0;
}
Example #10
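/*
 * Map the HPET twice: uncached for kernel use, and at the vsyscall
 * fixmap slot so user-space vgettimeofday() can read the counter.
 */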
int hpet_arch_init(void)
{
	unsigned int id;
	u64 tmp;

	if (!hpet_address)
		return -1;
	set_fixmap_nocache(FIX_HPET_BASE, hpet_address);
	__set_fixmap(VSYSCALL_HPET, hpet_address, PAGE_KERNEL_VSYSCALL_NOCACHE);

	/*
	 * Read the period, compute tick and quotient.
	 */

	id = hpet_readl(HPET_ID);

	if (!(id & HPET_ID_VENDOR) || !(id & HPET_ID_NUMBER))
		return -1;

	hpet_period = hpet_readl(HPET_PERIOD);
	if (hpet_period < 100000 || hpet_period > 100000000)
		return -1;

	hpet_tick = (FSEC_PER_TICK + hpet_period / 2) / hpet_period;

	hpet_use_timer = (id & HPET_ID_LEGSUP);

	/*
	 * hpet period is in femto seconds per cycle
	 * so we need to convert this to ns/cyc units
	 * approximated by mult/2^shift
	 *
	 *  fsec/cyc * 1nsec/1000000fsec = nsec/cyc = mult/2^shift
	 *  fsec/cyc * 1ns/1000000fsec * 2^shift = mult
	 *  fsec/cyc * 2^shift * 1nsec/1000000fsec = mult
	 *  (fsec/cyc << shift)/1000000 = mult
	 *  (hpet_period << shift)/FSEC_PER_NSEC = mult
	 */
	tmp = (u64)hpet_period << HPET_SHIFT;
	do_div(tmp, FSEC_PER_NSEC);
	clocksource_hpet.mult = (u32)tmp;
	clocksource_register(&clocksource_hpet);

	return hpet_timer_stop_set_go(hpet_tick);
}
Example #11
static void __init test_wp_bit(void)
{
	printk("Checking if this processor honours the WP bit even in supervisor mode... ");

	/* Any page-aligned address will do, the test is non-destructive */
	__set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
	boot_cpu_data.wp_works_ok = do_test_wp_bit();
	clear_fixmap(FIX_WP_TEST);

	if (!boot_cpu_data.wp_works_ok) {
		printk("No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
		panic("This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
#endif
	} else {
		printk("Ok.\n");
	}
}
Example #12
void __init init_entry_mappings(void)
{
#if CONFIG_X86_HIGH_ENTRY

	void *tramp;

	/*
	 * We need a high IDT and GDT for the 4G/4G split:
	 */
	trap_init_virtual_IDT();

	__set_fixmap(FIX_ENTRY_TRAMPOLINE, __pa((unsigned long)&entry_tramp_start), PAGE_KERNEL_EXEC);
	tramp = (void *)fix_to_virt(FIX_ENTRY_TRAMPOLINE);

	printk("mapped 4G/4G trampoline to %p.\n", tramp);
	/*
	 * Virtual kernel stack:
	 */
	BUG_ON(__kmap_atomic_vaddr(KM_VSTACK0) & 8191);
	BUG_ON(sizeof(struct desc_struct)*NR_CPUS*GDT_ENTRIES > 2*PAGE_SIZE);
	BUG_ON((unsigned int)&entry_tramp_end - (unsigned int)&entry_tramp_start > PAGE_SIZE);

	/*
	 * set up the initial thread's virtual stack related
	 * fields:
	 */
	current->thread.stack_page0 = virt_to_page((char *)current);
	current->thread.stack_page1 = virt_to_page((char *)current + PAGE_SIZE);
	current->virtual_stack = (void *)__kmap_atomic_vaddr(KM_VSTACK0);

	__kunmap_atomic_type(KM_VSTACK0);
	__kunmap_atomic_type(KM_VSTACK1);
	__kmap_atomic(current->thread.stack_page0, KM_VSTACK0);
	__kmap_atomic(current->thread.stack_page1, KM_VSTACK1);

	return_path_start = ENTRY_TRAMP_ADDR(&return_path_start_marker);
	return_path_end = ENTRY_TRAMP_ADDR(&return_path_end_marker);
#endif
	current->real_stack = (void *)current;
	current->user_pgd = NULL;
	current->thread.esp0 = (unsigned long)current->real_stack + THREAD_SIZE;

}
Example #13
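/*
 * Publish the vsyscall entry page: it gets the int80 stub, or the
 * sysenter stub when the CPU supports SEP.
 */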
int __init sysenter_setup(void)
{
	void *page = (void *)get_zeroed_page(GFP_ATOMIC);

	__set_fixmap(FIX_VSYSCALL, __pa(page), PAGE_READONLY_EXEC);

	if (!boot_cpu_has(X86_FEATURE_SEP)) {
		memcpy(page,
		       &vsyscall_int80_start,
		       &vsyscall_int80_end - &vsyscall_int80_start);
		return 0;
	}

	memcpy(page,
	       &vsyscall_sysenter_start,
	       &vsyscall_sysenter_end - &vsyscall_sysenter_start);

	return 0;
}
Example #14
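/*
 * Map the pvclock time info pages and register a task-migration
 * notifier, so user space can detect that it changed CPUs while
 * reading the clock.
 */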
int __init pvclock_init_vsyscall(struct pvclock_vsyscall_time_info *i,
				 int size)
{
	int idx;

	WARN_ON (size != PVCLOCK_VSYSCALL_NR_PAGES*PAGE_SIZE);

	pvclock_vdso_info = i;

	for (idx = 0; idx <= (PVCLOCK_FIXMAP_END-PVCLOCK_FIXMAP_BEGIN); idx++) {
		__set_fixmap(PVCLOCK_FIXMAP_BEGIN + idx,
			     __pa(i) + (idx*PAGE_SIZE),
			     PAGE_KERNEL_VVAR);
	}

	register_task_migration_notifier(&pvclock_migrate);

	return 0;
}
Example #15
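/*
 * Set up the vsyscall entry page with the int80 or sysenter stub, and
 * enable the SYSENTER MSRs on every CPU.
 */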
static int __init sysenter_setup(void)
{
	unsigned long page = get_zeroed_page(GFP_ATOMIC);

	__set_fixmap(FIX_VSYSCALL, __pa(page), PAGE_READONLY_EXEC);

	if (!boot_cpu_has(X86_FEATURE_SEP)) {
		memcpy((void *) page,
		       &vsyscall_int80_start,
		       &vsyscall_int80_end - &vsyscall_int80_start);
		return 0;
	}

	memcpy((void *) page,
	       &vsyscall_sysenter_start,
	       &vsyscall_sysenter_end - &vsyscall_sysenter_start);

	on_each_cpu(enable_sep_cpu, NULL, 1, 1);
	return 0;
}
Example #16
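/*
 * Early ioremap for EFI: hand out fixmap slots page by page and return
 * a virtual address that preserves the sub-page offset of the request.
 */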
void __iomem * __init efi_ioremap(unsigned long phys_addr, unsigned long size)
{
	static unsigned pages_mapped __initdata;
	unsigned i, pages;
	unsigned long offset;

	pages = PFN_UP(phys_addr + size) - PFN_DOWN(phys_addr);
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;

	if (pages_mapped + pages > MAX_EFI_IO_PAGES)
		return NULL;

	for (i = 0; i < pages; i++) {
		__set_fixmap(FIX_EFI_IO_MAP_FIRST_PAGE - pages_mapped,
			     phys_addr, PAGE_KERNEL);
		phys_addr += PAGE_SIZE;
		pages_mapped++;
	}

	return (void __iomem *)__fix_to_virt(FIX_EFI_IO_MAP_FIRST_PAGE - \
					     (pages_mapped - pages)) + offset;
}
Example #17
void __iomem * __init efi_ioremap(unsigned long phys_addr, unsigned long size)
{
	static unsigned pages_mapped;
	unsigned i, pages;

	/* phys_addr and size must be page aligned */
	if ((phys_addr & ~PAGE_MASK) || (size & ~PAGE_MASK))
		return NULL;

	pages = size >> PAGE_SHIFT;
	if (pages_mapped + pages > MAX_EFI_IO_PAGES)
		return NULL;

	for (i = 0; i < pages; i++) {
		__set_fixmap(FIX_EFI_IO_MAP_FIRST_PAGE - pages_mapped,
			     phys_addr, PAGE_KERNEL);
		phys_addr += PAGE_SIZE;
		pages_mapped++;
	}

	return (void __iomem *)__fix_to_virt(FIX_EFI_IO_MAP_FIRST_PAGE - \
					     (pages_mapped - pages));
}
Example #18
void __init xen_start_kernel(void)
{
	unsigned int i;
	struct xen_machphys_mapping mapping;
	unsigned long machine_to_phys_nr_ents;
#ifdef CONFIG_X86_32
	struct xen_platform_parameters pp;
	extern pte_t swapper_pg_fixmap[PTRS_PER_PTE];
	unsigned long addr;
#endif

	xen_setup_features();

	if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
		machine_to_phys_mapping = (unsigned long *)mapping.v_start;
		machine_to_phys_nr_ents = mapping.max_mfn + 1;
	} else
		machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES;
	while ((1UL << machine_to_phys_order) < machine_to_phys_nr_ents)
		machine_to_phys_order++;

	if (!xen_feature(XENFEAT_auto_translated_physmap))
		phys_to_machine_mapping =
			(unsigned long *)xen_start_info->mfn_list;

	WARN_ON(HYPERVISOR_vm_assist(VMASST_CMD_enable,
				     VMASST_TYPE_writable_pagetables));

	reserve_early(ALIGN(__pa_symbol(&_end), PAGE_SIZE),
		      __pa(xen_start_info->pt_base)
		      + (xen_start_info->nr_pt_frames << PAGE_SHIFT),
		      "Xen provided");

#ifdef CONFIG_X86_32
	WARN_ON(HYPERVISOR_vm_assist(VMASST_CMD_enable,
				     VMASST_TYPE_4gb_segments));

	init_mm.pgd = swapper_pg_dir = (pgd_t *)xen_start_info->pt_base;

	if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0) {
		hypervisor_virt_start = pp.virt_start;
		reserve_top_address(0UL - pp.virt_start);
	}

	BUG_ON(pte_index(hypervisor_virt_start));

	/* Do an early initialization of the fixmap area */
	make_lowmem_page_readonly(swapper_pg_fixmap, XENFEAT_writable_page_tables);
	addr = __fix_to_virt(FIX_EARLYCON_MEM_BASE);
	set_pmd(pmd_offset(pud_offset(swapper_pg_dir + pgd_index(addr),
				      addr),
			   addr),
		__pmd(__pa_symbol(swapper_pg_fixmap) | _PAGE_TABLE));
#else
	check_efer();
	xen_init_pt();
#endif

#define __FIXADDR_TOP (-PAGE_SIZE)
#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
#define FIX_BUG_ON(fix) BUILD_BUG_ON(pmd_index(__fix_to_virt(FIX_##fix)) \
			!= pmd_index(__fix_to_virt(FIX_EARLYCON_MEM_BASE)))
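	/* All of these fixmap slots must share the early console fixmap's PMD. */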
	FIX_BUG_ON(SHARED_INFO);
	FIX_BUG_ON(ISAMAP_BEGIN);
	FIX_BUG_ON(ISAMAP_END);
#undef pmd_index
#undef __FIXADDR_TOP

	/* Switch to the real shared_info page, and clear the dummy page. */
	set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
	HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
	memset(empty_zero_page, 0, sizeof(empty_zero_page));

	setup_vcpu_info(0);

	/* Set up mapping of lowest 1MB of physical memory. */
	for (i = 0; i < NR_FIX_ISAMAPS; i++)
		if (is_initial_xendomain())
			set_fixmap(FIX_ISAMAP_BEGIN - i, i * PAGE_SIZE);
		else
			__set_fixmap(FIX_ISAMAP_BEGIN - i,
				     virt_to_machine(empty_zero_page),
				     PAGE_KERNEL_RO);

}