Example #1
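/*
 * Flush the D-cache lines of @page, or simply mark the page Dcache-dirty
 * and defer the flush when the page has a mapping but is not currently
 * mapped into any user address space.
 */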
void __flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long addr;

	if (mapping && !mapping_mapped(mapping)) {
		SetPageDcacheDirty(page);
		return;
	}

	/*
	 * We could delay the flush for the !page_mapping case too.  But that
	 * case is for exec env/arg pages and those are 99% certain to get
	 * faulted into the TLB (and thus flushed) anyway.
	 */
	if (PageHighMem(page))
		addr = (unsigned long)__kmap_atomic(page);
	else
		addr = (unsigned long)page_address(page);

	flush_data_cache_page(addr);

	if (PageHighMem(page))
		__kunmap_atomic((void *)addr);
}
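The deferred flush above is tracked with an architecture-private page flag that __update_cache() (Example #2) later consumes. A minimal sketch of how the helpers used here are typically defined on MIPS, assuming the conventional use of PG_arch_1 (exact definitions vary by kernel version):

/* Illustrative sketch only: the Dcache-dirty state rides on the arch-private
 * PG_arch_1 page flag and is tested/set/cleared with the generic bitops. */
#define PG_dcache_dirty			PG_arch_1

#define Page_dcache_dirty(page)		\
	test_bit(PG_dcache_dirty, &(page)->flags)
#define SetPageDcacheDirty(page)	\
	set_bit(PG_dcache_dirty, &(page)->flags)
#define ClearPageDcacheDirty(page)	\
	clear_bit(PG_dcache_dirty, &(page)->flags)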
Example #2
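/*
 * Counterpart to __flush_dcache_page() above: called once a PTE has been
 * installed for @address, it performs any D-cache flush that was deferred
 * via the Dcache-dirty flag, but only when the kernel mapping actually
 * aliases the user address, or the page may be executed on a CPU whose
 * I-cache does not fill from the D-cache.
 */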
void __update_cache(unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long pfn, addr;
	int exec = !pte_no_exec(pte) && !cpu_has_ic_fills_f_dc;

	pfn = pte_pfn(pte);
	if (unlikely(!pfn_valid(pfn)))
		return;
	page = pfn_to_page(pfn);
	if (Page_dcache_dirty(page)) {
		if (PageHighMem(page))
			addr = (unsigned long)__kmap_atomic(page);
		else
			addr = (unsigned long)page_address(page);

		if (exec || pages_do_alias(addr, address & PAGE_MASK))
			flush_data_cache_page(addr);

		if (PageHighMem(page))
			__kunmap_atomic((void *)addr);

		ClearPageDcacheDirty(page);
	}
}
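The pages_do_alias() test above decides whether the kernel-side address and the faulting user address can land in different sets of a virtually indexed D-cache. A minimal sketch of the usual MIPS form, assuming the cache probe code has recorded the aliasing bits in shm_align_mask (illustrative, not tied to one kernel version):

/* Two virtual addresses alias when they differ in the bits above PAGE_SHIFT
 * that still select a cache set; those bits are recorded in shm_align_mask
 * when the caches are probed at boot. */
extern unsigned long shm_align_mask;

#define pages_do_alias(addr1, addr2)	\
	(((addr1) ^ (addr2)) & shm_align_mask)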
Example #3
void __init init_entry_mappings(void)
{
#ifdef CONFIG_X86_HIGH_ENTRY

	void *tramp;

	/*
	 * We need a high IDT and GDT for the 4G/4G split:
	 */
	trap_init_virtual_IDT();

	__set_fixmap(FIX_ENTRY_TRAMPOLINE, __pa((unsigned long)&entry_tramp_start), PAGE_KERNEL_EXEC);
	tramp = (void *)fix_to_virt(FIX_ENTRY_TRAMPOLINE);

	printk(KERN_INFO "mapped 4G/4G trampoline to %p.\n", tramp);
	/*
	 * Virtual kernel stack:
	 */
	BUG_ON(__kmap_atomic_vaddr(KM_VSTACK0) & 8191);
	BUG_ON(sizeof(struct desc_struct)*NR_CPUS*GDT_ENTRIES > 2*PAGE_SIZE);
	BUG_ON((unsigned int)&entry_tramp_end - (unsigned int)&entry_tramp_start > PAGE_SIZE);

	/*
	 * set up the initial thread's virtual stack related
	 * fields:
	 */
	current->thread.stack_page0 = virt_to_page((char *)current);
	current->thread.stack_page1 = virt_to_page((char *)current + PAGE_SIZE);
	current->virtual_stack = (void *)__kmap_atomic_vaddr(KM_VSTACK0);

	__kunmap_atomic_type(KM_VSTACK0);
	__kunmap_atomic_type(KM_VSTACK1);
	__kmap_atomic(current->thread.stack_page0, KM_VSTACK0);
	__kmap_atomic(current->thread.stack_page1, KM_VSTACK1);

	return_path_start = ENTRY_TRAMP_ADDR(&return_path_start_marker);
	return_path_end = ENTRY_TRAMP_ADDR(&return_path_end_marker);
#endif
	current->real_stack = (void *)current;
	current->user_pgd = NULL;
	current->thread.esp0 = (unsigned long)current->real_stack + THREAD_SIZE;
}
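Both the trampoline fixmap and the KM_VSTACK checks above rely on fixmap slots having compile-time virtual addresses. A minimal sketch of the underlying i386 arithmetic, assuming the usual downward-growing fixmap layout; __kmap_atomic_vaddr() here is a simplified stand-in for the 4G/4G patch's definition, which may fold in further offsets:

/* Sketch: fixmap indices map to fixed virtual pages just below FIXADDR_TOP,
 * so a kmap type's address can be computed (and BUG_ON-checked) at any time. */
#define __fix_to_virt(idx)		(FIXADDR_TOP - ((idx) << PAGE_SHIFT))
#define __kmap_atomic_vaddr(type)	__fix_to_virt(FIX_KMAP_BEGIN + (type))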
Example #4
/*
 *	switch_to(x,y) should switch tasks from x to y.
 *
 * We fsave/fwait so that an exception goes off at the right time
 * (as a call from the fsave or fwait in effect) rather than to
 * the wrong process. Lazy FP saving no longer makes any sense
 * with modern CPUs, and this simplifies a lot of things (SMP
 * and UP become the same).
 *
 * NOTE! We used to use the x86 hardware context switching. The
 * reason for not using it any more becomes apparent when you
 * try to recover gracefully from saved state that is no longer
 * valid (stale segment register values in particular). With the
 * hardware task-switch, there is no way to fix up bad state in
 * a reasonable manner.
 *
 * The fact that Intel documents the hardware task-switching to
 * be slow is something of a red herring - this code is not noticeably
 * faster. However, there _is_ some room for improvement here,
 * so the performance issues may eventually be a valid point.
 * More important, however, is the fact that this allows us much
 * more flexibility.
 *
 * The return value (in %eax) will be the "prev" task after
 * the task-switch, and shows up in ret_from_fork in entry.S,
 * for example.
 */
struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread,
				 *next = &next_p->thread;
	int cpu = smp_processor_id();
#ifndef CONFIG_X86_NO_TSS
	struct tss_struct *tss = init_tss + cpu;
#endif
	struct physdev_set_iopl iopl_op;
	struct physdev_set_iobitmap iobmp_op;
	multicall_entry_t _mcl[8], *mcl = _mcl;

	/* XEN NOTE: FS/GS saved in switch_mm(), not here. */
	if (next_p->mm)
		load_user_cs_desc(cpu, next_p->mm);

	/*
	 * This is basically '__unlazy_fpu', except that we queue a
	 * multicall to indicate FPU task switch, rather than
	 * synchronously trapping to Xen.
	 */
	if (prev_p->thread_info->status & TS_USEDFPU) {
		__save_init_fpu(prev_p); /* _not_ save_init_fpu() */
		mcl->op      = __HYPERVISOR_fpu_taskswitch;
		mcl->args[0] = 1;
		mcl++;
	}
#if 0 /* lazy fpu sanity check */
	else BUG_ON(!(read_cr0() & 8));
#endif
 
#ifdef CONFIG_X86_HIGH_ENTRY
{
	int i;
	/*
	 * Set the ptes of the virtual stack. (NOTE: a one-page TLB flush is
	 * needed because otherwise NMIs could interrupt the
	 * user-return code with a virtual stack and stale TLBs.)
	 */
	for (i = 0; i < ARRAY_SIZE(next->stack_page); i++) {
		__kunmap_atomic_type(KM_VSTACK_TOP-i);
		__kmap_atomic(next->stack_page[i], KM_VSTACK_TOP-i);
	}
	/*
	 * NOTE: here we rely on the task being the stack as well
	 */
	next_p->thread_info->virtual_stack =
			(void *)__kmap_atomic_vaddr(KM_VSTACK_TOP);
}
#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP)
	/*
	 * If next was preempted on entry from userspace to kernel,
	 * and now it's on a different cpu, we need to adjust %esp.
	 * This assumes that entry.S does not copy %esp while on the
	 * virtual stack (with interrupts enabled): which is so,
	 * except within __SWITCH_KERNELSPACE itself.
	 */
	if (unlikely(next->esp >= TASK_SIZE)) {
		next->esp &= THREAD_SIZE - 1;
		next->esp |= (unsigned long) next_p->thread_info->virtual_stack;
	}
#endif
#endif
	/*
	 * Reload esp0, LDT and the page table pointer:
	 * This is load_esp0(tss, next) with a multicall.
	 */
	mcl->op      = __HYPERVISOR_stack_switch;
	mcl->args[0] = __KERNEL_DS;
	mcl->args[1] = next->esp0;
	mcl++;

	/*
	 * Load the per-thread Thread-Local Storage descriptor.
	 * This is load_TLS(next, cpu) with multicalls.
	 */
#define C(i) do {							\
	if (unlikely(next->tls_array[i].a != prev->tls_array[i].a ||	\
		     next->tls_array[i].b != prev->tls_array[i].b)) {	\
		mcl->op = __HYPERVISOR_update_descriptor;		\
		*(u64 *)&mcl->args[0] = virt_to_machine(		\
			&get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]);\
		*(u64 *)&mcl->args[2] = *(u64 *)&next->tls_array[i];	\
		mcl++;							\
	}								\
} while (0)
	C(0); C(1); C(2);
#undef C

	if (unlikely(prev->io_pl != next->io_pl)) {
		iopl_op.iopl = (next->io_pl == 0) ? 1 :
			(next->io_pl >> 12) & 3;
		mcl->op      = __HYPERVISOR_physdev_op;
		mcl->args[0] = PHYSDEVOP_set_iopl;
		mcl->args[1] = (unsigned long)&iopl_op;
		mcl++;
	}