Example #1
/* Must run before zap_low_mappings */
__meminit void *early_ioremap(unsigned long addr, unsigned long size)
{
	unsigned long vaddr;
	pmd_t *pmd, *last_pmd;
	int i, pmds;

	pmds = ((addr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;	/* PMDs spanned, rounded up */
	vaddr = __START_KERNEL_map;
	pmd = level2_kernel_pgt;
	last_pmd = level2_kernel_pgt + PTRS_PER_PMD - 1;
	for (; pmd <= last_pmd; pmd++, vaddr += PMD_SIZE) {
		for (i = 0; i < pmds; i++) {
			if (pmd_present(pmd[i]))
				goto next;
		}
		/* Keep the sub-PMD offset of 'addr' in the returned address. */
		vaddr += addr & ~PMD_MASK;
		addr &= PMD_MASK;
		for (i = 0; i < pmds; i++, addr += PMD_SIZE)
			set_pmd(pmd + i, __pmd(addr | _KERNPG_TABLE | _PAGE_PSE));
		__flush_tlb();
		return (void *)vaddr;
	next:
		;
	}
	printk("early_ioremap(0x%lx, %lu) failed\n", addr, size);
	return NULL;
}
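
For context, a minimal sketch of how this early mapper pairs with early_iounmap() (Example #6 below) before the regular ioremap() machinery is available. The caller, its name, and its arguments are hypothetical:

/* Hypothetical early-boot caller: map a firmware table, read it, unmap. */
static void __init probe_firmware_table(unsigned long phys, unsigned long len)
{
	void *virt = early_ioremap(phys, len);	/* 2 MB PMD-granular mapping */

	if (!virt)
		return;
	/* ... parse the table through 'virt' ... */
	early_iounmap(virt, len);		/* tear down to avoid aliases */
}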
Example #2
static void __meminit phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
{ 
	int i = pud_index(addr);

	for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE) {
		unsigned long pmd_phys;
		pud_t *pud = pud_page + pud_index(addr);
		pmd_t *pmd;

		if (addr >= end)
			break;

		if (!after_bootmem && !e820_any_mapped(addr, addr + PUD_SIZE, 0)) {
			set_pud(pud, __pud(0)); 
			continue;
		} 

		if (pud_val(*pud)) {
			phys_pmd_update(pud, addr, end);
			continue;
		}

		pmd = alloc_low_page(&pmd_phys);
		spin_lock(&init_mm.page_table_lock);
		set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
		phys_pmd_init(pmd, addr, end);
		spin_unlock(&init_mm.page_table_lock);
		unmap_low_page(pmd);
	}
	__flush_tlb();
} 
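
For orientation, a simplified sketch of the kind of caller that drives phys_pud_init() in the same vintage of arch/x86_64/mm/init.c, one PGDIR-sized chunk at a time. The function name is made up, bootmem handling is elided, and the helpers (alloc_low_page(), unmap_low_page(), mk_kernel_pgd()) are assumed from that era; treat this as an outline, not the verbatim kernel code:

/* Sketch: map physical [start, end) into the kernel's direct mapping. */
static void __init map_direct_range(unsigned long start, unsigned long end)
{
	unsigned long next;

	start = (unsigned long)__va(start);
	end   = (unsigned long)__va(end);

	for (; start < end; start = next) {
		unsigned long pud_phys;
		pud_t *pud = alloc_low_page(&pud_phys);	/* scratch mapping */

		next = (start & PGDIR_MASK) + PGDIR_SIZE;
		if (next > end)
			next = end;
		phys_pud_init(pud, __pa(start), __pa(next));
		set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
		unmap_low_page(pud);
	}
}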
Example #3
/*
 * We cannot call mmdrop() because we are in interrupt context;
 * instead, update mm->cpu_vm_mask.
 */
static inline void leave_mm(unsigned long cpu)
{
	if (read_pda(mmu_state) == TLBSTATE_OK)
		BUG();
	clear_bit(cpu, &read_pda(active_mm)->cpu_vm_mask);
	__flush_tlb();
}
Example #4
/*
 * We cannot call mmdrop() because we are in interrupt context;
 * instead, update mm->cpu_vm_mask.
 */
static inline void leave_mm(unsigned long cpu)
{
	if (cpu_tlbstate[cpu].state == TLBSTATE_OK)
		BUG();
	clear_bit(cpu, &cpu_tlbstate[cpu].active_mm->cpu_vm_mask);
	/* Flush the TLB before it goes away; this stops speculative prefetches. */
	__flush_tlb();
}
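
Examples #3 and #4 are the x86_64 and i386 variants of the same idea: a CPU in lazy-TLB mode detaches from the mm rather than servicing every flush. For context, a simplified sketch of the cross-CPU invalidate IPI handler that chooses between a real flush and leave_mm(), in the i386 style of Example #4; flush_mm, flush_va, and FLUSH_ALL are assumed to be the usual sender-side globals, and the cpumask bookkeeping is elided:

/* Sketch: IPI handler deciding between a real flush and leave_mm(). */
asmlinkage void smp_invalidate_interrupt(void)
{
	unsigned long cpu = smp_processor_id();

	if (flush_mm == cpu_tlbstate[cpu].active_mm) {
		if (cpu_tlbstate[cpu].state == TLBSTATE_OK) {
			/* This CPU really uses the mm: honor the request. */
			if (flush_va == FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(flush_va);
		} else {
			/* Lazy TLB: drop the mm so later flushes skip us. */
			leave_mm(cpu);
		}
	}
	ack_APIC_irq();
}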
Example #5
int oleole_flush_guest_virt_memory(oleole_guest_system_t *gsys)
{
	unsigned long flags;
	unsigned long start, end;
	struct vm_area_struct	*vma;
	struct mm_struct *mm;
	pgd_t *pgd;

	spin_lock_irqsave(&gsys->lock, flags);
	vma = gsys->vma;
	spin_unlock_irqrestore(&gsys->lock, flags);

	if (!vma)
		return -1;

	mm = vma->vm_mm;

	if (!mm)
		return -1;

	start = vma->vm_start + OLEOLE_GUSET_VIRT_SPACE_OFFSET;
	end   = start         + 0x100000000UL;	/* a 4 GB guest window */

	down_write(&mm->mmap_sem);

	pgd = pgd_offset(mm, start);
	if (!pgd_present(*pgd))
		goto miss;

	for (; start < end ; start += PUD_SIZE) {
		pud_t *pud;
		pmd_t *pmd;
		struct page *page;

		pud = pud_offset(pgd, start);
		if (!pud_present(*pud))
			goto miss;

		free_pmd_range(pud);

		pmd = pmd_offset(pud, 0);
		page = virt_to_page(pmd);
		__free_page(page);
		pud_clear(pud);
	}
miss:

	up_write(&mm->mmap_sem);

	__flush_tlb();

	return 0;
}
Example #6
/* To avoid virtual aliases later */
__meminit void early_iounmap(void *addr, unsigned long size)
{
	unsigned long vaddr;
	pmd_t *pmd;
	int i, pmds;

	vaddr = (unsigned long)addr;
	pmds = ((vaddr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
	pmd = level2_kernel_pgt + pmd_index(vaddr);
	for (i = 0; i < pmds; i++)
		pmd_clear(pmd + i);
	__flush_tlb();
}
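
The pmds computation here (and in Example #1) rounds the byte span up to whole PMD units: the offset into the first PMD plus the size, plus PMD_SIZE - 1 (which equals ~PMD_MASK), divided by PMD_SIZE. A standalone worked check, assuming the usual 2 MB PMD_SIZE:

/* Userspace illustration of the PMD-count round-up; not kernel code. */
#include <assert.h>

#define PMD_SIZE (2UL << 20)		/* 2 MB, the common case */
#define PMD_MASK (~(PMD_SIZE - 1))

static unsigned long pmd_count(unsigned long vaddr, unsigned long size)
{
	return ((vaddr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
}

int main(void)
{
	assert(pmd_count(0x200000, 0x200000) == 1);	/* aligned, one PMD */
	assert(pmd_count(0x200000, 0x200001) == 2);	/* one byte over */
	assert(pmd_count(0x3ff000, 0x2000) == 2);	/* straddles a boundary */
	return 0;
}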
Example #7
File: head64.c Project: AmesianX/winkvm
static void __init zap_identity_mappings(void)
{
	pgd_t *pgd = pgd_offset_k(0UL);
	pgd_clear(pgd);
	__flush_tlb();
}