Example #1
/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the gd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
		pmd_table = (pmd_t *)alloc_low_page();
		paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
		p4d = p4d_offset(pgd, 0);
		pud = pud_offset(p4d, 0);
		BUG_ON(pmd_table != pmd_offset(pud, 0));

		return pmd_table;
	}
#endif

	p4d = p4d_offset(pgd, 0);
	pud = pud_offset(p4d, 0);
	pmd_table = pmd_offset(pud, 0);

	return pmd_table;
}
Example #2
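/*
 * Look up the stage-2 PUD covering guest physical address @addr.  If the
 * top-level entry is empty, allocate a new table from @cache and install
 * it; with no cache available, return NULL so the caller can retry later.
 */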
static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
			     phys_addr_t addr)
{
	pgd_t *pgd;
	pud_t *pud;

	pgd = kvm->arch.pgd + kvm_pgd_index(addr);
	if (WARN_ON(pgd_none(*pgd))) {
		if (!cache)
			return NULL;
		pud = mmu_memory_cache_alloc(cache);
		pgd_populate(NULL, pgd, pud);
		get_page(virt_to_page(pgd));
	}

	return pud_offset(pgd, addr);
}
Example #3
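/*
 * Scan the PUDs under @pgd for [addr, end), skipping empty or bad entries
 * and descending into unuse_pmd_range() to hunt for @entry; returns 1 as
 * soon as the swap entry has been handled, 0 otherwise.
 */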
static inline int unuse_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				swp_entry_t entry, struct page *page)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		if (unuse_pmd_range(vma, pud, addr, next, entry, page))
			return 1;
	} while (pud++, addr = next, addr != end);
	return 0;
}
Example #4
File: init.c Project: Gaffey/linux
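/*
 * Set up the persistent kmap area: build page tables for the PKMAP range
 * and remember the PTE page that backs it in pkmap_page_table.
 */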
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long vaddr;

	vaddr = PKMAP_BASE;
	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}
Example #5
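/*
 * Flush the data cache for every stage-2 mapping under @pgd in [addr, end):
 * huge PUDs are flushed directly, everything else is handed down to
 * stage2_flush_pmds().
 */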
static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
			      phys_addr_t addr, phys_addr_t end)
{
	pud_t *pud;
	phys_addr_t next;

	pud = pud_offset(pgd, addr);
	do {
		next = kvm_pud_addr_end(addr, end);
		if (!pud_none(*pud)) {
			if (pud_huge(*pud))
				kvm_flush_dcache_pud(*pud);
			else
				stage2_flush_pmds(kvm, pud, addr, next);
		}
	} while (pud++, addr = next, addr != end);
}
Example #6
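/*
 * Walk the kernel page tables and return a pointer to the PTE mapping
 * @address, or NULL if any level is empty.  For a large-page mapping the
 * PMD entry itself is returned, cast to pte_t *.
 */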
pte_t *lookup_address(unsigned long address) 
{ 
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;
	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;
	if (pmd_large(*pmd))
		return (pte_t *)pmd;
	return pte_offset_kernel(pmd, address);
} 
Example #7
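/*
 * Fill @vec with residency information for [addr, end): ranges without a
 * usable PUD are reported via mincore_unmapped_range(), everything else is
 * resolved at PMD/PTE level by mincore_pmd_range().
 */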
static void mincore_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
			unsigned long addr, unsigned long end,
			unsigned char *vec)
{
	unsigned long next;
	pud_t *pud;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			mincore_unmapped_range(vma, addr, next, vec);
		else
			mincore_pmd_range(vma, pud, addr, next, vec);
		vec += (next - addr) >> PAGE_SHIFT;
	} while (pud++, addr = next, addr != end);
}
Example #8
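/*
 * Example syscall: walk the page tables of the process identified by @pid
 * and translate virtual address @va to a physical address; if the page is
 * swapped out, return the swap identifier stored in the PTE instead.
 */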
asmlinkage long long sys_my_syscall(int pid, unsigned long long va)
{
	unsigned long long pageFN;
	unsigned long long pa;

	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	struct mm_struct *mm = NULL;
	struct task_struct *task;

	/* Find the task whose address space we want to walk. */
	for_each_process(task) {
		if (task->pid == pid) {
			mm = task->mm;
			break;
		}
	}
	if (!mm)
		return -ESRCH;

	/* Walk the page tables: pgd -> pud -> pmd -> pte. */
	pgd = pgd_offset(mm, va);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return -EFAULT;

	pud = pud_offset(pgd, va);
	if (pud_none(*pud) || pud_bad(*pud))
		return -EFAULT;

	pmd = pmd_offset(pud, va);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return -EFAULT;

	pte = pte_offset_kernel(pmd, va);
	if (!pte_none(*pte)) {
		/* Page is mapped: physical address = frame base | page offset. */
		pageFN = pte_pfn(*pte);
		pa = (pageFN << PAGE_SHIFT) | (va & (PAGE_SIZE - 1));
		return pa;
	}

	/* Page is swapped out: the upper bits of the PTE hold the swap ID. */
	return pte_val(*pte) >> 32;
}
Example #9
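/*
 * Map one sh64 I/O page: translate @va to @pa, allocating any missing
 * PUD/PMD/PTE tables with sh64_get_page() before writing the final PTE
 * and flushing the TLB for the page.
 */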
static void shmedia_mapioaddr(unsigned long pa, unsigned long va,
			      unsigned long flags)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep, pte;
	pgprot_t prot;

	pr_debug("shmedia_mapiopage pa %08lx va %08lx\n",  pa, va);

	if (!flags)
		flags = 1; /* 1 = CB0-1 device */

	pgdp = pgd_offset_k(va);
	if (pgd_none(*pgdp) || !pgd_present(*pgdp)) {
		pudp = (pud_t *)sh64_get_page();
		set_pgd(pgdp, __pgd((unsigned long)pudp | _KERNPG_TABLE));
	}

	pudp = pud_offset(pgdp, va);
	if (pud_none(*pudp) || !pud_present(*pudp)) {
		pmdp = (pmd_t *)sh64_get_page();
		set_pud(pudp, __pud((unsigned long)pmdp | _KERNPG_TABLE));
	}

	pmdp = pmd_offset(pudp, va);
	if (pmd_none(*pmdp) || !pmd_present(*pmdp)) {
		ptep = (pte_t *)sh64_get_page();
		set_pmd(pmdp, __pmd((unsigned long)ptep + _PAGE_TABLE));
	}

	prot = __pgprot(_PAGE_PRESENT | _PAGE_READ     | _PAGE_WRITE  |
			_PAGE_DIRTY   | _PAGE_ACCESSED | _PAGE_SHARED | flags);

	pte = pfn_pte(pa >> PAGE_SHIFT, prot);
	ptep = pte_offset_kernel(pmdp, va);

	if (!pte_none(*ptep) &&
	    pte_val(*ptep) != pte_val(pte))
		pte_ERROR(*ptep);

	set_pte(ptep, pte);

	flush_tlb_kernel_range(va, PAGE_SIZE);
}
Example #10
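/*
 * Allocate (or find) the PTE backing a huge mapping at @addr, then tag the
 * top-level entry with the huge-page size bits so the hardware walker
 * treats the mapping as huge.
 */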
pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset(mm, addr);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	pte = pte_alloc_map(mm, NULL, pmd, addr);
	pgd->pgd &= ~_PAGE_SZ_MASK;
	pgd->pgd |= _PAGE_SZHUGE;

	return pte;
}
Example #11
static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
                          phys_addr_t addr, const pte_t *new_pte, bool iomap)
{
    pgd_t *pgd;
    pud_t *pud;
    pmd_t *pmd;
    pte_t *pte, old_pte;

    /* Create 2nd stage page table mapping - Level 1 */
    pgd = kvm->arch.pgd + pgd_index(addr);
    pud = pud_offset(pgd, addr);
    if (pud_none(*pud)) {
        if (!cache)
            return 0; /* ignore calls from kvm_set_spte_hva */
        pmd = mmu_memory_cache_alloc(cache);
        pud_populate(NULL, pud, pmd);
        get_page(virt_to_page(pud));
    }

    pmd = pmd_offset(pud, addr);

    /* Create 2nd stage page table mapping - Level 2 */
    if (pmd_none(*pmd)) {
        if (!cache)
            return 0; /* ignore calls from kvm_set_spte_hva */
        pte = mmu_memory_cache_alloc(cache);
        kvm_clean_pte(pte);
        pmd_populate_kernel(NULL, pmd, pte);
        get_page(virt_to_page(pmd));
    }

    pte = pte_offset_kernel(pmd, addr);

    if (iomap && pte_present(*pte))
        return -EFAULT;

    /* Create 2nd stage page table mapping - Level 3 */
    old_pte = *pte;
    kvm_set_pte(pte, *new_pte);
    if (pte_present(old_pte))
        kvm_tlb_flush_vmid_ipa(kvm, addr);
    else
        get_page(virt_to_page(pte));

    return 0;
}
Example #12
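/*
 * Install an early kernel mapping ea -> pa at PAGE, PMD, or PUD
 * granularity, allocating any missing intermediate tables with
 * early_alloc_pgtable() before setting the final PTE.
 */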
static int early_map_kernel_page(unsigned long ea, unsigned long pa,
			  pgprot_t flags,
			  unsigned int map_page_size,
			  int nid,
			  unsigned long region_start, unsigned long region_end)
{
	unsigned long pfn = pa >> PAGE_SHIFT;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_k(ea);
	if (pgd_none(*pgdp)) {
		pudp = early_alloc_pgtable(PUD_TABLE_SIZE, nid,
						region_start, region_end);
		pgd_populate(&init_mm, pgdp, pudp);
	}
	pudp = pud_offset(pgdp, ea);
	if (map_page_size == PUD_SIZE) {
		ptep = (pte_t *)pudp;
		goto set_the_pte;
	}
	if (pud_none(*pudp)) {
		pmdp = early_alloc_pgtable(PMD_TABLE_SIZE, nid,
						region_start, region_end);
		pud_populate(&init_mm, pudp, pmdp);
	}
	pmdp = pmd_offset(pudp, ea);
	if (map_page_size == PMD_SIZE) {
		ptep = pmdp_ptep(pmdp);
		goto set_the_pte;
	}
	if (!pmd_present(*pmdp)) {
		ptep = early_alloc_pgtable(PAGE_SIZE, nid,
						region_start, region_end);
		pmd_populate_kernel(&init_mm, pmdp, ptep);
	}
	ptep = pte_offset_kernel(pmdp, ea);

set_the_pte:
	set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
	smp_wmb();
	return 0;
}
Example #13
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
 * mask the size back to 1MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmdp;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmdp = pmd_offset(pud, addr);
	do {
		pmd_t pmd = *pmdp;

		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the kvm sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.kvm_seq++;

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PMD_SIZE;
		pmdp += 2;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
		__check_kvm_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}
Example #14
/*
 * NOTE: The pagetables are allocated contiguous on the physical space
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init page_table_range_init(unsigned long start,
					 unsigned long end, pgd_t *pgd_base)
{
	pgd_t *pgd;
	int pgd_idx;
	unsigned long vaddr;

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		pmd_t *pmd = pmd_offset(pud_offset(pgd, vaddr), vaddr);
		if (pmd_none(*pmd))
			assign_pte(pmd, alloc_pte());
		vaddr += PMD_SIZE;
	}
}
Example #15
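/*
 * Wire up one kexec control mapping: point the PGD (PAE only) and PMD
 * entries at the supplied tables if they are not yet present, then map
 * @vaddr to @paddr with kernel-executable permissions.
 */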
static void machine_kexec_page_table_set_one(
	pgd_t *pgd, pmd_t *pmd, pte_t *pte,
	unsigned long vaddr, unsigned long paddr)
{
	pud_t *pud;

	pgd += pgd_index(vaddr);
#ifdef CONFIG_X86_PAE
	if (!(pgd_val(*pgd) & _PAGE_PRESENT))
		set_pgd(pgd, __pgd(__pa(pmd) | _PAGE_PRESENT));
#endif
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	if (!(pmd_val(*pmd) & _PAGE_PRESENT))
		set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
	pte = pte_offset_kernel(pmd, vaddr);
	set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
}
Example #16
/*
 * The VSYSCALL page is the only user-accessible page in the kernel address
 * range.  Normally, the kernel page tables can have _PAGE_USER clear, but
 * the tables covering VSYSCALL_ADDR need _PAGE_USER set if vsyscalls
 * are enabled.
 *
 * Some day we may create a "minimal" vsyscall mode in which we emulate
 * vsyscalls but leave the page not present.  If so, we skip calling
 * this.
 */
void __init set_vsyscall_pgtable_user_bits(pgd_t *root)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset_pgd(root, VSYSCALL_ADDR);
	set_pgd(pgd, __pgd(pgd_val(*pgd) | _PAGE_USER));
	p4d = p4d_offset(pgd, VSYSCALL_ADDR);
#if CONFIG_PGTABLE_LEVELS >= 5
	set_p4d(p4d, __p4d(p4d_val(*p4d) | _PAGE_USER));
#endif
	pud = pud_offset(p4d, VSYSCALL_ADDR);
	set_pud(pud, __pud(pud_val(*pud) | _PAGE_USER));
	pmd = pmd_offset(pud, VSYSCALL_ADDR);
	set_pmd(pmd, __pmd(pmd_val(*pmd) | _PAGE_USER));
}
Example #17
// translate the gva to gpa in VM
u64 trans_addr(void *mem, u64 addr, u64 cr3)
{
    u64 pgd_gva;
    u64 pud_gva;
    u64 pmd_gva;
    u64 pte_gva;

    pgd_t *pgd_hva;
    pud_t *pud_hva;
    pmd_t *pmd_hva;
    pte_t *pte_hva;

    // Page Global Directory
    pgd_gva = pgd_offset(cr3, addr);
    //printf("pgd_gva: 0x%016lx\n", pgd_gva);
    pgd_hva = (pgd_t *)(mem+virt_to_phys(pgd_gva));
    //printf("pgd_hva: 0x%016lx\n", (unsigned long)pgd_hva);

    // Page Upper Directory
    pud_gva = pud_offset(pgd_hva, addr);
    //printf("pud_gva: 0x%016lx\n", pud_gva);
    pud_hva = (pud_t *)(mem+virt_to_phys(pud_gva));
    //printf("pud_hva: 0x%016lx\n", (unsigned long)pud_hva->pud);

    // Page Middle Directory
    pmd_gva = pmd_offset(pud_hva, addr);
    //printf("pmd_gva: 0x%016lx\n", pmd_gva);
    pmd_hva = (pmd_t *)(mem+virt_to_phys(pmd_gva));
    //printf("pmd_hva: 0x%016lx\n", (unsigned long)pmd_hva->pmd);

    // Page Table Entry
    pte_gva = pte_offset_kernel(pmd_hva, addr);
    //printf("pte_gva: 0x%016lx\n", pte_gva);
    pte_hva = (pte_t *)(mem+virt_to_phys(pte_gva));
    //printf("pte_hva: 0x%016lx\n", (unsigned long)pte_hva->pte);

    u64 page_mask = page_level_mask(1);
    u64 phys_addr = pte_pfn(pte_hva) << PAGE_SHIFT;
    u64 offset    = addr & ~page_mask;

    //printf("Physical Address: 0x%016lx\n", (unsigned long)(phys_addr|offset));

    return (unsigned long)(phys_addr|offset);
}
Example #18
/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/head.S.
 * The parameters are pointers to where to stick the starting and ending
 * addresses  of available kernel virtual memory.
 */
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};

	/* allocate some pages for kernel housekeeping tasks */
	empty_bad_page_table	= (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
	empty_bad_page		= (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
	empty_zero_page		= (unsigned long) alloc_bootmem_pages(PAGE_SIZE);

	memset((void *) empty_zero_page, 0, PAGE_SIZE);

#ifdef CONFIG_HIGHMEM
	if (num_physpages - num_mappedpages) {
		pgd_t *pge;
		pud_t *pue;
		pmd_t *pme;

		pkmap_page_table = alloc_bootmem_pages(PAGE_SIZE);

		memset(pkmap_page_table, 0, PAGE_SIZE);

		pge = swapper_pg_dir + pgd_index_k(PKMAP_BASE);
		pue = pud_offset(pge, PKMAP_BASE);
		pme = pmd_offset(pue, PKMAP_BASE);
		__set_pmd(pme, virt_to_phys(pkmap_page_table) | _PAGE_TABLE);
	}
#endif

	/* distribute the allocatable pages across the various zones and pass them to the allocator
	 */
	zones_size[ZONE_DMA]     = max_low_pfn - min_low_pfn;
	zones_size[ZONE_NORMAL]  = 0;
#ifdef CONFIG_HIGHMEM
	zones_size[ZONE_HIGHMEM] = num_physpages - num_mappedpages;
#endif

	free_area_init(zones_size);

#ifdef CONFIG_MMU
	/* initialise init's MMU context */
	init_new_context(&init_task, &init_mm);
#endif

} /* end paging_init() */
Example #19
/*
 * map_kernel_page currently only called by __ioremap
 * map_kernel_page adds an entry to the ioremap page table
 * and adds an entry to the HPT, possibly bolting it
 */
int __ref map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	BUILD_BUG_ON(TASK_SIZE_USER64 > PGTABLE_RANGE);
	if (slab_is_available()) {
		pgdp = pgd_offset_k(ea);
		pudp = pud_alloc(&init_mm, pgdp, ea);
		if (!pudp)
			return -ENOMEM;
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
	} else {
		pgdp = pgd_offset_k(ea);
#ifndef __PAGETABLE_PUD_FOLDED
		if (pgd_none(*pgdp)) {
			pudp = early_alloc_pgtable(PUD_TABLE_SIZE);
			pgd_populate(&init_mm, pgdp, pudp);
		}
#endif /* !__PAGETABLE_PUD_FOLDED */
		pudp = pud_offset(pgdp, ea);
		if (pud_none(*pudp)) {
			pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
			pud_populate(&init_mm, pudp, pmdp);
		}
		pmdp = pmd_offset(pudp, ea);
		if (!pmd_present(*pmdp)) {
			ptep = early_alloc_pgtable(PAGE_SIZE);
			pmd_populate_kernel(&init_mm, pmdp, ptep);
		}
		ptep = pte_offset_kernel(pmdp, ea);
	}
	set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot));

	smp_wmb();
	return 0;
}
Example #20
/**
 * get_struct_page - Gets a struct page for a particular address
 * @addr: the address of the page we need
 *
 * Two versions of this function have to be provided for working
 * between the 2.4 and 2.5 kernels. Rather than littering the
 * function with #defines, there are just two separate copies.
 * Look at the one that is relevant to the kernel you're using
 */
struct page *get_struct_page(unsigned long addr)
{
	struct mm_struct *mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;
	unsigned long pfn;
	struct page *page=NULL;

	mm = current->mm;
	/* Is this possible? */
	if (!mm) return NULL;

	spin_lock(&mm->page_table_lock);

	pgd = pgd_offset(mm, addr);
	if (!pgd_none(*pgd) && !pgd_bad(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud) && !pud_bad(*pud)) {
			pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd) && !pmd_bad(*pmd)) {
				/*
				 * Disable preemption because of potential kmap().
				 * page_table_lock should already have disabled
				 * preemption, but be paranoid.
				 */
				preempt_disable();
				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;
				pte_unmap(ptep);
				preempt_enable();
				if (pte_present(pte)) {
					pfn = pte_pfn(pte);
					if (pfn_valid(pfn))
						page = pte_page(pte);
				}
			}
		}
	}

	spin_unlock(&mm->page_table_lock);
	return page;
}
Example #21
/*
 * We can receive a page fault from a migrating PTE at any time.
 * Handle it by just waiting until the fault resolves.
 *
 * It's also possible to get a migrating kernel PTE that resolves
 * itself during the downcall from hypervisor to Linux.  We just check
 * here to see if the PTE seems valid, and if so we retry it.
 *
 * NOTE! We MUST NOT take any locks for this case.  We may be in an
 * interrupt or a critical region, and must do as little as possible.
 * Similarly, we can't use atomic ops here, since we may be handling a
 * fault caused by an atomic op access.
 *
 * If we find a migrating PTE while we're in an NMI context, and we're
 * at a PC that has a registered exception handler, we don't wait,
 * since this thread may (e.g.) have been interrupted while migrating
 * its own stack, which would then cause us to self-deadlock.
 */
static int handle_migrating_pte(pgd_t *pgd, int fault_num,
				unsigned long address, unsigned long pc,
				int is_kernel_mode, int write)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pteval;

	if (pgd_addr_invalid(address))
		return 0;

	pgd += pgd_index(address);
	pud = pud_offset(pgd, address);
	if (!pud || !pud_present(*pud))
		return 0;
	pmd = pmd_offset(pud, address);
	if (!pmd || !pmd_present(*pmd))
		return 0;
	pte = pmd_huge_page(*pmd) ? ((pte_t *)pmd) :
		pte_offset_kernel(pmd, address);
	pteval = *pte;
	if (pte_migrating(pteval)) {
		if (in_nmi() && search_exception_tables(pc))
			return 0;
		wait_for_migration(pte);
		return 1;
	}

	if (!is_kernel_mode || !pte_present(pteval))
		return 0;
	if (fault_num == INT_ITLB_MISS) {
		if (pte_exec(pteval))
			return 1;
	} else if (write) {
		if (pte_write(pteval))
			return 1;
	} else {
		if (pte_read(pteval))
			return 1;
	}

	return 0;
}
Example #22
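/*
 * Register the static MTK I/O mappings, then mark the entry covering
 * NS_GIC_CPU_BASE as non-secure at either table or section level and
 * flush it.
 */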
void __init mt_map_io(void)
{
    pgd_t *pgd;
    pud_t *pud;
    pmd_t *pmd;

    iotable_init(mt_io_desc, ARRAY_SIZE(mt_io_desc));

    /* set NS=1 for NS_GIC_CPU_BASE */
    pgd = pgd_offset(&init_mm, NS_GIC_CPU_BASE);
    pud = pud_offset(pgd, NS_GIC_CPU_BASE);
    pmd = pmd_offset(pud, NS_GIC_CPU_BASE);
    if ((pmd_val(*pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE) {
        __raw_writel(__raw_readl(pmd) | PMD_TABLE_NS, pmd);
    } else {
        __raw_writel(__raw_readl(pmd) | PMD_SECT_NS, pmd);
    }
    flush_pmd_entry(pmd);
}
Example #23
static int get_gate_page(struct mm_struct *mm, unsigned long address,
		unsigned int gup_flags, struct vm_area_struct **vma,
		struct page **page)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret = -EFAULT;

	/* user gate pages are read-only */
	if (gup_flags & FOLL_WRITE)
		return -EFAULT;
	if (address > TASK_SIZE)
		pgd = pgd_offset_k(address);
	else
		pgd = pgd_offset_gate(mm, address);
	BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, address);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return -EFAULT;
	VM_BUG_ON(pmd_trans_huge(*pmd));
	pte = pte_offset_map(pmd, address);
	if (pte_none(*pte))
		goto unmap;
	*vma = get_gate_vma(mm);
	if (!page)
		goto out;
	*page = vm_normal_page(*vma, address, *pte);
	if (!*page) {
		if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
			goto unmap;
		*page = pte_page(*pte);
	}
	get_page(*page);
out:
	ret = 0;
unmap:
	pte_unmap(pte);
	return ret;
}
Example #24
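/*
 * Return the PTE describing the huge mapping at @addr by walking
 * pgd/pud/pmd, or NULL if the walk cannot be completed.
 */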
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	if (pgd) {
		pud = pud_offset(pgd, addr);
		if (pud) {
			pmd = pmd_offset(pud, addr);
			if (pmd)
				pte = pte_offset_map(pmd, addr);
		}
	}

	return pte;
}
Example #25
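/*
 * Generic page-walk helper: iterate the PUDs under @p4d for [addr, end),
 * reporting holes through walk->pte_hole, handing huge PUDs to
 * walk->pud_entry, and descending into walk_pmd_range() otherwise.
 */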
static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pud_t *pud;
	unsigned long next;
	int err = 0;

	pud = pud_offset(p4d, addr);
	do {
 again:
		next = pud_addr_end(addr, end);
		if (pud_none(*pud) || !walk->vma) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}

		if (walk->pud_entry) {
			spinlock_t *ptl = pud_trans_huge_lock(pud, walk->vma);

			if (ptl) {
				err = walk->pud_entry(pud, addr, next, walk);
				spin_unlock(ptl);
				if (err)
					break;
				continue;
			}
		}

		split_huge_pud(walk->vma, pud, addr);
		if (pud_none(*pud))
			goto again;

		if (walk->pmd_entry || walk->pte_entry)
			err = walk_pmd_range(pud, addr, next, walk);
		if (err)
			break;
	} while (pud++, addr = next, addr != end);

	return err;
}
Example #26
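/*
 * KASAN shadow setup: make sure @p4d points at a PUD table, then populate
 * every non-huge PUD in [addr, end) through kasan_populate_pud().
 */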
static void __init kasan_populate_p4d(p4d_t *p4d, unsigned long addr,
				      unsigned long end, int nid)
{
	pud_t *pud;
	unsigned long next;

	if (p4d_none(*p4d)) {
		void *p = early_alloc(PAGE_SIZE, nid, true);

		p4d_populate(&init_mm, p4d, p);
	}

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (!pud_large(*pud))
			kasan_populate_pud(pud, addr, next, nid);
	} while (pud++, addr = next, addr != end);
}
Example #27
int map_page(unsigned long va, phys_addr_t pa, int flags)
{
	pmd_t *pd;
	pte_t *pg;
	int err = -ENOMEM;

	/* Use upper 10 bits of VA to index the first level map */
	pd = pmd_offset(pud_offset(pgd_offset_k(va), va), va);
	/* Use middle 10 bits of VA to index the second-level map */
	pg = pte_alloc_kernel(pd, va);
	if (pg != 0) {
		err = 0;
		/* The PTE should never be already set nor present in the
		 * hash table
		 */
		BUG_ON(pte_val(*pg) & (_PAGE_PRESENT | _PAGE_HASHPTE));
		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
						     __pgprot(flags)));
	}

	return err;
}
Example #28
File: gup.c Project: 1800alex/linux
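/*
 * Fast-GUP helper: walk the PUDs below @pgd for [addr, end); bail out
 * (return 0) on an empty entry or when gup_pmd_range() fails, otherwise
 * return 1.
 */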
static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
			int write, struct page **pages, int *nr)
{
	unsigned long next;
	pud_t *pudp;

	pudp = pud_offset(&pgd, addr);
	do {
		pud_t pud = *pudp;

		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			return 0;
		if (!gup_pmd_range(pud, addr, next, write, pages, nr))
			return 0;
	} while (pudp++, addr = next, addr != end);

	return 1;
}
Example #29
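/*
 * Compute the physical resume address by walking the page tables rooted at
 * the L1 physical page table base for cpu_resume, store it in resume_addr,
 * then enter suspend-to-RAM.
 */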
static void nds32_suspend2ram(void)
{
	pgd_t *pgdv;
	pud_t *pudv;
	pmd_t *pmdv;
	pte_t *ptev;

	pgdv = (pgd_t *)__va((__nds32__mfsr(NDS32_SR_L1_PPTB) &
		L1_PPTB_mskBASE)) + pgd_index((unsigned int)cpu_resume);

	pudv = pud_offset(pgdv, (unsigned int)cpu_resume);
	pmdv = pmd_offset(pudv, (unsigned int)cpu_resume);
	ptev = pte_offset_map(pmdv, (unsigned int)cpu_resume);

	resume_addr = ((*ptev) & TLB_DATA_mskPPN)
			| ((unsigned int)cpu_resume & 0x00000fff);

	suspend2ram();
}
Example #30
void __init pagetable_init(void)
{
	unsigned long vaddr;
	pgd_t *pgd_base;
#ifdef CONFIG_HIGHMEM
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
#endif

	/* Initialize the entire pgd.  */
	pgd_init((unsigned long)swapper_pg_dir);
	pgd_init((unsigned long)swapper_pg_dir
		 + sizeof(pgd_t) * USER_PTRS_PER_PGD);

	pgd_base = swapper_pg_dir;

	/*
	 * Fixed mappings:
	 */
#ifdef CONFIG_BCM53000_HIGHMEM
	vaddr = __fix_to_virt(VALIAS_IDX(__end_of_fixed_addresses - 1)) & PMD_MASK;
#else
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
#endif
	fixrange_init(vaddr, 0, pgd_base);

#ifdef CONFIG_HIGHMEM
	/*
	 * Permanent kmaps:
	 */
	vaddr = PKMAP_BASE;
	fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	pgd = swapper_pg_dir + __pgd_offset(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
#endif
}