Code example #1
File: mmu.c Project: 0xheart0/linux
static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
			      phys_addr_t addr, phys_addr_t end)
{
	pmd_t *pmd;
	phys_addr_t next;

	pmd = pmd_offset(pud, addr);
	do {
		next = kvm_pmd_addr_end(addr, end);
		if (!pmd_none(*pmd)) {
			if (kvm_pmd_huge(*pmd)) {
				hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
				kvm_flush_dcache_to_poc((void*)hva, PMD_SIZE);
			} else {
				stage2_flush_ptes(kvm, pmd, addr, next);
			}
		}
	} while (pmd++, addr = next, addr != end);
}
Code example #2
static void shmedia_mapioaddr(unsigned long pa, unsigned long va,
			      unsigned long flags)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep, pte;
	pgprot_t prot;

	pr_debug("shmedia_mapiopage pa %08lx va %08lx\n",  pa, va);

	if (!flags)
		flags = 1; /* 1 = CB0-1 device */

	pgdp = pgd_offset_k(va);
	if (pgd_none(*pgdp) || !pgd_present(*pgdp)) {
		pudp = (pud_t *)sh64_get_page();
		set_pgd(pgdp, __pgd((unsigned long)pudp | _KERNPG_TABLE));
	}

	pudp = pud_offset(pgdp, va);
	if (pud_none(*pudp) || !pud_present(*pudp)) {
		pmdp = (pmd_t *)sh64_get_page();
		set_pud(pudp, __pud((unsigned long)pmdp | _KERNPG_TABLE));
	}

	pmdp = pmd_offset(pudp, va);
	if (pmd_none(*pmdp) || !pmd_present(*pmdp)) {
		ptep = (pte_t *)sh64_get_page();
		set_pmd(pmdp, __pmd((unsigned long)ptep + _PAGE_TABLE));
	}

	prot = __pgprot(_PAGE_PRESENT | _PAGE_READ     | _PAGE_WRITE  |
			_PAGE_DIRTY   | _PAGE_ACCESSED | _PAGE_SHARED | flags);

	pte = pfn_pte(pa >> PAGE_SHIFT, prot);
	ptep = pte_offset_kernel(pmdp, va);

	if (!pte_none(*ptep) &&
	    pte_val(*ptep) != pte_val(pte))
		pte_ERROR(*ptep);

	set_pte(ptep, pte);

	flush_tlb_kernel_range(va, va + PAGE_SIZE);	/* second argument is the end address, not a length */
}
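For context, a hypothetical caller of the routine above, mapping a single 4 KiB device page (the physical and virtual addresses are illustrative, not from a real board):

static void __init map_demo_ioaddr(void)
{
	/* flags == 0 falls back to the CB0-1 (uncached device) attribute */
	shmedia_mapioaddr(0x18000000UL, 0xfa000000UL, 0);
}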
Code example #3
File: mmu.c Project: monojo/xu3
static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
			  phys_addr_t addr, const pte_t *new_pte, bool iomap)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte, old_pte;

	/* Create 2nd stage page table mapping - Level 1 */
	pgd = kvm->arch.pgd + pgd_index(addr);
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) {
		if (!cache)
			return 0; /* ignore calls from kvm_set_spte_hva */
		pmd = mmu_memory_cache_alloc(cache);
		pud_populate(NULL, pud, pmd);
		get_page(virt_to_page(pud));
	}

	pmd = pmd_offset(pud, addr);

	/* Create 2nd stage page table mapping - Level 2 */
	if (pmd_none(*pmd)) {
		if (!cache)
			return 0; /* ignore calls from kvm_set_spte_hva */
		pte = mmu_memory_cache_alloc(cache);
		kvm_clean_pte(pte);
		pmd_populate_kernel(NULL, pmd, pte);
		get_page(virt_to_page(pmd));
	}

	pte = pte_offset_kernel(pmd, addr);

	if (iomap && pte_present(*pte))
		return -EFAULT;

	/* Create 2nd stage page table mapping - Level 3 */
	old_pte = *pte;
	kvm_set_pte(pte, *new_pte);
	if (pte_present(old_pte))
		kvm_tlb_flush_vmid_ipa(kvm, addr);
	else
		get_page(virt_to_page(pte));

	return 0;
}
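For orientation, the callers in this file (e.g. user_mem_abort()) top up the page-table object cache before taking the mmu_lock and only then call stage2_set_pte(). A condensed sketch; map_one_ipa() is a made-up name, and mmu_topup_memory_cache()/KVM_NR_MEM_OBJS are assumed from the same tree:

static int map_one_ipa(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
		       phys_addr_t addr, pte_t new_pte)
{
	int ret;

	/* Reserve enough objects for a pmd table and a pte table. */
	ret = mmu_topup_memory_cache(cache, 2, KVM_NR_MEM_OBJS);
	if (ret)
		return ret;

	spin_lock(&kvm->mmu_lock);
	ret = stage2_set_pte(kvm, cache, addr, &new_pte, false);
	spin_unlock(&kvm->mmu_lock);
	return ret;
}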
Code example #4
File: mmu.c Project: TheGalaxyProject/tgpkernel-s7-o
static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
			      phys_addr_t addr, phys_addr_t end)
{
	pmd_t *pmd;
	phys_addr_t next;

	pmd = pmd_offset(pud, addr);
	do {
		next = kvm_pmd_addr_end(addr, end);
		if (!pmd_none(*pmd)) {
			if (kvm_pmd_huge(*pmd))
				kvm_flush_dcache_pmd(*pmd);
			else
				stage2_flush_ptes(kvm, pmd, addr, next);
		}
	} while (pmd++, addr = next, addr != end);
}
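The pte-level helper it calls is not shown; a sketch modelled on the mainline ARM KVM code of the same era (kvm_flush_dcache_pte() is assumed from that tree):

static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
			      phys_addr_t addr, phys_addr_t end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		/* Flush each mapped pte back to the point of coherency. */
		if (!pte_none(*pte))
			kvm_flush_dcache_pte(*pte);
	} while (pte++, addr += PAGE_SIZE, addr != end);
}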
Code example #5
File: pageattr.c Project: 1x23/unifi-gpl
pte_t *lookup_address(unsigned long address) 
{ 
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;
	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;
	if (pmd_large(*pmd))
		return (pte_t *)pmd;
	return pte_offset_kernel(pmd, address);
}
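A minimal usage sketch, assuming this early single-argument form of lookup_address() (for a large page the returned pointer actually refers to the pmd entry):

static void report_mapping(unsigned long address)
{
	pte_t *pte = lookup_address(address);

	if (!pte)
		printk(KERN_INFO "%lx: not mapped\n", address);
	else
		printk(KERN_INFO "%lx: entry %016lx\n", address, pte_val(*pte));
}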
Code example #6
File: ioremap.c Project: Dzenik/kernel-source
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
 * mask the size back to 1MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmdp;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmdp = pmd_offset(pud, addr);
	do {
		pmd_t pmd = *pmdp;

		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the kvm sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.kvm_seq++;

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PMD_SIZE;
		pmdp += 2;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
		__check_kvm_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}
Code example #7
/*
 * Initialise the consistent memory allocation.
 */
static int __init consistent_init(void)
{
	int ret = 0;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i = 0;
	u32 base = CONSISTENT_BASE;

	return 0;	/* table setup is disabled in this tree; the original body is kept under #if 0 below */

#if 0
	do {
		pgd = pgd_offset(&init_mm, base);

		pud = pud_alloc(&init_mm, pgd, base);
		if (!pud) {
			printk(KERN_ERR "%s: no pud tables\n", __func__);
			ret = -ENOMEM;
			break;
		}

		pmd = pmd_alloc(&init_mm, pud, base);
		if (!pmd) {
			printk(KERN_ERR "%s: no pmd tables\n", __func__);
			ret = -ENOMEM;
			break;
		}
		WARN_ON(!pmd_none(*pmd));

		pte = pte_alloc_kernel(pmd, base);
		if (!pte) {
			printk(KERN_ERR "%s: no pte tables\n", __func__);
			ret = -ENOMEM;
			break;
		}

		consistent_pte[i++] = pte;
		base += (1 << PGDIR_SHIFT);
	} while (base < CONSISTENT_END);

	return ret;
#endif
}
Code example #8
File: hugetlbpage.c Project: dduval/kernel-rhel4
static hugepte_t *hugepte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pmd_t *pmd = NULL;

	BUG_ON(!in_hugepage_area(mm->context, addr));

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		return NULL;

	pmd = pmd_offset(pgd, addr);

	/* We shouldn't find a (normal) PTE page pointer here */
	BUG_ON(!pmd_none(*pmd) && !pmd_hugepage(*pmd));

	return (hugepte_t *)pmd;
}
Code example #9
File: init.c Project: AllenWeb/linux
/*
 * NOTE: The pagetables are allocated contiguous on the physical space
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init page_table_range_init(unsigned long start,
					 unsigned long end, pgd_t *pgd_base)
{
	pgd_t *pgd;
	int pgd_idx;
	unsigned long vaddr;

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		pmd_t *pmd = pmd_offset(pud_offset(pgd, vaddr), vaddr);
		if (pmd_none(*pmd))
			assign_pte(pmd, alloc_pte());
		vaddr += PMD_SIZE;
	}
}
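A boot-time caller hands it a virtual range plus the kernel pgd; an illustrative sketch in the style of the fixmap setup done by several ports:

static void __init fixmap_tables_init(void)
{
	/* Pre-allocate pte pages covering the fixmap window. */
	page_table_range_init(FIXADDR_START, FIXADDR_TOP, swapper_pg_dir);
}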
Code example #10
File: pagetable.c Project: baozich/scripts
/**
 * get_struct_page - Gets a struct page for a particular address
 * @address - the address of the page we need
 *
 * Two versions of this function have to be provided for working
 * between the 2.4 and 2.5 kernels. Rather than littering the
 * function with #defines, there are just two separate copies.
 * Look at the one that is relevant to the kernel you're using.
 */
struct page *get_struct_page(unsigned long addr)
{
	struct mm_struct *mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;
	unsigned long pfn;
	struct page *page=NULL;

	mm = current->mm;
	/* Is this possible? */
	if (!mm) return NULL;

	spin_lock(&mm->page_table_lock);

	pgd = pgd_offset(mm, addr);
	if (!pgd_none(*pgd) && !pgd_bad(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud) && !pud_bad(*pud)) {
			pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd) && !pmd_bad(*pmd)) {
				/*
				 * Disable preemption because of potential kmap().
				 * page_table_lock should already have disabled
				 * preemption.  But, be paranoid.
				 */
				preempt_disable();
				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;
				pte_unmap(ptep);
				preempt_enable();
				if (pte_present(pte)) {
					pfn = pte_pfn(pte);
					if (pfn_valid(pfn))
						page = pte_page(pte);
				}
			}
		}
	}

	spin_unlock(&mm->page_table_lock);
	return page;
}
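A sketch of one way the result might be consumed, translating a user address of the current task into a page frame number (uaddr_to_pfn is a made-up name):

static unsigned long uaddr_to_pfn(unsigned long addr)
{
	struct page *page = get_struct_page(addr);

	return page ? page_to_pfn(page) : 0;
}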
Code example #11
File: pagewalk.c Project: 285452612/ali_kernel
static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pmd_t *pmd;
	unsigned long next;
	int err = 0;

	pmd = pmd_offset(pud, addr);
	do {
again:
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}
		/*
		 * This implies that each ->pmd_entry() handler
		 * needs to know about pmd_trans_huge() pmds
		 */
		if (walk->pmd_entry)
			err = walk->pmd_entry(pmd, addr, next, walk);
		if (err)
			break;

		/*
		 * Check this here so we only break down trans_huge
		 * pages when we _need_ to
		 */
		if (!walk->pte_entry)
			continue;

		split_huge_page_pmd(walk->mm, pmd);
		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
			goto again;
		err = walk_pte_range(pmd, addr, next, walk);
		if (err)
			break;
	} while (pmd++, addr = next, addr != end);

	return err;
}
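walk_pmd_range() is driven by walk_page_range() through a struct mm_walk. A minimal sketch for a kernel of this vintage, counting the present ptes in a range (the callback is illustrative):

static int count_pte(pte_t *pte, unsigned long addr,
		     unsigned long next, struct mm_walk *walk)
{
	if (pte_present(*pte))
		(*(unsigned long *)walk->private)++;
	return 0;
}

static unsigned long count_present_ptes(struct mm_struct *mm,
					unsigned long start, unsigned long end)
{
	unsigned long n = 0;
	struct mm_walk walk = {
		.pte_entry	= count_pte,
		.mm		= mm,
		.private	= &n,
	};

	down_read(&mm->mmap_sem);
	walk_page_range(start, end, &walk);
	up_read(&mm->mmap_sem);
	return n;
}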
Code example #12
/*
 * First Level Translation Fault Handler
 *
 * We enter here because the first level page table doesn't contain
 * a valid entry for the address.
 *
 * If the address is in kernel space (>= TASK_SIZE), then we are
 * probably faulting in the vmalloc() area.
 *
 * If the init_task's first level page tables contain the relevant
 * entry, we copy it to this task.  If not, we send the process
 * a signal, fixup the exception, or oops the kernel.
 *
 * NOTE! We MUST NOT take any locks for this case. We may be in an
 * interrupt or a critical region, and should only copy the information
 * from the master page table, nothing more.
 */
int do_translation_fault(unsigned long addr, unsigned int fsr,
			 struct pt_regs *regs)
{
	struct task_struct *tsk;
	int offset;
	pgd_t *pgd, *pgd_k;
	pmd_t *pmd, *pmd_k;

	if (addr < TASK_SIZE)
		return do_page_fault(addr, fsr, regs);

	offset = __pgd_offset(addr);

	/*
	 * FIXME: CP15 C1 is write only on ARMv3 architectures.
	 * You really need to read the value in the page table
	 * register, not a copy.
	 */
	pgd = cpu_get_pgd() + offset;
	pgd_k = init_mm.pgd + offset;

	if (pgd_none(*pgd_k))
		goto bad_area;

#if 0	/* note that we are two-level */
	if (!pgd_present(*pgd))
		set_pgd(pgd, *pgd_k);
#endif

	pmd_k = pmd_offset(pgd_k, addr);
	pmd   = pmd_offset(pgd, addr);

	if (pmd_none(*pmd_k))
		goto bad_area;

	set_pmd(pmd, *pmd_k);
	return 0;

bad_area:
	tsk = current;

	do_bad_area(tsk, tsk->active_mm, addr, fsr, regs);
	return 0;
}
Code example #13
File: mem.c Project: leonsh/eldk30ppc
struct page *kmem_vm_nopage(struct vm_area_struct *vma, unsigned long address, int write)
{
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long kaddr;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *ptep, pte;
	struct page *page = NULL;

	/* address is user VA; convert to kernel VA of desired page */
	kaddr = (address - vma->vm_start) + offset;
	kaddr = VMALLOC_VMADDR(kaddr);

	spin_lock(&init_mm.page_table_lock);

	/* Lookup page structure for kernel VA */
	pgd = pgd_offset(&init_mm, kaddr);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		goto out;
	pmd = pmd_offset(pgd, kaddr);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		goto out;
	ptep = pte_offset(pmd, kaddr);
	if (!ptep)
		goto out;
	pte = *ptep;
	if (!pte_present(pte))
		goto out;
	if (write && !pte_write(pte))
		goto out;
	page = pte_page(pte);
	if (!VALID_PAGE(page)) {
		page = NULL;
		goto out;
	}

	/* Increment reference count on page */
	get_page(page);

out:
	spin_unlock(&init_mm.page_table_lock);

	return page;
}
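The handler is wired up through a 2.4-style vm_operations_struct when /dev/kmem is mmapped; a minimal sketch (the mmap hook itself is omitted):

static struct vm_operations_struct kmem_vm_ops = {
	nopage:	kmem_vm_nopage,	/* old GNU-style initializer, as 2.4 code used */
};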
Code example #14
File: gup.c Project: LarryShang/linux
static int get_gate_page(struct mm_struct *mm, unsigned long address,
		unsigned int gup_flags, struct vm_area_struct **vma,
		struct page **page)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret = -EFAULT;

	/* user gate pages are read-only */
	if (gup_flags & FOLL_WRITE)
		return -EFAULT;
	if (address > TASK_SIZE)
		pgd = pgd_offset_k(address);
	else
		pgd = pgd_offset_gate(mm, address);
	BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, address);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return -EFAULT;
	VM_BUG_ON(pmd_trans_huge(*pmd));
	pte = pte_offset_map(pmd, address);
	if (pte_none(*pte))
		goto unmap;
	*vma = get_gate_vma(mm);
	if (!page)
		goto out;
	*page = vm_normal_page(*vma, address, *pte);
	if (!*page) {
		if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
			goto unmap;
		*page = pte_page(*pte);
	}
	get_page(*page);
out:
	ret = 0;
unmap:
	pte_unmap(pte);
	return ret;
}
Code example #15
File: fault-common.c Project: robacklin/celinux
/*
 * First Level Translation Fault Handler
 *
 * We enter here because the first level page table doesn't contain
 * a valid entry for the address.
 *
 * If the address is in kernel space (>= TASK_SIZE), then we are
 * probably faulting in the vmalloc() area.
 *
 * If the init_task's first level page tables contain the relevant
 * entry, we copy it to this task.  If not, we send the process
 * a signal, fixup the exception, or oops the kernel.
 *
 * NOTE! We MUST NOT take any locks for this case. We may be in an
 * interrupt or a critical region, and should only copy the information
 * from the master page table, nothing more.
 */
int do_translation_fault(unsigned long addr, int error_code, struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	int offset;
	pgd_t *pgd, *pgd_k;
	pmd_t *pmd, *pmd_k;

	if (addr < TASK_SIZE)
		return do_page_fault(addr, error_code, regs);

	offset = __pgd_offset(addr);

	/*
	 * FIXME: CP15 C1 is write only on ARMv3 architectures.
	 */
	pgd = cpu_get_pgd() + offset;
	pgd_k = init_mm.pgd + offset;

	if (pgd_none(*pgd_k))
		goto bad_area;

#if 0	/* note that we are two-level */
	if (!pgd_present(*pgd))
		set_pgd(pgd, *pgd_k);
#endif

	pmd_k = pmd_offset(pgd_k, addr);
	pmd   = pmd_offset(pgd, addr);

	if (pmd_none(*pmd_k))
		goto bad_area;

	set_pmd(pmd, *pmd_k);
	return 0;

bad_area:
	tsk = current;
	mm  = tsk->active_mm;

	do_bad_area(tsk, mm, addr, error_code, regs);
	return 0;
}
Code example #16
/* Given PGD from the address space's page table, return the kernel
 * virtual mapping of the physical memory mapped at ADR.
 */
static inline unsigned long uvirt_to_kva(pgd_t * pgd, unsigned long adr)
{
	unsigned long ret = 0UL;
	pmd_t * pmd;
	pte_t * ptep, pte;

	if (!pgd_none(*pgd)) {
		pmd = pmd_offset(pgd, adr);
		if (!pmd_none(*pmd)) {
			ptep = pte_offset(pmd, adr);
			pte = *ptep;
			if (pte_present(pte)) {
				ret = (unsigned long) pte_page_address(pte);
				ret |= adr & (PAGE_SIZE - 1);
			}
		}
	}
	return ret;
}
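A 2.4-style caller resolves the pgd itself and holds the page-table lock around the walk; a sketch with an illustrative name:

static unsigned long user_to_kva(struct mm_struct *mm, unsigned long adr)
{
	unsigned long kva;

	spin_lock(&mm->page_table_lock);
	kva = uvirt_to_kva(pgd_offset(mm, adr), adr);
	spin_unlock(&mm->page_table_lock);
	return kva;
}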
Code example #17
File: device.c Project: klquicksall/Galaxy-Nexus-4.2
static u32 hwc_virt_to_phys(u32 arg)
{
	pmd_t *pmd;
	pte_t *ptep;

	pgd_t *pgd = pgd_offset(current->mm, arg);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return 0;

	pmd = pmd_offset(pgd, arg);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;

	ptep = pte_offset_map(pmd, arg);
	if (ptep) {
		pte_t entry = *ptep;

		pte_unmap(ptep);	/* balance pte_offset_map() */
		if (pte_present(entry))
			return (pte_val(entry) & PAGE_MASK) | (arg & ~PAGE_MASK);
	}

	return 0;
}
Code example #18
File: exit.c Project: bingone/fuckOS
int exit_mm(struct mm_struct *mm)
{
	pmd_t *pmd;
	pgd_t *pgd;
	uint32_t pgdno, pmdno;
	physaddr_t pa;

	struct page *page;

	if(!mm || !mm->mm_pgd)
		return 0;

	if(!atomic_dec_and_test(&mm->mm_count))
		return 0;

	delete_all_vma(mm);

	for (pgdno = 0; pgdno < pgd_index(KERNEL_BASE_ADDR); pgdno++) {
		pgd = mm->mm_pgd + pgdno;
		if(!pgd_present(*pgd) || pgd_none(*pgd))
			continue;
		pmd_t* tmp = (pmd_t *)pgd_page_vaddr(*pgd);
		
		for (pmdno = 0; pmdno < PTRS_PER_PMD; pmdno++) {
			pmd = tmp +  pmdno;
			if(!pmd_present(*pmd) || pmd_none(*pmd))
				continue;
			struct page* p = virt2page(pmd_page_vaddr(*pmd));
			page_decref(p);
			pmd_set(pmd,0,0);
		}
		struct page* p = virt2page(pgd_page_vaddr(*pgd));
		page_decref(p);
		pgd_set(pgd,0,0);
	}

	page = virt2page((viraddr_t)mm->mm_pgd);
	page_free(page);
	kfree(mm);

	return 0;
}
Code example #19
File: mmu.c Project: 0x00evil/linux
/**
 * stage2_wp_pmds - write protect the PMD range covered by a PUD entry
 * @pud:	pointer to pud entry
 * @addr:	range start address
 * @end:	range end address
 */
static void stage2_wp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
{
	pmd_t *pmd;
	phys_addr_t next;

	pmd = pmd_offset(pud, addr);

	do {
		next = kvm_pmd_addr_end(addr, end);
		if (!pmd_none(*pmd)) {
			if (kvm_pmd_huge(*pmd)) {
				if (!kvm_s2pmd_readonly(pmd))
					kvm_set_s2pmd_readonly(pmd);
			} else {
				stage2_wp_ptes(pmd, addr, next);
			}
		}
	} while (pmd++, addr = next, addr != end);
}
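The pte-level helper is not part of the snippet; a sketch modelled on the mainline version (kvm_s2pte_readonly()/kvm_set_s2pte_readonly() are assumed from the same tree):

static void stage2_wp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		if (!pte_none(*pte)) {
			if (!kvm_s2pte_readonly(pte))
				kvm_set_s2pte_readonly(pte);
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
}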
Code example #20
static u32 virt2phys(u32 usr)
{
	pmd_t *pmd;
	pte_t *ptep;
	pgd_t *pgd = pgd_offset(current->mm, usr);

	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return 0;

	pmd = pmd_offset(pgd, usr);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;

	ptep = pte_offset_map(pmd, usr);
	if (ptep) {
		pte_t entry = *ptep;

		pte_unmap(ptep);	/* balance pte_offset_map() */
		if (pte_present(entry))
			return (pte_val(entry) & PAGE_MASK) | (usr & ~PAGE_MASK);
	}

	return 0;
}
Code example #21
/*
 * This is useful to dump out the page tables associated with
 * 'addr' in mm 'mm'.
 */
void show_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;

	if (!mm)
		mm = &init_mm;

	printk(KERN_ALERT "pgd = %p\n", mm->pgd);
	pgd = pgd_offset(mm, addr);
	printk(KERN_ALERT "*pgd = %08lx", pgd_val(*pgd));

	do {
		pmd_t *pmd;
		pte_t *pte;

		if (pgd_none(*pgd))
			break;

		if (pgd_bad(*pgd)) {
			printk("(bad)");
			break;
		}

		pmd = pmd_offset(pgd, addr);
		printk(", *pmd = %08lx", pmd_val(*pmd));

		if (pmd_none(*pmd))
			break;

		if (pmd_bad(*pmd)) {
			printk("(bad)");
			break;
		}

		pte = pte_offset(pmd, addr);
		printk(", *pte = %08lx", pte_val(*pte));
#ifdef CONFIG_CPU_32
		printk(", *ppte = %08lx", pte_val(pte[-PTRS_PER_PTE]));
#endif
	} while(0);

	printk("\n");
}
Code example #22
File: gup.c Project: 1800alex/linux
static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
		int write, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_offset(&pud, addr);
	do {
		pmd_t pmd = *pmdp;

		next = pmd_addr_end(addr, end);
		if (pmd_none(pmd))
			return 0;
		if (!gup_pte_range(pmd, addr, next, write, pages, nr))
			return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}
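gup_pte_range() is assumed by the loop above; a simplified software-walk sketch in the same style (real ports also handle special and huge ptes):

static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
		int write, struct page **pages, int *nr)
{
	pte_t *ptep;

	ptep = pte_offset_map(&pmd, addr);
	do {
		pte_t pte = *ptep;
		struct page *page;

		if (!pte_present(pte) || (write && !pte_write(pte))) {
			pte_unmap(ptep);
			return 0;	/* fall back to the slow path */
		}
		page = pte_page(pte);
		get_page(page);
		pages[*nr] = page;
		(*nr)++;
	} while (ptep++, addr += PAGE_SIZE, addr != end);
	pte_unmap(ptep - 1);

	return 1;
}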
Code example #23
File: pagetable.c Project: baozich/scripts
/*
 * Again because of the changes in page table walking, a 2.4 and 2.5
 * version is supplied
 */
inline unsigned long forall_pte_pmd(struct mm_struct *mm, pmd_t *pmd, 
		unsigned long start, unsigned long end, 
		unsigned long *sched_count,
		void *data,
		unsigned long (*func)(pte_t *, unsigned long, void *))
{
	
	pte_t *ptep, pte;
	unsigned long pmd_end;
	unsigned long ret=0;

	if (pmd_none(*pmd)) return 0;

	pmd_end = (start + PMD_SIZE) & PMD_MASK;
	if (end > pmd_end) end = pmd_end;

	do {
		preempt_disable();
		ptep = pte_offset_map(pmd, start);
		pte = *ptep;
		pte_unmap(ptep);
		preempt_enable();

		/* Call the function if a PTE is available */
		if (!pte_none(pte)) {

			/*
			 * Call schedule if necessary
			 *	Can func() block or be preempted?
			 *	It seems the sched_count won't be guaranteed
			 *	accurate.
			 */
			spin_unlock(&mm->page_table_lock);
			check_resched(sched_count);
			ret += func(&pte, start, data);
			spin_lock(&mm->page_table_lock);
		}
		start += PAGE_SIZE;
	} while (start && (start < end));

	return ret;
}
Code example #24
File: exmap.c Project: jbert/exmap
/* Modelled on __follow_page. Except we don't support HUGETLB and we
 * only actually use the pfn or pte, rather than getting hold of the
 * struct page. */
static int walk_page_tables(struct mm_struct *mm,
			    unsigned long address,
			    pte_t *pte_ret)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *ptep;
#ifdef HAVE_PUD_T
	pud_t *pud;
#endif

	// No support for HUGETLB as yet
	//page = follow_huge_addr(mm, address, write);
	//if (! IS_ERR(page))
	//return page;

	pgd = pgd_offset(mm, address);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		goto out;

#ifdef HAVE_PUD_T
	pud = pud_offset(pgd, address);
	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
		goto out;
	
	pmd = pmd_offset(pud, address);
#else
	pmd = pmd_offset(pgd, address);
#endif
	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		goto out;
	ptep = pte_offset_map(pmd, address);
	if (!ptep)
		goto out;

	*pte_ret = *ptep;
	pte_unmap(ptep);

	return 0;
out:
	return -1;
}
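Typical use is to pull the pfn out of the returned pte; a condensed sketch:

static unsigned long addr_to_pfn(struct mm_struct *mm, unsigned long address)
{
	pte_t pte;

	if (walk_page_tables(mm, address, &pte) == 0 && pte_present(pte))
		return pte_pfn(pte);
	return 0;
}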
Code example #25
File: mempolicy.c Project: foxsat-hdr/linux-kernel
/* Ensure all existing pages follow the policy. */
static int
verify_pages(struct mm_struct *mm,
	     unsigned long addr, unsigned long end, unsigned long *nodes)
{
	while (addr < end) {
		struct page *p;
		pte_t *pte;
		pmd_t *pmd;
		pud_t *pud;
		pgd_t *pgd;
		pgd = pgd_offset(mm, addr);
		if (pgd_none(*pgd)) {
			unsigned long next = (addr + PGDIR_SIZE) & PGDIR_MASK;
			if (next <= addr)	/* wrapped past the top of the address space */
				break;
			addr = next;
			continue;
		}
		pud = pud_offset(pgd, addr);
		if (pud_none(*pud)) {
			addr = (addr + PUD_SIZE) & PUD_MASK;
			continue;
		}
		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			addr = (addr + PMD_SIZE) & PMD_MASK;
			continue;
		}
		p = NULL;
		pte = pte_offset_map(pmd, addr);
		if (pte_present(*pte))
			p = pte_page(*pte);
		pte_unmap(pte);
		if (p) {
			unsigned nid = page_to_nid(p);
			if (!test_bit(nid, nodes))
				return -EIO;
		}
		addr += PAGE_SIZE;
	}
	return 0;
}
Code example #26
File: ptrace.c Project: shattered/linux-m68k
/*
 * This routine gets a long from any process space by following the page
 * tables. NOTE! You should check that the long isn't on a page boundary,
 * and that it is in the task area before calling this: this routine does
 * no checking.
 *
 */
static unsigned long get_long(struct task_struct * tsk, 
	struct vm_area_struct * vma, unsigned long addr)
{
	pgd_t * pgdir;
	pmd_t * pgmiddle;
	pte_t * pgtable;
	unsigned long page;

repeat:
	pgdir = pgd_offset(vma->vm_mm, addr);
	if (pgd_none(*pgdir)) {
		do_no_page(tsk, vma, addr, 0);
		goto repeat;
	}
	if (pgd_bad(*pgdir)) {
		printk("ptrace: bad page directory %08lx\n", pgd_val(*pgdir));
		pgd_clear(pgdir);
		return 0;
	}
	pgmiddle = pmd_offset(pgdir,addr);
	if (pmd_none(*pgmiddle)) {
		do_no_page(tsk, vma, addr, 0);
		goto repeat;
	}
	if (pmd_bad(*pgmiddle)) {
		printk("ptrace: bad page middle directory %08lx\n",
		       pmd_val(*pgmiddle));
		pmd_clear(pgmiddle);
		return 0;
	}
	pgtable = pte_offset(pgmiddle, addr);
	if (!pte_present(*pgtable)) {
		do_no_page(tsk, vma, addr, 0);
		goto repeat;
	}
	page = pte_page(*pgtable);
/* this is a hack for non-kernel-mapped video buffers and similar */
	if (page >= high_memory)
		return 0;
	page += addr & ~PAGE_MASK;
	return *(unsigned long *) page;
}
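A condensed caller sketch honouring the warning in the comment above (the vma lookup and range checks are assumed to have been done elsewhere; peek_aligned is a made-up name):

static unsigned long peek_aligned(struct task_struct *tsk,
				  struct vm_area_struct *vma,
				  unsigned long addr)
{
	/* An aligned read can never straddle a page boundary. */
	if (addr & (sizeof(long) - 1))
		return 0;
	return get_long(tsk, vma, addr);
}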
Code example #27
File: hugetlbpage.c Project: 7799/linux
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	if (!pgd_none(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd))
				pte = pte_offset_map(pmd, addr);
		}
	}
	return pte;
}
Code example #28
static inline pte_t *follow_table(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return (pte_t *) 0x3a;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
		return (pte_t *) 0x3b;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		return (pte_t *) 0x10;

	return pte_offset_map(pmd, addr);
}
Code example #29
File: sys_mem_page.c Project: brynjulfr/CS4500
/* walk_page_table, modified */
static pte_t *walk_page_table(unsigned long addr)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_k(addr);
	if (pgd_none(*pgdp))
		return NULL;
	pudp = pud_offset(pgdp, addr);
	if (pud_none(*pudp) || pud_large(*pudp))
		return NULL;
	pmdp = pmd_offset(pudp, addr);
	if (pmd_none(*pmdp) || pmd_large(*pmdp))
		return NULL;
	ptep = pte_offset_kernel(pmdp, addr);
	if (pte_none(*ptep))
		return NULL;
	return ptep;
}
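One way to use the walker: recover the physical address behind a kernel virtual address (kva_to_pa is a made-up name):

static unsigned long kva_to_pa(unsigned long addr)
{
	pte_t *ptep = walk_page_table(addr);

	if (!ptep)
		return 0;
	return (pte_pfn(*ptep) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
}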
Code example #30
File: eth_hook_2.c Project: andrewpeck/emu
static inline unsigned long uvirt_to_kva(pgd_t *pgd, unsigned long adr)
{
	unsigned long ret = 0UL;
	pmd_t *pmd;
	pte_t *ptep, pte;

	if (!pgd_none(*pgd)) {
		pmd = pmd_offset(pgd, adr);
		if (!pmd_none(*pmd)) {
			ptep = pte_offset_kernel(pmd, adr);
			pte = *ptep;
			if (pte_present(pte)) {
				ret = (unsigned long) page_address(pte_page(pte));
				ret |= (adr & (PAGE_SIZE - 1));
			}
		}
	}
	/* printk(KERN_INFO "uv2kva(%lx-->%lx)\n", adr, ret); */
	return ret;
}