Code example #1
int map_page(unsigned long va, phys_addr_t pa, int flags)
{
	pmd_t *pd;
	pte_t *pg;
	int err = -ENOMEM;

	/* Use upper 10 bits of VA to index the first level map */
	pd = pmd_offset(pgd_offset_k(va), va);
	/* Use middle 10 bits of VA to index the second-level map */
	pg = pte_alloc_kernel(pd, va);
	if (pg != 0) {
		err = 0;
		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, __pgprot(flags)));
		if (mem_init_done)
			flush_HPTE(0, va, pmd_val(*pd));
	}
	return err;
}
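As a side note on the comments above: they describe the classic two-level layout of a 32-bit address with 4 KB pages, where the upper 10 bits select the first-level entry, the middle 10 bits select the second-level entry, and the low 12 bits are the byte offset inside the page. A minimal stand-alone sketch of that split (the 10/10/12 layout and the example address are assumptions for illustration, not taken from the snippet):

#include <stdio.h>

/* Assumed layout: 32-bit VA, 4 KB pages, 10 | 10 | 12 bit split. */
#define PAGE_SHIFT	12
#define PTE_BITS	10
#define PGD_SHIFT	(PAGE_SHIFT + PTE_BITS)	/* 22 */

int main(void)
{
	unsigned int va = 0xC0ABCDEFu;	/* arbitrary example address */

	unsigned int pgd_index = va >> PGD_SHIFT;				/* upper 10 bits */
	unsigned int pte_index = (va >> PAGE_SHIFT) & ((1u << PTE_BITS) - 1);	/* middle 10 bits */
	unsigned int offset    = va & ((1u << PAGE_SHIFT) - 1);		/* low 12 bits */

	printf("va=0x%08x pgd=%u pte=%u offset=0x%03x\n",
	       va, pgd_index, pte_index, offset);
	return 0;
}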
Code example #2
File: pgtable-book3s64.c  Project: peda-r/i2c-mux
/*
 * set a new huge pmd. We should not be called for updating
 * an existing pmd entry. That should go via pmd_hugepage_update.
 */
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_DEBUG_VM
	/*
	 * Make sure hardware valid bit is not set. We don't do
	 * tlb flush for this update.
	 */

	WARN_ON(pte_hw_valid(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
	assert_spin_locked(pmd_lockptr(mm, pmdp));
	WARN_ON(!(pmd_large(pmd) || pmd_devmap(pmd)));
#endif
	trace_hugepage_set_pmd(addr, pmd_val(pmd));
	return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}
Code example #3
File: ptrace.c  Project: andreiw/mkunity
/*
 * This routine puts a long into any process space by following the page
 * tables. NOTE! You should check that the long isn't on a page boundary,
 * and that it is in the task area before calling this: this routine does
 * no checking.
 *
 * Now keeps R/W state of page so that a text page stays readonly
 * even if a debugger scribbles breakpoints into it.  -M.U-
 */
static void put_long(struct task_struct * tsk, struct vm_area_struct * vma, unsigned long addr,
	unsigned long data)
{
	pgd_t *pgdir;
	pmd_t *pgmiddle;
	pte_t *pgtable;
	unsigned long page;

repeat:
	pgdir = pgd_offset(vma->vm_mm, addr);
	if (!pgd_present(*pgdir)) {
		do_no_page(tsk, vma, addr, 1);
		goto repeat;
	}
	if (pgd_bad(*pgdir)) {
		printk("ptrace: bad page directory %08lx\n", pgd_val(*pgdir));
		pgd_clear(pgdir);
		return;
	}
	pgmiddle = pmd_offset(pgdir, addr);
	if (pmd_none(*pgmiddle)) {
		do_no_page(tsk, vma, addr, 1);
		goto repeat;
	}
	if (pmd_bad(*pgmiddle)) {
		printk("ptrace: bad page middle %08lx\n", pmd_val(*pgmiddle));
		pmd_clear(pgmiddle);
		return;
	}
	pgtable = pte_offset(pgmiddle, addr);
	if (!pte_present(*pgtable)) {
		do_no_page(tsk, vma, addr, 1);
		goto repeat;
	}
	page = pte_page(*pgtable);
	if (!pte_write(*pgtable)) {
		do_wp_page(tsk, vma, addr, 1);
		goto repeat;
	}
/* this is a hack for non-kernel-mapped video buffers and similar */
	if (page < high_memory)
		*(unsigned long *) (page + (addr & ~PAGE_MASK)) = data;
/* we're bypassing pagetables, so we have to set the dirty bit ourselves */
/* this should also re-instate whatever read-only mode there was before */
	set_pte(pgtable, pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
	flush_tlb();
}
Code example #4
/**
 * flush_icache_page_range - Flush dcache and invalidate icache for part of a
 *				single page
 * @start: The starting virtual address of the page part.
 * @end: The ending virtual address of the page part.
 *
 * Invalidate the icache for part of a single page, as determined by the
 * virtual addresses given.  The page must be in the paged area.  The dcache is
 * not flushed as the cache must be in write-through mode to get here.
 */
static void flush_icache_page_range(unsigned long start, unsigned long end)
{
	unsigned long addr, size, off;
	struct page *page;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ppte, pte;

	/* work out how much of the page to flush */
	off = start & ~PAGE_MASK;
	size = end - start;

	/* get the physical address the page is mapped to from the page
	 * tables */
	pgd = pgd_offset(current->mm, start);
	if (!pgd || !pgd_val(*pgd))
		return;

	pud = pud_offset(pgd, start);
	if (!pud || !pud_val(*pud))
		return;

	pmd = pmd_offset(pud, start);
	if (!pmd || !pmd_val(*pmd))
		return;

	ppte = pte_offset_map(pmd, start);
	if (!ppte)
		return;
	pte = *ppte;
	pte_unmap(ppte);

	if (pte_none(pte))
		return;

	page = pte_page(pte);
	if (!page)
		return;

	addr = page_to_phys(page);

	/* invalidate the icache coverage on that region */
	mn10300_local_icache_inv_range2(addr + off, size);
	smp_cache_call(SMP_ICACHE_INV_FLUSH_RANGE, start, end);
}
Code example #5
/*
 * In order to soft-boot, we need to insert a 1:1 mapping in place of
 * the user-mode pages.  This will then ensure that we have predictable
 * results when turning the mmu off
 */
/*
void setup_mm_for_reboot(char mode)
{
	unsigned long base_pmdval;
	pgd_t *pgd;
	int i;

	if (current->mm && current->mm->pgd)
		pgd = current->mm->pgd;
	else
		pgd = init_mm.pgd;

	base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
	// if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
	base_pmdval |= 8;	// PMD_BIT4

	for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {
		unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval;
		pmd_t *pmd;

		pmd = pmd_off(pgd, i << PGDIR_SHIFT);
		pmd[0] = __pmd(pmdval);
		pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
		flush_pmd_entry(pmd);
	}
}
*/
void setup_mm_for_reboot(char mode)
{
	pgd_t *pgd;
	pmd_t pmd;
	int i;

	if (current->mm && current->mm->pgd)
		pgd = current->mm->pgd;
	else
		pgd = init_mm.pgd;

	for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++) {
		pmd_val(pmd) = (i << PGDIR_SHIFT) |
		PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT | 8;
		set_pmd(pmd_offset(pgd + i, i << PGDIR_SHIFT), pmd);
	}
}
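Both the commented-out variant and the live one build the identity map the same way: for first-level slot i they write a section descriptor whose address field is i << PGDIR_SHIFT, so virtual section i points at physical section i. A rough stand-alone illustration of that arithmetic (the shift value and the flag constant below are made-up placeholders, not the ARM definitions):

#include <stdio.h>

/* Placeholder values for the sketch only. */
#define PGDIR_SHIFT	21		/* assumed: 2 MB per first-level slot */
#define SECT_FLAGS	0xc0eu		/* stand-in for AP/type bits */

int main(void)
{
	unsigned int i;

	for (i = 0; i < 4; i++) {
		unsigned int pmdval = (i << PGDIR_SHIFT) | SECT_FLAGS;

		/* The address bits of the descriptor equal the slot's own base,
		 * which is what makes the mapping 1:1. */
		printf("slot %u: base=0x%08x descriptor=0x%08x\n",
		       i, i << PGDIR_SHIFT, pmdval);
	}
	return 0;
}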
Code example #6
File: ioremap.c  Project: Dzenik/kernel-source
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
 * mask the size back to 1MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmdp;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmdp = pmd_offset(pud, addr);
	do {
		pmd_t pmd = *pmdp;

		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the kvm sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.kvm_seq++;

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PMD_SIZE;
		pmdp += 2;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
		__check_kvm_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}
Code example #7
File: vsyscall_64.c  Project: austriancoder/linux
/*
 * The VSYSCALL page is the only user-accessible page in the kernel address
 * range.  Normally, the kernel page tables can have _PAGE_USER clear, but
 * the tables covering VSYSCALL_ADDR need _PAGE_USER set if vsyscalls
 * are enabled.
 *
 * Some day we may create a "minimal" vsyscall mode in which we emulate
 * vsyscalls but leave the page not present.  If so, we skip calling
 * this.
 */
void __init set_vsyscall_pgtable_user_bits(pgd_t *root)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset_pgd(root, VSYSCALL_ADDR);
	set_pgd(pgd, __pgd(pgd_val(*pgd) | _PAGE_USER));
	p4d = p4d_offset(pgd, VSYSCALL_ADDR);
#if CONFIG_PGTABLE_LEVELS >= 5
	set_p4d(p4d, __p4d(p4d_val(*p4d) | _PAGE_USER));
#endif
	pud = pud_offset(p4d, VSYSCALL_ADDR);
	set_pud(pud, __pud(pud_val(*pud) | _PAGE_USER));
	pmd = pmd_offset(pud, VSYSCALL_ADDR);
	set_pmd(pmd, __pmd(pmd_val(*pmd) | _PAGE_USER));
}
Code example #8
static void machine_kexec_page_table_set_one(
	pgd_t *pgd, pmd_t *pmd, pte_t *pte,
	unsigned long vaddr, unsigned long paddr)
{
	pud_t *pud;

	pgd += pgd_index(vaddr);
#ifdef CONFIG_X86_PAE
	if (!(pgd_val(*pgd) & _PAGE_PRESENT))
		set_pgd(pgd, __pgd(__pa(pmd) | _PAGE_PRESENT));
#endif
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	if (!(pmd_val(*pmd) & _PAGE_PRESENT))
		set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
	pte = pte_offset_kernel(pmd, vaddr);
	set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
}
Code example #9
void __init mt_map_io(void)
{
    pgd_t *pgd;
    pud_t *pud;
    pmd_t *pmd;

    iotable_init(mt_io_desc, ARRAY_SIZE(mt_io_desc));

    /* set NS=1 for NS_GIC_CPU_BASE */
    pgd = pgd_offset(&init_mm, NS_GIC_CPU_BASE);
    pud = pud_offset(pgd, NS_GIC_CPU_BASE);
    pmd = pmd_offset(pud, NS_GIC_CPU_BASE);
    if ((pmd_val(*pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE) {
        __raw_writel(__raw_readl(pmd) | PMD_TABLE_NS, pmd);
    } else {
        __raw_writel(__raw_readl(pmd) | PMD_SECT_NS, pmd);
    }
    flush_pmd_entry(pmd);
}
Code example #10
/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
		pte_t *page_table = NULL;

		if (after_bootmem) {
#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
			page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
			if (!page_table)
				page_table =
				(pte_t *)alloc_bootmem_pages(PAGE_SIZE);
		} else
			page_table = (pte_t *)alloc_low_page();

		paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
		BUG_ON(page_table != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}
Code example #11
/*
 * This is useful to dump out the page tables associated with
 * 'addr' in mm 'mm'.
 */
void show_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;

	if (!mm)
		mm = &init_mm;

	printk(KERN_ALERT "pgd = %p\n", mm->pgd);
	pgd = pgd_offset(mm, addr);
	printk(KERN_ALERT "*pgd = %08lx", pgd_val(*pgd));

	do {
		pmd_t *pmd;
		pte_t *pte;

		if (pgd_none(*pgd))
			break;

		if (pgd_bad(*pgd)) {
			printk("(bad)");
			break;
		}

		pmd = pmd_offset(pgd, addr);
		printk(", *pmd = %08lx", pmd_val(*pmd));

		if (pmd_none(*pmd))
			break;

		if (pmd_bad(*pmd)) {
			printk("(bad)");
			break;
		}

		pte = pte_offset(pmd, addr);
		printk(", *pte = %08lx", pte_val(*pte));
#ifdef CONFIG_CPU_32
		printk(", *ppte = %08lx", pte_val(pte[-PTRS_PER_PTE]));
#endif
	} while(0);

	printk("\n");
}
Code example #12
File: kmap.c  Project: 3sOx/asuswrt-merlin
/*
 * __iounmap unmaps nearly everything, so be careful.
 * Currently it doesn't free pointer/page tables anymore, but this
 * wasn't used anyway and might be added later.
 */
void __iounmap(void *addr, unsigned long size)
{
	unsigned long virtaddr = (unsigned long)addr;
	pgd_t *pgd_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	while ((long)size > 0) {
		pgd_dir = pgd_offset_k(virtaddr);
		if (pgd_bad(*pgd_dir)) {
			printk("iounmap: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
			pgd_clear(pgd_dir);
			return;
		}
		pmd_dir = pmd_offset(pgd_dir, virtaddr);

		if (CPU_IS_020_OR_030) {
			int pmd_off = (virtaddr/PTRTREESIZE) & 15;
			int pmd_type = pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK;

			if (pmd_type == _PAGE_PRESENT) {
				pmd_dir->pmd[pmd_off] = 0;
				virtaddr += PTRTREESIZE;
				size -= PTRTREESIZE;
				continue;
			} else if (pmd_type == 0)
				continue;
		}

		if (pmd_bad(*pmd_dir)) {
			printk("iounmap: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
			pmd_clear(pmd_dir);
			return;
		}
		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

		pte_val(*pte_dir) = 0;
		virtaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	flush_tlb_all();
}
Code example #13
File: ptrace.c  Project: shattered/linux-m68k
/*
 * This routine gets a long from any process space by following the page
 * tables. NOTE! You should check that the long isn't on a page boundary,
 * and that it is in the task area before calling this: this routine does
 * no checking.
 *
 */
static unsigned long get_long(struct task_struct * tsk, 
	struct vm_area_struct * vma, unsigned long addr)
{
	pgd_t * pgdir;
	pmd_t * pgmiddle;
	pte_t * pgtable;
	unsigned long page;

repeat:
	pgdir = pgd_offset(vma->vm_mm, addr);
	if (pgd_none(*pgdir)) {
		do_no_page(tsk, vma, addr, 0);
		goto repeat;
	}
	if (pgd_bad(*pgdir)) {
		printk("ptrace: bad page directory %08lx\n", pgd_val(*pgdir));
		pgd_clear(pgdir);
		return 0;
	}
	pgmiddle = pmd_offset(pgdir,addr);
	if (pmd_none(*pgmiddle)) {
		do_no_page(tsk, vma, addr, 0);
		goto repeat;
	}
	if (pmd_bad(*pgmiddle)) {
		printk("ptrace: bad page directory %08lx\n",
		       pmd_val(*pgmiddle));
		pmd_clear(pgmiddle);
		return 0;
	}
	pgtable = pte_offset(pgmiddle, addr);
	if (!pte_present(*pgtable)) {
		do_no_page(tsk, vma, addr, 0);
		goto repeat;
	}
	page = pte_page(*pgtable);
/* this is a hack for non-kernel-mapped video buffers and similar */
	if (page >= high_memory)
		return 0;
	page += addr & ~PAGE_MASK;
	return *(unsigned long *) page;
}
Code example #14
File: oleole_spt.c  Project: nminoru/oleolevm
int oleole_get_gPTE_offset_with_alloc(struct mm_struct *mm, pte_t **result, unsigned long address)
{
	pgd_t *pgd, pgd_v;
	pud_t *pud, pud_v;
	pmd_t *pmd, pmd_v;
	pte_t *pte;

	pgd   = pgd_offset(mm, address);
	pgd_v = *pgd;
	if (pgd_none(pgd_v))
		if (pud_alloc(mm, pgd, address) == NULL)
			return -ENOMEM;

	pud   = pud_offset(pgd, address);
	pud_v = *pud;

	if (oleole_pud_none(pud_v))
		if (oleole_pmd_alloc(pud))
			return -ENOMEM;

	if (unlikely((pud_val(pud_v) & _PAGE_DEACTIVATED)))
		reactivate_pmd_table(pud);

	pmd   = pmd_offset(pud, address);
	pmd_v = *pmd;

	if (oleole_pmd_none(pmd_v))
		if (oleole_pte_alloc(pmd))
			return -ENOMEM;

	if (unlikely((pmd_val(pmd_v) & _PAGE_DEACTIVATED)))
		reactivate_pte_table(pmd);

	pte  = pte_offset_map(pmd, address);

	*result = pte;

	return 0;
}
Code example #15
File: oleole_spt.c  Project: nminoru/oleolevm
static void reactivate_pmd_table(pud_t *pud)
{
	int i;
	pmd_t *pmd;
	unsigned long val;

	val  = pud_val(*pud);

	if (likely(val & ~(_PAGE_DEACTIVATED|_PAGE_PRESENT)))
		val &= ~_PAGE_DEACTIVATED;
	else
		val &= ~(_PAGE_DEACTIVATED|_PAGE_PRESENT);

	*pud = __pud(val);

	pmd = pmd_offset(pud, 0);
	for (i=0 ; i<PTRS_PER_PMD ; i++, pmd++) {
		val  = pmd_val(*pmd);
		val |= (_PAGE_DEACTIVATED|_PAGE_PRESENT);
		*pmd = __pmd(val);
	}
}
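The test val & ~(_PAGE_DEACTIVATED|_PAGE_PRESENT) above asks whether the upper-level entry carries anything besides the two bookkeeping bits, i.e. whether it actually points at a table; only then does it keep _PAGE_PRESENT while clearing _PAGE_DEACTIVATED, otherwise both bits are dropped. A minimal stand-alone sketch of that bit logic, with made-up bit positions:

#include <stdio.h>

/* Made-up bit positions, for illustration only. */
#define PAGE_PRESENT		0x001ul
#define PAGE_DEACTIVATED	0x200ul

/* Mirror of the reactivation test: keep PRESENT only if the entry holds
 * more than the two bookkeeping bits. */
static unsigned long reactivate(unsigned long val)
{
	if (val & ~(PAGE_DEACTIVATED | PAGE_PRESENT))
		val &= ~PAGE_DEACTIVATED;
	else
		val &= ~(PAGE_DEACTIVATED | PAGE_PRESENT);
	return val;
}

int main(void)
{
	unsigned long real  = 0x1000ul | PAGE_DEACTIVATED | PAGE_PRESENT;
	unsigned long empty = PAGE_DEACTIVATED | PAGE_PRESENT;

	printf("%#lx -> %#lx\n", real, reactivate(real));	/* keeps PRESENT */
	printf("%#lx -> %#lx\n", empty, reactivate(empty));	/* clears both */
	return 0;
}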
Code example #16
File: dump_pagetables.c  Project: 03199618/linux
static void walk_pmd_level(struct seq_file *m, struct pg_state *st, pud_t addr,
							unsigned long P)
{
	int i;
	pmd_t *start;

	start = (pmd_t *) pud_page_vaddr(addr);
	for (i = 0; i < PTRS_PER_PMD; i++) {
		st->current_address = normalize_addr(P + i * PMD_LEVEL_MULT);
		if (!pmd_none(*start)) {
			pgprotval_t prot = pmd_val(*start) & PTE_FLAGS_MASK;

			if (pmd_large(*start) || !pmd_present(*start))
				note_page(m, st, __pgprot(prot), 3);
			else
				walk_pte_level(m, st, *start,
					       P + i * PMD_LEVEL_MULT);
		} else
			note_page(m, st, __pgprot(0), 3);
		start++;
	}
}
Code example #17
File: init_32.c  Project: exchb/linux-kernel-note
/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
    // check whether the PTE page is already present in memory
	if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
		pte_t *page_table = NULL;

		if (after_bootmem) {
#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
			page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
			if (!page_table)
				page_table =
				(pte_t *)alloc_bootmem_pages(PAGE_SIZE);
		} else
            // this allocates a whole 4K page, which easily holds 1024 pointers....
			page_table = (pte_t *)alloc_low_page();

		paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
	    // __pmd : return (pmd_t) { val };
		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
		BUG_ON(page_table != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}
Code example #18
File: oleole_spt.c  Project: nminoru/oleolevm
static void reactivate_pte_table(pmd_t *pmd)
{
	int i;
	pte_t *pte;
	unsigned long val;

	val  = pmd_val(*pmd);

	if (likely(val & ~(_PAGE_DEACTIVATED|_PAGE_PRESENT)))
		val &= ~_PAGE_DEACTIVATED;
	else
		val &= ~(_PAGE_DEACTIVATED|_PAGE_PRESENT);

	*pmd = __pmd(val);
	
	/* PTEs */
	pte = pte_offset_map(pmd, 0);
	for (i=0 ; i<PTRS_PER_PTE ; i++, pte++) {
		val  = pte_val(*pte);
		val |= (_PAGE_DEACTIVATED|_PAGE_PRESENT);
		*pte = __pte(val);
	}
}
Code example #19
File: array.c  Project: liexusong/linux2.0-comment
static inline void statm_pte_range(pmd_t * pmd, unsigned long address, unsigned long size,
                                   int * pages, int * shared, int * dirty, int * total)
{
    pte_t * pte;
    unsigned long end;

    if (pmd_none(*pmd))
        return;
    if (pmd_bad(*pmd)) {
        printk("statm_pte_range: bad pmd (%08lx)\n", pmd_val(*pmd));
        pmd_clear(pmd);
        return;
    }
    pte = pte_offset(pmd, address);
    address &= ~PMD_MASK;
    end = address + size;
    if (end > PMD_SIZE)
        end = PMD_SIZE;
    do {
        pte_t page = *pte;

        address += PAGE_SIZE;
        pte++;
        if (pte_none(page))
            continue;
        ++*total;
        if (!pte_present(page))
            continue;
        ++*pages;
        if (pte_dirty(page))
            ++*dirty;
        if (pte_page(page) >= high_memory)
            continue;
        if (mem_map[MAP_NR(pte_page(page))].count > 1)
            ++*shared;
    } while (address < end);
}
Code example #20
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_4M - 1));
	pgd_t *pgd;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	do {
		pmd_t pmd, *pmdp = pmd_offset((pud_t *)pgd, addr);

		pmd = *pmdp;
		if (!pmd_none(pmd)) {
			pmd_clear(pmdp);

			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	flush_tlb_kernel_range(virt, end);
}
Code example #21
File: init_64.c  Project: PennPanda/linux-repo
static void __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
{
	int i = pmd_index(address);

	for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
		unsigned long entry;
		pmd_t *pmd = pmd_page + pmd_index(address);

		if (address >= end) {
			if (!after_bootmem)
				for (; i < PTRS_PER_PMD; i++, pmd++)
					set_pmd(pmd, __pmd(0));
			break;
		}

		if (pmd_val(*pmd))
			continue;

		entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address;
		entry &= __supported_pte_mask;
		set_pmd(pmd, __pmd(entry));
	}
}
Code example #22
/*
 * Dump out the page tables associated with 'addr' in mm 'mm'.
 */
void show_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;

	if (!mm)
		mm = &init_mm;

	pr_alert("pgd = %p\n", mm->pgd);
	pgd = pgd_offset(mm, addr);
	pr_alert("[%08lx] *pgd=%016llx", addr, pgd_val(*pgd));

	do {
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		if (pgd_none(*pgd) || pgd_bad(*pgd))
			break;

		pud = pud_offset(pgd, addr);
		printk(", *pud=%016llx", pud_val(*pud));
		if (pud_none(*pud) || pud_bad(*pud))
			break;

		pmd = pmd_offset(pud, addr);
		printk(", *pmd=%016llx", pmd_val(*pmd));
		if (pmd_none(*pmd) || pmd_bad(*pmd))
			break;

		pte = pte_offset_map(pmd, addr);
		printk(", *pte=%016llx", pte_val(*pte));
		pte_unmap(pte);
	} while(0);

	printk("\n");
}
Code example #23
File: hugepage-hash64.c  Project: 03199618/linux
int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
		    pmd_t *pmdp, unsigned long trap, int local, int ssize,
		    unsigned int psize)
{
	unsigned int index, valid;
	unsigned char *hpte_slot_array;
	unsigned long rflags, pa, hidx;
	unsigned long old_pmd, new_pmd;
	int ret, lpsize = MMU_PAGE_16M;
	unsigned long vpn, hash, shift, slot;

	/*
	 * atomically mark the linux large page PMD busy and dirty
	 */
	do {
		old_pmd = pmd_val(*pmdp);
		/* If PMD busy, retry the access */
		if (unlikely(old_pmd & _PAGE_BUSY))
			return 0;
		/* If PMD is trans splitting retry the access */
		if (unlikely(old_pmd & _PAGE_SPLITTING))
			return 0;
		/* If PMD permissions don't match, take page fault */
		if (unlikely(access & ~old_pmd))
			return 1;
		/*
		 * Try to lock the PTE, add ACCESSED and DIRTY if it was
		 * a write access
		 */
		new_pmd = old_pmd | _PAGE_BUSY | _PAGE_ACCESSED;
		if (access & _PAGE_RW)
			new_pmd |= _PAGE_DIRTY;
	} while (old_pmd != __cmpxchg_u64((unsigned long *)pmdp,
					  old_pmd, new_pmd));
	/*
	 * PP bits. _PAGE_USER is already PP bit 0x2, so we only
	 * need to add in 0x1 if it's a read-only user page
	 */
	rflags = new_pmd & _PAGE_USER;
	if ((new_pmd & _PAGE_USER) && !((new_pmd & _PAGE_RW) &&
					   (new_pmd & _PAGE_DIRTY)))
		rflags |= 0x1;
	/*
	 * _PAGE_EXEC -> HW_NO_EXEC since it's inverted
	 */
	rflags |= ((new_pmd & _PAGE_EXEC) ? 0 : HPTE_R_N);

#if 0
	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {

		/*
		 * No CPU has hugepages but lacks no execute, so we
		 * don't need to worry about that case
		 */
		rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);
	}
#endif
	/*
	 * Find the slot index details for this ea, using base page size.
	 */
	shift = mmu_psize_defs[psize].shift;
	index = (ea & ~HPAGE_PMD_MASK) >> shift;
	BUG_ON(index >= 4096);

	vpn = hpt_vpn(ea, vsid, ssize);
	hash = hpt_hash(vpn, shift, ssize);
	hpte_slot_array = get_hpte_slot_array(pmdp);

	valid = hpte_valid(hpte_slot_array, index);
	if (valid) {
		/* update the hpte bits */
		hidx =  hpte_hash_index(hpte_slot_array, index);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		ret = ppc_md.hpte_updatepp(slot, rflags, vpn,
					   psize, lpsize, ssize, local);
		/*
		 * We failed to update, try to insert a new entry.
		 */
		if (ret == -1) {
			/*
			 * large pte is marked busy, so we can be sure
			 * nobody is looking at hpte_slot_array. hence we can
			 * safely update this here.
			 */
			valid = 0;
			new_pmd &= ~_PAGE_HPTEFLAGS;
			hpte_slot_array[index] = 0;
		} else
			/* clear the busy bits and set the hash pte bits */
			new_pmd = (new_pmd & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
	}

	if (!valid) {
		unsigned long hpte_group;

		/* insert new entry */
		pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT;
repeat:
		hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;

		/* clear the busy bits and set the hash pte bits */
		new_pmd = (new_pmd & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;

		/* Add in WIMG bits */
		rflags |= (new_pmd & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
				      _PAGE_COHERENT | _PAGE_GUARDED));

		/* Insert into the hash table, primary slot */
		slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
					  psize, lpsize, ssize);
		/*
		 * Primary is full, try the secondary
		 */
		if (unlikely(slot == -1)) {
			hpte_group = ((~hash & htab_hash_mask) *
				      HPTES_PER_GROUP) & ~0x7UL;
			slot = ppc_md.hpte_insert(hpte_group, vpn, pa,
						  rflags, HPTE_V_SECONDARY,
						  psize, lpsize, ssize);
			if (slot == -1) {
				if (mftb() & 0x1)
					hpte_group = ((hash & htab_hash_mask) *
						      HPTES_PER_GROUP) & ~0x7UL;

				ppc_md.hpte_remove(hpte_group);
				goto repeat;
			}
		}
		/*
		 * Hypervisor failure. Restore old pmd and return -1
		 * similar to __hash_page_*
		 */
		if (unlikely(slot == -2)) {
			*pmdp = __pmd(old_pmd);
			hash_failure_debug(ea, access, vsid, trap, ssize,
					   psize, lpsize, old_pmd);
			return -1;
		}
		/*
		 * large pte is marked busy, so we can be sure
		 * nobody is looking at hpte_slot_array. hence we can
		 * safely update this here.
		 */
		mark_hpte_slot_valid(hpte_slot_array, index, slot);
	}
	/*
	 * No need to use ldarx/stdcx here
	 */
	*pmdp = __pmd(new_pmd & ~_PAGE_BUSY);
	return 0;
}
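The do/while loop at the top of __hash_page_thp is a standard lock-free read-modify-write: re-read the PMD, bail out if another thread holds it busy or is splitting it, otherwise try to publish the BUSY and ACCESSED (and, for writes, DIRTY) bits with a compare-and-swap, retrying if the value changed underneath. A rough user-space sketch of the same pattern using C11 atomics (the bit names are placeholders, not the kernel's definitions):

#include <stdatomic.h>
#include <stdio.h>

#define PAGE_BUSY	0x1ul
#define PAGE_ACCESSED	0x2ul
#define PAGE_DIRTY	0x4ul

/* Try to mark the entry busy+accessed (+dirty for writes); give up if it is
 * already busy. Returns 1 on success, 0 if the caller should back off. */
static int lock_entry(_Atomic unsigned long *entry, int write)
{
	unsigned long old, new;

	do {
		old = atomic_load(entry);
		if (old & PAGE_BUSY)
			return 0;	/* someone else owns it: retry the access later */
		new = old | PAGE_BUSY | PAGE_ACCESSED;
		if (write)
			new |= PAGE_DIRTY;
	} while (!atomic_compare_exchange_weak(entry, &old, new));

	return 1;
}

int main(void)
{
	_Atomic unsigned long pmd = 0x100ul;	/* arbitrary starting value */

	printf("locked=%d value=%#lx\n", lock_entry(&pmd, 1),
	       atomic_load(&pmd));
	return 0;
}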
Code example #24
File: srmmu.c  Project: GongZiYuan/linux
static inline int srmmu_pmd_none(pmd_t pmd)
{ return !(pmd_val(pmd) & 0xFFFFFFF); }
Code example #25
unsigned int m4u_user_v2p(unsigned int va)
{
    unsigned int pmdOffset = (va & (PMD_SIZE - 1));
    unsigned int pageOffset = (va & (PAGE_SIZE - 1));
    pgd_t *pgd;
    pmd_t *pmd;
    pte_t *pte;
    unsigned int pa;
    
    if(NULL==current)
    {
    	  M4UMSG("error: m4u_user_v2p, current is NULL! \n");
    	  return 0;
    }
    if(NULL==current->mm)
    {
    	  M4UMSG("error: m4u_user_v2p, current->mm is NULL! tgid=0x%x, name=%s \n", current->tgid, current->comm);
    	  return 0;
    }
        
    pgd = pgd_offset(current->mm, va); /* what is tsk->mm */
    M4UDBG("m4u_user_v2p(), pgd 0x%x\n", pgd);    
    M4UDBG("pgd_none=%d, pgd_bad=%d\n", pgd_none(*pgd), pgd_bad(*pgd));
    
    if(pgd_none(*pgd)||pgd_bad(*pgd))
    {
        M4UMSG("warning: m4u_user_v2p(), va=0x%x, pgd invalid! \n", va);
        return 0;
    }
    
    pmd = pmd_offset(pgd, va);
    M4UDBG("m4u_user_v2p(), pmd 0x%x\n", pmd);
    M4UDBG("pmd_none=%d, pmd_bad=%d, pmd_val=0x%x\n", pmd_none(*pmd), pmd_bad(*pmd), pmd_val(*pmd));
   
    /* If this is a page table entry, keep on walking to the next level */ 
    if (( (unsigned int)pmd_val(*pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
    {
        if(pmd_none(*pmd)||pmd_bad(*pmd))
        {
            M4UDBG("warning: m4u_user_v2p(), va=0x%x, pmd invalid! \n", va);
            return 0;
        }
        
        // we encounter some PTE-not-present issues, do not know why
        pte = pte_offset_map(pmd, va);        
        if(pte_present(*pte)) 
        { 
            pa=(pte_val(*pte) & (PAGE_MASK)) | pageOffset; 
            M4UDBG("PA = 0x%8x\n", pa);
            return pa; 
        }
    }
    else /* Only 1 level page table */
    {
       if(pmd_none(*pmd))
       {
          M4UDBG("Error: m4u_user_v2p(), virtual addr 0x%x, pmd invalid! \n", va);
          return 0;
       }
       pa=(pte_val(*pmd) & (PMD_MASK)) | pmdOffset; 
       M4UDBG("PA = 0x%8x\n", pa);
       return pa;    
    }
    
    M4UDBG("warning: m4u_user_v2p(), pte invalid! \n");
    // m4u_dump_maps(va);
    
    return 0;
}
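The two branches above compute the physical address differently: for a second-level (page-table) mapping the PA is the page frame taken from the PTE plus the offset within the page, while for a first-level section mapping it is the section base taken from the PMD plus the offset within the section. A stand-alone sketch of that arithmetic, assuming 4 KB pages and 1 MB sections (the descriptor values are made up):

#include <stdio.h>

#define PAGE_SHIFT	12				/* 4 KB pages (assumed) */
#define SECT_SHIFT	20				/* 1 MB sections (assumed) */
#define PAGE_MASK	(~((1u << PAGE_SHIFT) - 1))
#define SECT_MASK	(~((1u << SECT_SHIFT) - 1))

int main(void)
{
	unsigned int va = 0x40123ABCu;			/* arbitrary example VA */

	/* Page-table mapping: frame number comes from the PTE. */
	unsigned int pte = 0x8765C063u;			/* made-up descriptor */
	unsigned int pa_page = (pte & PAGE_MASK) | (va & ~PAGE_MASK);

	/* Section mapping: base comes straight from the first-level entry. */
	unsigned int pmd = 0x87600C1Eu;			/* made-up descriptor */
	unsigned int pa_sect = (pmd & SECT_MASK) | (va & ~SECT_MASK);

	printf("page-mapped PA    = 0x%08x\n", pa_page);
	printf("section-mapped PA = 0x%08x\n", pa_sect);
	return 0;
}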
Code example #26
File: kmap.c  Project: sarnobat/knoppix
/*
 * Set new cache mode for some kernel address space.
 * The caller must push data for that range itself, if such data may already
 * be in the cache.
 */
void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
{
	unsigned long virtaddr = (unsigned long)addr;
	pgd_t *pgd_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	if (CPU_IS_040_OR_060) {
		switch (cmode) {
		case IOMAP_FULL_CACHING:
			cmode = _PAGE_CACHE040;
			break;
		case IOMAP_NOCACHE_SER:
		default:
			cmode = _PAGE_NOCACHE_S;
			break;
		case IOMAP_NOCACHE_NONSER:
			cmode = _PAGE_NOCACHE;
			break;
		case IOMAP_WRITETHROUGH:
			cmode = _PAGE_CACHE040W;
			break;
		}
	} else {
		switch (cmode) {
		case IOMAP_NOCACHE_SER:
		case IOMAP_NOCACHE_NONSER:
		default:
			cmode = _PAGE_NOCACHE030;
			break;
		case IOMAP_FULL_CACHING:
		case IOMAP_WRITETHROUGH:
			cmode = 0;
		}
	}

	while ((long)size > 0) {
		pgd_dir = pgd_offset_k(virtaddr);
		if (pgd_bad(*pgd_dir)) {
			printk("iocachemode: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
			pgd_clear(pgd_dir);
			return;
		}
		pmd_dir = pmd_offset(pgd_dir, virtaddr);

		if (CPU_IS_020_OR_030) {
			int pmd_off = (virtaddr/PTRTREESIZE) & 15;

			if ((pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK) == _PAGE_PRESENT) {
				pmd_dir->pmd[pmd_off] = (pmd_dir->pmd[pmd_off] &
							 _CACHEMASK040) | cmode;
				virtaddr += PTRTREESIZE;
				size -= PTRTREESIZE;
				continue;
			}
		}

		if (pmd_bad(*pmd_dir)) {
			printk("iocachemode: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
			pmd_clear(pmd_dir);
			return;
		}
		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

		pte_val(*pte_dir) = (pte_val(*pte_dir) & _CACHEMASK040) | cmode;
		virtaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	flush_tlb_all();
}
Code example #27
File: hugetlbpage.c  Project: CCNITSilchar/linux
int pmd_huge(pmd_t pmd)
{
	return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
}
Code example #28
File: pgtable.c  Project: szllong123/nvmmfs
inline pte_t* nvmm_get_pte(pmd_t *pmd)
{
    return (pte_t*)__va(pmd_val(*pmd) & PAGE_MASK);
}
Code example #29
File: hugetlbpage.c  Project: 0-T-0/ps4-linux
int pmd_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE_PAGE);
}
Code example #30
int pmd_huge(pmd_t pmd)
{
	return (pmd_val(pmd) & _PAGE_HUGE) != 0;
}