Code example #1
/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
		pte_t *page_table = (pte_t *)alloc_low_page();

		paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
		set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
#else
		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
#endif
		BUG_ON(page_table != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}
Code example #2
File: hibernate.c  Project: CaiBirdZhang/linux
static int copy_pmd(pud_t *dst_pudp, pud_t *src_pudp, unsigned long start,
		    unsigned long end)
{
	pmd_t *src_pmdp;
	pmd_t *dst_pmdp;
	unsigned long next;
	unsigned long addr = start;

	if (pud_none(READ_ONCE(*dst_pudp))) {
		dst_pmdp = (pmd_t *)get_safe_page(GFP_ATOMIC);
		if (!dst_pmdp)
			return -ENOMEM;
		pud_populate(&init_mm, dst_pudp, dst_pmdp);
	}
	dst_pmdp = pmd_offset(dst_pudp, start);

	src_pmdp = pmd_offset(src_pudp, start);
	do {
		pmd_t pmd = READ_ONCE(*src_pmdp);

		next = pmd_addr_end(addr, end);
		if (pmd_none(pmd))
			continue;
		if (pmd_table(pmd)) {
			if (copy_pte(dst_pmdp, src_pmdp, addr, next))
				return -ENOMEM;
		} else {
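			/*
			 * Block (section) mapping: copy it, clearing the
			 * read-only bit so the copied range stays writable
			 * during resume.
			 */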
			set_pmd(dst_pmdp,
				__pmd(pmd_val(pmd) & ~PMD_SECT_RDONLY));
		}
	} while (dst_pmdp++, src_pmdp++, addr = next, addr != end);

	return 0;
}
Code example #3
/**
 * resume_map_numa_kva - add KVA mapping to the temporary page tables created
 *                       during resume from hibernation
 * @pgd_base - temporary resume page directory
 */
void resume_map_numa_kva(pgd_t *pgd_base)
{
	int node;

	for_each_online_node(node) {
		unsigned long start_va, start_pfn, nr_pages, pfn;

		start_va = (unsigned long)node_remap_start_vaddr[node];
		start_pfn = node_remap_start_pfn[node];
		nr_pages = (node_remap_end_vaddr[node] -
			    node_remap_start_vaddr[node]) >> PAGE_SHIFT;

		printk(KERN_DEBUG "%s: node %d\n", __func__, node);

		for (pfn = 0; pfn < nr_pages; pfn += PTRS_PER_PTE) {
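			/* Each pmd-level large page covers PTRS_PER_PTE small pages, hence the stride. */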
			unsigned long vaddr = start_va + (pfn << PAGE_SHIFT);
			pgd_t *pgd = pgd_base + pgd_index(vaddr);
			pud_t *pud = pud_offset(pgd, vaddr);
			pmd_t *pmd = pmd_offset(pud, vaddr);

			set_pmd(pmd, pfn_pmd(start_pfn + pfn,
						PAGE_KERNEL_LARGE_EXEC));

			printk(KERN_DEBUG "%s: %08lx -> pfn %08lx\n",
				__func__, vaddr, start_pfn + pfn);
		}
	}
}
Code example #4
static void shmedia_mapioaddr(unsigned long pa, unsigned long va)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	unsigned long flags = 1; /* 1 = CB0-1 device */

	DEBUG_IOREMAP(("shmedia_mapiopage pa %08lx va %08lx\n", pa, va));

	pgdp = pgd_offset_k(va);
	if (pgd_none(*pgdp)) {
		pmdp = alloc_bootmem_low_pages(PTRS_PER_PMD * sizeof(pmd_t));
		if (pmdp == NULL)
			panic("No memory for pmd\n");
		memset(pmdp, 0, PTRS_PER_PMD * sizeof(pmd_t));
		set_pgd(pgdp, __pgd((unsigned long)pmdp | _KERNPG_TABLE));
	}

	pmdp = pmd_offset(pgdp, va);
	if (pmd_none(*pmdp)) {
		ptep = alloc_bootmem_low_pages(PTRS_PER_PTE * sizeof(pte_t));
		if (ptep == NULL)
			panic("No memory for pte\n");
		clear_page((void *)ptep);
		set_pmd(pmdp, __pmd((unsigned long)ptep | _PAGE_TABLE));
	}

	ptep = pte_offset(pmdp, va);
	set_pte(ptep, mk_pte_phys(pa, __pgprot(_PAGE_PRESENT |
			_PAGE_READ | _PAGE_WRITE |
			_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SHARED | flags)));
}
Code example #5
File: ioremap.c  Project: 0-T-0/ps4-linux
static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(addr);
	do {
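		/* Each iteration writes one section entry and steps PGDIR_SIZE (4 MiB here). */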
		pmd_t *pmd = pmd_offset((pud_t *)pgd, addr);

		set_pmd(pmd, __pmd(__pfn_to_phys(pfn) | type->prot_sect));
		pfn += SZ_4M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	return 0;
}
Code example #6
static int handle_vmalloc_fault(unsigned long address)
{
	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 */
	pgd_t *pgd, *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd = pgd_offset_fast(current->active_mm, address);
	pgd_k = pgd_offset_k(address);

	if (!pgd_present(*pgd_k))
		goto bad_area;

	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		goto bad_area;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		goto bad_area;

	set_pmd(pmd, *pmd_k);

	/* XXX: create the TLB entry here */
	return 0;

bad_area:
	return 1;
}
Code example #7
File: init_64.c  Project: PennPanda/linux-repo
/*
 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
 */
int __meminit vmemmap_populate(struct page *start_page,
						unsigned long size, int node)
{
	unsigned long addr = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + size);
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	for (; addr < end; addr = next) {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;
		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			pte_t entry;
			void *p = vmemmap_alloc_block(PMD_SIZE, node);
			if (!p)
				return -ENOMEM;

			entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
			mk_pte_huge(entry);
			set_pmd(pmd, __pmd(pte_val(entry)));

			printk(KERN_DEBUG " [%lx-%lx] PMD ->%p on node %d\n",
				addr, addr + PMD_SIZE - 1, p, node);
		} else
			vmemmap_verify((pte_t *)pmd, node, addr, next);
	}

	return 0;
}
Code example #8
File: fault.c  Project: 0xroot/Blackphone-BP1-Kernel
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;
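	/* Either install the kernel's pmd here, or check the existing one matches. */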
	if (!pmd_present(*pmd)) {
		set_pmd(pmd, *pmd_k);
		arch_flush_lazy_mmu_mode();
	} else
		BUG_ON(pmd_ptfn(*pmd) != pmd_ptfn(*pmd_k));
	return pmd_k;
}
Code example #9
File: mmu.c  Project: AlexShiLucky/linux
static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages)
{
	pgd_t *pgd = pgd_offset_k(vaddr);
	pmd_t *pmd = pmd_offset(pgd, vaddr);
	pte_t *pte;
	unsigned long i;

	n_pages = ALIGN(n_pages, PTRS_PER_PTE);

	pr_debug("%s: vaddr: 0x%08lx, n_pages: %ld\n",
		 __func__, vaddr, n_pages);

	pte = memblock_alloc_low(n_pages * sizeof(pte_t), PAGE_SIZE);

	for (i = 0; i < n_pages; ++i)
		pte_clear(NULL, 0, pte + i);

	for (i = 0; i < n_pages; i += PTRS_PER_PTE, ++pmd) {
		pte_t *cur_pte = pte + i;

		BUG_ON(!pmd_none(*pmd));
		set_pmd(pmd, __pmd(((unsigned long)cur_pte) & PAGE_MASK));
		BUG_ON(cur_pte != pte_offset_kernel(pmd, 0));
		pr_debug("%s: pmd: 0x%p, pte: 0x%p\n",
			 __func__, pmd, cur_pte);
	}
	return pte;
}
Code example #10
File: hibernate_64.c  Project: 325116067/semc-qsd8x50
static int res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
{
	long i, j;

	i = pud_index(address);
	pud = pud + i;
	for (; i < PTRS_PER_PUD; pud++, i++) {
		unsigned long paddr;
		pmd_t *pmd;

		paddr = address + i*PUD_SIZE;
		if (paddr >= end)
			break;

		pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
		if (!pmd)
			return -ENOMEM;
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
		for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) {
			unsigned long pe;

			if (paddr >= end)
				break;
			pe = __PAGE_KERNEL_LARGE_EXEC | paddr;
			pe &= __supported_pte_mask;
			set_pmd(pmd, __pmd(pe));
		}
	}
	return 0;
}
Code example #11
File: pgtable.c  Project: 274914765/C
/*
 * Associate a large virtual page frame with a given physical page frame
 * and protection flags for that frame. pfn is for the base of the page,
 * vaddr is what the page gets mapped to - both must be properly aligned.
 * The pmd must already be instantiated. Assumes PAE mode.
 */
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
    pgd_t *pgd;
    pud_t *pud;
    pmd_t *pmd;

    if (vaddr & (PMD_SIZE-1)) {        /* vaddr is misaligned */
        printk(KERN_ERR "set_pmd_pfn: vaddr misaligned\n");
        return; /* BUG(); */
    }
    if (pfn & (PTRS_PER_PTE-1)) {        /* pfn is misaligned */
        printk(KERN_ERR "set_pmd_pfn: pfn misaligned\n");
        return; /* BUG(); */
    }
    pgd = swapper_pg_dir + pgd_index(vaddr);
    if (pgd_none(*pgd)) {
        printk(KERN_ERR "set_pmd_pfn: pgd_none\n");
        return; /* BUG(); */
    }
    pud = pud_offset(pgd, vaddr);
    pmd = pmd_offset(pud, vaddr);
    set_pmd(pmd, pfn_pmd(pfn, flags));
    /*
     * It's enough to flush this one mapping.
     * (PGE mappings get flushed as well)
     */
    __flush_tlb_one(vaddr);
}
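
The alignment rules checked above are easy to get wrong; a minimal, hypothetical caller might look like this (the address, pfn, and protection values are illustrative, not from the source):

	/* Illustrative only: both values pass the alignment checks in set_pmd_pfn(). */
	unsigned long vaddr = 0xfe800000UL;	/* multiple of PMD_SIZE */
	unsigned long pfn = 0x10000;		/* multiple of PTRS_PER_PTE */

	set_pmd_pfn(vaddr, pfn, PAGE_KERNEL_LARGE);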
Code example #12
static int __do_vmalloc_fault(unsigned long addr, struct mm_struct *mm)
{
	/* Synchronise this task's top level page-table
	 * with the 'reference' page table.
	 */
	int offset = __pgd_offset(addr);
	pgd_t *pgd, *pgd_k;
	pmd_t *pmd, *pmd_k;

	pgd_k = init_mm.pgd + offset;
	if (!pgd_present(*pgd_k))
		goto bad_area;

	pgd = mm->pgd + offset;
#if 0	/* note that we are two-level */
	if (!pgd_present(*pgd))
		set_pgd(pgd, *pgd_k);
#endif

	pmd_k = pmd_offset(pgd_k, addr);
	if (pmd_none(*pmd_k))
		goto bad_area;

	pmd = pmd_offset(pgd, addr);
	if (!pmd_none(*pmd))
		goto bad_area;
	set_pmd(pmd, *pmd_k);
	return 1;

bad_area:
	return -2;
}
Code example #13
File: hibernate_32.c  Project: bristot/linux
static int set_up_temporary_text_mapping(pgd_t *pgd_base)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_base + pgd_index(restore_jump_address);

	pmd = resume_one_md_table_init(pgd);
	if (!pmd)
		return -ENOMEM;

	if (boot_cpu_has(X86_FEATURE_PSE)) {
		set_pmd(pmd + pmd_index(restore_jump_address),
		__pmd((jump_address_phys & PMD_MASK) | pgprot_val(PAGE_KERNEL_LARGE_EXEC)));
	} else {
		pte = resume_one_page_table_init(pmd);
		if (!pte)
			return -ENOMEM;
		set_pte(pte + pte_index(restore_jump_address),
		__pte((jump_address_phys & PAGE_MASK) | pgprot_val(PAGE_KERNEL_EXEC)));
	}

	return 0;
}
Code example #14
File: init_64.c  Project: PennPanda/linux-repo
/* Must run before zap_low_mappings */
__meminit void *early_ioremap(unsigned long addr, unsigned long size)
{
	unsigned long vaddr;
	pmd_t *pmd, *last_pmd;
	int i, pmds;

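	/* Number of 2 MB entries needed, counting the offset into the first one. */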
	pmds = ((addr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
	vaddr = __START_KERNEL_map;
	pmd = level2_kernel_pgt;
	last_pmd = level2_kernel_pgt + PTRS_PER_PMD - 1;
	for (; pmd <= last_pmd; pmd++, vaddr += PMD_SIZE) {
		for (i = 0; i < pmds; i++) {
			if (pmd_present(pmd[i]))
				goto next;
		}
		vaddr += addr & ~PMD_MASK;
		addr &= PMD_MASK;
		for (i = 0; i < pmds; i++, addr += PMD_SIZE)
			set_pmd(pmd + i, __pmd(addr | _KERNPG_TABLE | _PAGE_PSE));
		__flush_tlb();
		return (void *)vaddr;
	next:
		;
	}
	printk(KERN_ERR "early_ioremap(0x%lx, %lu) failed\n", addr, size);
	return NULL;
}
Code example #15
File: init_64.c  Project: PennPanda/linux-repo
/* Unmap a kernel mapping if it exists. This is useful to avoid prefetches
   from the CPU leading to inconsistent cache lines. address and size
   must be aligned to 2MB boundaries. 
   Does nothing when the mapping doesn't exist. */
void __init clear_kernel_mapping(unsigned long address, unsigned long size) 
{
	unsigned long end = address + size;

	BUG_ON(address & ~LARGE_PAGE_MASK);
	BUG_ON(size & ~LARGE_PAGE_MASK); 
	
	for (; address < end; address += LARGE_PAGE_SIZE) { 
		pgd_t *pgd = pgd_offset_k(address);
		pud_t *pud;
		pmd_t *pmd;
		if (pgd_none(*pgd))
			continue;
		pud = pud_offset(pgd, address);
		if (pud_none(*pud))
			continue; 
		pmd = pmd_offset(pud, address);
		if (!pmd || pmd_none(*pmd))
			continue; 
		if (0 == (pmd_val(*pmd) & _PAGE_PSE)) { 
			/* Could handle this, but it should not happen currently. */
			printk(KERN_ERR 
	       "clear_kernel_mapping: mapping has been split. will leak memory\n"); 
			pmd_ERROR(*pmd); 
		}
		set_pmd(pmd, __pmd(0)); 		
	}
	__flush_tlb_all();
} 
Code example #16
File: hibernate_64.c  Project: KutuSystems/kutu_linux
static int relocate_restore_code(void)
{
	pgd_t *pgd;
	pud_t *pud;

	relocated_restore_code = get_safe_page(GFP_ATOMIC);
	if (!relocated_restore_code)
		return -ENOMEM;

	memcpy((void *)relocated_restore_code, &core_restore_code, PAGE_SIZE);

	/* Make the page containing the relocated code executable */
	pgd = (pgd_t *)__va(read_cr3()) + pgd_index(relocated_restore_code);
	pud = pud_offset(pgd, relocated_restore_code);
	if (pud_large(*pud)) {
		set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX));
	} else {
		pmd_t *pmd = pmd_offset(pud, relocated_restore_code);

		if (pmd_large(*pmd)) {
			set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX));
		} else {
			pte_t *pte = pte_offset_kernel(pmd, relocated_restore_code);

			set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_NX));
		}
	}
	__flush_tlb_all();

	return 0;
}
Code example #17
File: hibernate_64.c  Project: KutuSystems/kutu_linux
static int set_up_temporary_text_mapping(pgd_t *pgd)
{
	pmd_t *pmd;
	pud_t *pud;

	/*
	 * The new mapping only has to cover the page containing the image
	 * kernel's entry point (jump_address_phys), because the switch over to
	 * it is carried out by relocated code running from a page allocated
	 * specifically for this purpose and covered by the identity mapping, so
	 * the temporary kernel text mapping is only needed for the final jump.
	 * Moreover, in that mapping the virtual address of the image kernel's
	 * entry point must be the same as its virtual address in the image
	 * kernel (restore_jump_address), so the image kernel's
	 * restore_registers() code doesn't find itself in a different area of
	 * the virtual address space after switching over to the original page
	 * tables used by the image kernel.
	 */
	pud = (pud_t *)get_safe_page(GFP_ATOMIC);
	if (!pud)
		return -ENOMEM;

	pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
	if (!pmd)
		return -ENOMEM;

	set_pmd(pmd + pmd_index(restore_jump_address),
		__pmd((jump_address_phys & PMD_MASK) | __PAGE_KERNEL_LARGE_EXEC));
	set_pud(pud + pud_index(restore_jump_address),
		__pud(__pa(pmd) | _KERNPG_TABLE));
	set_pgd(pgd + pgd_index(restore_jump_address),
		__pgd(__pa(pud) | _KERNPG_TABLE));

	return 0;
}
Code example #18
File: mm-armv.c  Project: archith/camera_project
static inline void
alloc_init_section(unsigned long virt, unsigned long phys, int prot)
{
	pmd_t pmd;

	pmd_val(pmd) = phys | prot;
	set_pmd(pmd_offset(pgd_offset_k(virt), virt), pmd);
}
Code example #19
File: hibernate_64.c  Project: AlexShiLucky/linux
static int set_up_temporary_text_mapping(pgd_t *pgd)
{
	pmd_t *pmd;
	pud_t *pud;
	p4d_t *p4d = NULL;
	pgprot_t pgtable_prot = __pgprot(_KERNPG_TABLE);
	pgprot_t pmd_text_prot = __pgprot(__PAGE_KERNEL_LARGE_EXEC);

	/* Filter out unsupported __PAGE_KERNEL* bits: */
	pgprot_val(pmd_text_prot) &= __default_kernel_pte_mask;
	pgprot_val(pgtable_prot)  &= __default_kernel_pte_mask;

	/*
	 * The new mapping only has to cover the page containing the image
	 * kernel's entry point (jump_address_phys), because the switch over to
	 * it is carried out by relocated code running from a page allocated
	 * specifically for this purpose and covered by the identity mapping, so
	 * the temporary kernel text mapping is only needed for the final jump.
	 * Moreover, in that mapping the virtual address of the image kernel's
	 * entry point must be the same as its virtual address in the image
	 * kernel (restore_jump_address), so the image kernel's
	 * restore_registers() code doesn't find itself in a different area of
	 * the virtual address space after switching over to the original page
	 * tables used by the image kernel.
	 */

	if (pgtable_l5_enabled()) {
		p4d = (p4d_t *)get_safe_page(GFP_ATOMIC);
		if (!p4d)
			return -ENOMEM;
	}

	pud = (pud_t *)get_safe_page(GFP_ATOMIC);
	if (!pud)
		return -ENOMEM;

	pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
	if (!pmd)
		return -ENOMEM;

	set_pmd(pmd + pmd_index(restore_jump_address),
		__pmd((jump_address_phys & PMD_MASK) | pgprot_val(pmd_text_prot)));
	set_pud(pud + pud_index(restore_jump_address),
		__pud(__pa(pmd) | pgprot_val(pgtable_prot)));
	if (p4d) {
		p4d_t new_p4d = __p4d(__pa(pud) | pgprot_val(pgtable_prot));
		pgd_t new_pgd = __pgd(__pa(p4d) | pgprot_val(pgtable_prot));

		set_p4d(p4d + p4d_index(restore_jump_address), new_p4d);
		set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);
	} else {
		/* No p4d for 4-level paging: point the pgd to the pud page table */
		pgd_t new_pgd = __pgd(__pa(pud) | pgprot_val(pgtable_prot));
		set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);
	}

	return 0;
}
Code example #20
File: assabet.c  Project: TitaniumBoy/lin
/*
 * On Assabet, we must probe for the Neponset board _before_
 * paging_init() has occurred to actually determine the amount
 * of RAM available.  To do so, we map the appropriate IO section
 * in the page table here in order to access GPIO registers.
 */
static void __init map_sa1100_gpio_regs( void )
{
	unsigned long phys = __PREG(GPLR) & PMD_MASK;
	unsigned long virt = io_p2v(phys);
	int prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_DOMAIN(DOMAIN_IO);
	pmd_t pmd;
	pmd_val(pmd) = phys | prot;
	set_pmd(pmd_offset(pgd_offset_k(virt), virt), pmd);
}
Code example #21
File: efi_64.c  Project: 3null/fastsocket
static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = (pte_t *)get_zeroed_page(GFP_ATOMIC);

		if (!pte)
			return NULL;
		set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)));
		if (pte != pte_offset_kernel(pmd, 0))
			printk(KERN_ERR "EFI PAGETABLE BUG #02!\n");
	}
	return pte_offset_kernel(pmd, vaddr);
}
Code example #22
File: init.c  Project: 0x000000FF/Linux4Edison
/*
 * Map all physical memory into kernel's address space.
 *
 * This is explicitly coded for two-level page tables, so if you need
 * something else then this needs to change.
 */
static void __init map_ram(void)
{
	unsigned long v, p, e;
	pgprot_t prot;
	pgd_t *pge;
	pud_t *pue;
	pmd_t *pme;
	pte_t *pte;
	/* These mark extents of read-only kernel pages...
	 * ...from vmlinux.lds.S
	 */
	struct memblock_region *region;

	v = PAGE_OFFSET;

	for_each_memblock(memory, region) {
		p = (u32) region->base & PAGE_MASK;
		e = p + (u32) region->size;

		v = (u32) __va(p);
		pge = pgd_offset_k(v);

		while (p < e) {
			int j;
			pue = pud_offset(pge, v);
			pme = pmd_offset(pue, v);

			if ((u32) pue != (u32) pge || (u32) pme != (u32) pge) {
				panic("%s: OR1K kernel hardcoded for "
				      "two-level page tables",
				     __func__);
			}

			/* Alloc one page for holding PTE's... */
			pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
			set_pmd(pme, __pmd(_KERNPG_TABLE + __pa(pte)));

			/* Fill the newly allocated page with PTE'S */
			for (j = 0; p < e && j < PTRS_PER_PGD;
			     v += PAGE_SIZE, p += PAGE_SIZE, j++, pte++) {
				if (v >= (u32) _e_kernel_ro ||
				    v < (u32) _s_kernel_ro)
					prot = PAGE_KERNEL;
				else
					prot = PAGE_KERNEL_RO;

				set_pte(pte, mk_pte_phys(p, prot));
			}

			pge++;
		}

		printk(KERN_INFO "%s: Memory: 0x%x-0x%x\n", __func__,
		       (u32) region->base, (u32) (region->base + region->size));
	}
}
Code example #23
File: p2m.c  Project: DenisLug/mptcp
/*
 * Allocate new pmd(s). It is checked whether the old pmd is still in place.
 * If not, nothing is changed. This is okay as the only reason for allocating
 * a new pmd is to replace p2m_missing_pte or p2m_identity_pte by a individual
 * pmd. In case of PAE/x86-32 there are multiple pmds to allocate!
 */
static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *pte_pg)
{
	pte_t *ptechk;
	pte_t *pte_newpg[PMDS_PER_MID_PAGE];
	pmd_t *pmdp;
	unsigned int level;
	unsigned long flags;
	unsigned long vaddr;
	int i;

	/* Do all allocations first to bail out in error case. */
	for (i = 0; i < PMDS_PER_MID_PAGE; i++) {
		pte_newpg[i] = alloc_p2m_page();
		if (!pte_newpg[i]) {
			for (i--; i >= 0; i--)
				free_p2m_page(pte_newpg[i]);

			return NULL;
		}
	}

	vaddr = addr & ~(PMD_SIZE * PMDS_PER_MID_PAGE - 1);

	for (i = 0; i < PMDS_PER_MID_PAGE; i++) {
		copy_page(pte_newpg[i], pte_pg);
		paravirt_alloc_pte(&init_mm, __pa(pte_newpg[i]) >> PAGE_SHIFT);

		pmdp = lookup_pmd_address(vaddr);
		BUG_ON(!pmdp);

		spin_lock_irqsave(&p2m_update_lock, flags);

		ptechk = lookup_address(vaddr, &level);
		if (ptechk == pte_pg) {
			HYPERVISOR_shared_info->arch.p2m_generation++;
			wmb(); /* Tools are synchronizing via p2m_generation. */
			set_pmd(pmdp,
				__pmd(__pa(pte_newpg[i]) | _KERNPG_TABLE));
			wmb(); /* Tools are synchronizing via p2m_generation. */
			HYPERVISOR_shared_info->arch.p2m_generation++;
			pte_newpg[i] = NULL;
		}

		spin_unlock_irqrestore(&p2m_update_lock, flags);

		if (pte_newpg[i]) {
			paravirt_release_pte(__pa(pte_newpg[i]) >> PAGE_SHIFT);
			free_p2m_page(pte_newpg[i]);
		}

		vaddr += PMD_SIZE;
	}

	return lookup_address(addr, &level);
}
Code example #24
File: mmu.c  Project: BrainFucking/linux
void split_pud(pud_t *old_pud, pmd_t *pmd)
{
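	/* pud_val holds the output address plus prot bits; XOR-ing out addr leaves prot. */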
	unsigned long addr = pud_pfn(*old_pud) << PAGE_SHIFT;
	pgprot_t prot = __pgprot(pud_val(*old_pud) ^ addr);
	int i = 0;

	do {
		set_pmd(pmd, __pmd(addr | prot));
		addr += PMD_SIZE;
	} while (pmd++, i++, i < PTRS_PER_PMD);
}
Code example #25
File: init.c  Project: Broadcom/stblinux-2.6.18
/*
 * This maps the physical memory to kernel virtual address space, a total 
 * of max_low_pfn pages, by creating page tables starting from address 
 * PAGE_OFFSET.
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int pgd_idx, pmd_idx, pte_ofs;

	pgd_idx = pgd_index(PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	pfn = 0;

	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		if (pfn >= max_low_pfn)
			continue;
		for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn; pmd++, pmd_idx++) {
			unsigned int address = pfn * PAGE_SIZE + PAGE_OFFSET;

			/* Map with big pages if possible, otherwise create normal page tables. */
			if (cpu_has_pse) {
				unsigned int address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE + PAGE_OFFSET + PAGE_SIZE-1;

				if (is_kernel_text(address) || is_kernel_text(address2))
					set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
				else
					set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE));
				pfn += PTRS_PER_PTE;
			} else {
				pte = one_page_table_init(pmd);

				for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn; pte++, pfn++, pte_ofs++) {
					/* Recompute per page: the value from the pmd loop would go stale. */
					address = pfn * PAGE_SIZE + PAGE_OFFSET;
					if (is_kernel_text(address))
						set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
					else
						set_pte(pte, pfn_pte(pfn, PAGE_KERNEL));
				}
			}
		}
	}
}
Code example #26
File: mm-armo.c  Project: dmgerman/linux-pre-history
pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset)
{
	pte_t *pte;

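	/* Allocate before checking the pmd; if it was filled meanwhile, free below. */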
	pte = (pte_t *)alloc_pte_table(PTRS_PER_PTE * sizeof(pte_t), GFP_KERNEL);
	if (pmd_none(*pmd)) {
		if (pte) {
			memzero(pte, PTRS_PER_PTE * sizeof(pte_t));
			set_pmd(pmd, mk_user_pmd(pte));
			return pte + offset;
		}
		set_pmd(pmd, mk_user_pmd(get_bad_pte_table()));
		return NULL;
	}
	free_pte_slow(pte);
	if (pmd_bad(*pmd)) {
		__handle_bad_pmd(pmd);
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + offset;
}
Code example #27
/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET.  The page tables are allocated out of resume-safe pages.
 */
static int resume_physical_mapping_init(pgd_t *pgd_base)
{
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int pgd_idx, pmd_idx;

	pgd_idx = pgd_index(PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	pfn = 0;

	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = resume_one_md_table_init(pgd);
		if (!pmd)
			return -ENOMEM;

		if (pfn >= max_low_pfn)
			continue;

		for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD; pmd++, pmd_idx++) {
			if (pfn >= max_low_pfn)
				break;

			/* Map with big pages if possible, otherwise create
			 * normal page tables.
			 * NOTE: We can mark everything as executable here
			 */
			if (cpu_has_pse) {
				set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
				pfn += PTRS_PER_PTE;
			} else {
				pte_t *max_pte;

				pte = resume_one_page_table_init(pmd);
				if (!pte)
					return -ENOMEM;

				max_pte = pte + PTRS_PER_PTE;
				for (; pte < max_pte; pte++, pfn++) {
					if (pfn >= max_low_pfn)
						break;

					set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
				}
			}
		}
	}

	resume_map_numa_kva(pgd_base);

	return 0;
}
Code example #28
File: mmu.c  Project: BrainFucking/linux
static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
				  unsigned long addr, unsigned long end,
				  phys_addr_t phys, pgprot_t prot,
				  void *(*alloc)(unsigned long size))
{
	pmd_t *pmd;
	unsigned long next;

	/*
	 * Check for initial section mappings in the pgd/pud and remove them.
	 */
	if (pud_none(*pud) || pud_sect(*pud)) {
		pmd = alloc(PTRS_PER_PMD * sizeof(pmd_t));
		if (pud_sect(*pud)) {
			/*
			 * need to have the 1G of mappings continue to be
			 * present
			 */
			split_pud(pud, pmd);
		}
		pud_populate(mm, pud, pmd);
		flush_tlb_all();
	}
	BUG_ON(pud_bad(*pud));

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0) {
			pmd_t old_pmd = *pmd;
			set_pmd(pmd, __pmd(phys |
					   pgprot_val(mk_sect_prot(prot))));
			/*
			 * Check for previous table entries created during
			 * boot (__create_page_tables) and flush them.
			 */
			if (!pmd_none(old_pmd)) {
				flush_tlb_all();
				if (pmd_table(old_pmd)) {
					phys_addr_t table = __pa(pte_offset_map(&old_pmd, 0));
					if (!WARN_ON_ONCE(slab_is_available()))
						memblock_free(table, PAGE_SIZE);
				}
			}
		} else {
			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
				       prot, alloc);
		}
		phys += next - addr;
	} while (pmd++, addr = next, addr != end);
}
Code example #29
File: ident_map.c  Project: AshishNamdev/linux
static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
			   unsigned long addr, unsigned long end)
{
	addr &= PMD_MASK;
	for (; addr < end; addr += PMD_SIZE) {
		pmd_t *pmd = pmd_page + pmd_index(addr);

		if (pmd_present(*pmd))
			continue;

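		/* Virtual addr maps to physical (addr - offset): identity map when offset is 0. */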
		set_pmd(pmd, __pmd((addr - info->offset) | info->pmd_flag));
	}
}
Code example #30
File: init.c  Project: Broadcom/stblinux-2.6.18
/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry.
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
		BUG_ON(page_table != pte_offset_kernel(pmd, 0));

		return page_table;
	}
	
	return pte_offset_kernel(pmd, 0);
}
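
For context, a minimal sketch of how a helper like this is typically driven when pre-populating a virtual range (the loop is illustrative; pgd_base, start, and end are hypothetical variables, and one_md_table_init is the companion helper from the same file):

	/* Illustrative only: make sure pte tables exist for [start, end). */
	unsigned long vaddr;

	for (vaddr = start; vaddr < end; vaddr += PMD_SIZE) {
		pgd_t *pgd = pgd_base + pgd_index(vaddr);
		pmd_t *pmd = one_md_table_init(pgd) + pmd_index(vaddr);

		one_page_table_init(pmd);
	}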