Example #1
static void shmedia_mapioaddr(unsigned long pa, unsigned long va)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	unsigned long flags = 1; /* 1 = CB0-1 device */


	DEBUG_IOREMAP(("shmedia_mapiopage pa %08lx va %08lx\n", pa, va));

	pgdp = pgd_offset_k(va);
	if (pgd_none(*pgdp)) {
		pmdp = alloc_bootmem_low_pages(PTRS_PER_PMD * sizeof(pmd_t));
		if (pmdp == NULL) panic("No memory for pmd\n");
		memset(pmdp, 0, PTRS_PER_PMD * sizeof(pmd_t));	/* match the allocation size above */
		set_pgd(pgdp, __pgd((unsigned long)pmdp | _KERNPG_TABLE));
	}

	pmdp = pmd_offset(pgdp, va);
	if (pmd_none(*pmdp)) {
		ptep = alloc_bootmem_low_pages(PTRS_PER_PTE * sizeof(pte_t));
		if (ptep == NULL) panic("No memory for pte\n");
		clear_page((void *)ptep);
		set_pmd(pmdp, __pmd((unsigned long)ptep + _PAGE_TABLE));
	}

	ptep = pte_offset(pmdp, va);
	set_pte(ptep, mk_pte_phys(pa, __pgprot(_PAGE_PRESENT |
			_PAGE_READ | _PAGE_WRITE | 
			_PAGE_DIRTY | _PAGE_ACCESSED |_PAGE_SHARED | flags)));
}
Example #2
static int remap_area_pages(unsigned long address, unsigned long phys_addr,
				 unsigned long size, unsigned long flags)
{
	int error;
	pgd_t * dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset_k(address);
	flush_cache_all();
	if (address >= end)
		BUG();
	spin_lock(&init_mm.page_table_lock);
	do {
		pmd_t *pmd;
		pmd = pmd_alloc(&init_mm, dir, address);
		error = -ENOMEM;
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
					 phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	spin_unlock(&init_mm.page_table_lock);
	flush_tlb_all();
	return error;
}
Example #3
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
 * mask the size back to 4MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_4M - 1));
	pgd_t *pgd;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	do {
		pmd_t pmd, *pmdp = pmd_offset((pud_t *)pgd, addr);

		pmd = *pmdp;
		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the kvm sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	flush_tlb_kernel_range(virt, end);
}
Example #4
/*
 * Maps a range of vmalloc()ed memory into the requested pages. The old
 * mappings are removed.
 */
static inline void vmap_pte_range (pte_t *pte, unsigned long address,
                                   unsigned long size, unsigned long vaddr)
{
    unsigned long end;
    pgd_t *vdir;
    pmd_t *vpmd;
    pte_t *vpte;

    address &= ~PMD_MASK;
    end = address + size;
    if (end > PMD_SIZE)
        end = PMD_SIZE;
    do {
        pte_t oldpage = *pte;
        struct page * page;
        pte_clear(pte);

        vdir = pgd_offset_k (vaddr);
        vpmd = pmd_offset (vdir, vaddr);
        vpte = pte_offset (vpmd, vaddr);
        page = pte_page (*vpte);

        set_pte(pte, mk_pte(page, PAGE_USERIO));
        forget_pte(oldpage);
        address += PAGE_SIZE;
        vaddr += PAGE_SIZE;
        pte++;
    } while (address < end);
}
Example #5
int ioremap_page_range(unsigned long addr,
		       unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pgd_t *pgd;
	unsigned long start;
	unsigned long next;
	int err;

	might_sleep();
	BUG_ON(addr >= end);

	start = addr;
	phys_addr -= addr;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = ioremap_p4d_range(pgd, addr, next, phys_addr+addr, prot);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);

	flush_cache_vmap(start, end);

	return err;
}
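A minimal caller sketch (not part of the example above): generic ioremap() implementations typically reserve a hole in vmalloc space with get_vm_area() and then hand the virtual range to ioremap_page_range(). The my_ioremap() name and the exact alignment handling below are illustrative assumptions, not a quote of any architecture's code.
/* needs <linux/vmalloc.h>, <linux/io.h> and <asm/pgtable.h> */
static void __iomem *my_ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot)
{
	unsigned long offset = phys_addr & ~PAGE_MASK;	/* keep the sub-page offset */
	struct vm_struct *area;
	unsigned long vaddr;

	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(size + offset);

	area = get_vm_area(size, VM_IOREMAP);	/* reserve vmalloc space */
	if (!area)
		return NULL;
	vaddr = (unsigned long)area->addr;

	/* build kernel page tables covering [vaddr, vaddr + size) */
	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
		free_vm_area(area);
		return NULL;
	}

	return (void __iomem *)(vaddr + offset);
}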
Example #6
/*
 * kernel virtual address is required to implement vmalloc/pkmap/fixmap
 * Refer to asm/processor.h for System Memory Map
 *
 * It simply copies the PMD entry (pointer to 2nd level page table or hugepage)
 * from swapper pgdir to task pgdir. The 2nd level table/page is thus shared
 */
noinline static int handle_kernel_vaddr_fault(unsigned long address)
{
	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 */
	pgd_t *pgd, *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd = pgd_offset_fast(current->active_mm, address);
	pgd_k = pgd_offset_k(address);

	if (!pgd_present(*pgd_k))
		goto bad_area;

	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		goto bad_area;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		goto bad_area;

	set_pmd(pmd, *pmd_k);

	/* XXX: create the TLB entry here */
	return 0;

bad_area:
	return 1;
}
Example #7
static void shmedia_unmapioaddr(unsigned long vaddr)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_k(vaddr);
	if (pgd_none(*pgdp) || pgd_bad(*pgdp))
		return;

	pudp = pud_offset(pgdp, vaddr);
	if (pud_none(*pudp) || pud_bad(*pudp))
		return;

	pmdp = pmd_offset(pudp, vaddr);
	if (pmd_none(*pmdp) || pmd_bad(*pmdp))
		return;

	ptep = pte_offset_kernel(pmdp, vaddr);

	if (pte_none(*ptep) || !pte_present(*ptep))
		return;

	/* clear only this PTE; wiping the whole PTE page would clobber neighbouring mappings */
	pte_clear(&init_mm, vaddr, ptep);
}
Example #8
static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages)
{
	pgd_t *pgd = pgd_offset_k(vaddr);
	pmd_t *pmd = pmd_offset(pgd, vaddr);
	pte_t *pte;
	unsigned long i;

	n_pages = ALIGN(n_pages, PTRS_PER_PTE);

	pr_debug("%s: vaddr: 0x%08lx, n_pages: %ld\n",
		 __func__, vaddr, n_pages);

	pte = memblock_alloc_low(n_pages * sizeof(pte_t), PAGE_SIZE);

	for (i = 0; i < n_pages; ++i)
		pte_clear(NULL, 0, pte + i);

	for (i = 0; i < n_pages; i += PTRS_PER_PTE, ++pmd) {
		pte_t *cur_pte = pte + i;

		BUG_ON(!pmd_none(*pmd));
		set_pmd(pmd, __pmd(((unsigned long)cur_pte) & PAGE_MASK));
		BUG_ON(cur_pte != pte_offset_kernel(pmd, 0));
		pr_debug("%s: pmd: 0x%p, pte: 0x%p\n",
			 __func__, pmd, cur_pte);
	}
	return pte;
}
Example #9
static int
remap_area_sections(unsigned long virt, unsigned long pfn,
                    size_t size, const struct mem_type *type)
{
    unsigned long addr = virt, end = virt + size;
    pgd_t *pgd;
    pud_t *pud;
    pmd_t *pmd;

    unmap_area_sections(virt, size);

    pgd = pgd_offset_k(addr);
    pud = pud_offset(pgd, addr);
    pmd = pmd_offset(pud, addr);
    do {
        pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
        pfn += SZ_1M >> PAGE_SHIFT;
        pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
        pfn += SZ_1M >> PAGE_SHIFT;
        flush_pmd_entry(pmd);

        addr += PMD_SIZE;
        pmd += 2;
    } while (addr < end);

    return 0;
}
Example #10
/*
 * Walk a vmap address to the struct page it maps.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				pte_t *ptep, pte;

				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;
				if (pte_present(pte))
					page = pte_page(pte);
				pte_unmap(ptep);
			}
		}
	}
	return page;
}
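A short usage sketch, assuming an ordinary vmalloc() buffer: each virtual page of the buffer is translated to its backing struct page, which is the typical reason callers (scatterlist builders, debug dumps) need vmalloc_to_page(). The function name dump_vmalloc_pfns() is made up for illustration.
/* needs <linux/vmalloc.h> and <linux/mm.h> */
static int dump_vmalloc_pfns(void)
{
	void *buf = vmalloc(2 * PAGE_SIZE);
	unsigned long off;

	if (!buf)
		return -ENOMEM;

	for (off = 0; off < 2 * PAGE_SIZE; off += PAGE_SIZE) {
		/* walk the kernel page tables for this vmalloc address */
		struct page *pg = vmalloc_to_page(buf + off);

		pr_info("vmalloc page %p -> pfn %lx\n",
			buf + off, page_to_pfn(pg));
	}

	vfree(buf);
	return 0;
}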
Example #11
/* Ensure all existing pages follow the policy. */
static int
verify_pages(unsigned long addr, unsigned long end, unsigned long *nodes)
{
	while (addr < end) {
		struct page *p;
		pte_t *pte;
		pmd_t *pmd;
		pgd_t *pgd = pgd_offset_k(addr);
		if (pgd_none(*pgd)) {
			addr = (addr + PGDIR_SIZE) & PGDIR_MASK;
			continue;
		}
		pmd = pmd_offset(pgd, addr);
		if (pmd_none(*pmd)) {
			addr = (addr + PMD_SIZE) & PMD_MASK;
			continue;
		}
		p = NULL;
		pte = pte_offset_map(pmd, addr);
		if (pte_present(*pte))
			p = pte_page(*pte);
		pte_unmap(pte);
		if (p) {
			unsigned nid = page_to_nid(p);
			if (!test_bit(nid, nodes))
				return -EIO;
		}
		addr += PAGE_SIZE;
	}
	return 0;
}
Example #12
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);

	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				pte_t *ptep, pte;

				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;
				if (pte_present(pte))
					page = pte_page(pte);
				pte_unmap(ptep);
			}
		}
	}
	return page;
}
Example #13
/*
 * When built with CONFIG_DEBUG_PAGEALLOC and CONFIG_HIBERNATION, this function
 * is used to determine if a linear map page has been marked as not-valid by
 * CONFIG_DEBUG_PAGEALLOC. Walk the page table and check the PTE_VALID bit.
 * This is based on kern_addr_valid(), which almost does what we need.
 *
 * Because this is only called on the kernel linear map,  p?d_sect() implies
 * p?d_present(). When debug_pagealloc is enabled, sections mappings are
 * disabled.
 */
bool kernel_page_present(struct page *page)
{
	pgd_t *pgdp;
	pud_t *pudp, pud;
	pmd_t *pmdp, pmd;
	pte_t *ptep;
	unsigned long addr = (unsigned long)page_address(page);

	pgdp = pgd_offset_k(addr);
	if (pgd_none(READ_ONCE(*pgdp)))
		return false;

	pudp = pud_offset(pgdp, addr);
	pud = READ_ONCE(*pudp);
	if (pud_none(pud))
		return false;
	if (pud_sect(pud))
		return true;

	pmdp = pmd_offset(pudp, addr);
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return false;
	if (pmd_sect(pmd))
		return true;

	ptep = pte_offset_kernel(pmdp, addr);
	return pte_valid(READ_ONCE(*ptep));
}
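A hedged sketch of the kind of caller this serves: hibernation-style code that must skip linear-map pages which CONFIG_DEBUG_PAGEALLOC has unmapped. The helper name page_is_saveable() is hypothetical.
/* hypothetical helper, loosely modelled on hibernation's saveable-page checks */
static bool page_is_saveable(struct page *page)
{
	if (!page || PageReserved(page))
		return false;

	/* reject pages the kernel linear map does not currently map */
	return kernel_page_present(page);
}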
Example #14
/*
 * copy_user_page
 * @to: P1 address
 * @from: P1 address
 * @address: U0 address to be mapped
 * @page: page (virt_to_page(to))
 */
void copy_user_page(void *to, void *from, unsigned long address, 
		    struct page *page)
{
	__set_bit(PG_mapped, &page->flags);
	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
		copy_page(to, from);
	else {
		pgprot_t pgprot = __pgprot(_PAGE_PRESENT | 
					   _PAGE_RW | _PAGE_CACHABLE |
					   _PAGE_DIRTY | _PAGE_ACCESSED | 
					   _PAGE_HW_SHARED | _PAGE_FLAGS_HARD);
		unsigned long phys_addr = PHYSADDR(to);
		unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS);
		pgd_t *dir = pgd_offset_k(p3_addr);
		pmd_t *pmd = pmd_offset(dir, p3_addr);
		pte_t *pte = pte_offset_kernel(pmd, p3_addr);
		pte_t entry;
		unsigned long flags;

		entry = pfn_pte(phys_addr >> PAGE_SHIFT, pgprot);
		down(&p3map_sem[(address & CACHE_ALIAS)>>12]);
		set_pte(pte, entry);
		local_irq_save(flags);
		__flush_tlb_page(get_asid(), p3_addr);
		local_irq_restore(flags);
		update_mmu_cache(NULL, p3_addr, entry);
		__copy_user_page((void *)p3_addr, from, to);
		pte_clear(&init_mm, p3_addr, pte);
		up(&p3map_sem[(address & CACHE_ALIAS)>>12]);
	}
}
Example #15
/*From: http://www.scs.ch/~frey/linux/memorymap.html*/
volatile void *virt_to_kseg(volatile void *address) {
    pgd_t *pgd; pmd_t *pmd; pte_t *ptep, pte;
    unsigned long va, ret = 0UL;
    va=VMALLOC_VMADDR((unsigned long)address);
    /* get the page directory. Use the kernel memory map. */
    pgd = pgd_offset_k(va);
    /* check whether we found an entry */
    if (!pgd_none(*pgd)) {
        /* I'm not sure if we need this, or if the line for 2.4 above
           will work reliably too. If you know, please email me :-) */
        pud_t *pud = pud_offset(pgd, va);       
        pmd = pmd_offset(pud, va);
        /* check whether we found an entry */
        if (!pmd_none(*pmd)) {
            /* get a pointer to the page table entry */
            ptep = pte_offset_map(pmd, va);
            pte = *ptep;
            /* check for a valid page */
            if (pte_present(pte)) {
                /* get the address the page is referring to */
                ret = (unsigned long)page_address(pte_page(pte));
                /* add the offset within the page to the page address */
                ret |= (va & (PAGE_SIZE -1));
            }
        }
    }
    return((volatile void *)ret);
}
Example #16
/*
 * map_kernel_page currently only called by __ioremap
 * map_kernel_page adds an entry to the ioremap page table
 * and adds an entry to the HPT, possibly bolting it
 */
int hash__map_kernel_page(unsigned long ea, unsigned long pa, unsigned long flags)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	BUILD_BUG_ON(TASK_SIZE_USER64 > H_PGTABLE_RANGE);
	if (slab_is_available()) {
		pgdp = pgd_offset_k(ea);
		pudp = pud_alloc(&init_mm, pgdp, ea);
		if (!pudp)
			return -ENOMEM;
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
							  __pgprot(flags)));
	} else {
		/*
		 * If the mm subsystem is not fully up, we cannot create a
		 * linux page table entry for this mapping.  Simply bolt an
		 * entry in the hardware page table.
		 *
		 */
		if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags,
Example #17
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
    unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
    pgd_t *pgd;
    pud_t *pud;
    pmd_t *pmdp;

    flush_cache_vunmap(addr, end);
    pgd = pgd_offset_k(addr);
    pud = pud_offset(pgd, addr);
    pmdp = pmd_offset(pud, addr);
    do {
        pmd_t pmd = *pmdp;

        if (!pmd_none(pmd)) {
            pmd_clear(pmdp);
            init_mm.context.kvm_seq++;

            if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
                pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
        }

        addr += PMD_SIZE;
        pmdp += 2;
    } while (addr < end);

    if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
        __check_kvm_seq(current->active_mm);

    flush_tlb_kernel_range(virt, end);
}
Example #18
static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
                         size_t size, const struct mem_type *type)
{
    unsigned long addr = virt, end = virt + size;
    pgd_t *pgd;
    pud_t *pud;
    pmd_t *pmd;

    unmap_area_sections(virt, size);

    pgd = pgd_offset_k(virt);
    pud = pud_offset(pgd, addr);
    pmd = pmd_offset(pud, addr);
    do {
        unsigned long super_pmd_val, i;

        super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
                        PMD_SECT_SUPER;
        super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

        for (i = 0; i < 8; i++) {
            pmd[0] = __pmd(super_pmd_val);
            pmd[1] = __pmd(super_pmd_val);
            flush_pmd_entry(pmd);

            addr += PMD_SIZE;
            pmd += 2;
        }

        pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
    } while (addr < end);

    return 0;
}
Example #19
/*
 * map_io_page currently only called by __ioremap
 * map_io_page adds an entry to the ioremap page table
 * and adds an entry to the HPT, possibly bolting it
 */
static int map_io_page(unsigned long ea, unsigned long pa, int flags)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	if (mem_init_done) {
		pgdp = pgd_offset_k(ea);
		pudp = pud_alloc(&init_mm, pgdp, ea);
		if (!pudp)
			return -ENOMEM;
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
							  __pgprot(flags)));
	} else {
		/*
		 * If the mm subsystem is not fully up, we cannot create a
		 * linux page table entry for this mapping.  Simply bolt an
		 * entry in the hardware page table.
		 *
		 */
		if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags,
Example #20
/*
 * Create PGD aligned trampoline table to allow real mode initialization
 * of additional CPUs. Consume only 1 low memory page.
 */
void __meminit init_trampoline(void)
{
	unsigned long paddr, paddr_next;
	pgd_t *pgd;
	pud_t *pud_page, *pud_page_tramp;
	int i;

	if (!kaslr_memory_enabled()) {
		init_trampoline_default();
		return;
	}

	pud_page_tramp = alloc_low_page();

	paddr = 0;
	pgd = pgd_offset_k((unsigned long)__va(paddr));
	pud_page = (pud_t *) pgd_page_vaddr(*pgd);

	for (i = pud_index(paddr); i < PTRS_PER_PUD; i++, paddr = paddr_next) {
		pud_t *pud, *pud_tramp;
		unsigned long vaddr = (unsigned long)__va(paddr);

		pud_tramp = pud_page_tramp + pud_index(paddr);
		pud = pud_page + pud_index(vaddr);
		paddr_next = (paddr & PUD_MASK) + PUD_SIZE;

		*pud_tramp = *pud;
	}

	set_pgd(&trampoline_pgd_entry,
		__pgd(_KERNPG_TABLE | __pa(pud_page_tramp)));
}
Example #21
static int remap_area_pages(unsigned long address, unsigned long phys_addr,
				 unsigned long size, unsigned long flags)
{
	int error;
	pgd_t *pgd;
	unsigned long end = address + size;

	phys_addr -= address;
	pgd = pgd_offset_k(address);
	flush_cache_all();
	if (address >= end)
		BUG();
	do {
		pud_t *pud;
		pud = pud_alloc(&init_mm, pgd, address);
		error = -ENOMEM;
		if (!pud)
			break;
		if (remap_area_pud(pud, address, end - address,
					 phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		pgd++;
	} while (address && (address < end));
	flush_tlb_all();
	return error;
}
Example #22
/* we parse the page tables in order to find the direct mapping of
   the page. This works only without holding any locks for pages we
   are sure that they do not move in memory.
*/
volatile void *virt_to_kseg(volatile void *address)
{
        pgd_t *pgd; pmd_t *pmd; pte_t *ptep, pte;
        unsigned long va, ret = 0UL;
        
        va=VMALLOC_VMADDR((unsigned long)address);
        
        /* get the page directory. Use the kernel memory map. */
        pgd = pgd_offset_k(va);

        /* check whether we found an entry */
        if (!pgd_none(*pgd))
        {
              /* get the page middle directory */
              pmd = pmd_offset(pgd, va);
              /* check whether we found an entry */
              if (!pmd_none(*pmd))
              {
                  /* get a pointer to the page table entry */
                  ptep = pte_offset(pmd, va);
                  pte = *ptep;
                  /* check for a valid page */
                  if (pte_present(pte))
                  {
                        /* get the address the page is referring to */
                        ret = (unsigned long)page_address(pte_page(pte));
                        /* add the offset within the page to the page address */
                        ret |= (va & (PAGE_SIZE -1));
                  }
              }
        }
        return((volatile void *)ret);
}
Example #23
static pte_t *__get_pte_phys(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return NULL;
	}

	pud = pud_alloc(NULL, pgd, addr);
	if (unlikely(!pud))
		return NULL;	/* don't dereference a NULL pud via pud_ERROR() */

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd))
		return NULL;	/* likewise, pmd is NULL here */

	return pte_offset_kernel(pmd, addr);
}
Example #24
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long address;
	int i;

	for (i = 0; i < numpages; i++) {
		address = page_to_phys(page + i);
		pgd = pgd_offset_k(address);
		pud = pud_offset(pgd, address);
		pmd = pmd_offset(pud, address);
		pte = pte_offset_kernel(pmd, address);
		if (!enable) {
			__ptep_ipte(address, pte);
			pte_val(*pte) = _PAGE_TYPE_EMPTY;
			continue;
		}
		*pte = mk_pte_phys(address, __pgprot(_PAGE_TYPE_RW));
		/* Flush cpu write queue. */
		mb();
	}
}
Example #25
static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(addr);
	do {
		pmd_t *pmd = pmd_offset(pgd, addr);

		pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	return 0;
}
Example #26
int syscall_hooking_init(void)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if ((sys_call_table = locate_sys_call_table()) == NULL) {
		printk("<0> Can't find sys_call_table\n");
		return -1;
	}

	pgd = pgd_offset_k((unsigned long)sys_call_table);
	if (pgd_none(*pgd))
		return -1;	/* int function: return an error code, not NULL */
	pud = pud_offset(pgd, (unsigned long)sys_call_table);
	if (pud_none(*pud))
		return -1;
	pmd = pmd_offset(pud, (unsigned long)sys_call_table);
	if (pmd_none(*pmd))
		return -1;
	if (pmd_large(*pmd)) {
		/* the syscall table sits in a large page, so the pmd acts as the pte */
		pte = (pte_t *)pmd;
	} else {
		pte = pte_offset_kernel(pmd, (unsigned long)sys_call_table);
	}
	pte->pte_low |= _PAGE_KERNEL;
	__flush_tlb_single((unsigned long)sys_call_table);
	printk("<0> sys_call_table is loaded at %p\n", sys_call_table);

	original_call = (void *)sys_call_table[__NR_open];
	sys_call_table[__NR_open] = (void *)sys_our_open;
	
	printk("<0> Module Init\n");
	return 0;
}
Example #27
static int handle_vmalloc_fault(struct mm_struct *mm, unsigned long address)
{
    /*
     * Synchronize this task's top level page-table
     * with the 'reference' page table.
     */
    pgd_t *pgd, *pgd_k;
    pud_t *pud, *pud_k;
    pmd_t *pmd, *pmd_k;

    pgd = pgd_offset_fast(mm, address);
    pgd_k = pgd_offset_k(address);

    if (!pgd_present(*pgd_k))
        goto bad_area;

    pud = pud_offset(pgd, address);
    pud_k = pud_offset(pgd_k, address);
    if (!pud_present(*pud_k))
        goto bad_area;

    pmd = pmd_offset(pud, address);
    pmd_k = pmd_offset(pud_k, address);
    if (!pmd_present(*pmd_k))
        goto bad_area;

    set_pmd(pmd, *pmd_k);

    /* XXX: create the TLB entry here */
    return 0;

bad_area:
    return 1;
}
Example #28
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return;
	}

	pud = pud_alloc(NULL, pgd, addr);
	if (unlikely(!pud))
		return;		/* don't dereference a NULL pud via pud_ERROR() */

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd))
		return;		/* likewise, pmd is NULL here */

	pte = pte_offset_kernel(pmd, addr);
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
	local_flush_tlb_one(get_asid(), addr);
}
Example #29
/*
 * need to get a 16k page for level 1
 */
pgd_t *get_pgd_slow(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;
	unsigned long flags;

	new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!new_pgd)
		goto no_pgd;

	memset(new_pgd, 0, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	init_pgd = pgd_offset_k(0);
	pgd_list_lock(flags);
	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
		       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));
	pgd_list_add(new_pgd);
	pgd_list_unlock(flags);

	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

	if (!vectors_high()) {
#ifdef CONFIG_ARM_FCSE
		/* FCSE does not work without high vectors. */
		BUG();
#endif /* CONFIG_ARM_FCSE */

		/*
		 * On ARM, first page must always be allocated since it
		 * contains the machine vectors.
		 */
		new_pmd = pmd_alloc(mm, new_pgd, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc_map(mm, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

		init_pmd = pmd_offset(init_pgd, 0);
		init_pte = pte_offset_map_nested(init_pmd, 0);
		set_pte_ext(new_pte, *init_pte, 0);
		pte_unmap_nested(init_pte);
		pte_unmap(new_pte);
	}

	return new_pgd;

no_pte:
	pmd_free(mm, new_pmd);
no_pmd:
	free_pages((unsigned long)new_pgd, 2);
no_pgd:
	return NULL;
}
Example #30
void __init efi_call_phys_epilog(pgd_t *save_pgd)
{
	/*
	 * After the lock is released, the original page table is restored.
	 */
	int pgd_idx, i;
	int nr_pgds;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	if (!efi_enabled(EFI_OLD_MEMMAP)) {
		write_cr3((unsigned long)save_pgd);
		__flush_tlb_all();
		return;
	}

	nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);

	for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++) {
		pgd = pgd_offset_k(pgd_idx * PGDIR_SIZE);
		set_pgd(pgd, save_pgd[pgd_idx]);

		if (!(pgd_val(*pgd) & _PAGE_PRESENT))
			continue;

		for (i = 0; i < PTRS_PER_P4D; i++) {
			p4d = p4d_offset(pgd,
					 pgd_idx * PGDIR_SIZE + i * P4D_SIZE);

			if (!(p4d_val(*p4d) & _PAGE_PRESENT))
				continue;

			pud = (pud_t *)p4d_page_vaddr(*p4d);
			pud_free(&init_mm, pud);
		}

		p4d = (p4d_t *)pgd_page_vaddr(*pgd);
		p4d_free(&init_mm, p4d);
	}

	kfree(save_pgd);

	__flush_tlb_all();
	early_code_mapping_set_exec(0);
}