Code example #1
static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
				unsigned long addr, unsigned long end,
				swp_entry_t entry, struct page *page)
{
	pte_t swp_pte = swp_entry_to_pte(entry);
	pte_t *pte;
	int ret = 0;

	/*
	 * We don't actually need pte lock while scanning for swp_pte: since
	 * we hold page lock and mmap_sem, swp_pte cannot be inserted into the
	 * page table while we're scanning; though it could get zapped, and on
	 * some architectures (e.g. x86_32 with PAE) we might catch a glimpse
	 * of unmatched parts which look like swp_pte, so unuse_pte must
	 * recheck under pte lock.  Scanning without pte lock lets it be
	 * preemptible whenever CONFIG_PREEMPT but not CONFIG_HIGHPTE.
	 */
	pte = pte_offset_map(pmd, addr);
	do {
		/*
		 * swapoff spends a _lot_ of time in this loop!
		 * Test inline before going to call unuse_pte.
		 */
		if (unlikely(pte_same(*pte, swp_pte))) {
			pte_unmap(pte);
			ret = unuse_pte(vma, pmd, addr, entry, page);
			if (ret)
				goto out;
			pte = pte_offset_map(pmd, addr);
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap(pte - 1);
out:
	return ret;
}
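Every example on this page shares the same walk/map/unmap shape: descend from the pgd through the pud and pmd, take a short-lived mapping of the PTE page with pte_offset_map() (which may kmap it when CONFIG_HIGHPTE is enabled), and release it with pte_unmap(). Below is a minimal reference sketch of that pattern, not code from any of the projects listed; pte_is_present() is an invented name, and it assumes a pre-5.x kernel where pte_offset_map() takes a pmd pointer and an address. As the comment in example #1 notes, reading the PTE this way is lockless, so any decision that matters must be rechecked under the pte lock.

static int pte_is_present(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep;
	int present;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud) || pud_bad(*pud))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;

	/* may kmap the PTE page when CONFIG_HIGHPTE is enabled */
	ptep = pte_offset_map(pmd, addr);
	present = pte_present(*ptep);	/* lockless snapshot */
	pte_unmap(ptep);		/* always pair with pte_offset_map() */

	return present;
}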
Code example #2
File: memmap.c Project: GodFox/magx_kernel_xpixl
static void
count_pmd_pages(struct mm_struct * mm, struct vm_area_struct * vma,
                pmd_t *dir, unsigned long address, unsigned long end,
                signed char *data_buf, int node_map)
{
    pte_t * pte;
    unsigned long pmd_end;
    struct page *page;
    unsigned long pfn;
    int val, index;

    if (pmd_none(*dir))
        return;
    pmd_end = (address + PMD_SIZE) & PMD_MASK;
    if (end > pmd_end)
        end = pmd_end;
    index = 0;
    pte = pte_offset_map(dir, address);
    do {
        if (!pte_none(*pte) && pte_present(*pte)) {
            pfn = pte_pfn(*pte);
            if (pfn_valid(pfn)) {
                page = pfn_to_page(pfn);
                val = node_map ? page_to_nid(page) :
                      page_count(page);
                val = (val > 99) ? 99 : val;
                data_buf[index] = val;
            }
        }
        address += PAGE_SIZE;
        pte++;
        index++;
    } while (address && (address < end));
    pte_unmap(pte - 1);
}
Code example #3
/*
 * Walk a vmap address to the struct page it maps.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				pte_t *ptep, pte;

				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;
				if (pte_present(pte))
					page = pte_page(pte);
				pte_unmap(ptep);
			}
		}
	}
	return page;
}
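A hedged usage sketch for vmalloc_to_page(): because a vmalloc area is only virtually contiguous, each page-sized step has to be resolved separately. count_vmalloc_pages() is an invented name for illustration only.

static unsigned int count_vmalloc_pages(const void *buf, size_t size)
{
	unsigned int n = 0;
	size_t off;

	/* walk the buffer one page at a time; NULL means no backing page */
	for (off = 0; off < size; off += PAGE_SIZE)
		if (vmalloc_to_page((const char *)buf + off))
			n++;

	return n;
}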
Code example #4
File: tlb.c Project: CSU-GH/okl4_3.0
pte_t *
lookup_pte(pgd_t *page_dir, unsigned long pf_address)
{
	/*
	 * find the page table entry within the page table hierarchy
	 */
	pte_t *pte = NULL;
	pgd_t *pgd = page_dir + pgd_index(pf_address);

#ifdef DEBUG_LOOKUP_PTABS
	if ((int)page_dir < 0x1000) {
		printk("%s: page_dir=%x\n", __func__, (int)page_dir);
		enter_kdebug("page_dir<4096");
	}
	printk("%s: %lx pdir = %p\n", __func__, pf_address, pgd);
#endif
	if (pgd_present(*pgd)) {
		pmd_t *pmd = pmd_offset(pgd, pf_address);
#ifdef DEBUG_LOOKUP_PTABS
		printk("pgd_present(*%x) is true\n", pgd);
		printk(" pmd = %p\n", pmd);
#endif
		if (pmd_present(*pmd)) {
#ifdef DEBUG_LOOKUP_PTABS
			printk("pmd_present(*%x) is true\n", pmd);
#endif
			pte = pte_offset_map(pmd, pf_address);
		}
	}
#ifdef DEBUG_LOOKUP_PTABS
	printk("returning:  pte = %p\n", pte);
#endif
	return pte;
}
Code example #5
File: kmem_mapper.c Project: anastop/htsynch
/*From: http://www.scs.ch/~frey/linux/memorymap.html*/
volatile void *virt_to_kseg(volatile void *address) {
    pgd_t *pgd; pmd_t *pmd; pte_t *ptep, pte;
    unsigned long va, ret = 0UL;
    va=VMALLOC_VMADDR((unsigned long)address);
    /* get the page directory. Use the kernel memory map. */
    pgd = pgd_offset_k(va);
    /* check whether we found an entry */
    if (!pgd_none(*pgd)) {
        /*
         * I'm not sure if we need this, or whether the 2.4-style
         * lookup would work reliably too.
         * If you know, please email me :-)
         */
        pud_t *pud = pud_offset(pgd, va);       
        pmd = pmd_offset(pud, va);
        /* check whether we found an entry */
        if (!pmd_none(*pmd)) {
            /* get a pointer to the page table entry */
            ptep = pte_offset_map(pmd, va);
            pte = *ptep;
            pte_unmap(ptep);
            /* check for a valid page */
            if (pte_present(pte)) {
                /* get the kernel address of the page the pte refers to */
                ret = (unsigned long)page_address(pte_page(pte));
                /* add the offset within the page to the page address */
                ret |= (va & (PAGE_SIZE -1));
            }
        }
    }
    return((volatile void *)ret);
}
Code example #6
File: oleole_spt.c Project: nminoru/oleolevm
int oleole_get_gPTE_offset_without_alloc(struct mm_struct *mm, pte_t **result, unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd   = pgd_offset(mm, address);
	if (unlikely(pgd_none(*pgd)))
		return -1;

	pud = pud_offset(pgd, address);

	if (unlikely(oleole_pud_none(*pud)))
		return -1;

	pmd = oleole_pmd_offset(pud, address);

	if (unlikely(oleole_pmd_none(*pmd)))
		return -1;

	pte  = pte_offset_map(pmd, address);

	*result = pte;

	return 0;
}
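Note that the helper above hands back a PTE pointer that is still mapped, so its caller owns the matching pte_unmap(). A minimal caller sketch under the same kernel conventions; inspect_gpte() is an invented name for illustration.

static int inspect_gpte(struct mm_struct *mm, unsigned long address)
{
	pte_t *ptep;
	pte_t pte;

	if (oleole_get_gPTE_offset_without_alloc(mm, &ptep, address))
		return -1;

	pte = *ptep;		/* snapshot the guest PTE value */
	pte_unmap(ptep);	/* balance the pte_offset_map() done in the helper */

	return pte_present(pte) ? 1 : 0;
}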
Code example #7
File: migrate.c Project: lastweek/NVM
static unsigned long clear_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
				     unsigned long addr, unsigned long end)
{
	pte_t *pte;
	pte_t ptecont;

	pte = pte_offset_map(pmd, addr);
	do {
		ptecont = *pte;

		if (pte_none(ptecont))
			continue;

		/*
		 * pte_young is a confusing name: it just tests _PAGE_ACCESSED.
		 * A better name would be pte_accessed.
		 */
		if (pte_present(ptecont) && pte_young(ptecont)) {
			/*
			 * The physical page, which this pte points to, has
			 * been read or written to during this time period.
			 */
			DEBUG_INFO("[%#016lx - %#016lx], pfn = %#013lx", addr, end, pte_pfn(ptecont));
			collect_statistics(pte_pfn(ptecont));
			/*
			 * pte_clear_flags() only returns a modified copy, so
			 * write it back to actually clear the accessed bit.
			 */
			set_pte(pte, pte_clear_flags(ptecont, _PAGE_ACCESSED));
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap(pte - 1);

	return addr;
}
Code example #8
File: tlb_hash64.c Project: quadcores/cbs_4.2.4
void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
{
    pte_t *pte;
    pte_t *start_pte;
    unsigned long flags;

    addr = _ALIGN_DOWN(addr, PMD_SIZE);
    /* Note: Normally, we should only ever use a batch within a
     * PTE locked section. This violates the rule, but will work
     * since we don't actually modify the PTEs, we just flush the
     * hash while leaving the PTEs intact (including their reference
     * to being hashed). This is not the most performance oriented
     * way to do things but is fine for our needs here.
     */
    local_irq_save(flags);
    arch_enter_lazy_mmu_mode();
    start_pte = pte_offset_map(pmd, addr);
    for (pte = start_pte; pte < start_pte + PTRS_PER_PTE; pte++) {
        unsigned long pteval = pte_val(*pte);
        if (pteval & _PAGE_HASHPTE)
            hpte_need_flush(mm, addr, pte, pteval, 0);
        addr += PAGE_SIZE;
    }
    pte_unmap(start_pte);
    arch_leave_lazy_mmu_mode();
    local_irq_restore(flags);
}
Code example #9
File: msync.c Project: BackupTheBerlios/tuxap
static int filemap_sync_pte_range(pmd_t * pmd,
	unsigned long address, unsigned long end, 
	struct vm_area_struct *vma, unsigned int flags)
{
	pte_t *pte;
	int error;

	if (pmd_none(*pmd))
		return 0;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return 0;
	}
	pte = pte_offset_map(pmd, address);
	if ((address & PMD_MASK) != (end & PMD_MASK))
		end = (address & PMD_MASK) + PMD_SIZE;
	error = 0;
	do {
		error |= filemap_sync_pte(pte, vma, address, flags);
		address += PAGE_SIZE;
		pte++;
	} while (address && (address < end));

	pte_unmap(pte - 1);

	return error;
}
Code example #10
/*
 * Dump out the page tables associated with 'addr' in mm 'mm'.
 */
void show_pte(struct mm_struct *mm, unsigned long addr)
{
    pgd_t *pgd;

    if (!mm)
        mm = &init_mm;

    pr_alert("pgd = %p\n", mm->pgd);
    pgd = pgd_offset(mm, addr);
    pr_alert("[%08lx] *pgd=%016llx", addr, pgd_val(*pgd));

    do {
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (pgd_none(*pgd) || pgd_bad(*pgd))
            break;

        pud = pud_offset(pgd, addr);
        if (pud_none(*pud) || pud_bad(*pud))
            break;

        pmd = pmd_offset(pud, addr);
        printk(", *pmd=%016llx", pmd_val(*pmd));
        if (pmd_none(*pmd) || pmd_bad(*pmd))
            break;

        pte = pte_offset_map(pmd, addr);
        printk(", *pte=%016llx", pte_val(*pte));
        pte_unmap(pte);
    } while(0);

    printk("\n");
}
Code example #11
File: wss.c Project: dpwong/cse430
int kthread_wss(void *data)
{
	unsigned long va;
	int ret;
	int wss;

	pgd_t *pgd;
	pmd_t *pmd;
	pud_t *pud;
	pte_t *ptep;
	
	struct task_struct *task;
	while(!kthread_should_stop())
	{
		printk(KERN_INFO "Checking process' WSS.\n");
		for_each_process(task)
		{
			wss = 0;
			if(task->mm != NULL)
			{
				struct vm_area_struct *temp = task->mm->mmap;
				while(temp)
				{
					if(!(temp->vm_flags & VM_IO))
					{
						for(va = temp->vm_start; va < temp->vm_end; va+=PAGE_SIZE)
						{
				  			pgd = pgd_offset(task->mm,va);
			 		  		if(pgd_none(*pgd))
								break;
							pud = pud_offset(pgd,va);
							if(pud_none(*pud))
								break;
							pmd = pmd_offset(pud,va);
							if(pmd_none(*pmd))
								break;
							ptep = pte_offset_map(pmd,va);
							ret = 0;
							if(pte_young(*ptep))
							{
								ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
										(unsigned long *) &ptep->pte);
								wss++;
							}
							if(ret)
							{
								pte_update(task->mm, va, ptep);
							}
							pte_unmap(ptep);
						}
					}
					temp = temp->vm_next;
				}
				printk(KERN_INFO "%i: %i\n", task->pid, wss);
			}
		}
		msleep(1000);
	}
	return 0;
}
Code example #12
File: oleole_spt.c Project: nminoru/oleolevm
int oleole_get_gPTEInfo_offset(struct mm_struct *mm, pte_t **result, unsigned long address)
{
	pgd_t *pgd, pgd_v;
	pud_t *pud, pud_v;
	pmd_t *pmd, pmd_v;
	pte_t *pte;

	pgd   = pgd_offset(mm, address);
	pgd_v = *pgd;
	if (pgd_none(pgd_v))
		if (pud_alloc(mm, pgd, address) == NULL)
			return -ENOMEM;

	pud   = pud_offset(pgd, address);
	pud_v = *pud;

	if (oleole_pud_none(pud_v))
		if (oleole_pmd_alloc(pud))
			return -ENOMEM;

	pmd   = pmd_offset(pud, address);
	pmd_v = *pmd;

	if (oleole_pmd_none(pmd_v))
		if (oleole_pte_alloc(pmd))
			return -ENOMEM;

	pte  = pte_offset_map(pmd, address);

	*result = pte;

	return 0;
}
Code example #13
File: tlb-r8k.c Project: 12019/hg556a_source
/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and does the needy.
 */
void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int pid;

	/*
	 * Handle debugger faulting in for debuggee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	pid = read_c0_entryhi() & ASID_MASK;

	local_irq_save(flags);
	address &= PAGE_MASK;
	write_c0_vaddr(address);
	write_c0_entryhi(pid);
	pgdp = pgd_offset(vma->vm_mm, address);
	pmdp = pmd_offset(pgdp, address);
	ptep = pte_offset_map(pmdp, address);
	tlb_probe();

	write_c0_entrylo(pte_val(*ptep++) >> 6);
	tlb_write();

	write_c0_entryhi(pid);
	local_irq_restore(flags);
}
Code example #14
File: fault.c Project: kizukukoto/WDN900_GPL
/*
 * fault_is_priv()
 *	Return true if the fault is a privilege violation.
 */
STATIC int fault_is_priv(struct pt_regs *regs,
			 unsigned long missqw0, 
			 unsigned long missqw1)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *ptep;
	unsigned long address;

	/*
	 * Check if this is user or kernel in pt_regs/CSR.
	 */
	pgd = (pgd_t *)(MMU_MISSQW0_PGD_GET(missqw0) << MMU_MISSQW0_PGD_SHIFT);
	address =  (unsigned long)(MMU_MISSQW1_VPN_GET(missqw1) << MMU_VPN_SHIFT);
	pmd = (pmd_t *)__pgd_offset(pgd, address);
	if (unlikely(pmd_none(*pmd)) || (unlikely(pmd_bad(*pmd)))) {
		return 0;
	}

	ptep = pte_offset_map(pmd, address);
	if (unlikely(pte_none(*ptep)) || (unlikely(pte_bad(*ptep)))) {
		return 0;
	}

	/*
	 * If the PTE is a supervisory PTE and we are in user_mode()
	 * declare this as a privilege violation.
	 */
	if (user_mode(regs) && ((pte_val(*ptep) & L_PTE_USER) == 0)) {
		return 1;
	}
	return 0;
}
Code example #15
File: mempolicy.c Project: kzlin129/tt-gpl
/* Ensure all existing pages follow the policy. */
static int check_pte_range(struct mm_struct *mm, pmd_t *pmd,
		unsigned long addr, unsigned long end, unsigned long *nodes)
{
	pte_t *orig_pte;
	pte_t *pte;

	spin_lock(&mm->page_table_lock);
	orig_pte = pte = pte_offset_map(pmd, addr);
	do {
		unsigned long pfn;
		unsigned int nid;

		if (!pte_present(*pte))
			continue;
		pfn = pte_pfn(*pte);
		if (!pfn_valid(pfn))
			continue;
		nid = pfn_to_nid(pfn);
		if (!test_bit(nid, nodes))
			break;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap(orig_pte);
	spin_unlock(&mm->page_table_lock);
	return addr != end;
}
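check_pte_range() reports "stopped early on a page from a disallowed node" through its addr != end return value, and its caller walks the address range one pmd at a time. A simplified reconstruction of that pmd-level caller, in the style of the mempolicy.c of this era but not the verbatim source:

static inline int check_pmd_range(struct mm_struct *mm, pud_t *pud,
		unsigned long addr, unsigned long end, unsigned long *nodes)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		/* nonzero means a page on a disallowed node was found */
		if (check_pte_range(mm, pmd, addr, next, nodes))
			return -EIO;
	} while (pmd++, addr = next, addr != end);
	return 0;
}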
Code example #16
/**
 * Replace the PFN of a PTE with the address of the actual page.
 *
 * The caller maps a reserved dummy page at the address with the desired access
 * and flags.
 *
 * This hack is required for older Linux kernels which don't provide
 * remap_pfn_range().
 *
 * @returns 0 on success, -ENOMEM on failure.
 * @param   mm          The memory context.
 * @param   ulAddr      The mapping address.
 * @param   Phys        The physical address of the page to map.
 */
static int rtR0MemObjLinuxFixPte(struct mm_struct *mm, unsigned long ulAddr, RTHCPHYS Phys)
{
    int rc = -ENOMEM;
    pgd_t *pgd;

    spin_lock(&mm->page_table_lock);

    pgd = pgd_offset(mm, ulAddr);
    if (!pgd_none(*pgd) && !pgd_bad(*pgd))
    {
        pmd_t *pmd = pmd_offset(pgd, ulAddr);
        if (!pmd_none(*pmd))
        {
            pte_t *ptep = pte_offset_map(pmd, ulAddr);
            if (ptep)
            {
                pte_t pte = *ptep;
                pte.pte_high &= 0xfff00000;
                pte.pte_high |= ((Phys >> 32) & 0x000fffff);
                pte.pte_low  &= 0x00000fff;
                pte.pte_low  |= (Phys & 0xfffff000);
                set_pte(ptep, pte);
                pte_unmap(ptep);
                rc = 0;
            }
        }
    }

    spin_unlock(&mm->page_table_lock);
    return rc;
}
Code example #17
/* mmlist_lock and vma->vm_mm->page_table_lock are held */
static inline void unuse_pmd(struct vm_area_struct * vma, pmd_t *dir,
	unsigned long address, unsigned long size, unsigned long offset,
	swp_entry_t entry, struct page* page)
{
	struct pte_chain * pte_chain = NULL;
	pte_t *pte, *mapping;
	unsigned long end;

	if (pmd_none(*dir))
		return;
	if (pmd_bad(*dir)) {
		pmd_ERROR(*dir);
		pmd_clear(dir);
		return;
	}
	mapping = pte = pte_offset_map(dir, address);
	offset += address & PMD_MASK;
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		/*
		 * FIXME: handle pte_chain_alloc() failures
		 */
		if (pte_chain == NULL)
			pte_chain = pte_chain_alloc(GFP_ATOMIC);
		unuse_pte(vma, offset+address-vma->vm_start,
			       pte, entry, page, &pte_chain);
		address += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
	pte_unmap(mapping);
	pte_chain_free(pte_chain);
}
Code example #18
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);

	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				pte_t *ptep, pte;

				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;
				if (pte_present(pte))
					page = pte_page(pte);
				pte_unmap(ptep);
			}
		}
	}
	return page;
}
Code example #19
File: mempolicy.c Project: BackupTheBerlios/tuxap
/* Ensure all existing pages follow the policy. */
static int
verify_pages(unsigned long addr, unsigned long end, unsigned long *nodes)
{
	while (addr < end) {
		struct page *p;
		pte_t *pte;
		pmd_t *pmd;
		pgd_t *pgd = pgd_offset_k(addr);
		if (pgd_none(*pgd)) {
			addr = (addr + PGDIR_SIZE) & PGDIR_MASK;
			continue;
		}
		pmd = pmd_offset(pgd, addr);
		if (pmd_none(*pmd)) {
			addr = (addr + PMD_SIZE) & PMD_MASK;
			continue;
		}
		p = NULL;
		pte = pte_offset_map(pmd, addr);
		if (pte_present(*pte))
			p = pte_page(*pte);
		pte_unmap(pte);
		if (p) {
			unsigned nid = page_to_nid(p);
			if (!test_bit(nid, nodes))
				return -EIO;
		}
		addr += PAGE_SIZE;
	}
	return 0;
}
Code example #20
/*
 * need to get a 16k page for level 1
 */
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pud_t *new_pud, *init_pud;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!new_pgd)
		goto no_pgd;

	memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	init_pgd = pgd_offset_k(0);
	memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

	if (!vectors_high()) {
		/*
		 * On ARM, first page must always be allocated since it
		 * contains the machine vectors.
		 */
		new_pud = pud_alloc(mm, new_pgd, 0);
		if (!new_pud)
			goto no_pud;

		new_pmd = pmd_alloc(mm, new_pud, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc_map(mm, NULL, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

		init_pud = pud_offset(init_pgd, 0);
		init_pmd = pmd_offset(init_pud, 0);
		init_pte = pte_offset_map(init_pmd, 0);
		set_pte_ext(new_pte, *init_pte, 0);
		pte_unmap(init_pte);
		pte_unmap(new_pte);
	}

	return new_pgd;

no_pte:
	pmd_free(mm, new_pmd);
no_pmd:
	pud_free(mm, new_pud);
no_pud:
	free_pages((unsigned long)new_pgd, 2);
no_pgd:
	return NULL;
}
Code example #21
File: vdec.c Project: SelfImp/m75
int m4u_v2p_new(unsigned int va)
{
   unsigned int pmdOffset = (va & (PMD_SIZE - 1));
   unsigned int pageOffset = (va & (PAGE_SIZE - 1));
   pgd_t *pgd;
   pmd_t *pmd;
   pte_t *pte;
   unsigned int pa;
   printk("Enter m4u_user_v2p()! 0x%x\n", va);
   
   pgd = pgd_offset(current->mm, va); /* what is tsk->mm */
   printk("m4u_user_v2p(), pgd 0x%x\n", pgd);    
   printk("pgd_none=%d, pgd_bad=%d\n", pgd_none(*pgd), pgd_bad(*pgd));
   
   if(pgd_none(*pgd)||pgd_bad(*pgd))
   {
      printk("Error: m4u_user_v2p(), virtual addr 0x%x, pgd invalid! \n", va);
      return 0;
   }
   
   pmd = pmd_offset(pgd, va);
   printk("m4u_user_v2p(), pmd 0x%x\n", pmd);
   printk("pmd_none=%d, pmd_bad=%d, pmd_val=0x%x\n", pmd_none(*pmd), pmd_bad(*pmd), pmd_val(*pmd));
   
   
   /* If this is a page table entry, keep on walking to the next level */ 
   if (( (unsigned int)pmd_val(*pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
   {
      if(pmd_none(*pmd)||pmd_bad(*pmd))
      {
         printk("Error: m4u_user_v2p(), virtual addr 0x%x, pmd invalid! \n", va);
         return 0;
      }
      
      pte = pte_offset_map(pmd, va);
      printk("m4u_user_v2p(), pte %p\n", pte);
      if(pte_present(*pte)) 
      { 
         pa=(pte_val(*pte) & (PAGE_MASK)) | pageOffset; 
         pte_unmap(pte);
         printk("PA = 0x%8x\n", pa);
         return pa; 
      }
      pte_unmap(pte);
   }
   else /* Only 1 level page table */
   {
      if(pmd_none(*pmd))
      {
         printk("Error: m4u_user_v2p(), virtual addr 0x%x, pmd invalid! \n", va);
         return 0;
      }
      pa=(pmd_val(*pmd) & (PMD_MASK)) | pmdOffset; 
      printk("PA = 0x%8x\n", pa);
      return pa;

   }
   
   return 0;   
}
Code example #22
File: fault.c Project: kizukukoto/WDN900_GPL
/*
 * fault_is_resolved()
 *	Return true if the fault appears to be resolved.
 */
STATIC int fault_is_resolved(struct pt_regs *regs,
			 unsigned long missqw0,
			 unsigned long missqw1)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *ptep;
	unsigned long address;
	unsigned long src = MMU_MISSQW1_SRC_GET(missqw1);
	unsigned long op = MMU_MISSQW0_OP_GET(missqw0);

	/*
	 * Potential hardware bug, check if this is an ifetch with a write op.
	 * If so, we will be in an infinite loop.  check here because this
	 * is under debug.
	 */
	if ((src == 0) && (op == 1)) {
		printk(KERN_CRIT "iftech/write: missqw0=%lx, missqw1=%lx\n",
		       missqw0, missqw1);
		return 0;
	}

	/*
	 * See if we now have a valid pte?
	 */
	pgd = (pgd_t *)(MMU_MISSQW0_PGD_GET(missqw0) << MMU_MISSQW0_PGD_SHIFT);
	address =  (unsigned long)(MMU_MISSQW1_VPN_GET(missqw1) << MMU_VPN_SHIFT);
	pmd = (pmd_t *)__pgd_offset(pgd, address);
	if (unlikely(pmd_none(*pmd)) || (unlikely(pmd_bad(*pmd)))) {
		printk(KERN_CRIT "address[0x%lx] pgd[%p] pmd[%p] is empty\n",
		       address, pgd, pmd);
		return 0;
	}

	ptep = pte_offset_map(pmd, address);
	if (unlikely(pte_none(*ptep)) || (unlikely(pte_bad(*ptep)))) {
		printk(KERN_CRIT "address[0x%lx] pgd[%p] pmd[%p] pte[%p] is empty\n",
		       address, pgd, pmd, ptep);
		return 0;
	}

	if (unlikely(!pte_present(*ptep))) {
		printk(KERN_CRIT "address[0x%lx] pgd[%p] pmd[%p] pte[%p] is invalid: 0x%lx\n",
		       address, pgd, pmd, ptep, pte_val(*ptep));
		return 0;
	}

	if (MMU_MISSQW0_OP_GET(missqw0) && !pte_write(*ptep)) {
		printk(KERN_CRIT "address[0x%lx] pgd[%p] pmd[%p] pte[%p] write requested but not given: 0x%lx\n",
		       address, pgd, pmd, ptep, pte_val(*ptep));
		/* Fall through, not as critical */
	}

	fault_printk(FAULT_DBG_TRACE, "FAULT[%d]: ti[%p], missqw0=%08lx, missqw1=%08lx, resolved!\n", 
	       raw_smp_processor_id(), (void *)current_thread_info(), missqw0, missqw1);
	return 1;
}
Code example #23
File: my_syscall.c Project: amastov/CSE430P3
asmlinkage long sys_my_syscall( int pid, unsigned long address)
{
   
   struct task_struct* task;
   struct mm_struct* mm;
   pgd_t* pgd;
   pud_t* pud;
   pmd_t* pmd;
   pte_t* pte;
   unsigned long pte_value;
   printk(KERN_INFO "PID: %d, VIRTUAL_ADDR: 0x%lx\n", pid, address);
   for_each_process(task)
   {
     if(task->pid == pid)
     {           
      printk(KERN_INFO "Task %d found\n", task->pid);
      mm = task->mm;
      
      pgd = pgd_offset(mm, address);
      printk(KERN_INFO "PGD INFO: PRESENT: %d, BAD: %d, NONE: %d\n", pgd_present(*pgd), pgd_bad(*pgd), pgd_none(*pgd));
      if(!(pgd_none(*pgd) || pgd_bad(*pgd)) && pgd_present(*pgd))
      {
        pud = pud_offset(pgd, address);
        printk(KERN_INFO "PUD INFO: PRESENT: %d, BAD: %d, NONE: %d\n", pud_present(*pud), pud_bad(*pud), pud_none(*pud));
           
        if(!(pud_none(*pud) || pud_bad(*pud)) && pud_present(*pud))
        {
          pmd = pmd_offset(pud, address);
          printk(KERN_INFO "PMD INFO: PRESENT: %d, BAD: %d, NONE: %d\n", pmd_present(*pmd), pmd_bad(*pmd), pmd_none(*pmd));
           
          if(!(pmd_none(*pmd) || pmd_bad(*pmd)) && pmd_present(*pmd))
          {
            pte = pte_offset_map(pmd, address);
            printk(KERN_INFO "PTE INFO: PRESENT: %d PTE: 0x%lx \n ", pte_present(*pte), pte->pte);


            pte_value = pte->pte;
            
            if(pte_value == 0)
                pte_value = __pte_to_swp_entry(*pte).val;
            
            pte_unmap(pte);
            printk(KERN_INFO "pte_val: %lx\n", pte_value);
            return pte_value;
            
            }
        }
      }
    }
  }
  printk(KERN_INFO "Data not found!\n");
  return 0;
}
Code example #24
File: fault.c Project: kizukukoto/WDN900_GPL
/*
 * fault_pte()
 *	Obtain the pte corresponding to the fault.
 */
STATIC pte_t *fault_pte(pgd_t *pgd, unsigned long address)
{
	pmd_t *pmd = (pmd_t *)__pgd_offset(pgd, address);

	if (unlikely(pmd_none(*pmd)) ||
	    (unlikely(pmd_bad(*pmd))) ||
	    (unlikely(pmd_present(*pmd) == 0))) {
		return NULL;
	}
	return pte_offset_map(pmd, address);
}
Code example #25
unsigned int pmem_user_v2p_video(unsigned int va)
{
    unsigned int pageOffset = (va & (PAGE_SIZE - 1)); 
    pgd_t *pgd;
    pud_t *pud;
    pmd_t *pmd;
    pte_t *pte;
    unsigned int pa;

    if(NULL==current)
    {
    	  MFV_LOGE("[ERROR] pmem_user_v2p_video, current is NULL! \n");
    	  return 0;
    }
    
    if(NULL==current->mm)
    {
    	  MFV_LOGE("[ERROR] pmem_user_v2p_video, current->mm is NULL! tgid=0x%x, name=%s \n", current->tgid, current->comm);
    	  return 0;
    }
    
    pgd = pgd_offset(current->mm, va); /* what is tsk->mm */
    if(pgd_none(*pgd)||pgd_bad(*pgd))
    {    
        MFV_LOGE("[ERROR] pmem_user_v2p(), va=0x%x, pgd invalid! \n", va); 
        return 0;
    }    

    pud = pud_offset(pgd, va); 
    if(pud_none(*pud)||pud_bad(*pud))
    {    
        MFV_LOGE("[ERROR] pmem_user_v2p(), va=0x%x, pud invalid! \n", va); 
        return 0;
    }    
    
    pmd = pmd_offset(pud, va); 
    if(pmd_none(*pmd)||pmd_bad(*pmd))
    {    
        MFV_LOGE("[ERROR] pmem_user_v2p(), va=0x%x, pmd invalid! \n", va); 
        return 0;
    }    
      
    pte = pte_offset_map(pmd, va); 
    if(pte_present(*pte)) 
    {    
        pa=(pte_val(*pte) & (PAGE_MASK)) | pageOffset; 
        pte_unmap(pte);
        return pa;  
    }     
    pte_unmap(pte);

    MFV_LOGE("[ERROR] pmem_user_v2p(), va=0x%x, pte invalid! \n", va); 
    return 0;
}
Code example #26
File: mmu.c Project: BrainFucking/linux
static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
				  unsigned long addr, unsigned long end,
				  phys_addr_t phys, pgprot_t prot,
				  void *(*alloc)(unsigned long size))
{
	pmd_t *pmd;
	unsigned long next;

	/*
	 * Check for initial section mappings in the pgd/pud and remove them.
	 */
	if (pud_none(*pud) || pud_sect(*pud)) {
		pmd = alloc(PTRS_PER_PMD * sizeof(pmd_t));
		if (pud_sect(*pud)) {
			/*
			 * need to have the 1G of mappings continue to be
			 * present
			 */
			split_pud(pud, pmd);
		}
		pud_populate(mm, pud, pmd);
		flush_tlb_all();
	}
	BUG_ON(pud_bad(*pud));

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0) {
			pmd_t old_pmd = *pmd;
			set_pmd(pmd, __pmd(phys |
					   pgprot_val(mk_sect_prot(prot))));
			/*
			 * Check for previous table entries created during
			 * boot (__create_page_tables) and flush them.
			 */
			if (!pmd_none(old_pmd)) {
				flush_tlb_all();
				if (pmd_table(old_pmd)) {
					phys_addr_t table = __pa(pte_offset_map(&old_pmd, 0));
					if (!WARN_ON_ONCE(slab_is_available()))
						memblock_free(table, PAGE_SIZE);
				}
			}
		} else {
			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
				       prot, alloc);
		}
		phys += next - addr;
	} while (pmd++, addr = next, addr != end);
}
Code example #27
File: signal.c Project: GodFox/magx_kernel_xpixl
static int page_present(struct mm_struct *mm, void __user *uptr, int wr)
{
	unsigned long addr = (unsigned long)uptr;
	pgd_t *pgd = pgd_offset(mm, addr);
	if (pgd_present(*pgd)) {
		pmd_t *pmd = pmd_offset(pgd, addr);
		if (pmd_present(*pmd)) {
			pte_t *pte = pte_offset_map(pmd, addr);
			return (pte_present(*pte) && (!wr || pte_write(*pte)));
		}
	}
	return 0;
}
Code example #28
File: hugetlbpage.c Project: kzlin129/tt-gpl
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	if (pgd) {
		pmd = pmd_offset(pgd, addr);
		if (pmd)
			pte = pte_offset_map(pmd, addr);
	}
	return pte;
}
Code example #29
static void flush_icache_page_range(unsigned long start, unsigned long end)
{
	unsigned long addr, size, off;
	struct page *page;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ppte, pte;

	/* work out how much of the page to flush */
	off = start & ~PAGE_MASK;
	size = end - start;

	/* get the physical address the page is mapped to from the page
	 * tables */
	pgd = pgd_offset(current->mm, start);
	if (!pgd || !pgd_val(*pgd))
		return;

	pud = pud_offset(pgd, start);
	if (!pud || !pud_val(*pud))
		return;

	pmd = pmd_offset(pud, start);
	if (!pmd || !pmd_val(*pmd))
		return;

	ppte = pte_offset_map(pmd, start);
	if (!ppte)
		return;
	pte = *ppte;
	pte_unmap(ppte);

	if (pte_none(pte))
		return;

	page = pte_page(pte);
	if (!page)
		return;

	addr = page_to_phys(page);

	/* flush the dcache and invalidate the icache coverage on that
	 * region */
	mn10300_local_dcache_flush_range2(addr + off, size);
	mn10300_local_icache_inv_range2(addr + off, size);
	smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start, end);
}
Code example #30
/**
 * flush_icache_page_range - Flush dcache and invalidate icache for part of a
 *				single page
 * @start: The starting virtual address of the page part.
 * @end: The ending virtual address of the page part.
 *
 * Flush the dcache and invalidate the icache for part of a single page, as
 * determined by the virtual addresses given.  The page must be in the paged
 * area.
 */
static void flush_icache_page_range(unsigned long start, unsigned long end)
{
	unsigned long addr, size, off;
	struct page *page;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ppte, pte;

	/* work out how much of the page to flush */
	off = start & ~PAGE_MASK;
	size = end - start;

	/* get the physical address the page is mapped to from the page
	 * tables */
	pgd = pgd_offset(current->mm, start);
	if (!pgd || !pgd_val(*pgd))
		return;

	pud = pud_offset(pgd, start);
	if (!pud || !pud_val(*pud))
		return;

	pmd = pmd_offset(pud, start);
	if (!pmd || !pmd_val(*pmd))
		return;

	ppte = pte_offset_map(pmd, start);
	if (!ppte)
		return;
	pte = *ppte;
	pte_unmap(ppte);

	if (pte_none(pte))
		return;

	page = pte_page(pte);
	if (!page)
		return;

	addr = page_to_phys(page);

	/* flush the dcache and invalidate the icache coverage on that
	 * region */
	mn10300_local_dcache_flush_range2(addr + off, size);
	mn10300_local_icache_inv_range2(addr + off, size);
	smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start, end);
}