Code example #1
File: zzq_vm.c Project: gowithqi/linux_kernel_mm
static struct page* my_follow_page(struct vm_area_struct *vma, unsigned long addr) {
	pud_t *pud = NULL;
	pmd_t *pmd = NULL;
	pgd_t *pgd = NULL;
	pte_t *pte = NULL;
	spinlock_t *ptl = NULL;
	struct page* page = NULL;
	struct mm_struct *mm = current->mm;
	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) {
		goto out;
	}
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud) || unlikely(pud_bad(*pud))) {
		goto out;
	}
	printk("aaaa\n");
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) {
		goto out;
	}
	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!pte) goto out;
	if (!pte_present(*pte)) goto unlock;
	page = pfn_to_page(pte_pfn(*pte));
	if (!page) goto unlock;
	get_page(page);
unlock:
	pte_unmap_unlock(pte, ptl);
out:
	return page;
}
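A minimal usage sketch for the helper above (illustrative only: dump_one_page and its printk are hypothetical, the rest are standard kernel APIs of this era). The caller takes mmap_sem for read around the VMA lookup and drops the page reference when done:

static int dump_one_page(unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct page *page;
	int ret = -EFAULT;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, addr);
	if (vma && addr >= vma->vm_start) {
		page = my_follow_page(vma, addr);
		if (page) {
			printk(KERN_INFO "addr %lx -> pfn %lx\n",
			       addr, page_to_pfn(page));
			put_page(page);	/* balance the get_page() in my_follow_page() */
			ret = 0;
		}
	}
	up_read(&mm->mmap_sem);
	return ret;
}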
Code example #2
File: ioremap_64.c Project: mobilipia/iods
static void shmedia_unmapioaddr(unsigned long vaddr)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_k(vaddr);
	if (pgd_none(*pgdp) || pgd_bad(*pgdp))
		return;

	pudp = pud_offset(pgdp, vaddr);
	if (pud_none(*pudp) || pud_bad(*pudp))
		return;

	pmdp = pmd_offset(pudp, vaddr);
	if (pmd_none(*pmdp) || pmd_bad(*pmdp))
		return;

	ptep = pte_offset_kernel(pmdp, vaddr);

	if (pte_none(*ptep) || !pte_present(*ptep))
		return;

	/* Tear down just this kernel PTE; clearing anything wider would
	 * corrupt the shared page-table page. */
	pte_clear(&init_mm, vaddr, ptep);
}
Code example #3
static int
pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
{
	unsigned long addr = (unsigned long)_addr;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	pud_t *pud;
	spinlock_t *ptl;

	pgd = pgd_offset(current->mm, addr);
	if (unlikely(pgd_none(*pgd) || pgd_bad(*pgd)))
		return 0;

	pud = pud_offset(pgd, addr);
	if (unlikely(pud_none(*pud) || pud_bad(*pud)))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (unlikely(pmd_none(*pmd) || pmd_bad(*pmd)))
		return 0;

	pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);
	if (unlikely(!pte_present(*pte) || !pte_young(*pte) ||
	    !pte_write(*pte) || !pte_dirty(*pte))) {
		pte_unmap_unlock(pte, ptl);
		return 0;
	}

	*ptep = pte;
	*ptlp = ptl;

	return 1;
}
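A hedged sketch of how a pin helper like this is typically driven, modelled loosely on arch/arm/lib/uaccess_with_memcpy.c (the function name and the single-chunk copy are illustrative, not the upstream code): if the destination cannot be pinned, fault it in once with a byte-sized put_user(), then retry, and only memcpy() while the PTE lock is held.

static unsigned long copy_chunk_to_user(void __user *to, const void *from,
					unsigned long n)
{
	pte_t *pte;
	spinlock_t *ptl;

	down_read(&current->mm->mmap_sem);
	while (!pin_page_for_write(to, &pte, &ptl)) {
		/* Destination not present/young/writable/dirty yet:
		 * touch it the slow, faulting way, then retry the pin. */
		up_read(&current->mm->mmap_sem);
		if (put_user(*(const char *)from, (char __user *)to))
			return n;	/* genuine fault: report bytes not copied */
		down_read(&current->mm->mmap_sem);
	}

	memcpy((void *)to, from, n);	/* safe while the PTE stays locked */
	pte_unmap_unlock(pte, ptl);
	up_read(&current->mm->mmap_sem);
	return 0;
}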
Code example #4
/*
 * Dump out the page tables associated with 'addr' in mm 'mm'.
 */
void show_pte(struct mm_struct *mm, unsigned long addr)
{
    pgd_t *pgd;

    if (!mm)
        mm = &init_mm;

    pr_alert("pgd = %p\n", mm->pgd);
    pgd = pgd_offset(mm, addr);
    pr_alert("[%08lx] *pgd=%016llx", addr, pgd_val(*pgd));

    do {
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (pgd_none(*pgd) || pgd_bad(*pgd))
            break;

        pud = pud_offset(pgd, addr);
        if (pud_none(*pud) || pud_bad(*pud))
            break;

        pmd = pmd_offset(pud, addr);
        printk(", *pmd=%016llx", pmd_val(*pmd));
        if (pmd_none(*pmd) || pmd_bad(*pmd))
            break;

        pte = pte_offset_map(pmd, addr);
        printk(", *pte=%016llx", pte_val(*pte));
        pte_unmap(pte);
    } while(0);

    printk("\n");
}
Code example #5
File: msync.c Project: Antonio-Zhou/Linux-2.6.11
static inline int filemap_sync_pmd_range(pud_t * pud,
	unsigned long address, unsigned long end, 
	struct vm_area_struct *vma, unsigned int flags)
{
	pmd_t * pmd;
	int error;

	if (pud_none(*pud))
		return 0;
	if (pud_bad(*pud)) {
		pud_ERROR(*pud);
		pud_clear(pud);
		return 0;
	}
	pmd = pmd_offset(pud, address);
	if ((address & PUD_MASK) != (end & PUD_MASK))
		end = (address & PUD_MASK) + PUD_SIZE;
	error = 0;
	do {
		error |= filemap_sync_pte_range(pmd, address, end, vma, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return error;
}
Code example #6
File: mmu.c Project: 1314cc/linux
static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
				  phys_addr_t phys, pgprot_t prot,
				  phys_addr_t (*pgtable_alloc)(void))
{
	pmd_t *pmd;
	unsigned long next;

	/*
	 * Check for initial section mappings in the pgd/pud and remove them.
	 */
	if (pud_none(*pud) || pud_sect(*pud)) {
		phys_addr_t pmd_phys;
		BUG_ON(!pgtable_alloc);
		pmd_phys = pgtable_alloc();
		pmd = pmd_set_fixmap(pmd_phys);
		if (pud_sect(*pud)) {
			/*
			 * need to have the 1G of mappings continue to be
			 * present
			 */
			split_pud(pud, pmd);
		}
		__pud_populate(pud, pmd_phys, PUD_TYPE_TABLE);
		flush_tlb_all();
		pmd_clear_fixmap();
	}
	BUG_ON(pud_bad(*pud));

	pmd = pmd_set_fixmap_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
		      block_mappings_allowed(pgtable_alloc)) {
			pmd_t old_pmd = *pmd;
			pmd_set_huge(pmd, phys, prot);
			/*
			 * Check for previous table entries created during
			 * boot (__create_page_tables) and flush them.
			 */
			if (!pmd_none(old_pmd)) {
				flush_tlb_all();
				if (pmd_table(old_pmd)) {
					phys_addr_t table = pmd_page_paddr(old_pmd);
					if (!WARN_ON_ONCE(slab_is_available()))
						memblock_free(table, PAGE_SIZE);
				}
			}
		} else {
			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
				       prot, pgtable_alloc);
		}
		phys += next - addr;
	} while (pmd++, addr = next, addr != end);

	pmd_clear_fixmap();
}
Code example #7
File: my_syscall.c Project: amastov/CSE430P3
asmlinkage long sys_my_syscall( int pid, unsigned long address)
{
   
   struct task_struct* task;
   struct mm_struct* mm;
   pgd_t* pgd;
   pud_t* pud;
   pmd_t* pmd;
   pte_t* pte;
   unsigned long pte_value;
   printk(KERN_INFO "PID: %d, VIRTUAL_ADDR: 0x%lx\n", pid, address);
   for_each_process(task)
   {
     if(task->pid == pid)
     {           
      printk(KERN_INFO "Task %d found\n", task->pid);
      mm = task->mm;
      if (mm == NULL)
          break; /* kernel thread: no user address space */

      pgd = pgd_offset(mm, address);
      printk(KERN_INFO "PGD INFO: PRESENT: %d, BAD: %d, NONE: %d\n", pgd_present(*pgd), pgd_bad(*pgd), pgd_none(*pgd));
      if(!(pgd_none(*pgd) || pgd_bad(*pgd)) && pgd_present(*pgd))
      {
        pud = pud_offset(pgd, address);
        printk(KERN_INFO "PUD INFO: PRESENT: %d, BAD: %d, NONE: %d\n", pud_present(*pud), pud_bad(*pud), pud_none(*pud));

        if(!(pud_none(*pud) || pud_bad(*pud)) && pud_present(*pud))
        {
          pmd = pmd_offset(pud, address);
          printk(KERN_INFO "PMD INFO: PRESENT: %d, BAD: %d, NONE: %d\n", pmd_present(*pmd), pmd_bad(*pmd), pmd_none(*pmd));

          if(!(pmd_none(*pmd) || pmd_bad(*pmd)) && pmd_present(*pmd))
          {
            pte = pte_offset_map(pmd, address);
            printk(KERN_INFO "PTE INFO: PRESENT: %d PTE: 0x%lx\n", pte_present(*pte), pte->pte);

            pte_value = pte->pte;

            /* A populated but non-present PTE holds a swap entry
             * rather than a page frame number. */
            if (!pte_present(*pte))
                pte_value = __pte_to_swp_entry(*pte).val;

            pte_unmap(pte);
            printk(KERN_INFO "pte_value: 0x%lx\n", pte_value);
            return pte_value;
          }
        }
      }
    }
  }
  printk(KERN_INFO "Data not found!\n");
  return 0;
}
Code example #8
unsigned int pmem_user_v2p_video(unsigned int va)
{
    unsigned int pageOffset = (va & (PAGE_SIZE - 1)); 
    pgd_t *pgd;
    pud_t *pud;
    pmd_t *pmd;
    pte_t *pte;
    unsigned int pa;

    if(NULL==current)
    {
    	  MFV_LOGE("[ERROR] pmem_user_v2p_video, current is NULL! \n");
    	  return 0;
    }
    
    if(NULL==current->mm)
    {
    	  MFV_LOGE("[ERROR] pmem_user_v2p_video, current->mm is NULL! tgid=0x%x, name=%s \n", current->tgid, current->comm);
    	  return 0;
    }
    
    pgd = pgd_offset(current->mm, va); /* what is tsk->mm */
    if(pgd_none(*pgd)||pgd_bad(*pgd))
    {    
        MFV_LOGE("[ERROR] pmem_user_v2p(), va=0x%x, pgd invalid! \n", va); 
        return 0;
    }    

    pud = pud_offset(pgd, va); 
    if(pud_none(*pud)||pud_bad(*pud))
    {    
        MFV_LOGE("[ERROR] pmem_user_v2p(), va=0x%x, pud invalid! \n", va); 
        return 0;
    }    
    
    pmd = pmd_offset(pud, va); 
    if(pmd_none(*pmd)||pmd_bad(*pmd))
    {    
        MFV_LOGE("[ERROR] pmem_user_v2p(), va=0x%x, pmd invalid! \n", va); 
        return 0;
    }    
      
    pte = pte_offset_map(pmd, va); 
    if(pte_present(*pte)) 
    {    
        pa=(pte_val(*pte) & (PAGE_MASK)) | pageOffset; 
        pte_unmap(pte);
        return pa;  
    }
    pte_unmap(pte);

    MFV_LOGE("[ERROR] pmem_user_v2p(), va=0x%x, pte invalid! \n", va); 
    return 0;
}
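The closing arithmetic above is the heart of every v2p helper in this collection: keep the frame bits of the PTE and OR back the offset within the page. A standalone, user-space sketch of that composition (a 4 KiB page with PAGE_SHIFT of 12 is assumed; the function and values are illustrative):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* Compose a physical address from a PTE's frame bits plus the
 * in-page offset of the virtual address. */
static uintptr_t compose_pa(uintptr_t pte_frame_bits, uintptr_t va)
{
	return (pte_frame_bits & PAGE_MASK) | (va & ~PAGE_MASK);
}

int main(void)
{
	/* frame 0x12345000 + offset 0xabc -> 0x12345abc */
	printf("pa = 0x%lx\n",
	       (unsigned long)compose_pa(0x12345000, 0xdeadbabc));
	return 0;
}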
Code example #9
File: mmu.c Project: BrainFucking/linux
static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
				  unsigned long addr, unsigned long end,
				  phys_addr_t phys, pgprot_t prot,
				  void *(*alloc)(unsigned long size))
{
	pmd_t *pmd;
	unsigned long next;

	/*
	 * Check for initial section mappings in the pgd/pud and remove them.
	 */
	if (pud_none(*pud) || pud_sect(*pud)) {
		pmd = alloc(PTRS_PER_PMD * sizeof(pmd_t));
		if (pud_sect(*pud)) {
			/*
			 * need to have the 1G of mappings continue to be
			 * present
			 */
			split_pud(pud, pmd);
		}
		pud_populate(mm, pud, pmd);
		flush_tlb_all();
	}
	BUG_ON(pud_bad(*pud));

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0) {
			pmd_t old_pmd = *pmd;
			set_pmd(pmd, __pmd(phys |
					   pgprot_val(mk_sect_prot(prot))));
			/*
			 * Check for previous table entries created during
			 * boot (__create_page_tables) and flush them.
			 */
			if (!pmd_none(old_pmd)) {
				flush_tlb_all();
				if (pmd_table(old_pmd)) {
					phys_addr_t table = __pa(pte_offset_map(&old_pmd, 0));
					if (!WARN_ON_ONCE(slab_is_available()))
						memblock_free(table, PAGE_SIZE);
				}
			}
		} else {
			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
				       prot, alloc);
		}
		phys += next - addr;
	} while (pmd++, addr = next, addr != end);
}
Code example #10
File: wip_mod.c Project: ahanufhossain/cse430
int __init wip_init(void)
{
	unsigned long va = 0xb77e5000;
	int pid = 1072;
	//struct page p;
	unsigned long long pageFN;
	unsigned long long pa;

	pgd_t *pgd = NULL;
	pmd_t *pmd = NULL;
	pud_t *pud = NULL;
	pte_t *pte = NULL;
	
	struct mm_struct *mm = NULL;

	int found = 0;

	struct task_struct *task;
	for_each_process(task)
	{
		if(task->pid == pid)
			mm = task->mm;
	}
	if (!mm)
		return -ESRCH; /* pid not found, or kernel thread with no mm */
	pgd = pgd_offset(mm, va);
	if(!pgd_none(*pgd) && !pgd_bad(*pgd))
	{
		pud = pud_offset(pgd,va);
		if(!pud_none(*pud) && !pud_bad(*pud))
		{
			pmd = pmd_offset(pud,va);
			if(!pmd_none(*pmd) && !pmd_bad(*pmd))
			{
				pte = pte_offset_kernel(pmd,va);
				if(!pte_none(*pte))
				{
					pageFN = pte_pfn(*pte);
					pa = ((pageFN<<12)|(va&0x00000FFF));
					found = 1;
					printk(KERN_ALERT "Physical Address: 0x%08llx\npfn: 0x%04llx\n", pa, pageFN);
				}
			}
		}
	}
	/* A PTE that is populated but not present holds a swap entry. */
	if (!found && pte && !pte_none(*pte) && !pte_present(*pte))
	{
		unsigned long long swapID = (pte_val(*pte) >> 32);
		found = 1;
		printk(KERN_ALERT "swap ID: 0x%08llx\n", swapID);
	}
	if (!found)
		printk(KERN_ALERT "no mapping found for va 0x%lx\n", va);
	return 0;
}
Code example #11
File: my_syscall.c Project: ahanufhossain/cse430
asmlinkage long long sys_my_syscall(int pid, unsigned long long va)
{
	unsigned long long pageFN;
	unsigned long long pa;

	pgd_t *pgd = NULL;
	pmd_t *pmd = NULL;
	pud_t *pud = NULL;
	pte_t *pte = NULL;
	
	struct mm_struct *mm = NULL;

	int found = 0;

	struct task_struct *task;
	for_each_process(task)
	{
		if(task->pid == pid)
			mm = task->mm;
	}
	if (!mm)
		return -ESRCH; /* pid not found, or kernel thread with no mm */
	pgd = pgd_offset(mm, va);
	if(!pgd_none(*pgd) && !pgd_bad(*pgd))
	{
		pud = pud_offset(pgd,va);
		if(!pud_none(*pud) && !pud_bad(*pud))
		{
			pmd = pmd_offset(pud,va);
			if(!pmd_none(*pmd) && !pmd_bad(*pmd))
			{
				pte = pte_offset_kernel(pmd,va);
				if(!pte_none(*pte))
				{
					pageFN = pte_pfn(*pte);
					pa = ((pageFN<<12)|(va&0x00000FFF));
					found = 1;
					return pa;
				}
			}
		}
	}
	/* A PTE that is populated but not present holds a swap entry. */
	if (!found && pte && !pte_none(*pte) && !pte_present(*pte))
	{
		unsigned long long swapID = (pte_val(*pte) >> 32);
		return swapID;
	}
	return 0;
}
Code example #12
File: pagetable.c Project: baozich/scripts
/**
 * get_struct_page - Gets a struct page for a particular address
 * @address - the address of the page we need
 *
 * Two versions of this function have to be provided for working
 * between the 2.4 and 2.5 kernels. Rather than littering the
 * function with #defines, there are just two separate copies.
 * Look at the one that is relevant to the kernel you're using
 */
struct page *get_struct_page(unsigned long addr)
{
	struct mm_struct *mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;
	unsigned long pfn;
	struct page *page=NULL;

	mm = current->mm;
	/* Is this possible? */
	if (!mm) return NULL;

	spin_lock(&mm->page_table_lock);

	pgd = pgd_offset(mm, addr);
	if (!pgd_none(*pgd) && !pgd_bad(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud) && !pud_bad(*pud)) {
			pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd) && !pmd_bad(*pmd)) {
				/*
				 * disable preemption because of potential kmap().
				 * page_table_lock should already have disabled
				 * preemption.  But, be paranoid.
				 */
				preempt_disable();
				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;
				pte_unmap(ptep);
				preempt_enable();
				if (pte_present(pte)) {
					pfn = pte_pfn(pte);
					if (pfn_valid(pfn))
						page = pte_page(pte);
				}
			}
		}
	}

	spin_unlock(&mm->page_table_lock);
	return page;
}
Code example #13
File: exmap.c Project: jbert/exmap
/* Modelled on __follow_page. Except we don't support HUGETLB and we
 * only actually use the pfn or pte, rather than getting hold of the
 * struct page. */
static int walk_page_tables(struct mm_struct *mm,
			    unsigned long address,
			    pte_t *pte_ret)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *ptep;
#ifdef HAVE_PUD_T
	pud_t *pud;
#endif

	// No support for HUGETLB as yet
	//page = follow_huge_addr(mm, address, write);
	//if (! IS_ERR(page))
	//return page;

	pgd = pgd_offset(mm, address);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		goto out;

#ifdef HAVE_PUD_T
	pud = pud_offset(pgd, address);
	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
		goto out;
	
	pmd = pmd_offset(pud, address);
#else
	pmd = pmd_offset(pgd, address);
#endif
	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		goto out;
	ptep = pte_offset_map(pmd, address);
	if (!ptep)
		goto out;

	*pte_ret = *ptep;
	pte_unmap(ptep);

	return 0;
out:
	return -1;
}
Code example #14
static inline pte_t *follow_table(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

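	/*
	 * The small constant return values below (0x3a, 0x3b, 0x10) are
	 * sentinels rather than dereferenceable pointers; callers of this
	 * s390-style helper presumably compare the returned address
	 * against a small threshold to detect a failed walk and which
	 * translation level failed.
	 */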
	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return (pte_t *) 0x3a;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
		return (pte_t *) 0x3b;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		return (pte_t *) 0x10;

	return pte_offset_map(pmd, addr);
}
Code example #15
File: graphene-ipc.c Project: gsmadhusudan/graphene
inline int make_page_cow(struct mm_struct *mm, struct vm_area_struct *vma,
			 unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		goto no_page;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
		goto no_page;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		goto no_page;

	BUG_ON(pmd_trans_huge(*pmd));

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!pte_present(*pte)) {
		spin_unlock(ptl);
		goto no_page;
	}

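	/*
	 * Write-protecting the PTE under its lock forces the next write to
	 * this page to fault, and the fault handler then performs the
	 * copy-on-write; the TLB flush below drops any stale writable
	 * translation other CPUs may still hold.
	 */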
	ptep_set_wrprotect(mm, addr, pte);
	spin_unlock(ptl);
#if !defined(CONFIG_GRAPHENE_BULK_IPC) &&  LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)
	my_flush_tlb_page(vma, addr);
#else
	flush_tlb_page(vma, addr);
#endif
	DEBUG("make page COW at %lx\n", addr);
	return 0;
no_page:
	return -EFAULT;
}
Code example #16
static struct page* mtest_seek_page(struct vm_area_struct *vma, unsigned long addr)  
{ 
    pgd_t *pgd;  //top level page table
    pud_t *pud;  //second level page table
    pmd_t *pmd;  //third level page table       
    pte_t *pte;  //last level page table
    spinlock_t *ptl;  

    struct page *page = NULL;  
    struct mm_struct *mm = vma->vm_mm;  
    
    pgd = pgd_offset(mm, addr);           
    if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))  return NULL;  
          
    pud = pud_offset(pgd, addr);  
    if (pud_none(*pud) || unlikely(pud_bad(*pud)))  return NULL;  
                
    pmd = pmd_offset(pud, addr);  
    if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))  return NULL;
  
    pte = pte_offset_map_lock(mm, pmd, addr, &ptl);  
    if (!pte)  return NULL; 
    if (!pte_present(*pte)) {
        pte_unmap_unlock(pte, ptl);
        return NULL;
    }
          
    page = pfn_to_page(pte_pfn(*pte));  
    if (!page) {
        pte_unmap_unlock(pte, ptl);
        return NULL;
    }
        
    get_page(page);  
    pte_unmap_unlock(pte, ptl);  
    return page;  
}  
Code example #17
File: dump-phys.c Project: huddy1985/dump-phys-addr
struct page *virt2page(struct mm_struct *mm, unsigned long addr)
{
    pgd_t *pgd;
    pud_t *pud;
    pmd_t *pmd;
    pte_t *pte;
    struct page *page = NULL;

    pgd = pgd_offset(mm, addr);
    if (pgd_none(*pgd) || pgd_bad(*pgd))
        return NULL;

    pud = pud_offset(pgd, addr);
    if (pud_none(*pud) || pud_bad(*pud))
        return NULL;

    pmd = pmd_offset(pud, addr);
    if (pmd_none(*pmd) || pmd_bad(*pmd))
        return NULL;

    if (!(pte = pte_offset_map(pmd, addr)))
        return NULL;
    if (pte_none(*pte))
        goto out;

    if (!pte_present(*pte))
        goto out;

    if (!(page = pte_page(*pte)))
        goto out;

out:
    pte_unmap(pte);

    return page;
}
Code example #18
File: gup.c Project: DenisLug/mptcp
/**
 * follow_page_mask - look up a page descriptor from a user-virtual address
 * @vma: vm_area_struct mapping @address
 * @address: virtual address to look up
 * @flags: flags modifying lookup behaviour
 * @page_mask: on output, *page_mask is set according to the size of the page
 *
 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
 *
 * Returns the mapped (struct page *), %NULL if no mapping exists, or
 * an error pointer if there is a mapping to something not represented
 * by a page descriptor (see also vm_normal_page()).
 */
struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int flags,
			      unsigned int *page_mask)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	*page_mask = 0;

	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
	if (!IS_ERR(page)) {
		BUG_ON(flags & FOLL_GET);
		return page;
	}

	pgd = pgd_offset(mm, address);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return no_page_table(vma, flags);

	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return no_page_table(vma, flags);
	if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
		page = follow_huge_pud(mm, address, pud, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (unlikely(pud_bad(*pud)))
		return no_page_table(vma, flags);

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return no_page_table(vma, flags);
	if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
		page = follow_huge_pmd(mm, address, pmd, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
		return no_page_table(vma, flags);
	if (pmd_trans_huge(*pmd)) {
		if (flags & FOLL_SPLIT) {
			split_huge_page_pmd(vma, address, pmd);
			return follow_page_pte(vma, address, pmd, flags);
		}
		ptl = pmd_lock(mm, pmd);
		if (likely(pmd_trans_huge(*pmd))) {
			if (unlikely(pmd_trans_splitting(*pmd))) {
				spin_unlock(ptl);
				wait_split_huge_page(vma->anon_vma, pmd);
			} else {
				page = follow_trans_huge_pmd(vma, address,
							     pmd, flags);
				spin_unlock(ptl);
				*page_mask = HPAGE_PMD_NR - 1;
				return page;
			}
		} else
			spin_unlock(ptl);
	}
	return follow_page_pte(vma, address, pmd, flags);
}
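For context, callers rarely invoke follow_page_mask() directly; kernels of this vintage expose a follow_page() wrapper that discards the page mask. A sketch of that wrapper, modelled on include/linux/mm.h of the same era (treat the exact signature as indicative):

static inline struct page *follow_page(struct vm_area_struct *vma,
		unsigned long address, unsigned int foll_flags)
{
	unsigned int unused_page_mask;

	return follow_page_mask(vma, address, foll_flags, &unused_page_mask);
}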
Code example #19
int VirtToPhys(void *vaddr, int *paddrp)
{
#if defined(__KERNEL__)

    unsigned long addr = (unsigned long) vaddr;

    if (addr < P1SEG || ((addr >= VMALLOC_START) && (addr < VMALLOC_END)))
    {
	/*
	 * Find the virtual address of either a user page (<P1SEG) or VMALLOC (P3SEG)
	 *
	 * This code is based on vmalloc_to_page() in mm/memory.c
	 */
	struct mm_struct *mm;

	pgd_t *pgd;
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,10)
	pud_t *pud;
#endif
	pmd_t *pmd;
	pte_t *ptep, pte;
	
	/* Must use the correct mm based on whether this is a kernel or a userspace address */
	if (addr >= VMALLOC_START)
	    mm = &init_mm;
	else
	    mm = current->mm;

	/* Safety first! */
	if (mm == NULL)
	    return VTOP_INVALID_ARG;

	spin_lock(&mm->page_table_lock);
	
	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
	    goto out;

#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,10)
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud) || pud_bad(*pud))
	    goto out;
	
	pmd = pmd_offset(pud, addr);
#else
	pmd = pmd_offset(pgd, addr);
#endif
	if (pmd_none(*pmd) || pmd_bad(*pmd))
	    goto out;
	
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)
	ptep = pte_offset(pmd, addr);
#else
	ptep = pte_offset_map(pmd, addr);
#endif
	if (!ptep) 
	    goto out;

	pte = *ptep;
	if (pte_present(pte)) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)
	    pte_unmap(ptep);
#endif
	    spin_unlock(&mm->page_table_lock);
	    
	    /* pte_page() macro is broken for SH in linux 2.6.20 and later */
	    *paddrp = page_to_phys(pfn_to_page(pte_pfn(pte))) | (addr & (PAGE_SIZE-1));
	    
	    /* INSbl28636: P3 segment pages cannot be looked up with pmb_virt_to_phys()
	     * instead we need to examine the _PAGE_CACHABLE bit in the pte
	     */
	    return ((pte_val(pte) & _PAGE_CACHABLE) ? VTOP_INCOHERENT_MEM : VTOP_SUCCESS);
	}
	
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)
	pte_unmap(ptep);
#endif
	
    out:
	spin_unlock(&mm->page_table_lock);

	/* Failed to find a pte */
	return VTOP_INVALID_ARG;
    }
    else
#if defined(CONFIG_32BIT)
    {
	unsigned long flags;
	
	/* Try looking for an ioremap() via the PMB */
	if (pmb_virt_to_phys(vaddr, (unsigned long *)paddrp, &flags) == 0)
	{
	    /* Success: Test the returned PMB flags */
	    return ((flags & PMB_C) ? VTOP_INCOHERENT_MEM : VTOP_SUCCESS);
	}

	/* Failed to find a mapping */
	return VTOP_INVALID_ARG;
    }
#else
    {
	unsigned long addr = (unsigned long) vaddr;
	
	/* Assume 29-bit SH4 Linux */
	*(paddrp)= PHYSADDR(addr);
	
	/* only the P2SEG is uncached (doubt we will see P4SEG addresses) */
	return ((PXSEG(addr) == P2SEG) ? VTOP_SUCCESS: VTOP_INCOHERENT_MEM);
    }
#endif /* CONFIG_32BIT */

#endif /* __KERNEL__ */

    /* Not implemented */
    return VTOP_INVALID_ARG;
}
Code example #20
/*pgtable sequential scan and count for __access_bits.*/
static int scan_pgtable(void)
{
	pgd_t *pgd = NULL;
	pud_t *pud = NULL;
	pmd_t *pmd = NULL;
	pte_t *ptep, pte;
	spinlock_t *ptl;

	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long start = 0; /*the start of address.*/
	unsigned long end = 0;   /*the end of address.*/
	unsigned long address = 0; /* the address of vma.*/
	int number_hotpages = 0; /* the number of hot pages */
	int number_vpages = 0;
	int cycle_index = 0; /* the loop counter, which denotes ITERATIONS. */
	/* the array that records the number of hot page in every cycle */
	int hot_page[ITERATIONS];
	int number_current_pg = 0;
	int pg_count = 0;
	int j = 0;
	int times = 0; /* records reuse time*/

	/* some variables that describe page "heat" */
	int hig = 0;
	int mid = 0;
	int low = 0;
	int llow = 0;
	int lllow = 0;
	int llllow = 0;
	int all_pages = 0;/* the total number of pages */
	/*the average number of hot pages in each iteration.*/
	long avg_hotpage=0;
	/*the total number of memory accesses across all pages*/
	long num_access=0;
	/* avg utilization of each page */
	int avg_page_utilization = 0;

	/*get the handle of current running benchmark.*/
	struct task_struct *bench_process = get_current_process();
	if(bench_process == NULL)
	{
		printk("sysmon: get no process handle in scan_pgtable function...exit&trying again...\n");
		return 0;
	}
	else /* get the process*/
		mm = bench_process->mm;
	if(mm == NULL)
	{
		printk("sysmon: error mm is NULL, return back & trying...\n");
		return 0;
	}

	for(j = 0; j < PAGE_ALL; j++)
		page_heat[j] = -1;

	for(j = 0; j < ITERATIONS; j++)
	{
		hot_page[j] = 0;
		reuse_time[j] = 0;
		dirty_page[j] = 0;
	}

	/*yanghao*/
	times = 0;
	for(cycle_index = 0; cycle_index < ITERATIONS; cycle_index++)
	{
		number_hotpages = 0;
		/*scan each vma*/
		for(vma = mm->mmap; vma; vma = vma->vm_next)
		{
			start = vma->vm_start;
			end = vma->vm_end;
			mm = vma->vm_mm;
			/*in each vma, we check all pages*/
			for(address = start; address < end; address += PAGE_SIZE)
			{
				/*scan page table for each page in this VMA*/
				pgd = pgd_offset(mm, address);
				if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
					continue;
				pud = pud_offset(pgd, address);
				if (pud_none(*pud) || unlikely(pud_bad(*pud)))
					continue;
				pmd = pmd_offset(pud, address);
				if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
					continue;
				ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
				pte = *ptep;
				if(pte_present(pte))
				{
					if(pte_young(pte)) /*hot page*/
					{
						/*re-set and clear  _access_bits to 0*/
						pte = pte_mkold(pte);
						set_pte_at(mm, address, ptep, pte);
						/*yanghao:re-set and clear _dirty_bits to 0*/
						pte = pte_mkclean(pte);
						set_pte_at(mm, address, ptep, pte);
					}
				}
				else /*no page pte_none*/
				{
					pte_unmap_unlock(ptep, ptl);
					continue;
				}
				pte_unmap_unlock(ptep, ptl);
				page_counts++;
			}
		}
		/*count the number of hot pages*/
		if(bench_process == NULL)
		{
			printk("sysmon: get no process handle in scan_pgtable function...exit&trying again...\n");
			return 0;
		}
		else /*get the process*/
			mm = bench_process->mm;
		if(mm == NULL)
		{
			printk("sysmon: error mm is NULL, return back & trying...\n");
			return 0;
		}
		number_vpages = 0;

		sampling_interval = page_counts / 250; /*yanghao:*/
		page_counts = 0;

		for(vma = mm->mmap; vma; vma = vma->vm_next)
		{
			start = vma->vm_start;
			end = vma->vm_end;
			/*scan each page in this VMA*/
			mm = vma->vm_mm;
			pg_count = 0;
			for(address = start; address < end; address += PAGE_SIZE)
			{
				/*scan page table for each page in this VMA*/
				pgd = pgd_offset(mm, address);
				if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
					continue;
				pud = pud_offset(pgd, address);
				if (pud_none(*pud) || unlikely(pud_bad(*pud)))
					continue;
				pmd = pmd_offset(pud, address);
				if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
					continue;
				ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
				pte = *ptep;
				if(pte_present(pte))
				{
					if(pte_young(pte)) /* hot pages*/
					{
						number_current_pg = pg_count + number_vpages;
						page_heat[number_current_pg]++;
						hot_page[cycle_index]++;
						/*yanghao:*/
						if (page_counts == random_page)
						{
							times++;
							if (pte_dirty(pte))
								dirty_page[cycle_index] = 1;
						}
					}
					else
					{
						if (page_counts == random_page)
							reuse_time[times]++;
					}
				}
				pg_count++;
				pte_unmap_unlock(ptep, ptl);
				page_counts++;
			}
			number_vpages += (int)(end - start)/PAGE_SIZE;
		}
	}
	/*yanghao:cal. the No. of random_page*/
	random_page += sampling_interval;
	if(random_page >= page_counts)
		random_page=page_counts / 300;
	/*****************************OUTPUT************************************/
	for(j = 0; j < PAGE_ALL; j++)
	{
		if(page_heat[j] < VH && page_heat[j] > H)
			hig++;
		if(page_heat[j] > M && page_heat[j] <= H)
			mid++;
		if(page_heat[j] <= M && page_heat[j] > L)
			low++;
		if(page_heat[j] > VL_MAX && page_heat[j] <= L)
			llow++;
		if(page_heat[j] > VL_MIN && page_heat[j] <= VL_MAX)
			lllow++;
		if(page_heat[j] >= 0 && page_heat[j] <= VL_MIN)
			llllow++;
		if(page_heat[j] > -1)
			all_pages++;
	}

	/*the values reflect the accessing frequency of each physical page.*/
	printk("[LOG: after sampling (%d loops) ...] ",ITERATIONS);
	printk("the values denote the physical page accessing frequence.\n"); 
	printk("-->hig (150,200) is %d. Indicating the number of re-used pages is high.\n",hig);
	printk("-->mid (100,150] is %d.\n",mid);
	printk("-->low (64,100] is %d.\n",low);
	printk("-->llow (10,64] is %d. In locality,no too many re-used pages.\n",llow);
	printk("-->lllow (5,10] is %d.\n",lllow);
	printk("-->llllow [1,5] is %d.\n",llllow);

	for(j = 0; j < ITERATIONS; j++)
		avg_hotpage += hot_page[j];
	avg_hotpage /= ITERATIONS;

	/*
	 * new step@20140704
	 * (1)the different phases of memory utilization
	 * (2)the avg. page accessing utilization
	 * (3)memory pages layout and spectrum
	 */
	for(j = 0; j < PAGE_ALL; j++)
		if(page_heat[j] > -1) /*the page that is accessed at least once.*/
			num_access += (page_heat[j] + 1);

	printk("the total number of memory accesses is %ld, the average is %ld\n",
			num_access, num_access / ITERATIONS);
	avg_page_utilization = num_access / all_pages;
	printk("Avg hot pages num is %ld, all used pages num is %d, avg utilization of each page is %d\n", 
			avg_hotpage, all_pages, avg_page_utilization);
	/*yanghao:print the information about reuse-distance*/
	if ((times == 0) && (reuse_time[0] == 0))
		printk("the page No.%d is not available.\n", random_page);
	else
	{
		if (times < ITERATIONS)
			times++;
		printk("the reuse time of page No.%d is:", random_page);
		for (j = 0; j < times; j++)
			printk("%d ", reuse_time[j]);
		printk("\n");
		printk("the digits above count how often page No.%d was accessed across %d loops.\n",
				random_page, ITERATIONS);
		printk("each digit is the number of loops between one access and the next.\n");
	}
	printk("\n\n");
	return 1;
}
Code example #21
File: gup.c Project: BWhitten/linux-stable
/**
 * follow_page_mask - look up a page descriptor from a user-virtual address
 * @vma: vm_area_struct mapping @address
 * @address: virtual address to look up
 * @flags: flags modifying lookup behaviour
 * @page_mask: on output, *page_mask is set according to the size of the page
 *
 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
 *
 * Returns the mapped (struct page *), %NULL if no mapping exists, or
 * an error pointer if there is a mapping to something not represented
 * by a page descriptor (see also vm_normal_page()).
 */
struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int flags,
			      unsigned int *page_mask)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	*page_mask = 0;

	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
	if (!IS_ERR(page)) {
		BUG_ON(flags & FOLL_GET);
		return page;
	}

	pgd = pgd_offset(mm, address);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return no_page_table(vma, flags);
	p4d = p4d_offset(pgd, address);
	if (p4d_none(*p4d))
		return no_page_table(vma, flags);
	BUILD_BUG_ON(p4d_huge(*p4d));
	if (unlikely(p4d_bad(*p4d)))
		return no_page_table(vma, flags);
	pud = pud_offset(p4d, address);
	if (pud_none(*pud))
		return no_page_table(vma, flags);
	if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
		page = follow_huge_pud(mm, address, pud, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (pud_devmap(*pud)) {
		ptl = pud_lock(mm, pud);
		page = follow_devmap_pud(vma, address, pud, flags);
		spin_unlock(ptl);
		if (page)
			return page;
	}
	if (unlikely(pud_bad(*pud)))
		return no_page_table(vma, flags);

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return no_page_table(vma, flags);
	if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
		page = follow_huge_pmd(mm, address, pmd, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (pmd_devmap(*pmd)) {
		ptl = pmd_lock(mm, pmd);
		page = follow_devmap_pmd(vma, address, pmd, flags);
		spin_unlock(ptl);
		if (page)
			return page;
	}
	if (likely(!pmd_trans_huge(*pmd)))
		return follow_page_pte(vma, address, pmd, flags);

	if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
		return no_page_table(vma, flags);

	ptl = pmd_lock(mm, pmd);
	if (unlikely(!pmd_trans_huge(*pmd))) {
		spin_unlock(ptl);
		return follow_page_pte(vma, address, pmd, flags);
	}
	if (flags & FOLL_SPLIT) {
		int ret;
		page = pmd_page(*pmd);
		if (is_huge_zero_page(page)) {
			spin_unlock(ptl);
			ret = 0;
			split_huge_pmd(vma, pmd, address);
			if (pmd_trans_unstable(pmd))
				ret = -EBUSY;
		} else {
			get_page(page);
			spin_unlock(ptl);
			lock_page(page);
			ret = split_huge_page(page);
			unlock_page(page);
			put_page(page);
			if (pmd_none(*pmd))
				return no_page_table(vma, flags);
		}

		return ret ? ERR_PTR(ret) :
			follow_page_pte(vma, address, pmd, flags);
	}

	page = follow_trans_huge_pmd(vma, address, pmd, flags);
	spin_unlock(ptl);
	*page_mask = HPAGE_PMD_NR - 1;
	return page;
}
Code example #22
unsigned int m4u_user_v2p(unsigned int va)
{
    unsigned int pageOffset = (va & (PAGE_SIZE - 1));
    pgd_t *pgd;
    pud_t *pud;
    pmd_t *pmd;
    pte_t *pte;
    unsigned int pa;
    //M4UMSG("Enter m4u_user_v2p()! \n", va);

    if(NULL==current)
    {
    	  M4UMSG("warning: m4u_user_v2p, current is NULL! \n");
    	  return 0;
    }
    if(NULL==current->mm)
    {
    	  M4UMSG("warning: m4u_user_v2p, current->mm is NULL! tgid=0x%x, name=%s \n", current->tgid, current->comm);
    	  return 0;
    }
        
    pgd = pgd_offset(current->mm, va); /* what is tsk->mm */
    if(pgd_none(*pgd)||pgd_bad(*pgd))
    {
        M4UMSG("m4u_user_v2p(), va=0x%x, pgd invalid! \n", va);
        return 0;
    }

    pud = pud_offset(pgd, va);
    if(pud_none(*pud)||pud_bad(*pud))
    {
        M4UDBG("m4u_user_v2p(), va=0x%x, pud invalid! \n", va);
        return 0;
    }
    
    pmd = pmd_offset(pud, va);
    if(pmd_none(*pmd)||pmd_bad(*pmd))
    {
        M4UDBG("m4u_user_v2p(), va=0x%x, pmd invalid! \n", va);
        return 0;
    }
        
    pte = pte_offset_map(pmd, va);
    if(pte_present(*pte)) 
    { 
/*
        if((long long)pte_val(pte[PTE_HWTABLE_PTRS]) == (long long)0)
        {
        	M4UMSG("user_v2p, va=0x%x, *ppte=%08llx", va,
        	       (long long)pte_val(pte[PTE_HWTABLE_PTRS]));
            pte_unmap(pte);
            return 0;
        }
*/        
        pa=(pte_val(*pte) & (PAGE_MASK)) | pageOffset; 
        pte_unmap(pte);
        return pa; 
    }   

    pte_unmap(pte);


    M4UDBG("m4u_user_v2p(), va=0x%x, pte invalid! \n", va);
    // m4u_dump_maps(va);
    
    return 0;
}
Code example #23
File: graphene-ipc.c Project: gsmadhusudan/graphene
static void fill_page_bit_map(struct mm_struct *mm,
			      unsigned long addr, unsigned long nr_pages,
			      unsigned long page_bit_map[PAGE_BITS])
{
	int i = 0;

	DEBUG("GIPC_SEND fill_page_bit_map %lx - %lx\n",
	      addr, addr + (nr_pages << PAGE_SHIFT));

	do {
		struct vm_area_struct *vma;
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;
		spinlock_t *ptl;
		bool has_page = false;

		vma = find_vma(mm, addr);
		if (!vma)
			goto next;

		BUG_ON(vma->vm_flags & VM_HUGETLB);

		pgd = pgd_offset(mm, addr);
		if (pgd_none(*pgd) || pgd_bad(*pgd))
			goto next;

		pud = pud_offset(pgd, addr);
		if (pud_none(*pud) || pud_bad(*pud))
			goto next;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd))
			goto next;

		if (unlikely(pmd_trans_huge(*pmd))) {
			has_page = true;
			goto next;
		}

		if (pmd_bad(*pmd))
			goto next;

		pte = pte_offset_map_lock(mm, pmd, addr, &ptl);

		if (pte_none(*pte))
			goto next_locked;

		if (unlikely(!pte_present(*pte)) && pte_file(*pte))
			goto next_locked;

		has_page = true;
next_locked:
		spin_unlock(ptl);
next:
		if (has_page) {
			DEBUG("found a page at %lx\n", addr);
			set_bit(i, page_bit_map);
		} else {
			clear_bit(i, page_bit_map);
		}
	} while (i++, addr += PAGE_SIZE, i < nr_pages);
}
Code example #24
File: mem_driver.c Project: minitu/mem_driver
int memory_remove_pte(struct local_page *lp) {

	unsigned long del_user_va = lp->user_va;
	unsigned long del_slab_va = lp->slab_va;
	unsigned long del_pfn = page_to_pfn(virt_to_page(del_slab_va));
	struct vm_area_struct *del_vma = lp->vma;
	struct mm_struct *del_mm = del_vma->vm_mm;

#if(DEBUG)
	printk("[%s]\n", __FUNCTION__);
	printk("del_user_va: %p\n", del_user_va);
	printk("del_slab_va: %p\n", del_slab_va);
	printk("del_pfn: %p\n", del_pfn);
	printk("del_vma: %p\n", del_vma);
	printk("del_mm: %p\n", del_mm);
#endif

	// TODO: find PTE (needs to be changed for x86)
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep;

	pgd = pgd_offset(del_mm, del_user_va);
	if (pgd_none(*pgd) || pgd_bad(*pgd)) {
		printk("<error> invalid pgd\n");
		return -1;
	}

	pud = pud_offset(pgd, del_user_va);
	if (pud_none(*pud) || pud_bad(*pud)) {
		printk("<error> invalid pud\n");
		return -1;
	}

	pmd = pmd_offset(pud, del_user_va);
	if (pmd_none(*pmd) || pmd_bad(*pmd)) {
		printk("<error> invalid pmd\n");
		return -1;
	}

	ptep = pte_offset_kernel(pmd, del_user_va);
	if (!ptep) {
		printk("<error> invalid pte\n");
		return -1;
	}

#if(DEBUG)
	printk("ptep: %p\n", ptep);
	printk("pte: %p\n", *ptep);
	printk("pfn: %p\n", pte_pfn(*ptep));
#endif

	// flush cache
	flush_cache_page(del_vma, del_user_va, del_pfn);

	// clear PTE
	pte_clear(del_mm, del_user_va, ptep);

	// flush TLB
	flush_tlb_page(del_vma, del_user_va);

	return 0;
}
Code example #25
File: xencomm.c Project: 1703011/asuswrt-merlin
/* Translate virtual address to physical address.  */
unsigned long
xencomm_vtop(unsigned long vaddr)
{
	struct page *page;
	struct vm_area_struct *vma;

	if (vaddr == 0)
		return 0UL;

	if (REGION_NUMBER(vaddr) == 5) {
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *ptep;

		/* On ia64, TASK_SIZE refers to current.  It is not initialized
		   during boot.
		   Furthermore the kernel is relocatable and __pa() doesn't
		   work on  addresses.  */
		if (vaddr >= KERNEL_START
		    && vaddr < (KERNEL_START + KERNEL_TR_PAGE_SIZE))
			return vaddr - kernel_virtual_offset;

		/* In kernel area -- virtually mapped.  */
		pgd = pgd_offset_k(vaddr);
		if (pgd_none(*pgd) || pgd_bad(*pgd))
			return ~0UL;

		pud = pud_offset(pgd, vaddr);
		if (pud_none(*pud) || pud_bad(*pud))
			return ~0UL;

		pmd = pmd_offset(pud, vaddr);
		if (pmd_none(*pmd) || pmd_bad(*pmd))
			return ~0UL;

		ptep = pte_offset_kernel(pmd, vaddr);
		if (!ptep)
			return ~0UL;

		return (pte_val(*ptep) & _PFN_MASK) | (vaddr & ~PAGE_MASK);
	}

	if (vaddr > TASK_SIZE) {
		/* percpu variables */
		if (REGION_NUMBER(vaddr) == 7 &&
		    REGION_OFFSET(vaddr) >= (1ULL << IA64_MAX_PHYS_BITS))
			return ia64_tpa(vaddr);

		/* kernel address */
		return __pa(vaddr);
	}

	vma = find_extend_vma(current->mm, vaddr);
	if (!vma)
		return ~0UL;

	/* We assume the page is modified.  */
	page = follow_page(vma, vaddr, FOLL_WRITE | FOLL_TOUCH);
	if (!page)
		return ~0UL;

	return (page_to_pfn(page) << PAGE_SHIFT) | (vaddr & ~PAGE_MASK);
}
Code example #26
static int
pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
{
	unsigned long addr = (unsigned long)_addr;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	pud_t *pud;
	spinlock_t *ptl;

	pgd = pgd_offset(current->mm, addr);
	if (unlikely(pgd_none(*pgd) || pgd_bad(*pgd)))
		return 0;

	pud = pud_offset(pgd, addr);
	if (unlikely(pud_none(*pud) || pud_bad(*pud)))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (unlikely(pmd_none(*pmd)))
		return 0;

	/*
	 * A pmd can be bad if it refers to a HugeTLB or THP page.
	 *
	 * Both THP and HugeTLB pages have the same pmd layout
	 * and should not be manipulated by the pte functions.
	 *
	 * Lock the page table for the destination and check
	 * to see that it's still huge and whether or not we will
	 * need to fault on write, or if we have a splitting THP.
	 */
	if (unlikely(pmd_thp_or_huge(*pmd))) {
		ptl = &current->mm->page_table_lock;
		spin_lock(ptl);
		if (unlikely(!pmd_thp_or_huge(*pmd)
			|| pmd_hugewillfault(*pmd)
			|| pmd_trans_splitting(*pmd))) {
			spin_unlock(ptl);
			return 0;
		}

		*ptep = NULL;
		*ptlp = ptl;
		return 1;
	}

	if (unlikely(pmd_bad(*pmd)))
		return 0;

	pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);
	if (unlikely(!pte_present(*pte) || !pte_young(*pte) ||
	    !pte_write(*pte) || !pte_dirty(*pte))) {
		pte_unmap_unlock(pte, ptl);
		return 0;
	}

	*ptep = pte;
	*ptlp = ptl;

	return 1;
}
Code example #27
File: gup.c Project: LarryShang/linux
/**
 * follow_page_mask - look up a page descriptor from a user-virtual address
 * @vma: vm_area_struct mapping @address
 * @address: virtual address to look up
 * @flags: flags modifying lookup behaviour
 * @page_mask: on output, *page_mask is set according to the size of the page
 *
 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
 *
 * Returns the mapped (struct page *), %NULL if no mapping exists, or
 * an error pointer if there is a mapping to something not represented
 * by a page descriptor (see also vm_normal_page()).
 */
struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int flags,
			      unsigned int *page_mask)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	*page_mask = 0;

	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
	if (!IS_ERR(page)) {
		BUG_ON(flags & FOLL_GET);
		return page;
	}

	pgd = pgd_offset(mm, address);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return no_page_table(vma, flags);

	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return no_page_table(vma, flags);
	if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
		if (flags & FOLL_GET)
			return NULL;
		page = follow_huge_pud(mm, address, pud, flags & FOLL_WRITE);
		return page;
	}
	if (unlikely(pud_bad(*pud)))
		return no_page_table(vma, flags);

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return no_page_table(vma, flags);
	if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
		page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
		if (flags & FOLL_GET) {
			/*
			 * Refcount on tail pages are not well-defined and
			 * shouldn't be taken. The caller should handle a NULL
			 * return when trying to follow tail pages.
			 */
			if (PageHead(page))
				get_page(page);
			else
				page = NULL;
		}
		return page;
	}
	if ((flags & FOLL_NUMA) && pmd_numa(*pmd))
		return no_page_table(vma, flags);
	if (pmd_trans_huge(*pmd)) {
		if (flags & FOLL_SPLIT) {
			split_huge_page_pmd(vma, address, pmd);
			return follow_page_pte(vma, address, pmd, flags);
		}
		ptl = pmd_lock(mm, pmd);
		if (likely(pmd_trans_huge(*pmd))) {
			if (unlikely(pmd_trans_splitting(*pmd))) {
				spin_unlock(ptl);
				wait_split_huge_page(vma->anon_vma, pmd);
			} else {
				page = follow_trans_huge_pmd(vma, address,
							     pmd, flags);
				spin_unlock(ptl);
				*page_mask = HPAGE_PMD_NR - 1;
				return page;
			}
		} else
			spin_unlock(ptl);
	}
	return follow_page_pte(vma, address, pmd, flags);
}