Example no. 1
/*
 * This routine puts a long into any process space by following the page
 * tables. NOTE! You should check that the long isn't on a page boundary,
 * and that it is in the task area before calling this: this routine does
 * no checking.
 *
 * Now keeps R/W state of page so that a text page stays readonly
 * even if a debugger scribbles breakpoints into it.  -M.U-
 */
static void put_long(struct task_struct * tsk, struct vm_area_struct * vma, unsigned long addr,
	unsigned long data)
{
	pgd_t *pgdir;
	pmd_t *pgmiddle;
	pte_t *pgtable;
	unsigned long page;
		
repeat:
	pgdir = pgd_offset(vma->vm_mm, addr);
	if (!pgd_present(*pgdir)) {
		do_no_page(tsk, vma, addr, 1);
		goto repeat;
	}
	if (pgd_bad(*pgdir)) {
		printk("ptrace: bad page directory %08lx\n", pgd_val(*pgdir));
		pgd_clear(pgdir);
		return;
	}
	pgmiddle = pmd_offset(pgdir,addr);
	if (pmd_none(*pgmiddle)) {
		do_no_page(tsk, vma, addr, 1);
		goto repeat;
	}
	if (pmd_bad(*pgmiddle)) {
		printk("ptrace: bad page directory %08lx\n",
		       pmd_val(*pgmiddle));
		pmd_clear(pgmiddle);
		return;
	}
	pgtable = pte_offset(pgmiddle, addr);
	if (!pte_present(*pgtable)) {
		do_no_page(tsk, vma, addr, 1);
		goto repeat;
	}
	page = pte_page(*pgtable);
	if (!pte_write(*pgtable)) {
		do_wp_page(tsk, vma, addr, 2);
		goto repeat;
	}
/* this is a hack for non-kernel-mapped video buffers and similar */
	if (page < high_memory) {
		*(unsigned long *) (page + (addr & ~PAGE_MASK)) = data;
		flush_page_to_ram (page);
	}
/* we're bypassing pagetables, so we have to set the dirty bit ourselves */
/* this should also re-instate whatever read-only mode there was before */
	*pgtable = pte_mkdirty(mk_pte(page, vma->vm_page_prot));
	flush_tlb_all();
}
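The header comment above says the caller must check that the long does not straddle a page boundary and that it lies inside the task area. A minimal sketch of those caller-side checks, assuming a helper of this shape (addr_ok is hypothetical and not part of the original file):

static int addr_ok(struct vm_area_struct *vma, unsigned long addr)
{
	/* the long must not cross a page boundary */
	if ((addr & ~PAGE_MASK) > PAGE_SIZE - sizeof(long))
		return 0;
	/* and it must lie inside the mapped task area */
	return addr >= vma->vm_start && addr + sizeof(long) <= vma->vm_end;
}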
Example no. 2
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp = NULL;

	pgdp = pgd_offset(mm, addr);
	if (pgd_present(*pgdp)) {
		pudp = pud_offset(pgdp, addr);
		if (pud_present(*pudp))
			pmdp = pmd_offset(pudp, addr);
	}
	return (pte_t *) pmdp;
}
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	pte = pte_offset_kernel(pmd, addr);

	return pte;
}
Example no. 4
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	if (pgd) {
		pmd = pmd_offset(pgd, addr);
		if (pmd)
			pte = pte_offset_map(pmd, addr);
	}
	return pte;
}
Example no. 5
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud)
		pte = (pte_t *) pmd_alloc(mm, pud, addr);
	BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));

	return pte;
}
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr,
		      unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud)
		pte = (pte_t *)pmd_alloc(mm, pud, addr);

	return pte;
}
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd = NULL;

	pgd = pgd_offset(mm, addr);
	if (pgd_present(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (pud_present(*pud))
			pmd = pmd_offset(pud, addr);
	}
	return (pte_t *) pmd;
}
Example no. 8
pgd_t *get_pgd_slow(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = alloc_pgd_table();
	if (!new_pgd)
		goto no_pgd;

	/*
	 * On ARM, first page must always be allocated since it contains
	 * the machine vectors.
	 */
	new_pmd = pmd_alloc(mm, new_pgd, 0);
	if (!new_pmd)
		goto no_pmd;

	new_pte = pte_alloc_map(mm, new_pmd, 0);
	if (!new_pte)
		goto no_pte;

	init_pgd = pgd_offset(&init_mm, 0);
	init_pmd = pmd_offset(init_pgd, 0);
	init_pte = pte_offset(init_pmd, 0);

	set_pte(new_pte, *init_pte);
	pte_unmap(new_pte);

	/*
	 * the page table entries are zeroed
	 * when the table is created. (see the cache_ctor functions below)
	 * Now we need to plonk the kernel (vmalloc) area at the end of
	 * the address space. We copy this from the init thread, just like
	 * the init_pte we copied above...
	 */
	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
		(PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));

	/* update MEMC tables */
	cpu_memc_update_all(new_pgd);
	return new_pgd;

no_pte:
	pmd_free(new_pmd);
no_pmd:
	free_pgd_slow(new_pgd);
no_pgd:
	return NULL;
}
Example no. 9
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *ret, *init;

	ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
	if (ret) {
		init = pgd_offset(&init_mm, 0UL);
		pgd_init(ret);
		memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}

	return ret;
}
Example no. 10
unsigned long virtaddr_to_physaddr(struct mm_struct *mm, unsigned long vaddr)
{
    pgd_t *pgd;
    pud_t *pud;
    pmd_t *pmd;
    pte_t *pte;
    unsigned long paddr = 0;

    pgd = pgd_offset(mm, vaddr);
    printk("pgd_val = 0x%lx\n", pgd_val(*pgd));
    printk("pgd_index = %lu\n", pgd_index(vaddr));
    if (pgd_none(*pgd)) {
        printk("not mapped in pgd\n");
        return INVALID_ADDR;
    }

    pud = pud_offset(pgd, vaddr);
    printk("pud_val = 0x%lx\n", pud_val(*pud));
    printk("pud_index = %lu\n", pud_index(vaddr));
    if (pud_none(*pud)) {
        printk("not mapped in pud\n");
        return INVALID_ADDR;
    }

    pmd = pmd_offset(pud, vaddr);
    printk("pmd_val = 0x%lx\n", pmd_val(*pmd));
    printk("pmd_index = %lx\n", pmd_index(vaddr));
    if(pmd_none(*pmd)){
        printk("not mapped in pmd\n");
        return INVALID_ADDR;
    }
    /* if pmd_large() is true, the pmd is the last level (a huge-page mapping) */
    if(pmd_large(*pmd)){
        paddr = (pmd_val(*pmd) & PAGE_MASK);
        paddr = paddr | (vaddr & ~PAGE_MASK);
        return paddr;
    }
    /*
     * Walk the fourth-level page table (the pte).
     * PAGE_MASK (0xfffffffffffff000) can be used to strip the low [11:0] offset bits.
     */
    else{
        /*Need to implement*/
        /*...................*/
        /*...................*/
        /*End of implement*/
        return paddr;
    }

}
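The else branch above is left unimplemented. A hedged sketch of what the fourth-level (pte) walk could look like, following the same pattern as the pmd checks above; this is an illustration under the assumption that pte_offset_kernel and pte_index are the helpers in play, not the original solution:

        /* illustrative only: last-level walk for the else branch above */
        pte = pte_offset_kernel(pmd, vaddr);
        printk("pte_val = 0x%lx\n", pte_val(*pte));
        printk("pte_index = %lu\n", pte_index(vaddr));
        if (pte_none(*pte)) {
            printk("not mapped in pte\n");
            return INVALID_ADDR;
        }
        /* frame bits from the pte, offset bits [11:0] from the virtual address */
        paddr = (pte_val(*pte) & PAGE_MASK) | (vaddr & ~PAGE_MASK);
        return paddr;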
Example no. 11
int __init wip_init(void)
{
	unsigned long va = 0xb77e5000;
	int pid = 1072;
	//struct page p;
	unsigned long long pageFN;
	unsigned long long pa;

	pgd_t *pgd;
	pmd_t *pmd;
	pud_t *pud;
	pte_t *pte = NULL;
	
	struct mm_struct *mm = NULL;

	int found = 0;

	struct task_struct *task;
	for_each_process(task)
	{
		if(task->pid == pid)
			mm = task->mm;
	}
	if (!mm)
		return -ESRCH;
	pgd = pgd_offset(mm, va);
	if(!pgd_none(*pgd) && !pgd_bad(*pgd))
	{
		pud = pud_offset(pgd,va);
		if(!pud_none(*pud) && !pud_bad(*pud))
		{
			pmd = pmd_offset(pud,va);
			if(!pmd_none(*pmd) && !pmd_bad(*pmd))
			{
				pte = pte_offset_kernel(pmd,va);
				if(pte_present(*pte))
				{
					pageFN = pte_pfn(*pte);
					pa = ((pageFN<<12)|(va&0x00000FFF));
					found = 1;
					printk(KERN_ALERT "Physical Address: 0x%08llx\npfn: 0x%04llx\n", pa, pageFN);
				}
			}
		}
	}
	/* not resident: a non-empty, non-present pte carries a swap entry */
	if(!found && pte && !pte_none(*pte))
	{
		unsigned long long swapID = (pte_val(*pte) >> 32);
		found = 1;
		printk(KERN_ALERT "swap ID: 0x%08llx\n", swapID);
	}
	return 0;
}
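The example above reads task->mm from a bare task-list walk. A hedged sketch of a safer lookup, assuming RCU and get_task_mm()/mmput() are available to the module (get_mm_by_pid is a hypothetical helper name):

static struct mm_struct *get_mm_by_pid(int pid)
{
	struct task_struct *task;
	struct mm_struct *mm = NULL;

	rcu_read_lock();
	for_each_process(task) {
		if (task->pid == pid) {
			/* takes a reference on the mm; drop it with mmput() when done */
			mm = get_task_mm(task);
			break;
		}
	}
	rcu_read_unlock();
	return mm;
}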
Example no. 12
int new_page_tables(struct task_struct * tsk)
{
	pgd_t * page_dir, * new_pg;

	if (!(new_pg = pgd_alloc()))
		return -ENOMEM;
	page_dir = pgd_offset(&init_mm, 0);
	flush_cache_mm(tsk->mm);
	memcpy(new_pg + USER_PTRS_PER_PGD, page_dir + USER_PTRS_PER_PGD,
	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof (pgd_t));
	flush_tlb_mm(tsk->mm);
	SET_PAGE_DIR(tsk, new_pg);
	tsk->mm->pgd = new_pg;
	return 0;
}
Example no. 13
static hugepte_t *hugepte_alloc(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pmd_t *pmd = NULL;

	BUG_ON(!in_hugepage_area(mm->context, addr));

	pgd = pgd_offset(mm, addr);
	pmd = pmd_alloc(mm, pgd, addr);

	/* We shouldn't find a (normal) PTE page pointer here */
	BUG_ON(!pmd_none(*pmd) && !pmd_hugepage(*pmd));
	
	return (hugepte_t *)pmd;
}
Example no. 14
static void unswap_process(struct mm_struct * mm, unsigned long entry, 
			   unsigned long page /* , int isswap */)
{
	struct vm_area_struct* vma;

	/*
	 * Go through process' page directory.
	 */
	if (!mm || mm == &init_mm)
		return;
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		pgd_t * pgd = pgd_offset(mm, vma->vm_start);
		unswap_vma(vma, pgd, entry, page /* , isswap */);
	}
}
Example no. 15
/*
 * Do a quick page-table lookup for a single page. 
 */
static struct page * follow_page(unsigned long address) 
{
	pgd_t *pgd;
	pmd_t *pmd;

	pgd = pgd_offset(current->mm, address);
	pmd = pmd_offset(pgd, address);
	if (pmd) {
		pte_t * pte = pte_offset(pmd, address);
		if (pte && pte_present(*pte))
			return pte_page(*pte);
	}
	
	return NULL;
}
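A hedged usage sketch (not from the original source) of how a caller might use follow_page() above to report the frame behind a user address; it assumes the caller already holds the mm semaphore so the mapping is stable, and print_frame is a hypothetical name:

static void print_frame(unsigned long address)
{
	struct page *page = follow_page(address);

	if (page)
		printk("addr %08lx -> pfn %lu\n", address, page_to_pfn(page));
	else
		printk("addr %08lx not mapped\n", address);
}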
Example no. 16
static int pag_unlock(unsigned long addr)
{
	    unsigned long page;
	    unsigned long kva;

	    kva = uvirt_to_kva(pgd_offset(current->mm, addr), addr);
	    if(kva)
	    {
		page = uvirt_to_pa((unsigned long)addr);
		UnlockPage(virt_to_page(__va(page)));
		ClearPageReserved(virt_to_page(__va(page)));
		return 0;
	    }
	    return EPERM;
}
Example no. 17
/**
 * @brief   Translate user virtual address to physical address.
 * @param   va [in] user virtual address.
 * @return 	success: physical address, fail: 0
 * @see     CHUNK_MEM_FREE
 */
unsigned int gp_user_va_to_pa(void *va)
{
	pgd_t *pgd = NULL;
	pud_t *pud = NULL;
	pmd_t *pmd = NULL;
	pte_t *pte = NULL;
	struct mm_struct *mm = current->mm;
	unsigned int addr = (unsigned int)va;
	unsigned int pa = 0;

	down_read(&mm->mmap_sem);

	/* query page tables */
	if (!find_vma(mm, addr)) {
		DIAG_VERB("virt_addr %08X not available.\n", addr);
		goto out;
	}
	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd)) {
		DIAG_VERB("Not mapped in pgd.\n");
		goto out;
	}
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) {
		DIAG_VERB("Not mapped in pud.\n");
		goto out;
	}
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		DIAG_VERB("Not mapped in pmd.\n");
		goto out;
	}
	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte)) {
		DIAG_VERB("Not mapped in pte.\n");
		goto out;
	}
	if (!pte_present(*pte)) {
		DIAG_VERB("pte not in RAM.\n");
		goto out;
	}

	pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);

out:
	up_read(&mm->mmap_sem);
	return pa;
}
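A hedged usage sketch for the API documented above; the caller and its name are hypothetical, and DIAG_VERB is reused from the original file:

/* hypothetical caller: log the physical address behind a user pointer */
static void show_user_pa(void *user_va)
{
	unsigned int pa = gp_user_va_to_pa(user_va);

	if (pa)
		DIAG_VERB("va %p -> pa 0x%08X\n", user_va, pa);
	else
		DIAG_VERB("va %p not resident\n", user_va);
}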
static void flush_icache_page_range(unsigned long start, unsigned long end)
{
	unsigned long addr, size, off;
	struct page *page;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ppte, pte;

	/* work out how much of the page to flush */
	off = start & ~PAGE_MASK;
	size = end - start;

	/* get the physical address the page is mapped to from the page
	 * tables */
	pgd = pgd_offset(current->mm, start);
	if (!pgd || !pgd_val(*pgd))
		return;

	pud = pud_offset(pgd, start);
	if (!pud || !pud_val(*pud))
		return;

	pmd = pmd_offset(pud, start);
	if (!pmd || !pmd_val(*pmd))
		return;

	ppte = pte_offset_map(pmd, start);
	if (!ppte)
		return;
	pte = *ppte;
	pte_unmap(ppte);

	if (pte_none(pte))
		return;

	page = pte_page(pte);
	if (!page)
		return;

	addr = page_to_phys(page);

	/* flush the dcache and invalidate the icache coverage on that
	 * region */
	mn10300_local_dcache_flush_range2(addr + off, size);
	mn10300_local_icache_inv_range2(addr + off, size);
	smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start, end);
}
Example no. 19
/**
 * flush_icache_page_range - Flush dcache and invalidate icache for part of a
 *				single page
 * @start: The starting virtual address of the page part.
 * @end: The ending virtual address of the page part.
 *
 * Flush the dcache and invalidate the icache for part of a single page, as
 * determined by the virtual addresses given.  The page must be in the paged
 * area.
 */
static void flush_icache_page_range(unsigned long start, unsigned long end)
{
	unsigned long addr, size, off;
	struct page *page;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ppte, pte;

	/* work out how much of the page to flush */
	off = start & ~PAGE_MASK;
	size = end - start;

	/* get the physical address the page is mapped to from the page
	 * tables */
	pgd = pgd_offset(current->mm, start);
	if (!pgd || !pgd_val(*pgd))
		return;

	pud = pud_offset(pgd, start);
	if (!pud || !pud_val(*pud))
		return;

	pmd = pmd_offset(pud, start);
	if (!pmd || !pmd_val(*pmd))
		return;

	ppte = pte_offset_map(pmd, start);
	if (!ppte)
		return;
	pte = *ppte;
	pte_unmap(ppte);

	if (pte_none(pte))
		return;

	page = pte_page(pte);
	if (!page)
		return;

	addr = page_to_phys(page);

	/* flush the dcache and invalidate the icache coverage on that
	 * region */
	mn10300_local_dcache_flush_range2(addr + off, size);
	mn10300_local_icache_inv_range2(addr + off, size);
	smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start, end);
}
unsigned long pmem_user_v2p_video(unsigned long va)
{
	unsigned long pageOffset = (va & (PAGE_SIZE - 1));
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long pa;

	if (NULL == current) {
		MODULE_MFV_LOGE("[ERROR] pmem_user_v2p_video, current is NULL!\n");
		return 0;
	}

	if (NULL == current->mm) {
		MODULE_MFV_LOGE("[ERROR] pmem_user_v2p_video, current->mm is NULL! tgid=0x%x, name=%s\n",
				current->tgid, current->comm);
		return 0;
	}

	pgd = pgd_offset(current->mm, va);	/* what is tsk->mm */
	if (pgd_none(*pgd) || pgd_bad(*pgd)) {
		MODULE_MFV_LOGE("[ERROR] pmem_user_v2p(), va=0x%lx, pgd invalid!\n", va);
		return 0;
	}

	pud = pud_offset(pgd, va);
	if (pud_none(*pud) || pud_bad(*pud)) {
		MODULE_MFV_LOGE("[ERROR] pmem_user_v2p(), va=0x%lx, pud invalid!\n", va);
		return 0;
	}

	pmd = pmd_offset(pud, va);
	if (pmd_none(*pmd) || pmd_bad(*pmd)) {
		MODULE_MFV_LOGE("[ERROR] pmem_user_v2p(), va=0x%lx, pmd invalid!\n", va);
		return 0;
	}

	pte = pte_offset_map(pmd, va);
	if (pte_present(*pte)) {
		pa = (pte_val(*pte) & PHYS_MASK & PAGE_MASK) | pageOffset;
		pte_unmap(pte);
		return pa;
	}
	pte_unmap(pte);

	MODULE_MFV_LOGE("[ERROR] pmem_user_v2p(), va=0x%lx, pte invalid!\n", va);
	return 0;
}
Example no. 21
static int find_num_contig(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep, size_t *pgsize)
{
	pgd_t *pgdp = pgd_offset(mm, addr);
	pud_t *pudp;
	pmd_t *pmdp;

	*pgsize = PAGE_SIZE;
	pudp = pud_offset(pgdp, addr);
	pmdp = pmd_offset(pudp, addr);
	if ((pte_t *)pmdp == ptep) {
		*pgsize = PMD_SIZE;
		return CONT_PMDS;
	}
	return CONT_PTES;
}
Example no. 22
static void unuse_process(struct mm_struct * mm,
			swp_entry_t entry, struct page* page)
{
	struct vm_area_struct* vma;

	/*
	 * Go through process' page directory.
	 */
	spin_lock(&mm->page_table_lock);
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		pgd_t * pgd = pgd_offset(mm, vma->vm_start);
		unuse_vma(vma, pgd, entry, page);
	}
	spin_unlock(&mm->page_table_lock);
	return;
}
Example no. 23
/*
 * Walking through page table.
 */
static void clear_page_range(struct vm_area_struct *vma)
{
	pgd_t *pgd;
	unsigned long next, addr, end;

	addr = vma->vm_start;
	end = vma->vm_end;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none(*pgd))
			continue;
		next = clear_pud_range(vma, pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}
Example no. 24
static inline int check_pgd_range(struct mm_struct *mm,
		unsigned long addr, unsigned long end, unsigned long *nodes)
{
	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		if (check_pud_range(mm, pgd, addr, next, nodes))
			return -EIO;
	} while (pgd++, addr = next, addr != end);
	return 0;
}
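check_pgd_range() above dispatches each non-empty pgd entry to check_pud_range(). A hedged sketch of what that helper would look like, one level down and following the same addr/next iteration pattern; check_pmd_range is assumed to exist analogously and is not shown:

static inline int check_pud_range(struct mm_struct *mm, pgd_t *pgd,
		unsigned long addr, unsigned long end, unsigned long *nodes)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		if (check_pmd_range(mm, pud, addr, next, nodes))
			return -EIO;
	} while (pud++, addr = next, addr != end);
	return 0;
}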
Example no. 25
static void flush_kernel_vm_range(unsigned long start, unsigned long end, 
				  int update_seq)
{
	struct mm_struct *mm;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr;
	int updated = 0, err;

	mm = &init_mm;
	for(addr = start; addr < end;){
		pgd = pgd_offset(mm, addr);
		pmd = pmd_offset(pgd, addr);
		if(pmd_present(*pmd)){
			pte = pte_offset_kernel(pmd, addr);
			if(!pte_present(*pte) || pte_newpage(*pte)){
				updated = 1;
				err = os_unmap_memory((void *) addr, 
						      PAGE_SIZE);
				if(err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
				if(pte_present(*pte))
					map_memory(addr, 
						   pte_val(*pte) & PAGE_MASK,
						   PAGE_SIZE, 1, 1, 1);
			}
			else if(pte_newprot(*pte)){
				updated = 1;
				protect_memory(addr, PAGE_SIZE, 1, 1, 1, 1);
			}
			addr += PAGE_SIZE;
		}
		else {
			if(pmd_newpage(*pmd)){
				updated = 1;
				err = os_unmap_memory((void *) addr, PMD_SIZE);
				if(err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr += PMD_SIZE;
		}
	}
	if(updated && update_seq) atomic_inc(&vmchange_seq);
}
pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset(mm, addr);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	pte = pte_alloc_map(mm, pmd, addr);
	pgd->pgd &= ~_PAGE_SZ_MASK;
	pgd->pgd |= _PAGE_SZHUGE;

	return pte;
}
unsigned int
vtop(void *address)
{
	pgd_t	*pgd;
	pmd_t	*pmd;
	pte_t	*pte;
	unsigned int addr, paddr;

	addr = (unsigned long) address;
	pgd = pgd_offset(current->mm, addr);
	pmd = pmd_offset(pgd, addr);
	pte = pte_offset(pmd, addr);
	paddr = (KSEG1 | (unsigned int) pte_val(*pte)) & PAGE_MASK;
	paddr |= (addr & ~PAGE_MASK);

	return paddr;
}
Example no. 28
/*
 * By the time we get here, we already hold the mm semaphore
 */
int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct * vma,
	unsigned long address, int write_access)
{
	int ret = -1;
	pgd_t *pgd;
	pmd_t *pmd;

	pgd = pgd_offset(mm, address);
	pmd = pmd_alloc(pgd, address);

	if (pmd) {
		pte_t * pte = pte_alloc(pmd, address);
		if (pte)
			ret = handle_pte_fault(mm, vma, address, write_access, pte);
	}
	return ret;
}
Example no. 29
/*
 * This routine is called from the page fault handler to remove a
 * range of active mappings at this point
 */
void
remove_mapping (struct task_struct *task, unsigned long start, unsigned long end)
{
	unsigned long beg = start;
	pgd_t *dir;

	down (&task->mm->mmap_sem);
	dir = pgd_offset (task->mm, start);
	flush_cache_range (task->mm, beg, end);
	while (start < end){
		remove_mapping_pmd_range (dir, start, end - start);
		start = (start + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	}
	flush_tlb_range (task->mm, beg, end);
	up (&task->mm->mmap_sem);
}
Example no. 30
static void mincore_page_range(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end,
			unsigned char *vec)
{
	unsigned long next;
	pgd_t *pgd;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			mincore_unmapped_range(vma, addr, next, vec);
		else
			mincore_pud_range(vma, pgd, addr, next, vec);
		vec += (next - addr) >> PAGE_SHIFT;
	} while (pgd++, addr = next, addr != end);
}