Example #1
void arch_exit_mmap(struct mm_struct *mm)
{
	pte_t *pte;

	pte = virt_to_pte(mm, STUB_CODE);
	if (pte != NULL)
		pte_clear(mm, STUB_CODE, pte);

	pte = virt_to_pte(mm, STUB_DATA);
	if (pte == NULL)
		return;

	pte_clear(mm, STUB_DATA, pte);
}
Example #2
static inline void free_area_pte(pmd_t * pmd, unsigned long address, unsigned long size)
{
	pte_t * pte;
	unsigned long end;

	if (pmd_none(*pmd))
		return;
	if (pmd_bad(*pmd)) {
		printk("free_area_pte: bad pmd (%08lx)\n", pmd_val(*pmd));
		pmd_clear(pmd);
		return;
	}
	pte = pte_offset(pmd, address);
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	while (address < end) {
		pte_t page = *pte;
		pte_clear(pte);
		address += PAGE_SIZE;
		pte++;
		if (pte_none(page))
			continue;
		if (pte_present(page)) {
			free_page(pte_page(page));
			continue;
		}
		printk("Whee.. Swapped out page in kernel page table\n");
	}
}
Example #3
/*
 * copy_user_page
 * @to: P1 address
 * @from: P1 address
 * @address: U0 address to be mapped
 * @page: page (virt_to_page(to))
 */
void copy_user_page(void *to, void *from, unsigned long address, 
		    struct page *page)
{
	__set_bit(PG_mapped, &page->flags);
	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
		copy_page(to, from);
	else {
		pgprot_t pgprot = __pgprot(_PAGE_PRESENT | 
					   _PAGE_RW | _PAGE_CACHABLE |
					   _PAGE_DIRTY | _PAGE_ACCESSED | 
					   _PAGE_HW_SHARED | _PAGE_FLAGS_HARD);
		unsigned long phys_addr = PHYSADDR(to);
		unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS);
		pgd_t *dir = pgd_offset_k(p3_addr);
		pmd_t *pmd = pmd_offset(dir, p3_addr);
		pte_t *pte = pte_offset_kernel(pmd, p3_addr);
		pte_t entry;
		unsigned long flags;

		entry = pfn_pte(phys_addr >> PAGE_SHIFT, pgprot);
		down(&p3map_sem[(address & CACHE_ALIAS)>>12]);
		set_pte(pte, entry);
		local_irq_save(flags);
		__flush_tlb_page(get_asid(), p3_addr);
		local_irq_restore(flags);
		update_mmu_cache(NULL, p3_addr, entry);
		__copy_user_page((void *)p3_addr, from, to);
		pte_clear(&init_mm, p3_addr, pte);
		up(&p3map_sem[(address & CACHE_ALIAS)>>12]);
	}
}
Example #4
static inline void zap_pte_range(pmd_t * pmd, unsigned long address, unsigned long size)
{
	pte_t * pte;

	if (pmd_none(*pmd))
		return;
	if (pmd_bad(*pmd)) {
		printk("zap_pte_range: bad pmd (%08lx)\n", pmd_val(*pmd));
		pmd_clear(pmd);
		return;
	}
	pte = pte_offset(pmd, address);
	address &= ~PMD_MASK;
	if (address + size > PMD_SIZE)
		size = PMD_SIZE - address;
	size >>= PAGE_SHIFT;
	for (;;) {
		pte_t page;
		if (!size)
			break;
		page = *pte;
		pte++;
		size--;
		if (pte_none(page))
			continue;
		pte_clear(pte-1);
		free_pte(page);
	}
}
Example #5
void kunmap_atomic(void *kvaddr, enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	unsigned long idx = type + KM_TYPE_NR*smp_processor_id();

	if (vaddr < FIXADDR_START) { // FIXME
		pagefault_enable();
		return;
	}

	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx));

/* XXX Fix - Anton */
#if 0
	__flush_cache_one(vaddr);
#else
	flush_cache_all();
#endif

	/*
	 * force other mappings to Oops if they'll try to access
	 * this pte without first remap it
	 */
	pte_clear(&init_mm, vaddr, kmap_pte-idx);
/* XXX Fix - Anton */
#if 0
	__flush_tlb_one(vaddr);
#else
	flush_tlb_all();
#endif
#endif

	pagefault_enable();
}
Example #6
/*
 * copy_user_page
 * @to: kernel logical address
 * @from: kernel logical address
 * @address: U0 address to be mapped
 * @page: page (virt_to_page(to))
 */
void copy_user_page(void *to, void *from, unsigned long address,
		    struct page *page)
{
	extern void __copy_page_wb(void *to, void *from);

	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
		__copy_page_wb(to, from);
	else {
		pgprot_t pgprot = __pgprot(_PAGE_PRESENT |
					   _PAGE_RW | _PAGE_CACHABLE |
					   _PAGE_DIRTY | _PAGE_ACCESSED |
					   _PAGE_HW_SHARED | _PAGE_FLAGS_HARD);
		unsigned long phys_addr = virt_to_phys(to);
		unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS);
		pgd_t *pgd = pgd_offset_k(p3_addr);
		pud_t *pud = pud_offset(pgd, p3_addr);
		pmd_t *pmd = pmd_offset(pud, p3_addr);
		pte_t *pte = pte_offset_kernel(pmd, p3_addr);
		pte_t entry;
		unsigned long flags;

		entry = pfn_pte(phys_addr >> PAGE_SHIFT, pgprot);
		inc_preempt_count();
		BUG_ON(atomic_inc_return(&concurreny_check[(address & CACHE_ALIAS)>>12]) != 1);
		set_pte(pte, entry);
		local_irq_save(flags);
		flush_tlb_one(get_asid(), p3_addr);
		local_irq_restore(flags);
		update_mmu_cache(NULL, p3_addr, entry);
		__copy_user_page((void *)p3_addr, from, to);
		pte_clear(&init_mm, p3_addr, pte);
		atomic_dec(&concurreny_check[(address & CACHE_ALIAS)>>12]);
		dec_preempt_count();
	}
}
Example #7
static void shmedia_unmapioaddr(unsigned long vaddr)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_k(vaddr);
	if (pgd_none(*pgdp) || pgd_bad(*pgdp))
		return;

	pudp = pud_offset(pgdp, vaddr);
	if (pud_none(*pudp) || pud_bad(*pudp))
		return;

	pmdp = pmd_offset(pudp, vaddr);
	if (pmd_none(*pmdp) || pmd_bad(*pmdp))
		return;

	ptep = pte_offset_kernel(pmdp, vaddr);

	if (pte_none(*ptep) || !pte_present(*ptep))
		return;

	clear_page((void *)ptep);
	pte_clear(&init_mm, vaddr, ptep);
}
Example #8
/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
    pgd_t *pgd;
    pud_t *pud;
    pmd_t *pmd;
    pte_t *pte;

    pgd = swapper_pg_dir + pgd_index(vaddr);
    if (pgd_none(*pgd)) {
        BUG();
        return;
    }
    pud = pud_offset(pgd, vaddr);
    if (pud_none(*pud)) {
        BUG();
        return;
    }
    pmd = pmd_offset(pud, vaddr);
    if (pmd_none(*pmd)) {
        BUG();
        return;
    }
    pte = pte_offset_kernel(pmd, vaddr);
    if (pgprot_val(flags))
        /* <pfn,flags> stored as-is, to permit clearing entries */
        set_pte(pte, pfn_pte(pfn, flags));
    else
        pte_clear(&init_mm, vaddr, pte);

    /*
     * It's enough to flush this one mapping.
     * (PGE mappings get flushed as well)
     */
    __flush_tlb_one(vaddr);
}
Example #9
void __kunmap_atomic(void *kv)
{
	unsigned long kvaddr = (unsigned long)kv;

	if (kvaddr >= FIXMAP_BASE && kvaddr < (FIXMAP_BASE + FIXMAP_SIZE)) {

		/*
		 * Because preemption is disabled, this vaddr can be associated
		 * with the current allocated index.
		 * But in case of multiple live kmap_atomic(), it still relies on
		 * callers to unmap in right order.
		 */
		int cpu_idx = kmap_atomic_idx();
		int idx = cpu_idx + KM_TYPE_NR * smp_processor_id();

		WARN_ON(kvaddr != FIXMAP_ADDR(idx));

		pte_clear(&init_mm, kvaddr, fixmap_page_table + idx);
		local_flush_tlb_kernel_range(kvaddr, kvaddr + PAGE_SIZE);

		kmap_atomic_idx_pop();
	}

	pagefault_enable();
	preempt_enable();
}
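For context, the mapping side that this unmap pairs with can be sketched as below. This is a hedged reconstruction that reuses the names appearing in the example (FIXMAP_ADDR, fixmap_page_table, kmap_atomic_idx_push/pop, KM_TYPE_NR); kmap_prot and the exact guard conditions are assumptions, not quoted source.

void *kmap_atomic(struct page *page)
{
	int idx, cpu_idx;
	unsigned long vaddr;

	preempt_disable();
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	/* grab a per-cpu slot; __kunmap_atomic() above pops it again */
	cpu_idx = kmap_atomic_idx_push();
	idx = cpu_idx + KM_TYPE_NR * smp_processor_id();
	vaddr = FIXMAP_ADDR(idx);

	/* install the pte that pte_clear() later removes in __kunmap_atomic() */
	set_pte_at(&init_mm, vaddr, fixmap_page_table + idx,
		   mk_pte(page, kmap_prot));

	return (void *)vaddr;
}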
Example #10
void kunmap_atomic(void *kvaddr, enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	if (vaddr < FIXADDR_START) { // FIXME
		dec_preempt_count();
		preempt_check_resched();
		return;
	}

	if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
		BUG();

	/*
	 * force other mappings to Oops if they'll try to access
	 * this pte without first remap it
	 */
	pte_clear(&init_mm, vaddr, kmap_pte-idx);
	__flush_tlb_one(vaddr);
#endif

	dec_preempt_count();
	preempt_check_resched();
}
Example #11
/*
 * allocate new shmid_kernel and pgtable. protected by shm_segs[id] = NOID.
 */
static int newseg (key_t key, int shmflg, int size)
{
    struct shmid_kernel *shp;
    int numpages = (size + PAGE_SIZE -1) >> PAGE_SHIFT;
    int id, i;
    pte_t tmp_pte;

    if (size < SHMMIN)
        return -EINVAL;
    if (shm_tot + numpages >= shmall)
        return -ENOSPC;
    for (id = 0; id < SHMMNI; id++)
        if (shm_segs[id] == IPC_UNUSED) {
            shm_segs[id] = (struct shmid_kernel *) IPC_NOID;
            goto found;
        }
    return -ENOSPC;

found:
    shp = (struct shmid_kernel *) kmalloc (sizeof (*shp), GFP_KERNEL);
    if (!shp) {
        shm_segs[id] = (struct shmid_kernel *) IPC_UNUSED;
        wake_up (&shm_lock);
        return -ENOMEM;
    }

    shp->shm_pages = (ulong *) vmalloc (numpages*sizeof(ulong));
    if (!shp->shm_pages) {
        shm_segs[id] = (struct shmid_kernel *) IPC_UNUSED;
        wake_up (&shm_lock);
        kfree(shp);
        return -ENOMEM;
    }

    pte_clear(&tmp_pte);

    for (i = 0; i < numpages; i++)
        shp->shm_pages[i] = pte_val(tmp_pte);

    shm_tot += numpages;
    shp->u.shm_perm.key = key;
    shp->u.shm_perm.mode = (shmflg & S_IRWXUGO);
    shp->u.shm_perm.cuid = shp->u.shm_perm.uid = current->euid;
    shp->u.shm_perm.cgid = shp->u.shm_perm.gid = current->egid;
    shp->u.shm_perm.seq = shm_seq;
    shp->u.shm_segsz = size;
    shp->u.shm_cpid = current->pid;
    shp->attaches = NULL;
    shp->u.shm_lpid = shp->u.shm_nattch = 0;
    shp->u.shm_atime = shp->u.shm_dtime = 0;
    shp->u.shm_ctime = CURRENT_TIME;
    shp->shm_npages = numpages;

    if (id > max_shmid)
        max_shmid = id;
    shm_segs[id] = shp;
    used_segs++;
    wake_up (&shm_lock);
    return (unsigned int) shp->u.shm_perm.seq * SHMMNI + id;
}
Example #12
/*
 * maps a range of vmalloc()ed memory into the requested pages. the old
 * mappings are removed. 
 */
static inline void
vmap_pte_range (pte_t *pte, unsigned long address, unsigned long size, unsigned long vaddr)
{
	unsigned long end;
	pgd_t *vdir;
	pmd_t *vpmd;
	pte_t *vpte;
	
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		pte_t oldpage = *pte;
		struct page * page;
		pte_clear(pte);

		vdir = pgd_offset_k (vaddr);
		vpmd = pmd_offset (vdir, vaddr);
		vpte = pte_offset (vpmd, vaddr);
		page = pte_page (*vpte);

		set_pte(pte, mk_pte(page, PAGE_USERIO));
		forget_pte(oldpage);
		address += PAGE_SIZE;
		vaddr += PAGE_SIZE;
		pte++;
	} while (address < end);
}
Example #13
void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int type;

	if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
		pagefault_enable();
		return;
	}

	type = kmap_atomic_idx();
#ifdef CONFIG_DEBUG_HIGHMEM
	{
		unsigned int idx;

		idx = type + KM_TYPE_NR * smp_processor_id();
		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

		/*
		 * force other mappings to Oops if they'll try to access
		 * this pte without first remap it
		 */
		pte_clear(&init_mm, vaddr, kmap_pte-idx);
		local_flush_tlb_page(NULL, vaddr);
	}
#endif
	kmap_atomic_idx_pop();
	pagefault_enable();
}
Example #14
void __kunmap_atomic(void *kvaddr, enum km_type type)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	if (vaddr < FIXADDR_START) { // FIXME
		pagefault_enable();
		return;
	}

	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

	/*
	 * Protect against multiple unmaps
	 * Can't cache flush an unmapped page.
	 */
	if ( kmap_atomic_maps[smp_processor_id()].map[type].vaddr ) {
		kmap_atomic_maps[smp_processor_id()].map[type].page = (struct page *)0;
		kmap_atomic_maps[smp_processor_id()].map[type].vaddr = (void *) 0;

		flush_data_cache_page((unsigned long)vaddr);
	}

#ifdef CONFIG_DEBUG_HIGHMEM
	/*
	 * force other mappings to Oops if they'll try to access
	 * this pte without first remap it
	 */
	pte_clear(&init_mm, vaddr, kmap_pte-idx);
	local_flush_tlb_one(vaddr);
#endif

	pagefault_enable();
}
Example #15
/*
 * copy_user_page
 * @to: P1 address
 * @from: P1 address
 * @address: U0 address to be mapped
 * @page: page (virt_to_page(to))
 */
void copy_user_page(void *to, void *from, unsigned long address,
		    struct page *page)
{
	__set_bit(PG_mapped, &page->flags);
	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
		copy_page(to, from);
	else {
		unsigned long phys_addr = PHYSADDR(to);
		unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS);
		pgd_t *pgd = pgd_offset_k(p3_addr);
		pud_t *pud = pud_offset(pgd, p3_addr);
		pmd_t *pmd = pmd_offset(pud, p3_addr);
		pte_t *pte = pte_offset_kernel(pmd, p3_addr);
		pte_t entry;
		unsigned long flags;

		entry = pfn_pte(phys_addr >> PAGE_SHIFT, PAGE_KERNEL);
		mutex_lock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
		set_pte(pte, entry);
		local_irq_save(flags);
		__flush_tlb_page(get_asid(), p3_addr);
		local_irq_restore(flags);
		update_mmu_cache(NULL, p3_addr, entry);
		__copy_user_page((void *)p3_addr, from, to);
		pte_clear(&init_mm, p3_addr, pte);
		mutex_unlock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
	}
}
Example #16
static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages)
{
	pgd_t *pgd = pgd_offset_k(vaddr);
	pmd_t *pmd = pmd_offset(pgd, vaddr);
	pte_t *pte;
	unsigned long i;

	n_pages = ALIGN(n_pages, PTRS_PER_PTE);

	pr_debug("%s: vaddr: 0x%08lx, n_pages: %ld\n",
		 __func__, vaddr, n_pages);

	pte = memblock_alloc_low(n_pages * sizeof(pte_t), PAGE_SIZE);

	for (i = 0; i < n_pages; ++i)
		pte_clear(NULL, 0, pte + i);

	for (i = 0; i < n_pages; i += PTRS_PER_PTE, ++pmd) {
		pte_t *cur_pte = pte + i;

		BUG_ON(!pmd_none(*pmd));
		set_pmd(pmd, __pmd(((unsigned long)cur_pte) & PAGE_MASK));
		BUG_ON(cur_pte != pte_offset_kernel(pmd, 0));
		pr_debug("%s: pmd: 0x%p, pte: 0x%p\n",
			 __func__, pmd, cur_pte);
	}
	return pte;
}
Example #17
/*
 * Associate a virtual page frame with a given physical page frame 
 * and protection flags for that frame.
 */ 
void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		BUG();
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		BUG();
		return;
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		BUG();
		return;
	}
	pte = pte_offset_kernel(pmd, vaddr);
	if (!pte_none(pteval))
		set_pte_at(&init_mm, vaddr, pte, pteval);
	else
		pte_clear(&init_mm, vaddr, pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}
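A typical caller of set_pte_vaddr() is a fixmap setter. The sketch below is illustrative only: the __set_fixmap_example name and the bounds check are assumptions, while __fix_to_virt(), pfn_pte() and the clearing behaviour follow the function above.

static void __set_fixmap_example(enum fixed_addresses idx,
				 unsigned long phys, pgprot_t flags)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	/* a none pteval (zero pfn with empty flags) makes set_pte_vaddr()
	 * pte_clear() the slot instead of installing a mapping */
	set_pte_vaddr(address, pfn_pte(phys >> PAGE_SHIFT, flags));
}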
Example #18
static void pgd_ctor(void *addr)
{
	pte_t *ptep = (pte_t *)addr;
	int i;

	for (i = 0; i < 1024; i++, ptep++)
		pte_clear(NULL, 0, ptep);

}
Example #19
void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, unsigned long sz)
{
	int i, ncontig;
	size_t pgsize;

	ncontig = num_contig_ptes(sz, &pgsize);

	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
		pte_clear(mm, addr, ptep);
}
Example #20
static void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
#ifdef CONFIG_HOMECACHE
    int home = initial_heap_home();
#endif
    unsigned long addr = (unsigned long) begin;

    if (kdata_huge && !initfree) {
        pr_warning("Warning: ignoring initfree=0:"
                   " incompatible with kdata=huge\n");
        initfree = 1;
    }
    end = (end + PAGE_SIZE - 1) & PAGE_MASK;
    local_flush_tlb_pages(NULL, begin, PAGE_SIZE, end - begin);
    for (addr = begin; addr < end; addr += PAGE_SIZE) {
        /*
         * Note we just reset the home here directly in the
         * page table.  We know this is safe because our caller
         * just flushed the caches on all the other cpus,
         * and they won't be touching any of these pages.
         */
        int pfn = kaddr_to_pfn((void *)addr);
        struct page *page = pfn_to_page(pfn);
        pte_t *ptep = virt_to_pte(NULL, addr);
        if (!initfree) {
            /*
             * If debugging page accesses then do not free
             * this memory but mark them not present - any
             * buggy init-section access will create a
             * kernel page fault:
             */
            pte_clear(&init_mm, addr, ptep);
            continue;
        }
#ifdef CONFIG_HOMECACHE
        set_page_home(page, home);
        __clear_bit(PG_homecache_nomigrate, &page->flags);
#endif
        __ClearPageReserved(page);
        init_page_count(page);
        if (pte_huge(*ptep))
            BUG_ON(!kdata_huge);
        else
            set_pte_at(&init_mm, addr, ptep,
                       pfn_pte(pfn, PAGE_KERNEL));
        memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
        free_page(addr);
        totalram_pages++;
    }
    pr_info("Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}
Example #21
/* Remap IO memory, the same way as remap_page_range(), but use
 * the obio memory space.
 *
 * They use a pgprot that sets PAGE_IO and does not check the
 * mem_map table as this is independent of normal memory.
 *
 * As a special hack if the lowest bit of offset is set the
 * side-effect bit will be turned off.  This is used as a
 * performance improvement on FFB/AFB. -DaveM
 */
static inline void io_remap_pte_range(pte_t * pte, unsigned long address, unsigned long size,
	unsigned long offset, pgprot_t prot, int space)
{
	unsigned long end;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		pte_t oldpage;
		pte_t entry;
		unsigned long curend = address + PAGE_SIZE;
		
		entry = mk_pte_io((offset & ~(0x1UL)), prot, space);
		if (!(address & 0xffff)) {
			if (!(address & 0x3fffff) && !(offset & 0x3ffffe) && end >= address + 0x400000) {
				entry = mk_pte_io((offset & ~(0x1UL)),
						  __pgprot(pgprot_val (prot) | _PAGE_SZ4MB),
						  space);
				curend = address + 0x400000;
				offset += 0x400000;
			} else if (!(address & 0x7ffff) && !(offset & 0x7fffe) && end >= address + 0x80000) {
				entry = mk_pte_io((offset & ~(0x1UL)),
						  __pgprot(pgprot_val (prot) | _PAGE_SZ512K),
						  space);
				curend = address + 0x80000;
				offset += 0x80000;
			} else if (!(offset & 0xfffe) && end >= address + 0x10000) {
				entry = mk_pte_io((offset & ~(0x1UL)),
						  __pgprot(pgprot_val (prot) | _PAGE_SZ64K),
						  space);
				curend = address + 0x10000;
				offset += 0x10000;
			} else
				offset += PAGE_SIZE;
		} else
			offset += PAGE_SIZE;

		if (offset & 0x1UL)
			pte_val(entry) &= ~(_PAGE_E);
		do {
			oldpage = *pte;
			pte_clear(pte);
			set_pte(pte, entry);
			forget_pte(oldpage);
			address += PAGE_SIZE;
			pte++;
		} while (address < curend);
	} while (address < end);
}
Example #22
/*
 * Changing some bits of contiguous entries requires us to follow a
 * Break-Before-Make approach, breaking the whole contiguous set
 * before we can change any entries. See ARM DDI 0487A.k_iss10775,
 * "Misprogramming of the Contiguous bit", page D4-1762.
 *
 * This helper performs the break step for use cases where the
 * original pte is not needed.
 */
static void clear_flush(struct mm_struct *mm,
			     unsigned long addr,
			     pte_t *ptep,
			     unsigned long pgsize,
			     unsigned long ncontig)
{
	struct vm_area_struct vma = { .vm_mm = mm };
	unsigned long i, saddr = addr;

	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
		pte_clear(mm, addr, ptep);

	flush_tlb_range(&vma, saddr, addr);
}
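clear_flush() above is only the "break" half of the sequence; after the TLB flush the caller re-populates the contiguous run of PTEs ("make"). Below is a minimal sketch of that second half, reusing num_contig_ptes() from Example #19 and clear_flush() from this example; the function name and the pfn/prot plumbing are assumptions, not quoted source.

static void set_contig_huge_pte(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, unsigned long pfn,
				pgprot_t prot, unsigned long sz)
{
	size_t pgsize;
	int i, ncontig = num_contig_ptes(sz, &pgsize);
	unsigned long dpfn = pgsize >> PAGE_SHIFT;

	/* break: clear every entry of the contiguous range and flush the TLB */
	clear_flush(mm, addr, ptep, pgsize, ncontig);

	/* make: only now is it safe to write the new contiguous entries */
	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
		set_pte_at(mm, addr, ptep, pfn_pte(pfn, prot));
}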
Example #23
static inline int copy_one_pte(pte_t * src, pte_t * dst)
{
	int error = 0;
	pte_t pte = *src;

	if (!pte_none(pte)) {
		error++;
		if (dst) {
			pte_clear(src);
			set_pte(dst, pte);
			error--;
		}
	}
	return error;
}
Example #24
void kunmap_atomic(void *kvaddr, enum km_type type)
{
#if defined(CONFIG_DEBUG_HIGHMEM) || defined(CONFIG_XEN)
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	if (vaddr < FIXADDR_START) { // FIXME
		dec_preempt_count();
		preempt_check_resched();
		return;
	}
#endif

#if defined(CONFIG_DEBUG_HIGHMEM)
	if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
		BUG();

	/*
	 * force other mappings to Oops if they'll try to access
	 * this pte without first remap it
	 */
	pte_clear(kmap_pte-idx);
	__flush_tlb_one(vaddr);
#elif defined(CONFIG_XEN)
	/*
	 * We must ensure there are no dangling pagetable references when
	 * returning memory to Xen (decrease_reservation).
	 * XXX TODO: We could make this faster by only zapping when
	 * kmap_flush_unused is called but that is trickier and more invasive.
	 */
	pte_clear(kmap_pte-idx);
#endif

	dec_preempt_count();
	preempt_check_resched();
}
Example #25
File: kmap.c Project: nhanh0/hah
/* XXX This routine could be used with iterate_page() to replace
 * unmap_uncached_page() and save a little code space but I didn't
 * do that since I'm not certain whether this is the right path. -PB
 */
static void unmap_cached_pte(pte_t * pte, unsigned long arg)
{
    pte_t page = *pte;
    pte_clear(pte);
    if (!pte_none(page)) {
        if (pte_present(page)) {
            unsigned long map_nr = pte_pagenr(page);
            if (map_nr < max_mapnr)
                __free_page(mem_map + map_nr);
        } else {
            printk(KERN_CRIT
                   "Whee.. Swapped out page in kernel page table\n");
        }
    }
}
Example #26
/*
 * For SH-4, we have our own implementation for ptep_get_and_clear
 */
inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	pte_clear(mm, addr, ptep);
	if (!pte_not_present(pte)) {
		unsigned long pfn = pte_pfn(pte);
		if (pfn_valid(pfn)) {
			struct page *page = pfn_to_page(pfn);
			struct address_space *mapping = page_mapping(page);
			if (!mapping || !mapping_writably_mapped(mapping))
				__clear_bit(PG_mapped, &page->flags);
		}
	}
	return pte;
}
Example #27
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	pte_t entry;
	int i;

	entry = *ptep;

	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		pte_clear(mm, addr, ptep);
		addr += PAGE_SIZE;
		ptep++;
	}

	return entry;
}
Example #28
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
    pgd_t *pgd;
    pud_t *pud;
    pte_t *pte = NULL;


    pgd = pgd_offset(mm, addr);
    pud = pud_alloc(mm, pgd, addr);
    if (pud)
        pte = (pte_t *)pmd_alloc(mm, pud, addr);

    if (pte)
        pte_clear(mm, addr, pte);

    return pte;
}
Example #29
static void ion_clean_and_unmap(unsigned long vaddr, pte_t *ptep,
				size_t size, bool memory_zero)
{
	int i;

	flush_cache_vmap(vaddr, vaddr + size);

	if (memory_zero)
		memset((void *)vaddr, 0, size);

	dmac_flush_range((void *)vaddr, (void *)vaddr + size);

	for (i = 0; i < (size / PAGE_SIZE); i++)
		pte_clear(&init_mm, (void *)vaddr + (i * PAGE_SIZE), ptep + i);

	flush_cache_vunmap(vaddr, vaddr + size);
	flush_tlb_kernel_range(vaddr, vaddr + size);
}
Example #30
/* Remap IO memory, the same way as remap_pfn_range(), but use
 * the obio memory space.
 *
 * They use a pgprot that sets PAGE_IO and does not check the
 * mem_map table as this is independent of normal memory.
 */
static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte, unsigned long address, unsigned long size,
	unsigned long offset, pgprot_t prot, int space)
{
	unsigned long end;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		pte_t oldpage = *pte;
		pte_clear(mm, address, pte);
		set_pte(pte, mk_pte_io(offset, prot, space));
		address += PAGE_SIZE;
		offset += PAGE_SIZE;
		pte++;
	} while (address < end);
}