Code example #1
File: pgtable_32.c Project: AK101111/linux
int map_page(unsigned long va, phys_addr_t pa, int flags)
{
	pmd_t *pd;
	pte_t *pg;
	int err = -ENOMEM;

	/* Use upper 10 bits of VA to index the first level map */
	pd = pmd_offset(pud_offset(pgd_offset_k(va), va), va);
	/* Use middle 10 bits of VA to index the second-level map */
	pg = pte_alloc_kernel(pd, va);
	if (pg != 0) {
		err = 0;
		/* The PTE should never be already set nor present in the
		 * hash table
		 */
		BUG_ON((pte_val(*pg) & (_PAGE_PRESENT | _PAGE_HASHPTE)) &&
		       flags);
		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
						     __pgprot(flags)));
	}

	return err;
}
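A minimal usage sketch, not taken from the project above: an ioremap-style caller typically drives map_page() one page at a time across a VA/PA range. The wrapper name ioremap_range() and its arguments are illustrative assumptions.
static int ioremap_range(unsigned long va, phys_addr_t pa,
			 unsigned long size, int flags)
{
	unsigned long i;

	for (i = 0; i < size; i += PAGE_SIZE) {
		/* map one page; propagate -ENOMEM from pte_alloc_kernel() */
		int err = map_page(va + i, pa + i, flags);

		if (err)
			return err;
	}
	return 0;
}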
Code example #2
File: kmap.c Project: sarnobat/knoppix
/*
 * __iounmap unmaps nearly everything, so be careful.
 * It currently doesn't free the pointer/page tables anymore, but that
 * wasn't used anyway and might be added back later.
 */
void __iounmap(void *addr, unsigned long size)
{
	unsigned long virtaddr = (unsigned long)addr;
	pgd_t *pgd_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	while ((long)size > 0) {
		pgd_dir = pgd_offset_k(virtaddr);
		if (pgd_bad(*pgd_dir)) {
			printk("iounmap: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
			pgd_clear(pgd_dir);
			return;
		}
		pmd_dir = pmd_offset(pgd_dir, virtaddr);

		if (CPU_IS_020_OR_030) {
			int pmd_off = (virtaddr/PTRTREESIZE) & 15;

			if ((pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK) == _PAGE_PRESENT) {
				pmd_dir->pmd[pmd_off] = 0;
				virtaddr += PTRTREESIZE;
				size -= PTRTREESIZE;
				continue;
			}
		}

		if (pmd_bad(*pmd_dir)) {
			printk("iounmap: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
			pmd_clear(pmd_dir);
			return;
		}
		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

		pte_val(*pte_dir) = 0;
		virtaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	flush_tlb_all();
}
Code example #3
/**
 * __flush_hash_table_range - Flush all HPTEs for a given address range
 *                            from the hash table (and the TLB). But keeps
 *                            the linux PTEs intact.
 *
 * @mm		: mm_struct of the target address space (generally init_mm)
 * @start	: starting address
 * @end         : ending address (not included in the flush)
 *
 * This function is mostly to be used by some IO hotplug code in order
 * to remove all hash entries from a given address range used to map IO
 * space on a removed PCI-PCI bridge without tearing down the full mapping
 * since 64K pages may overlap with other bridges when using 64K pages
 * with 4K HW pages on IO space.
 *
 * Because of that usage pattern, it is implemented for small size rather
 * than speed.
 */
void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
			      unsigned long end)
{
	bool is_thp;
	int hugepage_shift;
	unsigned long flags;

	start = _ALIGN_DOWN(start, PAGE_SIZE);
	end = _ALIGN_UP(end, PAGE_SIZE);

	BUG_ON(!mm->pgd);

	/* Note: Normally, we should only ever use a batch within a
	 * PTE locked section. This violates the rule, but will work
	 * since we don't actually modify the PTEs, we just flush the
	 * hash while leaving the PTEs intact (including their reference
	 * to being hashed). This is not the most performance oriented
	 * way to do things but is fine for our needs here.
	 */
	local_irq_save(flags);
	arch_enter_lazy_mmu_mode();
	for (; start < end; start += PAGE_SIZE) {
		pte_t *ptep = find_linux_pte_or_hugepte(mm->pgd, start, &is_thp,
							&hugepage_shift);
		unsigned long pte;

		if (ptep == NULL)
			continue;
		pte = pte_val(*ptep);
		if (is_thp)
			trace_hugepage_invalidate(start, pte);
		if (!(pte & H_PAGE_HASHPTE))
			continue;
		if (unlikely(is_thp))
			hpte_do_hugepage_flush(mm, start, (pmd_t *)ptep, pte);
		else
			hpte_need_flush(mm, start, ptep, pte, hugepage_shift);
	}
	arch_leave_lazy_mmu_mode();
	local_irq_restore(flags);
}
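A hedged sketch of the usage pattern the comment describes: an IO-unplug path flushing only the hash entries for the window it is removing, leaving the Linux PTEs in place. The helper name and arguments are assumptions, not kernel code.
static void unplug_io_window(unsigned long start, unsigned long size)
{
	/* init_mm, because ioremap'ed IO space lives in the kernel address space */
	__flush_hash_table_range(&init_mm, start, start + size);
}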
Code example #4
File: tlb.c Project: 0-T-0/ps4-linux
static unsigned get_pte_for_vaddr(unsigned vaddr)
{
	struct task_struct *task = get_current();
	struct mm_struct *mm = task->mm;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	if (!mm)
		mm = task->active_mm;
	pgd = pgd_offset(mm, vaddr);
	if (pgd_none_or_clear_bad(pgd))
		return 0;
	pmd = pmd_offset(pgd, vaddr);
	if (pmd_none_or_clear_bad(pmd))
		return 0;
	pte = pte_offset_map(pmd, vaddr);
	if (!pte)
		return 0;
	return pte_val(*pte);
}
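Purely illustrative (the helper below is an assumption, not part of tlb.c): a caller can split the raw PTE word returned by get_pte_for_vaddr() back into a frame number and the low flag bits.
static void dump_vaddr_mapping(unsigned vaddr)
{
	unsigned pte = get_pte_for_vaddr(vaddr);

	if (!pte) {
		pr_info("va %08x: not mapped\n", vaddr);
		return;
	}
	/* assumes the usual layout: PFN above PAGE_SHIFT, flag bits below it */
	pr_info("va %08x -> pfn %05x flags %03x\n", vaddr,
		pte >> PAGE_SHIFT, (unsigned)(pte & ~PAGE_MASK));
}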
Code example #5
File: dma.c Project: EMCAntimatter/linux
static int
page_set_nocache(pte_t *pte, unsigned long addr,
		 unsigned long next, struct mm_walk *walk)
{
	unsigned long cl;
	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

	pte_val(*pte) |= _PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access
	 */
	flush_tlb_page(NULL, addr);

	/* Flush page out of dcache */
	for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo->dcache_block_size)
		mtspr(SPR_DCBFR, cl);

	return 0;
}
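A hedged sketch of how a pte_entry callback like this is wired up: the caller walks a freshly allocated kernel range and lets page_set_nocache() mark every PTE cache-inhibited. The wrapper below uses the older walk_page_range()/struct mm_walk interface and is an assumption about the caller, not code from dma.c.
static int mark_range_uncached(unsigned long va, unsigned long size)
{
	struct mm_walk walk = {
		.pte_entry	= page_set_nocache,
		.mm		= &init_mm,
	};

	/* non-zero return means one of the per-PTE callbacks failed */
	return walk_page_range(va, va + size, &walk);
}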
Code example #6
File: dvma.c Project: 274914765/C
inline unsigned long dvma_page(unsigned long kaddr, unsigned long vaddr)
{
    unsigned long pte;
    unsigned long j;
    pte_t ptep;

    j = *(volatile unsigned long *)kaddr;
    *(volatile unsigned long *)kaddr = j;

    ptep = pfn_pte(virt_to_pfn(kaddr), PAGE_KERNEL);
    pte = pte_val(ptep);
//        printk("dvma_remap: addr %lx -> %lx pte %08lx len %x\n",
//               kaddr, vaddr, pte, len);
    if(ptelist[(vaddr & 0xff000) >> PAGE_SHIFT] != pte) {
        sun3_put_pte(vaddr, pte);
        ptelist[(vaddr & 0xff000) >> PAGE_SHIFT] = pte;
    }

    return (vaddr + (kaddr & ~PAGE_MASK));

}
Code example #7
File: hugetlbpage.c Project: Endika/linux
static unsigned int sun4u_huge_tte_to_shift(pte_t entry)
{
	unsigned long tte_szbits = pte_val(entry) & _PAGE_SZALL_4U;
	unsigned int shift;

	switch (tte_szbits) {
	case _PAGE_SZ256MB_4U:
		shift = HPAGE_256MB_SHIFT;
		break;
	case _PAGE_SZ4MB_4U:
		shift = REAL_HPAGE_SHIFT;
		break;
	case _PAGE_SZ64K_4U:
		shift = HPAGE_64K_SHIFT;
		break;
	default:
		shift = PAGE_SHIFT;
		break;
	}
	return shift;
}
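A trivial companion helper, shown only for illustration and assumed rather than taken from hugetlbpage.c: turning the shift back into a byte size.
static unsigned long sun4u_huge_tte_to_size(pte_t entry)
{
	return 1UL << sun4u_huge_tte_to_shift(entry);
}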
Code example #8
File: callchain.c Project: 0x000000FF/edison-linux
/*
 * On 64-bit we don't want to invoke hash_page on user addresses from
 * interrupt context, so if the access faults, we read the page tables
 * to find which page (if any) is mapped and access it directly.
 */
static int read_user_stack_slow(void __user *ptr, void *buf, int nb)
{
	int ret = -EFAULT;
	pgd_t *pgdir;
	pte_t *ptep, pte;
	unsigned shift;
	unsigned long addr = (unsigned long) ptr;
	unsigned long offset;
	unsigned long pfn, flags;
	void *kaddr;

	pgdir = current->mm->pgd;
	if (!pgdir)
		return -EFAULT;

	local_irq_save(flags);
	ptep = find_linux_pte_or_hugepte(pgdir, addr, &shift);
	if (!ptep)
		goto err_out;
	if (!shift)
		shift = PAGE_SHIFT;

	/* align address to page boundary */
	offset = addr & ((1UL << shift) - 1);

	pte = READ_ONCE(*ptep);
	if (!pte_present(pte) || !(pte_val(pte) & _PAGE_USER))
		goto err_out;
	pfn = pte_pfn(pte);
	if (!page_is_ram(pfn))
		goto err_out;

	/* no highmem to worry about here */
	kaddr = pfn_to_kaddr(pfn);
	memcpy(buf, kaddr + offset, nb);
	ret = 0;
err_out:
	local_irq_restore(flags);
	return ret;
}
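A hedged sketch of the fallback pattern described in the comment above: try a fast in-atomic user access first and only walk the page tables when it faults. The caller name and the __get_user_inatomic() fast path are assumptions about the surrounding file.
static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret)
{
	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned long) ||
	    ((unsigned long)ptr & 7))
		return -EFAULT;

	pagefault_disable();
	if (!__get_user_inatomic(*ret, ptr)) {
		pagefault_enable();
		return 0;		/* fast path: no fault taken */
	}
	pagefault_enable();

	/* slow path: resolve the page by hand and copy from the kernel mapping */
	return read_user_stack_slow(ptr, ret, sizeof(*ret));
}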
Code example #9
File: hugetlbpage.c Project: 03199618/linux
static inline pte_t __pmd_to_pte(pmd_t pmd)
{
	pte_t pte;

	/*
	 * Convert encoding	  pmd bits	  pte bits
	 *			..R...I...y.	.IR...wrdytp
	 * empty		..0...1...0. -> .10...000000
	 * prot-none, old	..0...1...1. -> .10...001001
	 * prot-none, young	..1...1...1. -> .10...001101
	 * read-only, old	..1...1...0. -> .11...011001
	 * read-only, young	..1...0...1. -> .01...011101
	 * read-write, old	..0...1...0. -> .10...111001
	 * read-write, young	..0...0...1. -> .00...111101
	 * Huge ptes are dirty by definition
	 */
	if (pmd_present(pmd)) {
		pte_val(pte) = _PAGE_PRESENT | _PAGE_LARGE | _PAGE_DIRTY |
			(pmd_val(pmd) & PAGE_MASK);
		if (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID)
			pte_val(pte) |= _PAGE_INVALID;
		if (pmd_prot_none(pmd)) {
			if (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT)
				pte_val(pte) |= _PAGE_YOUNG;
		} else {
			pte_val(pte) |= _PAGE_READ;
			if (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT)
				pte_val(pte) |= _PAGE_PROTECT;
			else
				pte_val(pte) |= _PAGE_WRITE;
			if (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG)
				pte_val(pte) |= _PAGE_YOUNG;
		}
	} else
		pte_val(pte) = _PAGE_INVALID;
	return pte;
}
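A hedged sketch of the usual consumer: a huge PTE on this architecture is really a segment (pmd) entry, so reading one means converting the pmd encoding back into pte bits. The wrapper below is an assumption, not code from the file above.
pte_t huge_ptep_get(pte_t *ptep)
{
	pmd_t pmd = *(pmd_t *) ptep;

	return __pmd_to_pte(pmd);
}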
Code example #10
int arch_prepare_hugepage(struct page *page)
{
	unsigned long addr = page_to_phys(page);
	pte_t pte;
	pte_t *ptep;
	int i;

	if (MACHINE_HAS_HPAGE)
		return 0;

	ptep = (pte_t *) pte_alloc_one(&init_mm, addr);
	if (!ptep)
		return -ENOMEM;

	pte = mk_pte(page, PAGE_RW);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		set_pte_at(&init_mm, addr + i * PAGE_SIZE, ptep + i, pte);
		pte_val(pte) += PAGE_SIZE;
	}
	page[1].index = (unsigned long) ptep;
	return 0;
}
Code example #11
/*
 * On 64-bit we don't want to invoke hash_page on user addresses from
 * interrupt context, so if the access faults, we read the page tables
 * to find which page (if any) is mapped and access it directly.
 */
static int read_user_stack_slow(void __user *ptr, void *ret, int nb)
{
	pgd_t *pgdir;
	pte_t *ptep, pte;
	int pagesize;
	unsigned long addr = (unsigned long) ptr;
	unsigned long offset;
	unsigned long pfn;
	void *kaddr;

	pgdir = current->mm->pgd;
	if (!pgdir)
		return -EFAULT;

	pagesize = get_slice_psize(current->mm, addr);

	/* align address to page boundary */
	offset = addr & ((1ul << mmu_psize_defs[pagesize].shift) - 1);
	addr -= offset;

	if (is_huge_psize(pagesize))
		ptep = huge_pte_offset(current->mm, addr);
	else
		ptep = find_linux_pte(pgdir, addr);

	if (ptep == NULL)
		return -EFAULT;
	pte = *ptep;
	if (!pte_present(pte) || !(pte_val(pte) & _PAGE_USER))
		return -EFAULT;
	pfn = pte_pfn(pte);
	if (!page_is_ram(pfn))
		return -EFAULT;

	/* no highmem to worry about here */
	kaddr = pfn_to_kaddr(pfn);
	memcpy(ret, kaddr + offset, nb);
	return 0;
}
Code example #12
File: tlb.c Project: BinVul/linux2.6.32
static void update_dtlb(unsigned long address, pte_t pte)
{
	u32 tlbehi;
	u32 mmucr;

	/*
	 * We're not changing the ASID here, so no need to flush the
	 * pipeline.
	 */
	tlbehi = sysreg_read(TLBEHI);
	tlbehi = SYSREG_BF(ASID, SYSREG_BFEXT(ASID, tlbehi));
	tlbehi |= address & MMU_VPN_MASK;
	tlbehi |= SYSREG_BIT(TLBEHI_V);
	sysreg_write(TLBEHI, tlbehi);

	/* Does this mapping already exist? */
	__builtin_tlbs();
	mmucr = sysreg_read(MMUCR);

	if (mmucr & SYSREG_BIT(MMUCR_N)) {
		/* Not found -- pick a not-recently-accessed entry */
		unsigned int rp;
		u32 tlbar = sysreg_read(TLBARLO);

		rp = 32 - fls(tlbar);
		if (rp == 32) {
			rp = 0;
			sysreg_write(TLBARLO, -1L);
		}

		mmucr = SYSREG_BFINS(DRP, rp, mmucr);
		sysreg_write(MMUCR, mmucr);
	}

	sysreg_write(TLBELO, pte_val(pte) & _PAGE_FLAGS_HARDWARE_MASK);

	/* Let's go */
	__builtin_tlbw();
}
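A hedged sketch of the usual caller: update_mmu_cache() pre-loading the just-faulted PTE into the DTLB with interrupts disabled. The guard against ptrace touching a foreign mm and the exact signature are assumptions about the rest of tlb.c.
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep)
{
	unsigned long flags;

	/* e.g. ptrace poking another task's mm: nothing for us to preload */
	if (vma && current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);
	update_dtlb(address, *ptep);
	local_irq_restore(flags);
}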
Code example #13
File: tlb.c Project: dduval/kernel-rhel3
/*
 * For each address in the range, find the pte for the address
 * and check _PAGE_HASHPTE bit; if it is set, find and destroy
 * the corresponding HPTE.
 */
void
local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long pmd_end;
	unsigned int ctx = mm->context;

	if (Hash == 0) {
		_tlbia();
		return;
	}
	start &= PAGE_MASK;
	if (start >= end)
		return;
	pmd = pmd_offset(pgd_offset(mm, start), start);
	do {
		pmd_end = (start + PGDIR_SIZE) & PGDIR_MASK;
		if (!pmd_none(*pmd)) {
			if (!pmd_end || pmd_end > end)
				pmd_end = end;
			pte = pte_offset(pmd, start);
			do {
				if ((pte_val(*pte) & _PAGE_HASHPTE) != 0)
					flush_hash_page(ctx, start, pte);
				start += PAGE_SIZE;
				++pte;
			} while (start && start < pmd_end);
		} else {
			start = pmd_end;
		}
		++pmd;
	} while (start && start < end);

#ifdef CONFIG_SMP
	smp_send_tlb_invalidate(0);
#endif	
}
Code example #14
File: tlb.c Project: dduval/kernel-rhel3
void
local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	struct mm_struct *mm;
	pmd_t *pmd;
	pte_t *pte;

	if (Hash == 0) {
		_tlbie(vmaddr);
		return;
	}
	mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm;
	pmd = pmd_offset(pgd_offset(mm, vmaddr), vmaddr);
	if (!pmd_none(*pmd)) {
		pte = pte_offset(pmd, vmaddr);
		if (pte_val(*pte) & _PAGE_HASHPTE)
			flush_hash_page(mm->context, vmaddr, pte);
	}
#ifdef CONFIG_SMP
	smp_send_tlb_invalidate(0);
#endif	
}
Code example #15
/*
 * Only called after testing nattch and SHM_DEST.
 * Here pages, pgtable and shmid_kernel are freed.
 */
static void killseg (int id)
{
    struct shmid_kernel *shp;
    int i, numpages;

    shp = shm_segs[id];
    if (shp == IPC_NOID || shp == IPC_UNUSED) {
        printk ("shm nono: killseg called on unused seg id=%d\n", id);
        return;
    }
    shp->u.shm_perm.seq++;     /* for shmat */
    shm_seq = (shm_seq+1) % ((unsigned)(1<<31)/SHMMNI); /* increment, but avoid overflow */
    shm_segs[id] = (struct shmid_kernel *) IPC_UNUSED;
    used_segs--;
    if (id == max_shmid)
        while (max_shmid && (shm_segs[--max_shmid] == IPC_UNUSED));
    if (!shp->shm_pages) {
        printk ("shm nono: killseg shp->pages=NULL. id=%d\n", id);
        return;
    }
    numpages = shp->shm_npages;
    for (i = 0; i < numpages ; i++) {
        pte_t pte;
        pte = __pte(shp->shm_pages[i]);
        if (pte_none(pte))
            continue;
        if (pte_present(pte)) {
            free_page (pte_page(pte));
            shm_rss--;
        } else {
            swap_free(pte_val(pte));
            shm_swp--;
        }
    }
    vfree(shp->shm_pages);
    shm_tot -= numpages;
    kfree(shp);
    return;
}
Code example #16
File: pgtable.c Project: CCNITSilchar/linux
/*
 * set_pte stores a linux PTE into the linux page table.
 */
void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		pte_t pte)
{
	/*
	 * When handling numa faults, we already have the pte marked
	 * _PAGE_PRESENT, but we can be sure that it is not in hpte.
	 * Hence we can use set_pte_at for them.
	 */
	VM_WARN_ON(pte_present(*ptep) && !pte_protnone(*ptep));

	/* Add the pte bit when trying to set a pte */
	pte = __pte(pte_val(pte) | _PAGE_PTE);

	/* Note: mm->context.id might not yet have been assigned as
	 * this context might not have been activated yet when this
	 * is called.
	 */
	pte = set_pte_filter(pte);

	/* Perform the setting of the PTE */
	__set_pte_at(mm, addr, ptep, pte, 0);
}
Code example #17
File: mm.c Project: minority1728645/LKH
static unsigned long v2p(unsigned long va) 
{ 
    pgd_t *pgd_tmp=NULL; 
    pud_t *pud_tmp=NULL; 
    pmd_t *pmd_tmp=NULL; 
    pte_t *pte_tmp=NULL; 
 
    if(!find_vma(current->mm,va)){
        printk("<0>" "translation not found.\n"); 
        return 0; 
    } 
    pgd_tmp = pgd_offset(current->mm,va); 
    if(pgd_none(*pgd_tmp)){
        printk("<0>" "translation not found.\n");
        return 0; 
    } 
    pud_tmp = pud_offset(pgd_tmp,va); 
    if(pud_none(*pud_tmp)){ 
        printk("<0>" "translation not found.\n"); 
        return 0; 
    } 
    pmd_tmp = pmd_offset(pud_tmp,va); 
    if(pmd_none(*pmd_tmp)){
        printk("<0>" "translation not found.\n");
        return 0;
    }
 
    pte_tmp = pte_offset_kernel(pmd_tmp,va); 
 
    if(pte_none(*pte_tmp)){ 
        printk("<0>" "translation not found.\n");
        return 0; 
    } 
    if(!pte_present(*pte_tmp)){ 
        printk("<0>" "translation not found.\n");
        return 0; 
    } 
    return (pte_val(*pte_tmp)&PAGE_MASK)|(va&~PAGE_MASK); 
} 
Code example #18
File: oleole_spt.c Project: nminoru/oleolevm
static void reactivate_pte_table(pmd_t *pmd)
{
	int i;
	pte_t *pte;
	unsigned long val;

	val  = pmd_val(*pmd);

	if (likely(val & ~(_PAGE_DEACTIVATED|_PAGE_PRESENT)))
		val &= ~_PAGE_DEACTIVATED;
	else
		val &= ~(_PAGE_DEACTIVATED|_PAGE_PRESENT);

	*pmd = __pmd(val);
	
	/* PTEs */
	pte = pte_offset_map(pmd, 0);
	for (i=0 ; i<PTRS_PER_PTE ; i++, pte++) {
		val  = pte_val(*pte);
		val |= (_PAGE_DEACTIVATED|_PAGE_PRESENT);
		*pte = __pte(val);
	}
}
Code example #19
static uint32_t user_va2pa(struct mm_struct *mm, uint32_t addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	uint32_t pa = 0;
	
	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				pte_t *ptep, pte;
				
				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;
				if (pte_present(pte))
					pa = pte_val(pte) & PAGE_MASK;
				pte_unmap(ptep);
			}
		}
	}
	
	return pa;
}
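Illustrative only: a driver ioctl might use user_va2pa() to learn the physical page behind a user pointer of the calling process. The ioctl plumbing below is an assumption.
static long demo_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	uint32_t pa = user_va2pa(current->mm, (uint32_t)arg);

	if (!pa)
		return -EFAULT;		/* not mapped (or not present) right now */
	pr_debug("user va %08lx -> pa %08x\n", arg, pa);
	return 0;
}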
Code example #20
unsigned int m4u_user_v2p(unsigned int va)
{
    unsigned int pageOffset = (va & (PAGE_SIZE - 1));
    pgd_t *pgd;
    pmd_t *pmd;
    pte_t *pte;
    unsigned int pa;

    if( (!current)||(!(current->mm)))
    {
        M4UMSG("error in m4u_user_v2p: current=%d or current->mm is zero\n", current);
        return 0;
    }
    
    pgd = pgd_offset(current->mm, va); /* what is tsk->mm */
    if(pgd_none(*pgd)||pgd_bad(*pgd))
    {
        M4UMSG("warning: m4u_user_v2p(), va=0x%x, pgd invalid! \n", va);
        return 0;
    }
    
    pmd = pmd_offset(pgd, va);
    if(pmd_none(*pmd)||pmd_bad(*pmd))
    {
        M4UMSG("warning: m4u_user_v2p(), va=0x%x, pmd invalid! \n", va);
        return 0;
    }
        
    pte = pte_offset_map(pmd, va);
    if(pte_present(*pte))
    {
        pa = (pte_val(*pte) & (PAGE_MASK)) | pageOffset;
        pte_unmap(pte);
        return pa;
    }
    pte_unmap(pte);

    return 0;
}
Code example #21
File: stram.c Project: chinnyannieb/empeg-hijack
/*
 * The swap entry has been read in advance, and we return 1 to indicate
 * that the page has been used or is no longer needed.
 *
 * Always set the resulting pte to be nowrite (the same as COW pages
 * after one process has exited).  We don't know just how many PTEs will
 * share this swap entry, so be cautious and let do_wp_page work out
 * what to do if a write is requested later.
 */
static inline void unswap_pte(struct vm_area_struct * vma, unsigned long
			      address, pte_t *dir, unsigned long entry,
			      unsigned long page /*, int isswap */)
{
	pte_t pte = *dir;

	if (pte_none(pte))
		return;
	if (pte_present(pte)) {
		/* If this entry is swap-cached, then page must already
                   hold the right address for any copies in physical
                   memory */
		if (pte_page(pte) != page)
			return;
		if (0 /* isswap */)
			mem_map[MAP_NR(pte_page(pte))].offset = page;
		else
			/* We will be removing the swap cache in a moment, so... */
			set_pte(dir, pte_mkdirty(pte));
		return;
	}
	if (pte_val(pte) != entry)
		return;

	if (0 /* isswap */) {
		DPRINTK( "unswap_pte: replacing entry %08lx by %08lx", entry, page );
		set_pte(dir, __pte(page));
	}
	else {
		DPRINTK( "unswap_pte: replacing entry %08lx by new page %08lx",
				 entry, page );
		set_pte(dir, pte_mkdirty(mk_pte(page,vma->vm_page_prot)));
		atomic_inc(&mem_map[MAP_NR(page)].count);
		++vma->vm_mm->rss;
	}
	swap_free(entry);
}
Code example #22
/*
 * Dump out the page tables associated with 'addr' in mm 'mm'.
 */
void show_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;

	if (!mm)
		mm = &init_mm;

	pr_alert("pgd = %p\n", mm->pgd);
	pgd = pgd_offset(mm, addr);
	pr_alert("[%08lx] *pgd=%016llx", addr, pgd_val(*pgd));

	do {
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		if (pgd_none(*pgd) || pgd_bad(*pgd))
			break;

		pud = pud_offset(pgd, addr);
		printk(", *pud=%016llx", pud_val(*pud));
		if (pud_none(*pud) || pud_bad(*pud))
			break;

		pmd = pmd_offset(pud, addr);
		printk(", *pmd=%016llx", pmd_val(*pmd));
		if (pmd_none(*pmd) || pmd_bad(*pmd))
			break;

		pte = pte_offset_map(pmd, addr);
		printk(", *pte=%016llx", pte_val(*pte));
		pte_unmap(pte);
	} while(0);

	printk("\n");
}
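A hedged sketch of the typical call site: a kernel-fault error path dumping the offending page-table walk before dying. The handler name and message are assumptions, not code from the file above.
static void report_kernel_fault(unsigned long addr)
{
	pr_alert("Unable to handle kernel paging request at %08lx\n", addr);
	/* current->mm is NULL for kernel threads; show_pte() then uses init_mm */
	show_pte(current->mm, addr);
}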
Code example #23
File: kvm-ia64.c Project: mpalmer/linux-2.6
void kvm_arch_hardware_disable(void *garbage)
{

	long status;
	int slot;
	unsigned long pte;
	unsigned long saved_psr;
	unsigned long host_iva = ia64_getreg(_IA64_REG_CR_IVA);

	pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base),
				PAGE_KERNEL));

	local_irq_save(saved_psr);
	slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
	local_irq_restore(saved_psr);
	if (slot < 0)
		return;

	status = ia64_pal_vp_exit_env(host_iva);
	if (status)
		printk(KERN_DEBUG"kvm: Failed to disable VT support! :%ld\n",
				status);
	ia64_ptr_entry(0x3, slot);
}
Code example #24
/*
 * On 64-bit we don't want to invoke hash_page on user addresses from
 * interrupt context, so if the access faults, we read the page tables
 * to find which page (if any) is mapped and access it directly.
 */
static int read_user_stack_slow(void __user *ptr, void *ret, int nb)
{
	pgd_t *pgdir;
	pte_t *ptep, pte;
	unsigned shift;
	unsigned long addr = (unsigned long) ptr;
	unsigned long offset;
	unsigned long pfn;
	void *kaddr;

	pgdir = current->mm->pgd;
	if (!pgdir)
		return -EFAULT;

	ptep = find_linux_pte_or_hugepte(pgdir, addr, &shift);
	if (!shift)
		shift = PAGE_SHIFT;

	/* align address to page boundary */
	offset = addr & ((1UL << shift) - 1);
	addr -= offset;

	if (ptep == NULL)
		return -EFAULT;
	pte = *ptep;
	if (!pte_present(pte) || !(pte_val(pte) & _PAGE_USER))
		return -EFAULT;
	pfn = pte_pfn(pte);
	if (!page_is_ram(pfn))
		return -EFAULT;

	/* no highmem to worry about here */
	kaddr = pfn_to_kaddr(pfn);
	memcpy(ret, kaddr + offset, nb);
	return 0;
}
Code example #25
File: tlb.c Project: 01org/KVMGT-kernel
/*
 * Called at the end of pagefault, for a userspace mapped page
 *  -pre-install the corresponding TLB entry into MMU
 *  -Finalize the delayed D-cache flush of kernel mapping of page due to
 *  	flush_dcache_page(), copy_user_page()
 *
 * Note that flush (when done) involves both WBACK - so physical page is
 * in sync as well as INV - so any non-congruent aliases don't remain
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
		      pte_t *ptep)
{
	unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
	unsigned long paddr = pte_val(*ptep) & PAGE_MASK;
	struct page *page = pfn_to_page(pte_pfn(*ptep));

	create_tlb(vma, vaddr, ptep);

	if (page == ZERO_PAGE(0)) {
		return;
	}

	/*
	 * Exec page : Independent of aliasing/page-color considerations,
	 *	       since icache doesn't snoop dcache on ARC, any dirty
	 *	       K-mapping of a code page needs to be wback+inv so that
	 *	       icache fetch by userspace sees code correctly.
	 * !EXEC page: If K-mapping is NOT congruent to U-mapping, flush it
	 *	       so userspace sees the right data.
	 *  (Avoids the flush for Non-exec + congruent mapping case)
	 */
	if ((vma->vm_flags & VM_EXEC) ||
	     addr_not_cache_congruent(paddr, vaddr)) {

		int dirty = !test_and_set_bit(PG_dc_clean, &page->flags);
		if (dirty) {
			/* wback + inv dcache lines */
			__flush_dcache_page(paddr, paddr);

			/* invalidate any existing icache lines */
			if (vma->vm_flags & VM_EXEC)
				__inv_icache_page(paddr, vaddr);
		}
	}
}
Code example #26
File: kgsl.c Project: bigsley-sdg/linux_on_wince_htc
static long flush_l1_cache_range(unsigned long addr, int size)
{
	struct page *page;
	pte_t *pte_ptr;
	unsigned long end;

	for (end = addr; end < (addr + size); end += KGSL_PAGESIZE) {
		pte_ptr = kgsl_get_pte_from_vaddr(end);
		if (!pte_ptr)
			return -EINVAL;

		page = pte_page(pte_val(*pte_ptr));
		if (!page) {
			KGSL_DRV_ERR("could not find page for pte\n");
			pte_unmap(pte_ptr);
			return -EINVAL;
		}

		pte_unmap(pte_ptr);
		flush_dcache_page(page);
	}

	return 0;
}
Code example #27
File: commproc.c Project: chinnyannieb/empeg-hijack
void
m8xx_cpm_reset(uint host_page_addr)
{
	volatile immap_t	 *imp;
	volatile cpm8xx_t	*commproc;
	pte_t			*pte;

	imp = (immap_t *)IMAP_ADDR;
	commproc = (cpm8xx_t *)&imp->im_cpm;

#ifdef notdef
	/* We can't do this.  It seems to blow away the microcode
	 * patch that EPPC-Bug loaded for us.  EPPC-Bug uses SCC1 for
	 * Ethernet, SMC1 for the console, and I2C for serial EEPROM.
	 * Our own drivers quickly reset all of these.
	 */

	/* Perform a reset.
	*/
	commproc->cp_cpcr = (CPM_CR_RST | CPM_CR_FLG);

	/* Wait for it.
	*/
	while (commproc->cp_cpcr & CPM_CR_FLG);
#endif

	/* Set SDMA Bus Request priority 5.
	 * On 860T, this also enables FEC priority 6.  I am not sure
	 * this is what we really want for some applications, but the
	 * manual recommends it.
	 * Bit 25, FAM can also be set to use FEC aggressive mode (860T).
	*/
	imp->im_siu_conf.sc_sdcr = 1;

	/* Reclaim the DP memory for our use.
	*/
	dp_alloc_base = CPM_DATAONLY_BASE;
	dp_alloc_top = dp_alloc_base + CPM_DATAONLY_SIZE;
	/* Set the host page for allocation.
	*/
	host_buffer = host_page_addr;	/* Host virtual page address */
	host_end = host_page_addr + PAGE_SIZE;
	pte = va_to_pte(&init_task, host_page_addr);
	pte_val(*pte) |= _PAGE_NO_CACHE;
	flush_tlb_page(current->mm->mmap, host_buffer);

	/* Tell everyone where the comm processor resides.
	*/
	cpmp = (cpm8xx_t *)commproc;

	/* Initialize the CPM interrupt controller.
	*/
	((immap_t *)IMAP_ADDR)->im_cpic.cpic_cicr =
	    (CICR_SCD_SCC4 | CICR_SCC_SCC3 | CICR_SCB_SCC2 | CICR_SCA_SCC1) |
		(((5)/2) << 13) | CICR_HP_MASK;
	/* I hard coded the CPM interrupt to 5 above
	 * since the CPM_INTERRUPT define is relative to
	 * the linux irq structure not what the hardware
	 * believes. -- Cort
	 */
	((immap_t *)IMAP_ADDR)->im_cpic.cpic_cimr = 0;
	/* Set our interrupt handler with the core CPU.
	*/
	if (request_irq(CPM_INTERRUPT, cpm_interrupt, 0, "cpm", NULL) != 0)
		panic("Could not allocate CPM IRQ!");

	/* Install our own error handler.
	*/
	cpm_install_handler(CPMVEC_ERROR, cpm_error_interrupt, NULL);
	((immap_t *)IMAP_ADDR)->im_cpic.cpic_cicr |= CICR_IEN;
}
Code example #28
/*
 * Copy memory by briefly enabling incoherent cacheline-at-a-time mode.
 *
 * We set up our own source and destination PTEs that we fully control.
 * This is the only way to guarantee that we don't race with another
 * thread that is modifying the PTE; we can't afford to try the
 * copy_{to,from}_user() technique of catching the interrupt, since
 * we must run with interrupts disabled to avoid the risk of some
 * other code seeing the incoherent data in our cache.  (Recall that
 * our cache is indexed by PA, so even if the other code doesn't use
 * our kmap_atomic virtual addresses, they'll still hit in cache using
 * the normal VAs that aren't supposed to hit in cache.)
 */
static void memcpy_multicache(void *dest, const void *source,
			      pte_t dst_pte, pte_t src_pte, int len)
{
	int idx;
	unsigned long flags, newsrc, newdst;
	pmd_t *pmdp;
	pte_t *ptep;
	int type0, type1;
	int cpu = get_cpu();

	/*
	 * Disable interrupts so that we don't recurse into memcpy()
	 * in an interrupt handler, nor accidentally reference
	 * the PA of the source from an interrupt routine.  Also
	 * notify the simulator that we're playing games so we don't
	 * generate spurious coherency warnings.
	 */
	local_irq_save(flags);
	sim_allow_multiple_caching(1);

	/* Set up the new dest mapping */
	type0 = kmap_atomic_idx_push();
	idx = FIX_KMAP_BEGIN + (KM_TYPE_NR * cpu) + type0;
	newdst = __fix_to_virt(idx) + ((unsigned long)dest & (PAGE_SIZE-1));
	pmdp = pmd_offset(pud_offset(pgd_offset_k(newdst), newdst), newdst);
	ptep = pte_offset_kernel(pmdp, newdst);
	if (pte_val(*ptep) != pte_val(dst_pte)) {
		set_pte(ptep, dst_pte);
		local_flush_tlb_page(NULL, newdst, PAGE_SIZE);
	}

	/* Set up the new source mapping */
	type1 = kmap_atomic_idx_push();
	idx += (type0 - type1);
	src_pte = hv_pte_set_nc(src_pte);
	src_pte = hv_pte_clear_writable(src_pte);  /* be paranoid */
	newsrc = __fix_to_virt(idx) + ((unsigned long)source & (PAGE_SIZE-1));
	pmdp = pmd_offset(pud_offset(pgd_offset_k(newsrc), newsrc), newsrc);
	ptep = pte_offset_kernel(pmdp, newsrc);
	__set_pte(ptep, src_pte);   /* set_pte() would be confused by this */
	local_flush_tlb_page(NULL, newsrc, PAGE_SIZE);

	/* Actually move the data. */
	__memcpy_asm((void *)newdst, (const void *)newsrc, len);

	/*
	 * Remap the source as locally-cached and not OLOC'ed so that
	 * we can inval without also invaling the remote cpu's cache.
	 * This also avoids known errata with inv'ing cacheable oloc data.
	 */
	src_pte = hv_pte_set_mode(src_pte, HV_PTE_MODE_CACHE_NO_L3);
	src_pte = hv_pte_set_writable(src_pte); /* need write access for inv */
	__set_pte(ptep, src_pte);   /* set_pte() would be confused by this */
	local_flush_tlb_page(NULL, newsrc, PAGE_SIZE);

	/*
	 * Do the actual invalidation, covering the full L2 cache line
	 * at the end since __memcpy_asm() is somewhat aggressive.
	 */
	__inv_buffer((void *)newsrc, len);

	/*
	 * We're done: notify the simulator that all is back to normal,
	 * and re-enable interrupts and pre-emption.
	 */
	kmap_atomic_idx_pop();
	kmap_atomic_idx_pop();
	sim_allow_multiple_caching(0);
	local_irq_restore(flags);
	put_cpu();
}
Code example #29
File: kmap.c Project: sarnobat/knoppix
/*
 * Set new cache mode for some kernel address space.
 * The caller must push data for that range itself, if such data may already
 * be in the cache.
 */
void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
{
	unsigned long virtaddr = (unsigned long)addr;
	pgd_t *pgd_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	if (CPU_IS_040_OR_060) {
		switch (cmode) {
		case IOMAP_FULL_CACHING:
			cmode = _PAGE_CACHE040;
			break;
		case IOMAP_NOCACHE_SER:
		default:
			cmode = _PAGE_NOCACHE_S;
			break;
		case IOMAP_NOCACHE_NONSER:
			cmode = _PAGE_NOCACHE;
			break;
		case IOMAP_WRITETHROUGH:
			cmode = _PAGE_CACHE040W;
			break;
		}
	} else {
		switch (cmode) {
		case IOMAP_NOCACHE_SER:
		case IOMAP_NOCACHE_NONSER:
		default:
			cmode = _PAGE_NOCACHE030;
			break;
		case IOMAP_FULL_CACHING:
		case IOMAP_WRITETHROUGH:
			cmode = 0;
		}
	}

	while ((long)size > 0) {
		pgd_dir = pgd_offset_k(virtaddr);
		if (pgd_bad(*pgd_dir)) {
			printk("iocachemode: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
			pgd_clear(pgd_dir);
			return;
		}
		pmd_dir = pmd_offset(pgd_dir, virtaddr);

		if (CPU_IS_020_OR_030) {
			int pmd_off = (virtaddr/PTRTREESIZE) & 15;

			if ((pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK) == _PAGE_PRESENT) {
				pmd_dir->pmd[pmd_off] = (pmd_dir->pmd[pmd_off] &
							 _CACHEMASK040) | cmode;
				virtaddr += PTRTREESIZE;
				size -= PTRTREESIZE;
				continue;
			}
		}

		if (pmd_bad(*pmd_dir)) {
			printk("iocachemode: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
			pmd_clear(pmd_dir);
			return;
		}
		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

		pte_val(*pte_dir) = (pte_val(*pte_dir) & _CACHEMASK040) | cmode;
		virtaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	flush_tlb_all();
}
Code example #30
File: kmap.c Project: sarnobat/knoppix
void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
{
	struct vm_struct *area;
	unsigned long virtaddr, retaddr;
	long offset;
	pgd_t *pgd_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	/*
	 * Don't allow mappings that wrap..
	 */
	if (!size || size > physaddr + size)
		return NULL;

#ifdef CONFIG_AMIGA
	if (MACH_IS_AMIGA) {
		if ((physaddr >= 0x40000000) && (physaddr + size < 0x60000000)
		    && (cacheflag == IOMAP_NOCACHE_SER))
			return (void *)physaddr;
	}
#endif

#ifdef DEBUG
	printk("ioremap: 0x%lx,0x%lx(%d) - ", physaddr, size, cacheflag);
#endif
	/*
	 * Mappings have to be aligned
	 */
	offset = physaddr & (IO_SIZE - 1);
	physaddr &= -IO_SIZE;
	size = (size + offset + IO_SIZE - 1) & -IO_SIZE;

	/*
	 * Ok, go for it..
	 */
	area = get_io_area(size);
	if (!area)
		return NULL;

	virtaddr = (unsigned long)area->addr;
	retaddr = virtaddr + offset;
#ifdef DEBUG
	printk("0x%lx,0x%lx,0x%lx", physaddr, virtaddr, retaddr);
#endif

	/*
	 * add cache and table flags to physical address
	 */
	if (CPU_IS_040_OR_060) {
		physaddr |= (_PAGE_PRESENT | _PAGE_GLOBAL040 |
			     _PAGE_ACCESSED | _PAGE_DIRTY);
		switch (cacheflag) {
		case IOMAP_FULL_CACHING:
			physaddr |= _PAGE_CACHE040;
			break;
		case IOMAP_NOCACHE_SER:
		default:
			physaddr |= _PAGE_NOCACHE_S;
			break;
		case IOMAP_NOCACHE_NONSER:
			physaddr |= _PAGE_NOCACHE;
			break;
		case IOMAP_WRITETHROUGH:
			physaddr |= _PAGE_CACHE040W;
			break;
		}
	} else {
		physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
		switch (cacheflag) {
		case IOMAP_NOCACHE_SER:
		case IOMAP_NOCACHE_NONSER:
		default:
			physaddr |= _PAGE_NOCACHE030;
			break;
		case IOMAP_FULL_CACHING:
		case IOMAP_WRITETHROUGH:
			break;
		}
	}

	while ((long)size > 0) {
#ifdef DEBUG
		if (!(virtaddr & (PTRTREESIZE-1)))
			printk ("\npa=%#lx va=%#lx ", physaddr, virtaddr);
#endif
		pgd_dir = pgd_offset_k(virtaddr);
		pmd_dir = pmd_alloc(&init_mm, pgd_dir, virtaddr);
		if (!pmd_dir) {
			printk("ioremap: no mem for pmd_dir\n");
			return NULL;
		}

		if (CPU_IS_020_OR_030) {
			pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
			physaddr += PTRTREESIZE;
			virtaddr += PTRTREESIZE;
			size -= PTRTREESIZE;
		} else {
			pte_dir = pte_alloc_kernel(&init_mm, pmd_dir, virtaddr);
			if (!pte_dir) {
				printk("ioremap: no mem for pte_dir\n");
				return NULL;
			}

			pte_val(*pte_dir) = physaddr;
			virtaddr += PAGE_SIZE;
			physaddr += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
#ifdef DEBUG
	printk("\n");
#endif
	flush_tlb_all();

	return (void *)retaddr;
}
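A closing usage sketch (all values assumed, not taken from kmap.c): a driver mapping a device window non-cached with __ioremap() and releasing it again with __iounmap() from code example #2.
static void *demo_regs;

static int demo_map(void)
{
	/* physical base and window size are made-up example values */
	demo_regs = __ioremap(0x40000000, 0x1000, IOMAP_NOCACHE_SER);
	return demo_regs ? 0 : -ENOMEM;
}

static void demo_unmap(void)
{
	__iounmap(demo_regs, 0x1000);	/* see code example #2 */
	demo_regs = NULL;
}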