Example #1
0
int ehea_create_busmap( void )
{
	u64 vaddr = EHEA_BUSMAP_START;
	unsigned long high_section_index = 0;
	int i;

	/*
	 * Sections are not in ascending order -> loop over all sections and
	 * find the highest section index to compute the required map size.
	 */
	ehea_bmap.valid_sections = 0;

	for (i = 0; i < NR_MEM_SECTIONS; i++)
		if (valid_section_nr(i))
			high_section_index = i;

	ehea_bmap.entries = high_section_index + 1;
	ehea_bmap.vaddr = vmalloc(ehea_bmap.entries * sizeof(*ehea_bmap.vaddr));

	if (!ehea_bmap.vaddr)
		return -ENOMEM;

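	/*
	 * Hand out consecutive busmap virtual addresses to valid sections;
	 * sections without valid memory keep a zero entry.
	 */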
	for (i = 0; i < ehea_bmap.entries; i++) {
		unsigned long pfn = section_nr_to_pfn(i);

		if (pfn_valid(pfn)) {
			ehea_bmap.vaddr[i] = vaddr;
			vaddr += EHEA_SECTSIZE;
			ehea_bmap.valid_sections++;
		} else
			ehea_bmap.vaddr[i] = 0;
	}

	return 0;
}
Example #2
0
/*
 * The performance critical leaf functions are made noinline otherwise gcc
 * inlines everything into a single function which results in too much
 * register pressure.
 */
static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask, result;
	pte_t *ptep;

	result = _PAGE_PRESENT|_PAGE_USER;
	if (write)
		result |= _PAGE_RW;
	mask = result | _PAGE_SPECIAL;

	ptep = pte_offset_kernel(&pmd, addr);
	do {
		pte_t pte = ACCESS_ONCE(*ptep);
		struct page *page;

		if ((pte_val(pte) & mask) != result)
			return 0;
		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		page = pte_page(pte);
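		/*
		 * Take a speculative reference; failure means the page is
		 * being freed, so fall back to the slow path.
		 */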
		if (!page_cache_get_speculative(page))
			return 0;
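		/*
		 * The pte may have changed after we read it; if so, drop the
		 * reference and let the slow path sort it out.
		 */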
		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
			put_page(page);
			return 0;
		}
		pages[*nr] = page;
		(*nr)++;

	} while (ptep++, addr += PAGE_SIZE, addr != end);

	return 1;
}
Example #3
0
/* Is address valid for reading? */
static int valid_address(struct KBacktraceIterator *kbt, VirtualAddress address)
{
	HV_PTE *l1_pgtable = kbt->pgtable;
	HV_PTE *l2_pgtable;
	unsigned long pfn;
	HV_PTE pte;
	struct page *page;

	if (l1_pgtable == NULL)
		return 0;	/* can't read user space in other tasks */

	pte = l1_pgtable[HV_L1_INDEX(address)];
	if (!hv_pte_get_present(pte))
		return 0;
	pfn = hv_pte_get_pfn(pte);
	if (pte_huge(pte)) {
		if (!pfn_valid(pfn)) {
			pr_err("huge page has bad pfn %#lx\n", pfn);
			return 0;
		}
		return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
	}

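	/*
	 * Not a huge mapping: descend into the L2 page table, which must be
	 * in lowmem for us to dereference it directly.
	 */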
	page = pfn_to_page(pfn);
	if (PageHighMem(page)) {
		pr_err("L2 page table not in LOWMEM (%#llx)\n",
		       HV_PFN_TO_CPA(pfn));
		return 0;
	}
	l2_pgtable = (HV_PTE *)pfn_to_kaddr(pfn);
	pte = l2_pgtable[HV_L2_INDEX(address)];
	return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
}
Example #4
0
void __init page_ext_init(void)
{
	unsigned long pfn;
	int nid;

	if (!invoke_need_callbacks())
		return;

	for_each_node_state(nid, N_MEMORY) {
		unsigned long start_pfn, end_pfn;

		start_pfn = node_start_pfn(nid);
		end_pfn = node_end_pfn(nid);
		/*
		 * start_pfn and end_pfn may not be aligned to SECTION and the
		 * page->flags of out of node pages are not initialized.  So we
		 * scan [start_pfn, the biggest section's pfn < end_pfn) here.
		 */
		for (pfn = start_pfn; pfn < end_pfn;
			pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {

			if (!pfn_valid(pfn))
				continue;
			/*
			 * Nodes' pfns can overlap.
			 * Some architectures can have a node layout such as
			 * -------------pfn-------------->
			 * N0 | N1 | N2 | N0 | N1 | N2|....
			 */
			if (pfn_to_nid(pfn) != nid)
				continue;
			if (init_section_page_ext(pfn, nid))
				goto oom;
		}
	}
Example #5
0
void __update_cache(unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long pfn, addr;
	int exec = !pte_no_exec(pte) && !cpu_has_ic_fills_f_dc;

	pfn = pte_pfn(pte);
	if (unlikely(!pfn_valid(pfn)))
		return;
	page = pfn_to_page(pfn);
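	/*
	 * If the page was dirtied through its kernel mapping, write that
	 * mapping back when the page is executable or when the kernel and
	 * user addresses alias in the D-cache.
	 */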
	if (Page_dcache_dirty(page)) {
		if (PageHighMem(page))
			addr = (unsigned long)kmap_atomic(page);
		else
			addr = (unsigned long)page_address(page);

		if (exec || pages_do_alias(addr, address & PAGE_MASK))
			flush_data_cache_page(addr);

		if (PageHighMem(page))
			__kunmap_atomic((void *)addr);

		ClearPageDcacheDirty(page);
	}
}
Example #6
0
void __update_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t pte)
{
	struct page *page;
	unsigned long pfn, addr;
	int exec = (vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc;

	pfn = pte_pfn(pte);
	if (unlikely(!pfn_valid(pfn))) {
		wmb();
		return;
	}
	page = pfn_to_page(pfn);
	if (page_mapped(page) && Page_dcache_dirty(page)) {
		void *kaddr = NULL;
		if (PageHighMem(page)) {
			addr = (unsigned long)kmap_atomic(page);
			kaddr = (void *)addr;
		} else
			addr = (unsigned long) page_address(page);
		if (exec || (cpu_has_dc_aliases &&
		    pages_do_alias(addr, address & PAGE_MASK))) {
			flush_data_cache_page(addr);
			ClearPageDcacheDirty(page);
		}

		if (kaddr)
			kunmap_atomic((void *)kaddr);
	}
	wmb();  /* finish any outstanding arch cache flushes before ret to user */
}
Example #7
0
void show_mem(void)
{
#ifndef CONFIG_NEED_MULTIPLE_NODES  /* XXX(hch): later.. */
	int pfn, total = 0, reserved = 0;
	int shared = 0, cached = 0;
	int highmem = 0;
	struct page *page;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	pfn = max_mapnr;
	while (pfn-- > 0) {
		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);
		total++;
		if (PageHighMem(page))
			highmem++;
		if (PageReserved(page))
			reserved++;
		else if (PageSwapCache(page))
			cached++;
		else if (page_count(page))
			shared += page_count(page) - 1;
	}
	printk("%d pages of RAM\n", total);
	printk("%d pages of HIGHMEM\n", highmem);
	printk("%d reserved pages\n", reserved);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
#endif
}
Example #8
0
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	struct mm_struct *mm;
	struct tsb *tsb;
	unsigned long tag, flags;
	unsigned long tsb_index, tsb_hash_shift;

	if (tlb_type != hypervisor) {
		unsigned long pfn = pte_pfn(pte);
		unsigned long pg_flags;
		struct page *page;

		if (pfn_valid(pfn) &&
		    (page = pfn_to_page(pfn), page_mapping(page)) &&
		    ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
			int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
				   PG_dcache_cpu_mask);
			int this_cpu = get_cpu();

			/* This is just to optimize away some function calls
			 * in the SMP case.
			 */
			if (cpu == this_cpu)
				flush_dcache_page_impl(page);
			else
				smp_flush_dcache_page_impl(page, cpu);

			clear_dcache_dirty_cpu(page, cpu);

			put_cpu();
		}
Example #9
0
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long pfn;
	unsigned long pg_flags;

	pfn = pte_pfn(pte);
	if (pfn_valid(pfn) &&
	    (page = pfn_to_page(pfn), page_mapping(page)) &&
	    ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
		int cpu = ((pg_flags >> 24) & (NR_CPUS - 1UL));
		int this_cpu = get_cpu();

		/* This is just to optimize away some function calls
		 * in the SMP case.
		 */
		if (cpu == this_cpu)
			flush_dcache_page_impl(page);
		else
			smp_flush_dcache_page_impl(page, cpu);

		clear_dcache_dirty_cpu(page, cpu);

		put_cpu();
	}
Example #10
0
static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
{
	struct page *page;
	struct page_ext *page_ext;
	unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
	unsigned long end_pfn = pfn + zone->spanned_pages;
	unsigned long count = 0;

	/* Scan block by block. First and last block may be incomplete */
	pfn = zone->zone_start_pfn;

	/*
	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
	 * a zone boundary, it will be double counted between zones. This does
	 * not matter as the mixed block count will still be correct
	 */
	for (; pfn < end_pfn; ) {
		if (!pfn_valid(pfn)) {
			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
			continue;
		}

		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		block_end_pfn = min(block_end_pfn, end_pfn);

		page = pfn_to_page(pfn);

		for (; pfn < block_end_pfn; pfn++) {
			if (!pfn_valid_within(pfn))
				continue;

			page = pfn_to_page(pfn);

			/*
			 * We are safe to check the buddy flag and order because
			 * this is the init stage and only a single thread runs.
			 */
			if (PageBuddy(page)) {
				pfn += (1UL << page_order(page)) - 1;
				continue;
			}

			if (PageReserved(page))
				continue;

			page_ext = lookup_page_ext(page);

			/* Maybe overlapping zone */
			if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
				continue;

			/* Found early allocated page */
			set_page_owner(page, 0, 0);
			count++;
		}
	}

	pr_info("Node %d, zone %8s: page owner found early allocated %lu pages\n",
		pgdat->node_id, zone->name, count);
}
Example #11
0
static void _copy_pte(pte_t *dst_ptep, pte_t *src_ptep, unsigned long addr)
{
	pte_t pte = READ_ONCE(*src_ptep);

	if (pte_valid(pte)) {
		/*
		 * Resume will overwrite areas that may be marked
		 * read only (code, rodata). Clear the RDONLY bit from
		 * the temporary mappings we use during restore.
		 */
		set_pte(dst_ptep, pte_mkwrite(pte));
	} else if (debug_pagealloc_enabled() && !pte_none(pte)) {
		/*
		 * debug_pagealloc will have removed the PTE_VALID bit if
		 * the page isn't in use by the resume kernel. It may have
		 * been in use by the original kernel, in which case we need
		 * to put it back in our copy to do the restore.
		 *
		 * Before marking this entry valid, check that the pfn is
		 * backed by a valid page.
		 */
		BUG_ON(!pfn_valid(pte_pfn(pte)));

		set_pte(dst_ptep, pte_mkpresent(pte_mkwrite(pte)));
	}
}
Example #12
0
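/*
 * Record one byte per page mapped by this pmd in [address, end): either the
 * page's node id or its reference count (capped at 99), depending on
 * node_map.
 */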
static void
count_pmd_pages(struct mm_struct * mm, struct vm_area_struct * vma,
                pmd_t *dir, unsigned long address, unsigned long end,
                signed char *data_buf, int node_map)
{
    pte_t * pte;
    unsigned long pmd_end;
    struct page *page;
    unsigned long pfn;
    int val, index;

    if (pmd_none(*dir))
        return;
    pmd_end = (address + PMD_SIZE) & PMD_MASK;
    if (end > pmd_end)
        end = pmd_end;
    index = 0;
    do {
        pte = pte_offset_map(dir, address);
        if (!pte_none(*pte) && pte_present(*pte)) {
            pfn = pte_pfn(*pte);
            if (pfn_valid(pfn)) {
                page = pfn_to_page(pfn);
                val = node_map ? page_to_nid(page) :
                      page_count(page);
                val = (val > 99) ? 99 : val;
                data_buf[index] = val;
            }
        }
        address += PAGE_SIZE;
        pte++;
        index++;
    } while (address && (address < end));
    return;
}
Example #13
0
static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
{
	unsigned long block_sz, start_pfn;
	int sections_per_block;
	int i, nid;

	start_pfn = base >> PAGE_SHIFT;

	lock_device_hotplug();

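	/*
	 * No memmap here means the sections were never added; skip
	 * remove_memory() and just drop the memblock entry below.
	 */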
	if (!pfn_valid(start_pfn))
		goto out;

	block_sz = pseries_memory_block_size();
	sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
	nid = memory_add_physaddr_to_nid(base);

	for (i = 0; i < sections_per_block; i++) {
		remove_memory(nid, base, MIN_MEMORY_BLOCK_SIZE);
		base += MIN_MEMORY_BLOCK_SIZE;
	}

out:
	/* Update memory regions for memory remove */
	memblock_remove(base, memblock_size);
	unlock_device_hotplug();
	return 0;
}
Example #14
0
/*
 * Map a kernel logical, kernel virtual or user virtual address to a
 * physical address.  Returns 0 if the address is not currently mapped.
 */
static inline u32 physical_address(u32 virt, int write)
{
	struct vm_area_struct *vma;
	struct page *page;

	DPRINTK(" get physical address: virt %x , write %d\n", virt, write);

	/* kernel static-mapped (lowmem) address */
	if (virt_addr_valid(virt))
		return __pa((u32) virt);

	if (virt >= (u32) high_memory)
		return 0;

	/* not a static mapping: walk the page tables of the owning mm */
	vma = find_extend_vma(virt >= TASK_SIZE ? &init_mm : current->mm,
			      (u32) virt);
	if (!vma)
		return 0;

	page = follow_page(vma, (u32) virt, write);
	if (IS_ERR_OR_NULL(page))
		return 0;

	if (!pfn_valid(page_to_pfn(page)))
		return 0;

	return (page_to_pfn(page) << PAGE_SHIFT) |
	       ((u32) virt & (PAGE_SIZE - 1));
}
Example #15
0
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
		      pte_t * pte)
{
	struct page *page;
	unsigned long flags;
	unsigned long pfn = pte_pfn(*pte);

	if (!pfn_valid(pfn))
		return;

	if (vma->vm_mm == current->active_mm) {
		local_irq_save(flags);
		__nds32__mtsr_dsb(addr, NDS32_SR_TLB_VPN);
		__nds32__tlbop_rwr(*pte);
		__nds32__isb();
		local_irq_restore(flags);
	}

	page = pfn_to_page(pfn);
	if (test_and_clear_bit(PG_dcache_dirty, &page->flags) ||
	    (vma->vm_flags & VM_EXEC)) {
		local_irq_save(flags);
		cpu_dcache_wbinval_page((unsigned long)page_address(page));
		local_irq_restore(flags);
	}
}
Example #16
0
static int __init my_init(void)
{
	unsigned long i;
	unsigned long CountFree = 0, CountLocked = 0, CountDirty = 0, CountUp2Date = 0;
	struct page *cp;

	/* Walk pfns below the reported number of physical pages; pfn_valid() skips holes. */
	for (i = 0; i < get_num_physpages(); i++)
	{
		if (pfn_valid(i))
		{
			cp = pfn_to_page(i);
			if (!page_count(cp))
			{
				CountFree++;
			}
			else
			{
				CountLocked += PageLocked(cp);
				CountDirty  += PageDirty(cp);
				CountUp2Date+= PageUptodate(cp);
			}
		}
	}

	pr_info("\n       Pages Free = %lu", CountFree);
	pr_info("\n     Pages Locked = %lu", CountLocked);
	pr_info("\n     Pages Dirty  = %lu", CountDirty);
	pr_info("\n Pages Up to date = %lu \n", CountUp2Date);
	return 0;
}
Example #17
0
static ssize_t kpageflags_read(struct file *file, char __user *buf,
			     size_t count, loff_t *ppos)
{
	u64 __user *out = (u64 __user *)buf;
	struct page *ppage;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;

	pfn = src / KPMSIZE;
	count = min_t(unsigned long, count, (max_pfn * KPMSIZE) - src);
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;

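	/*
	 * Emit one 64-bit flags word per pfn; pfns without a memmap report
	 * the flags of a NULL page (KPF_NOPAGE).
	 */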
	while (count > 0) {
		if (pfn_valid(pfn))
			ppage = pfn_to_page(pfn);
		else
			ppage = NULL;

		if (put_user(stable_page_flags(ppage), out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}
Example #18
0
/*
 * Handle i/d cache flushing, called from set_pte_at() or ptep_set_access_flags()
 */
static pte_t do_dcache_icache_coherency(pte_t pte, unsigned long addr)
{
	unsigned long pfn = pte_pfn(pte);
	struct page *page;

	if (unlikely(!pfn_valid(pfn)))
		return pte;
	page = pfn_to_page(pfn);

#ifdef CONFIG_8xx
	/* On 8xx, cache control instructions (particularly
	 * "dcbst" from flush_dcache_icache) fault as write
	 * operation if there is an unpopulated TLB entry
	 * for the address in question. To workaround that,
	 * we invalidate the TLB here, thus avoiding dcbst
	 * misbehaviour.
	 */
	_tlbil_va(addr, 0 /* 8xx doesn't care about PID */);
#endif

	if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)) {
		pr_devel("do_dcache_icache_coherency... flushing\n");
		flush_dcache_icache_page(page);
		set_bit(PG_arch_1, &page->flags);
	} else
		pr_devel("do_dcache_icache_coherency... already clean\n");
	return __pte(pte_val(pte) | _PAGE_HWEXEC);
}
Example #19
0
void __init page_cgroup_init(void)
{
	unsigned long pfn;
	int nid;

	if (mem_cgroup_disabled())
		return;

	for_each_node_state(nid, N_HIGH_MEMORY) {
		unsigned long start_pfn, end_pfn;

		start_pfn = node_start_pfn(nid);
		end_pfn = node_end_pfn(nid);
		for (pfn = start_pfn;
		     pfn < end_pfn;
		     pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {

			if (!pfn_valid(pfn))
				continue;
			if (pfn_to_nid(pfn) != nid)
				continue;
			if (init_section_page_cgroup(pfn, nid))
				goto oom;
		}
	}
Example #20
0
/*
 * The probe routines leave the pages reserved, just as the bootmem code does.
 * Make sure they're still that way.
 */
static bool pages_correctly_reserved(unsigned long start_pfn)
{
	int i, j;
	struct page *page;
	unsigned long pfn = start_pfn;

	/*
	 * memmap between sections is not contiguous except with
	 * SPARSEMEM_VMEMMAP. We lookup the page once per section
	 * and assume memmap is contiguous within each section
	 */
	for (i = 0; i < sections_per_block; i++, pfn += PAGES_PER_SECTION) {
		if (WARN_ON_ONCE(!pfn_valid(pfn)))
			return false;
		page = pfn_to_page(pfn);

		for (j = 0; j < PAGES_PER_SECTION; j++) {
			if (PageReserved(page + j))
				continue;

			printk(KERN_WARNING "section number %ld page number %d "
				"not reserved, was it already online?\n",
				pfn_to_section_nr(pfn), j);

			return false;
		}
	}

	return true;
}
Example #21
0
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
		   pte_t *ptep, pte_t orig, int fullmm)
{
	if (tlb_type != hypervisor &&
	    pte_dirty(orig)) {
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;

		if (!pfn_valid(pfn))
			goto no_cache_flush;

		page = pfn_to_page(pfn);
		if (PageReserved(page))
			goto no_cache_flush;

		/* A real file page? */
		mapping = page_mapping(page);
		if (!mapping)
			goto no_cache_flush;

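		/*
		 * The D-cache index includes bit 13; if the kernel and user
		 * addresses disagree there, the two mappings can hold stale
		 * aliases, so flush.
		 */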
		paddr = (unsigned long) page_address(page);
		if ((paddr ^ vaddr) & (1 << 13))
			flush_dcache_page_all(mm, page);
	}

no_cache_flush:
	if (!fullmm)
		tlb_batch_add_one(mm, vaddr, pte_exec(orig));
}
Example #22
0
void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;

	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
		return NULL;

	/*
	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
	 */
#ifndef CONFIG_SQUASHFS_DEBUGGER_AUTO_DIAGNOSE
	if (WARN_ON(pfn_valid(pfn)))
		return NULL;
#endif

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;

#ifndef CONFIG_SMP
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) && pfn >= 0x100000 &&
	       !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
					 __pgprot(type->prot_pte));

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}
Example #23
0
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	unsigned long nr;

	vaddr &= PAGE_MASK;
	if (pte_exec(orig))
		vaddr |= 0x1UL;

	if (tlb_type != hypervisor &&
	    pte_dirty(orig)) {
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;

		if (!pfn_valid(pfn))
			goto no_cache_flush;

		page = pfn_to_page(pfn);
		if (PageReserved(page))
			goto no_cache_flush;

		/* A real file page? */
		mapping = page_mapping(page);
		if (!mapping)
			goto no_cache_flush;

		paddr = (unsigned long) page_address(page);
		if ((paddr ^ vaddr) & (1 << 13))
			flush_dcache_page_all(mm, page);
	}

no_cache_flush:

	/*
	if (tb->fullmm) {
		put_cpu_var(tlb_batch);
		return;
	}
	*/

	nr = tb->tlb_nr;

	if (unlikely(nr != 0 && mm != tb->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (nr == 0)
		tb->mm = mm;

	tb->vaddrs[nr] = vaddr;
	tb->tlb_nr = ++nr;
	if (nr >= TLB_BATCH_NR)
		flush_tlb_pending();

	put_cpu_var(tlb_batch);
}
Example #24
0
static void *try_ram_remap(resource_size_t offset, size_t size)
{
	unsigned long pfn = PHYS_PFN(offset);

	/* In the simple case just return the existing linear address */
	if (pfn_valid(pfn) && !PageHighMem(pfn_to_page(pfn)))
		return __va(offset);
	return NULL; /* fallback to arch_memremap_wb */
}
Example #25
0
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
Example #26
0
void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
{
	/* For normal memory we already have a cacheable mapping. */
	if (pfn_valid(__phys_to_pfn(phys_addr)))
		return (void __iomem *)__phys_to_virt(phys_addr);

	return __ioremap_caller(phys_addr, size, __pgprot(PROT_NORMAL),
				__builtin_return_address(0));
}
Example #27
0
/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 * 
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t pte)
{
#ifdef CONFIG_PPC_STD_MMU
	unsigned long access = 0, trap;
#endif
	unsigned long pfn = pte_pfn(pte);

	/* handle i-cache coherency */
	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
	    !cpu_has_feature(CPU_FTR_NOEXECUTE) &&
	    pfn_valid(pfn)) {
		struct page *page = pfn_to_page(pfn);
#ifdef CONFIG_8xx
		/* On 8xx, cache control instructions (particularly
		 * "dcbst" from flush_dcache_icache) fault as write
		 * operation if there is an unpopulated TLB entry
		 * for the address in question. To workaround that,
		 * we invalidate the TLB here, thus avoiding dcbst
		 * misbehaviour.
		 */
		_tlbie(address);
#endif
		if (!PageReserved(page)
		    && !test_bit(PG_arch_1, &page->flags)) {
			if (vma->vm_mm == current->active_mm) {
				__flush_dcache_icache((void *) address);
			} else
				flush_dcache_icache_page(page);
			set_bit(PG_arch_1, &page->flags);
		}
	}

#ifdef CONFIG_PPC_STD_MMU
	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(pte) || address >= TASK_SIZE)
		return;

	/* We try to figure out if we are coming from an instruction
	 * access fault and pass that down to __hash_page so we avoid
	 * double-faulting on execution of fresh text. We have to test
	 * for regs NULL since init will get here first thing at boot
	 *
	 * We also avoid filling the hash if not coming from a fault
	 */
	if (current->thread.regs == NULL)
		return;
	trap = TRAP(current->thread.regs);
	if (trap == 0x400)
		access |= _PAGE_EXEC;
	else if (trap != 0x300)
		return;
	hash_preload(vma->vm_mm, address, access, trap);
#endif /* CONFIG_PPC_STD_MMU */
}
Example #28
0
void show_mem(unsigned int filter)
{
	pg_data_t *pgdat;
	unsigned long total = 0, reserved = 0, shared = 0,
		nonshared = 0, highmem = 0;

	printk("Mem-Info:\n");
	show_free_areas(filter);

	if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
		return;

	for_each_online_pgdat(pgdat) {
		unsigned long i, flags;

		pgdat_resize_lock(pgdat, &flags);
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			struct page *page;
			unsigned long pfn = pgdat->node_start_pfn + i;

			if (unlikely(!(i % MAX_ORDER_NR_PAGES)))
				touch_nmi_watchdog();

			if (!pfn_valid(pfn))
				continue;

			page = pfn_to_page(pfn);

			if (PageHighMem(page))
				highmem++;

			if (PageReserved(page))
				reserved++;
			else if (page_count(page) == 1)
				nonshared++;
			else if (page_count(page) > 1)
				shared += page_count(page) - 1;

			total++;
		}
		pgdat_resize_unlock(pgdat, &flags);
	}

	printk("%lu pages RAM\n", total);
#ifdef CONFIG_HIGHMEM
	printk("%lu pages HighMem\n", highmem);
#endif
	printk("%lu pages reserved\n", reserved);
	printk("%lu pages shared\n", shared);
	printk("%lu pages non-shared\n", nonshared);
#ifdef CONFIG_QUICKLIST
	printk("%lu pages in pagetable cache\n",
		quicklist_total_size());
#endif
}
Example #29
0
/*
 * Check if this vmemmap page is already initialised.  If any section
 * which overlaps this vmemmap page is initialised then this page is
 * initialised already.
 */
static int __meminit vmemmap_populated(unsigned long start, int page_size)
{
	unsigned long end = start + page_size;
	start = (unsigned long)(pfn_to_page(vmemmap_section_start(start)));

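	/*
	 * Step through the range one section's worth of struct pages at a
	 * time; a valid backing pfn means that chunk of vmemmap is already
	 * populated.
	 */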
	for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
		if (pfn_valid(page_to_pfn((struct page *)start)))
			return 1;

	return 0;
}
Example #30
0
void __iomem *__uc32_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;

	/*
	 * High mappings must be section aligned
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SECTION_MASK))
		return NULL;

	/*
	 * Don't allow RAM to be mapped
	 */
	if (pfn_valid(pfn)) {
		WARN(1, "BUG: Your driver calls ioremap() on\n"
			"system memory.  This leads to architecturally\n"
			"unpredictable behaviour, and ioremap() will fail in\n"
			"the next kernel release. Please fix your driver.\n");
		return NULL;
	}

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;

	if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_UNICORE_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
		err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
					 __pgprot(type->prot_pte));

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}