Example #1
/*
 * Update the MMU hash table to correspond with a change to
 * a Linux PTE.  If wrprot is true, it is permissible to
 * change the existing HPTE to read-only rather than removing it
 * (if we remove it we should clear the _PAGE_HPTEFLAGS bits).
 */
void hpte_update(struct mm_struct *mm, unsigned long addr,
		 unsigned long pte, int wrprot)
{
	int i;
	unsigned long context = 0;
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

	if (REGION_ID(addr) == USER_REGION_ID)
		context = mm->context.id;
	i = batch->index;

	/*
	 * This can happen when we are in the middle of a TLB batch and
	 * we encounter memory pressure (eg copy_page_range when it tries
	 * to allocate a new pte). If we have to reclaim memory and end
	 * up scanning and resetting referenced bits then our batch context
	 * will change mid stream.
	 */
	if (i != 0 && (context != batch->context ||
		       batch->large != pte_huge(pte))) {
		flush_tlb_pending();
		i = 0;
	}

	if (i == 0) {
		batch->context = context;
		batch->mm = mm;
		batch->large = pte_huge(pte);
	}
	batch->pte[i] = __pte(pte);
	batch->addr[i] = addr;
	batch->index = ++i;
	if (i >= PPC64_TLB_BATCH_NR)
		flush_tlb_pending();
}
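
The batch logic above is the interesting part; here is a minimal, self-contained sketch of the same "accumulate until the context changes or the batch fills up" pattern, using a hypothetical structure and flush routine rather than the real ppc64_tlb_batch API:

#define EXAMPLE_BATCH_NR 192

struct example_tlb_batch {
	unsigned long context;	/* context the pending entries belong to */
	int large;		/* nonzero if pending entries map huge pages */
	int index;		/* number of pending entries */
	unsigned long addr[EXAMPLE_BATCH_NR];
};

/* Hypothetical flush: invalidate every pending entry, then reset. */
static void example_flush(struct example_tlb_batch *b)
{
	/* ... issue the invalidations for b->addr[0 .. b->index) ... */
	b->index = 0;
}

/* Queue one address, flushing first if it no longer matches the batch. */
static void example_add(struct example_tlb_batch *b, unsigned long context,
			int large, unsigned long addr)
{
	if (b->index != 0 && (context != b->context || large != b->large))
		example_flush(b);
	if (b->index == 0) {
		b->context = context;
		b->large = large;
	}
	b->addr[b->index++] = addr;
	if (b->index >= EXAMPLE_BATCH_NR)
		example_flush(b);
}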
Example #2
/* Is address valid for reading? */
static int valid_address(struct KBacktraceIterator *kbt, unsigned long address)
{
	HV_PTE *l1_pgtable = kbt->pgtable;
	HV_PTE *l2_pgtable;
	unsigned long pfn;
	HV_PTE pte;
	struct page *page;

	if (l1_pgtable == NULL)
		return 0;	/* can't read user space in other tasks */

#ifdef CONFIG_64BIT
	/* Find the real l1_pgtable by looking in the l0_pgtable. */
	pte = l1_pgtable[HV_L0_INDEX(address)];
	if (!hv_pte_get_present(pte))
		return 0;
	pfn = hv_pte_get_pfn(pte);
	if (pte_huge(pte)) {
		if (!pfn_valid(pfn)) {
			pr_err("L0 huge page has bad pfn %#lx\n", pfn);
			return 0;
		}
		return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
	}
	page = pfn_to_page(pfn);
	BUG_ON(PageHighMem(page));  /* No HIGHMEM on 64-bit. */
	l1_pgtable = (HV_PTE *)pfn_to_kaddr(pfn);
#endif
	pte = l1_pgtable[HV_L1_INDEX(address)];
	if (!hv_pte_get_present(pte))
		return 0;
	pfn = hv_pte_get_pfn(pte);
	if (pte_huge(pte)) {
		if (!pfn_valid(pfn)) {
			pr_err("huge page has bad pfn %#lx\n", pfn);
			return 0;
		}
		return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
	}

	page = pfn_to_page(pfn);
	if (PageHighMem(page)) {
		pr_err("L2 page table not in LOWMEM (%#llx)\n",
		       HV_PFN_TO_CPA(pfn));
		return 0;
	}
	l2_pgtable = (HV_PTE *)pfn_to_kaddr(pfn);
	pte = l2_pgtable[HV_L2_INDEX(address)];
	return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
}
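
A hedged sketch of how such a check is typically consumed by the backtracer's memory reader follows; the helper name is hypothetical and the real read path in backtrace.c does additional checking, but the shape of the call is the same:

/*
 * Hypothetical helper: verify the mapping with valid_address() first,
 * then copy with page faults disabled so a racing unmap cannot fault us.
 */
static int example_read_word(struct KBacktraceIterator *kbt,
			     unsigned long address, unsigned long *result)
{
	int ok;

	if (!valid_address(kbt, address))
		return 0;		/* unmapped or not readable */
	pagefault_disable();
	ok = __copy_from_user_inatomic(result, (void __user *)address,
				       sizeof(*result)) == 0;
	pagefault_enable();
	return ok;
}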
Example #3
/* Is address valid for reading? */
static int valid_address(struct KBacktraceIterator *kbt, VirtualAddress address)
{
	HV_PTE *l1_pgtable = kbt->pgtable;
	HV_PTE *l2_pgtable;
	unsigned long pfn;
	HV_PTE pte;
	struct page *page;

	if (l1_pgtable == NULL)
		return 0;	/* can't read user space in other tasks */

	pte = l1_pgtable[HV_L1_INDEX(address)];
	if (!hv_pte_get_present(pte))
		return 0;
	pfn = hv_pte_get_pfn(pte);
	if (pte_huge(pte)) {
		if (!pfn_valid(pfn)) {
			pr_err("huge page has bad pfn %#lx\n", pfn);
			return 0;
		}
		return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
	}

	page = pfn_to_page(pfn);
	if (PageHighMem(page)) {
		pr_err("L2 page table not in LOWMEM (%#llx)\n",
		       HV_PFN_TO_CPA(pfn));
		return 0;
	}
	l2_pgtable = (HV_PTE *)pfn_to_kaddr(pfn);
	pte = l2_pgtable[HV_L2_INDEX(address)];
	return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
}
Example #4
/*
 * The __w1data area holds data that is only written during initialization,
 * and is read-only and thus freely cacheable thereafter.  Fix the page
 * table entries that cover that region accordingly.
 */
static void mark_w1data_ro(void)
{
	/* Loop over page table entries */
	unsigned long addr = (unsigned long)__w1data_begin;
	BUG_ON((addr & (PAGE_SIZE-1)) != 0);
	for (; addr <= (unsigned long)__w1data_end - 1; addr += PAGE_SIZE) {
		unsigned long pfn = kaddr_to_pfn((void *)addr);
		pte_t *ptep = virt_to_pte(NULL, addr);
		BUG_ON(pte_huge(*ptep));   /* not relevant for kdata_huge */
		set_pte_at(&init_mm, addr, ptep, pfn_pte(pfn, PAGE_KERNEL_RO));
	}
}
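
For context, here is a hedged sketch of how data ends up in this region in the first place. The attribute spelling is an assumption about this port; the only point being illustrated is that the variable lives in the .w1data section covered by the loop above:

/* Assumed helper: place a variable in the write-once .w1data section. */
#define __write_once __attribute__((__section__(".w1data")))

/* Written during boot setup, then read-only (and freely cacheable). */
static int example_node_count __write_once;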
Example #5
static void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
#ifdef CONFIG_HOMECACHE
	int home = initial_heap_home();
#endif
	unsigned long addr = (unsigned long) begin;

	if (kdata_huge && !initfree) {
		pr_warning("Warning: ignoring initfree=0:"
			   " incompatible with kdata=huge\n");
		initfree = 1;
	}
	end = (end + PAGE_SIZE - 1) & PAGE_MASK;
	local_flush_tlb_pages(NULL, begin, PAGE_SIZE, end - begin);
	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		/*
		 * Note we just reset the home here directly in the
		 * page table.  We know this is safe because our caller
		 * just flushed the caches on all the other cpus,
		 * and they won't be touching any of these pages.
		 */
		int pfn = kaddr_to_pfn((void *)addr);
		struct page *page = pfn_to_page(pfn);
		pte_t *ptep = virt_to_pte(NULL, addr);
		if (!initfree) {
			/*
			 * If debugging page accesses then do not free
			 * this memory but mark it not present - any
			 * buggy init-section access will create a
			 * kernel page fault:
			 */
			pte_clear(&init_mm, addr, ptep);
			continue;
		}
#ifdef CONFIG_HOMECACHE
		set_page_home(page, home);
		__clear_bit(PG_homecache_nomigrate, &page->flags);
#endif
		__ClearPageReserved(page);
		init_page_count(page);
		if (pte_huge(*ptep))
			BUG_ON(!kdata_huge);
		else
			set_pte_at(&init_mm, addr, ptep,
				   pfn_pte(pfn, PAGE_KERNEL));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	pr_info("Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}
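
A hedged sketch of a typical caller: once boot completes, the init sections are handed back to the page allocator. The helper name is illustrative and the port's real free_initmem() may do more, but the call shape is standard:

extern char __init_begin[], __init_end[];	/* standard linker-script symbols */

void example_free_initmem(void)
{
	free_init_pages("unused kernel memory",
			(unsigned long)__init_begin,
			(unsigned long)__init_end);
}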
Example #6
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud)
		pte = (pte_t *) pmd_alloc(mm, pud, addr);
	BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));

	return pte;
}
Example #7
pte_t *huge_pte_alloc(struct mm_struct *mm,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pte_t *pte = NULL;

	/* We do not yet support multiple huge page sizes. */
	BUG_ON(sz != PMD_SIZE);

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud)
		pte = (pte_t *) pmd_alloc(mm, pud, addr);
	BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));

	return pte;
}
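
A hedged sketch of how the generic hugetlb layer typically invokes this hook, simplified from the usual fault-handling pattern (illustrative helper name, not the upstream code):

static pte_t *example_huge_walk(struct mm_struct *mm, struct hstate *h,
				unsigned long address)
{
	/* Round down to the start of the huge page covering 'address'. */
	address &= huge_page_mask(h);
	return huge_pte_alloc(mm, address, huge_page_size(h));
}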
Example #8
void homecache_change_page_home(struct page *page, int order, int home)
{
	int i, pages = (1 << order);
	unsigned long kva;

	BUG_ON(PageHighMem(page));
	BUG_ON(page_count(page) > 1);
	BUG_ON(page_mapcount(page) != 0);
	kva = (unsigned long) page_address(page);
	flush_remote(0, HV_FLUSH_EVICT_L2, &cpu_cacheable_map,
		     kva, pages * PAGE_SIZE, PAGE_SIZE, cpu_online_mask,
		     NULL, 0);

	for (i = 0; i < pages; ++i, kva += PAGE_SIZE) {
		pte_t *ptep = virt_to_pte(NULL, kva);
		pte_t pteval = *ptep;
		BUG_ON(!pte_present(pteval) || pte_huge(pteval));
		*ptep = pte_set_home(pteval, home);
	}
}
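
A hedged sketch of a natural caller, modeled on the port's homecache_alloc_pages() (treat the exact body as an assumption): allocate fresh lowmem pages and re-home them before first use.

struct page *example_alloc_homed_pages(gfp_t gfp_mask, unsigned int order,
				       int home)
{
	struct page *page;

	BUG_ON(gfp_mask & __GFP_HIGHMEM);	/* the re-home helper needs lowmem */
	page = alloc_pages(gfp_mask, order);
	if (page)
		homecache_change_page_home(page, order, home);
	return page;
}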