Example #1
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
	pmd_t orig = *pmdp;

	*pmdp = pmd;

	if (mm == &init_mm)
		return;

	if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
		/*
		 * Note that this routine only sets pmds for THP pages.
		 * Hugetlb pages are handled elsewhere.  We need to check
		 * for huge zero page.  Huge zero pages are like hugetlb
		 * pages in that there is no RSS, but there is the need
		 * for TSB entries.  So, huge zero page counts go into
		 * hugetlb_pte_count.
		 */
		if (pmd_val(pmd) & _PAGE_PMD_HUGE) {
			if (is_huge_zero_page(pmd_page(pmd)))
				mm->context.hugetlb_pte_count++;
			else
				mm->context.thp_pte_count++;
		} else {
			if (is_huge_zero_page(pmd_page(orig)))
				mm->context.hugetlb_pte_count--;
			else
				mm->context.thp_pte_count--;
		}

		/* Do not try to allocate the TSB hash table if we
		 * don't have one already.  We have various locks held
		 * and thus we'll end up doing a GFP_KERNEL allocation
		 * in an atomic context.
		 *
		 * Instead, we let the first TLB miss on a hugepage
		 * take care of this.
		 */
	}

	if (!pmd_none(orig)) {
		addr &= HPAGE_MASK;
		if (pmd_trans_huge(orig)) {
			pte_t orig_pte = __pte(pmd_val(orig));
			bool exec = pte_exec(orig_pte);

			tlb_batch_add_one(mm, addr, exec, REAL_HPAGE_SHIFT);
			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
					  REAL_HPAGE_SHIFT);
		} else {
			tlb_batch_pmd_scan(mm, addr, orig);
		}
	}
}
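
The counter updates above only run when the mapping actually changes kind:
(pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE is nonzero exactly when the
_PAGE_PMD_HUGE bit differs between the old and the new PMD value, so a
huge-to-huge or small-to-small rewrite skips the whole block. A minimal
user-space sketch of that XOR idiom (the flag value is a made-up stand-in,
not the real sparc64 bit):

#include <stdio.h>

/* Made-up stand-in for _PAGE_PMD_HUGE; the real sparc64 bit differs. */
#define EXAMPLE_PMD_HUGE	(1UL << 8)

/* Nonzero only when the huge bit differs between the old and the new value,
 * i.e. the mapping transitioned to or from a huge PMD.
 */
static unsigned long huge_bit_changed(unsigned long old, unsigned long new)
{
	return (old ^ new) & EXAMPLE_PMD_HUGE;
}

int main(void)
{
	unsigned long none  = 0;
	unsigned long small = 0x1;
	unsigned long huge  = EXAMPLE_PMD_HUGE | 0x1;

	printf("none  -> huge : %d\n", !!huge_bit_changed(none, huge));	/* 1 */
	printf("huge  -> huge : %d\n", !!huge_bit_changed(huge, huge));	/* 0 */
	printf("small -> small: %d\n", !!huge_bit_changed(small, small));	/* 0 */
	return 0;
}

Only the first transition prints 1, which is why a rewrite that keeps the
mapping huge leaves hugetlb_pte_count and thp_pte_count untouched.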
Example #2
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
		   pte_t *ptep, pte_t orig, int fullmm)
{
	if (tlb_type != hypervisor &&
	    pte_dirty(orig)) {
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;

		if (!pfn_valid(pfn))
			goto no_cache_flush;

		page = pfn_to_page(pfn);
		if (PageReserved(page))
			goto no_cache_flush;

		/* A real file page? */
		mapping = page_mapping(page);
		if (!mapping)
			goto no_cache_flush;

		paddr = (unsigned long) page_address(page);
		if ((paddr ^ vaddr) & (1 << 13))
			flush_dcache_page_all(mm, page);
	}

no_cache_flush:
	if (!fullmm)
		tlb_batch_add_one(mm, vaddr, pte_exec(orig));
}
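
The flush test (paddr ^ vaddr) & (1 << 13) reflects sparc64's virtually
indexed D-cache: with 8 KB base pages, bit 13 of the virtual address selects
the cache alias ("color"), so the page only needs an explicit flush when the
kernel linear mapping and the user mapping fall in different colors. A small
stand-alone sketch of that check (addresses are illustrative only):

#include <stdio.h>

/* sparc64 base pages are 8 KB, so bit 13 selects the D-cache alias color. */
#define EXAMPLE_PAGE_SHIFT	13

/* Nonzero when the kernel-side address and the user virtual address land in
 * different alias colors, i.e. dirty lines written through one mapping would
 * not be seen through the other without a flush.
 */
static unsigned long needs_alias_flush(unsigned long kaddr, unsigned long uaddr)
{
	return (kaddr ^ uaddr) & (1UL << EXAMPLE_PAGE_SHIFT);
}

int main(void)
{
	unsigned long kaddr = 0x2000;	/* illustrative kernel-side address */

	printf("%d\n", !!needs_alias_flush(kaddr, 0x400000));	/* colors differ: 1 */
	printf("%d\n", !!needs_alias_flush(kaddr, 0x402000));	/* same color:    0 */
	return 0;
}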
Example #3
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                pmd_t *pmdp, pmd_t pmd)
{
    pmd_t orig = *pmdp;

    *pmdp = pmd;

    if (mm == &init_mm)
        return;

    if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
        if (pmd_val(pmd) & _PAGE_PMD_HUGE)
            mm->context.huge_pte_count++;
        else
            mm->context.huge_pte_count--;

        /* Do not try to allocate the TSB hash table if we
         * don't have one already.  We have various locks held
         * and thus we'll end up doing a GFP_KERNEL allocation
         * in an atomic context.
         *
         * Instead, we let the first TLB miss on a hugepage
         * take care of this.
         */
    }

    if (!pmd_none(orig)) {
        addr &= HPAGE_MASK;
        if (pmd_trans_huge(orig)) {
            pte_t orig_pte = __pte(pmd_val(orig));
            bool exec = pte_exec(orig_pte);

            tlb_batch_add_one(mm, addr, exec);
            tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec);
        } else {
            tlb_batch_pmd_scan(mm, addr, orig);
        }
    }
}
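
This older variant keeps a single huge_pte_count and the three-argument
tlb_batch_add_one(), but the address handling matches the newer version: the
address is rounded down with HPAGE_MASK and the huge mapping is flushed as
two REAL_HPAGE_SIZE halves, since a sparc64 hugepage of HPAGE_SIZE (8 MB) is
backed by two 4 MB hardware TLB entries. The sketch below only walks through
that address arithmetic (the constants are illustrative stand-ins for the
sparc64 definitions):

#include <stdio.h>

/* Illustrative stand-ins mirroring the sparc64 values: an 8 MB hugepage is
 * split into two 4 MB hardware TLB entries.
 */
#define EXAMPLE_HPAGE_SHIFT		23
#define EXAMPLE_REAL_HPAGE_SHIFT	22
#define EXAMPLE_HPAGE_SIZE		(1UL << EXAMPLE_HPAGE_SHIFT)
#define EXAMPLE_HPAGE_MASK		(~(EXAMPLE_HPAGE_SIZE - 1))
#define EXAMPLE_REAL_HPAGE_SIZE		(1UL << EXAMPLE_REAL_HPAGE_SHIFT)

int main(void)
{
	unsigned long addr = 0x12b45678UL;	/* arbitrary address inside the hugepage */

	addr &= EXAMPLE_HPAGE_MASK;		/* round down to the hugepage boundary */
	printf("first  flush at 0x%lx\n", addr);
	printf("second flush at 0x%lx\n", addr + EXAMPLE_REAL_HPAGE_SIZE);
	return 0;
}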
Example #4
static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
			       pmd_t pmd, bool exec)
{
	unsigned long end;
	pte_t *pte;

	pte = pte_offset_map(&pmd, vaddr);
	end = vaddr + HPAGE_SIZE;
	while (vaddr < end) {
		if (pte_val(*pte) & _PAGE_VALID)
			tlb_batch_add_one(mm, vaddr, exec);
		pte++;
		vaddr += PAGE_SIZE;
	}
	pte_unmap(pte);
}
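
tlb_batch_pmd_scan() is the non-huge path taken by set_pmd_at() above: when a
PMD that maps ordinary base pages is replaced, every PTE slot under the old
PMD is visited and a flush is queued for each valid entry. The sketch below
mirrors the shape of that loop in plain user-space C (the constants and the
valid bit are illustrative stand-ins):

#include <stdio.h>

#define EXAMPLE_PAGE_SHIFT	13			/* 8 KB base pages */
#define EXAMPLE_PAGE_SIZE	(1UL << EXAMPLE_PAGE_SHIFT)
#define EXAMPLE_HPAGE_SIZE	(1UL << 23)		/* 8 MB region under one PMD */
#define EXAMPLE_PAGE_VALID	(1UL << 0)		/* made-up valid bit */

#define PTES_PER_HPAGE		(EXAMPLE_HPAGE_SIZE / EXAMPLE_PAGE_SIZE)

/* Walk every base-page slot under the PMD and count how many valid entries
 * would be queued (tlb_batch_add_one() in the kernel version).
 */
static unsigned long scan(const unsigned long *ptes, unsigned long vaddr)
{
	unsigned long end = vaddr + EXAMPLE_HPAGE_SIZE;
	unsigned long queued = 0;

	while (vaddr < end) {
		if (*ptes & EXAMPLE_PAGE_VALID)
			queued++;
		ptes++;
		vaddr += EXAMPLE_PAGE_SIZE;
	}
	return queued;
}

int main(void)
{
	static unsigned long ptes[PTES_PER_HPAGE];	/* all slots invalid by default */

	ptes[0] = EXAMPLE_PAGE_VALID;
	ptes[PTES_PER_HPAGE - 1] = EXAMPLE_PAGE_VALID;

	printf("slots per region: %lu\n", PTES_PER_HPAGE);	/* 1024 */
	printf("flushes queued  : %lu\n", scan(ptes, 0));	/* 2 */
	return 0;
}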