Code example #1
File: hugetlbpage.c Project: CCNITSilchar/linux
void huge_ptep_set_wrprotect(struct mm_struct *mm,
			     unsigned long addr, pte_t *ptep)
{
	unsigned long pfn, dpfn;
	pgprot_t hugeprot;
	int ncontig, i;
	size_t pgsize;
	pte_t pte;

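	/* Not a contiguous huge PTE: write-protect the single entry in place. */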
	if (!pte_cont(READ_ONCE(*ptep))) {
		ptep_set_wrprotect(mm, addr, ptep);
		return;
	}

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);
	dpfn = pgsize >> PAGE_SHIFT;

	pte = get_clear_flush(mm, addr, ptep, pgsize, ncontig);
	pte = pte_wrprotect(pte);

	hugeprot = pte_pgprot(pte);
	pfn = pte_pfn(pte);

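	/* Rewrite each entry of the contiguous range with the write-protected attributes. */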
	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
		set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
}
Code example #2
File: hugetlbpage.c Project: CCNITSilchar/linux
int huge_ptep_set_access_flags(struct vm_area_struct *vma,
			       unsigned long addr, pte_t *ptep,
			       pte_t pte, int dirty)
{
	int ncontig, i, changed = 0;
	size_t pgsize = 0;
	unsigned long pfn = pte_pfn(pte), dpfn;
	pgprot_t hugeprot;
	pte_t orig_pte;

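	/* Not a contiguous huge PTE: let the generic helper update the single entry. */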
	if (!pte_cont(pte))
		return ptep_set_access_flags(vma, addr, ptep, pte, dirty);

	ncontig = find_num_contig(vma->vm_mm, addr, ptep, &pgsize);
	dpfn = pgsize >> PAGE_SHIFT;

	orig_pte = get_clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig);
	if (!pte_same(orig_pte, pte))
		changed = 1;

	/* Make sure we don't lose the dirty state */
	if (pte_dirty(orig_pte))
		pte = pte_mkdirty(pte);

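	/* Re-install the whole contiguous range with the updated protection bits. */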
	hugeprot = pte_pgprot(pte);
	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
		set_pte_at(vma->vm_mm, addr, ptep, pfn_pte(pfn, hugeprot));

	return changed;
}
Code example #3
File: hugetlbpage.c Project: CCNITSilchar/linux
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, pte_t pte)
{
	size_t pgsize;
	int i;
	int ncontig;
	unsigned long pfn, dpfn;
	pgprot_t hugeprot;

	/*
	 * Code needs to be expanded to handle huge swap and migration
	 * entries. Needed for HUGETLB and MEMORY_FAILURE.
	 */
	WARN_ON(!pte_present(pte));

	if (!pte_cont(pte)) {
		set_pte_at(mm, addr, ptep, pte);
		return;
	}

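	/* Contiguous huge PTE: flush the existing entries, then rewrite each one. */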
	ncontig = find_num_contig(mm, addr, ptep, &pgsize);
	pfn = pte_pfn(pte);
	dpfn = pgsize >> PAGE_SHIFT;
	hugeprot = pte_pgprot(pte);

	clear_flush(mm, addr, ptep, pgsize, ncontig);

	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
		set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
}
Code example #4
File: dump_pagetables.c Project: 03199618/linux
static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr,
							unsigned long P)
{
	int i;
	pte_t *start;

	start = (pte_t *) pmd_page_vaddr(addr);
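	/* Walk every PTE mapped below this PMD and report its protection bits. */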
	for (i = 0; i < PTRS_PER_PTE; i++) {
		pgprot_t prot = pte_pgprot(*start);

		st->current_address = normalize_addr(P + i * PTE_LEVEL_MULT);
		note_page(m, st, prot, 4);
		start++;
	}
}
Code example #5
File: pgtable_32.c Project: OpenChannelSSD/linux
void __iomem *
ioremap_prot(phys_addr_t addr, unsigned long size, unsigned long flags)
{
	pte_t pte = __pte(flags);

	/* writeable implies dirty for kernel addresses */
	if (pte_write(pte))
		pte = pte_mkdirty(pte);

	/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
	pte = pte_exprotect(pte);
	pte = pte_mkprivileged(pte);

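	/* Hand the sanitized protection bits to the common ioremap path. */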
	return __ioremap_caller(addr, size, pte_pgprot(pte), __builtin_return_address(0));
}
Code example #6
File: hypervisor.c Project: AsadRaza/OCTEON-Linux
void __init adjust_boot_vcpu_info(void)
{
	unsigned long lpfn, rpfn, lmfn, rmfn;
	pte_t *lpte, *rpte;
	unsigned int level;
	mmu_update_t mmu[2];

	/*
	 * setup_vcpu_info() cannot be used more than once for a given (v)CPU,
	 * hence we must swap the underlying MFNs of the two pages holding old
	 * and new vcpu_info of the boot CPU.
	 *
	 * Do *not* use __get_cpu_var() or percpu_{write,...}() here, as the per-
	 * CPU segment didn't get reloaded yet. Using percpu_read(), as in
	 * arch_use_lazy_mmu_mode(), though undesirable, is safe except for the
	 * accesses to variables that were updated in setup_percpu_areas().
	 */
	lpte = lookup_address((unsigned long)&per_cpu_var(vcpu_info)
			      + (__per_cpu_load - __per_cpu_start),
			      &level);
	rpte = lookup_address((unsigned long)&per_cpu(vcpu_info, 0), &level);
	BUG_ON(!lpte || !(pte_flags(*lpte) & _PAGE_PRESENT));
	BUG_ON(!rpte || !(pte_flags(*rpte) & _PAGE_PRESENT));
	lmfn = __pte_mfn(*lpte);
	rmfn = __pte_mfn(*rpte);

	if (lmfn == rmfn)
		return;

	lpfn = mfn_to_local_pfn(lmfn);
	rpfn = mfn_to_local_pfn(rmfn);

	printk(KERN_INFO
	       "Swapping MFNs for PFN %lx and %lx (MFN %lx and %lx)\n",
	       lpfn, rpfn, lmfn, rmfn);

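	/* Swap the MFNs of the two PTEs while keeping each mapping's original protections. */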
	xen_l1_entry_update(lpte, pfn_pte_ma(rmfn, pte_pgprot(*lpte)));
	xen_l1_entry_update(rpte, pfn_pte_ma(lmfn, pte_pgprot(*rpte)));
#ifdef CONFIG_X86_64
	if (HYPERVISOR_update_va_mapping((unsigned long)__va(lpfn<<PAGE_SHIFT),
					 pfn_pte_ma(rmfn, PAGE_KERNEL_RO), 0))
		BUG();
#endif
	if (HYPERVISOR_update_va_mapping((unsigned long)__va(rpfn<<PAGE_SHIFT),
					 pfn_pte_ma(lmfn, PAGE_KERNEL),
					 UVMF_TLB_FLUSH))
		BUG();

	set_phys_to_machine(lpfn, rmfn);
	set_phys_to_machine(rpfn, lmfn);

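	/* Update the hypervisor's machine-to-physical (M2P) entries for both swapped frames. */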
	mmu[0].ptr = ((uint64_t)lmfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
	mmu[0].val = rpfn;
	mmu[1].ptr = ((uint64_t)rmfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
	mmu[1].val = lpfn;
	if (HYPERVISOR_mmu_update(mmu, 2, NULL, DOMID_SELF))
		BUG();

	/*
	 * Copy over all contents of the page just replaced, except for the
	 * vcpu_info itself, as it may have got updated after having been
	 * copied from __per_cpu_load[].
	 */
	memcpy(__va(rpfn << PAGE_SHIFT),
	       __va(lpfn << PAGE_SHIFT),
	       (unsigned long)&per_cpu_var(vcpu_info) & (PAGE_SIZE - 1));
	level = (unsigned long)(&per_cpu_var(vcpu_info) + 1) & (PAGE_SIZE - 1);
	if (level)
		memcpy(__va(rpfn << PAGE_SHIFT) + level,
		       __va(lpfn << PAGE_SHIFT) + level,
		       PAGE_SIZE - level);
}