Example #1
static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
				 unsigned long virt, phys_addr_t size,
				 pgprot_t prot,
				 phys_addr_t (*alloc)(void))
{
	init_pgd(pgd_offset_raw(pgdir, virt), phys, virt, size, prot, alloc);
}
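For reference, pgd_offset_raw simply indexes into the supplied top-level
table. A minimal sketch of its arm64 definition from the same era (the
actual PGDIR_SHIFT and PTRS_PER_PGD values depend on the configured page
size and VA bits):

/* Sketch of the arm64 helpers, roughly as in <asm/pgtable.h>: find the
 * entry for addr inside an arbitrary pgd table rather than the one
 * owned by a mm_struct. */
#define pgd_index(addr)		(((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#define pgd_offset_raw(pgd, addr)	((pgd) + pgd_index(addr))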
Example #2
/*
 * Copy the current shadow region into a new pgdir.
 */
void __init kasan_copy_shadow(pgd_t *pgdir)
{
	pgd_t *pgd, *pgd_new, *pgd_end;

	pgd = pgd_offset_k(KASAN_SHADOW_START);
	pgd_end = pgd_offset_k(KASAN_SHADOW_END);
	pgd_new = pgd_offset_raw(pgdir, KASAN_SHADOW_START);
	do {
		set_pgd(pgd_new, *pgd);
	} while (pgd++, pgd_new++, pgd != pgd_end);
}
Example #3
/*
 * Copy the current shadow region into a new pgdir.
 */
void __init kasan_copy_shadow(pgd_t *pgdir)
{
	pgd_t *pgdp, *pgdp_new, *pgdp_end;

	pgdp = pgd_offset_k(KASAN_SHADOW_START);
	pgdp_end = pgd_offset_k(KASAN_SHADOW_END);
	pgdp_new = pgd_offset_raw(pgdir, KASAN_SHADOW_START);
	do {
		set_pgd(pgdp_new, READ_ONCE(*pgdp));
	} while (pgdp++, pgdp_new++, pgdp != pgdp_end);
}
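Examples #2 and #3 are the same function before and after the arm64 tree
switched to READ_ONCE/WRITE_ONCE for page-table accesses (and to the
*p-suffixed pointer naming). A plain *pgd dereference lets the compiler
merge, tear, or re-issue the load of a live table entry; READ_ONCE forces
exactly one access. For word-sized objects it boils down to a volatile
read, roughly:

/* Illustrative sketch only; the real macro lives in <linux/compiler.h>
 * and handles more cases. The volatile access forbids the compiler from
 * caching or repeating the load of the entry. */
#define READ_ONCE_SKETCH(x)	(*(volatile typeof(x) *)&(x))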
Example #4
static int copy_page_tables(pgd_t *dst_pgdp, unsigned long start,
			    unsigned long end)
{
	unsigned long next;
	unsigned long addr = start;
	pgd_t *src_pgdp = pgd_offset_k(start);

	dst_pgdp = pgd_offset_raw(dst_pgdp, start);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none(READ_ONCE(*src_pgdp)))
			continue;
		if (copy_pud(dst_pgdp, src_pgdp, addr, next))
			return -ENOMEM;
	} while (dst_pgdp++, src_pgdp++, addr = next, addr != end);

	return 0;
}
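This walker clones the kernel's linear-map entries into a freshly
allocated pgdir for hibernate. From memory, the caller in
swsusp_arch_resume invokes it roughly as below; treat tmp_pg_dir and the
exact argument values as an assumption, since they vary across kernel
versions:

/* Hypothetical caller sketch: copy the whole linear map, the end
 * address 0 wrapping to the top of the VA space. tmp_pg_dir is assumed
 * to be a zeroed, freshly allocated page. */
rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
if (rc)
	return rc;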
Example #5
/*
 * Copies length bytes, starting at src_start, into a new page,
 * performs cache maintenance, then maps it at the specified low
 * address as executable.
 *
 * This is used by hibernate to copy the code it needs to execute when
 * overwriting the kernel text. This function generates a new set of page
 * tables, which it loads into ttbr0.
 *
 * Length is provided as we probably only want 4K of data, even on a 64K
 * page system.
 */
static int create_safe_exec_page(void *src_start, size_t length,
				 unsigned long dst_addr,
				 phys_addr_t *phys_dst_addr,
				 void *(*allocator)(gfp_t mask),
				 gfp_t mask)
{
	int rc = 0;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	unsigned long dst = (unsigned long)allocator(mask);

	if (!dst) {
		rc = -ENOMEM;
		goto out;
	}

	memcpy((void *)dst, src_start, length);
	flush_icache_range(dst, dst + length);

	pgdp = pgd_offset_raw(allocator(mask), dst_addr);
	if (pgd_none(READ_ONCE(*pgdp))) {
		pudp = allocator(mask);
		if (!pudp) {
			rc = -ENOMEM;
			goto out;
		}
		pgd_populate(&init_mm, pgdp, pudp);
	}

	pudp = pud_offset(pgdp, dst_addr);
	if (pud_none(READ_ONCE(*pudp))) {
		pmdp = allocator(mask);
		if (!pmdp) {
			rc = -ENOMEM;
			goto out;
		}
		pud_populate(&init_mm, pudp, pmdp);
	}

	pmdp = pmd_offset(pudp, dst_addr);
	if (pmd_none(READ_ONCE(*pmdp))) {
		ptep = allocator(mask);
		if (!ptep) {
			rc = -ENOMEM;
			goto out;
		}
		pmd_populate_kernel(&init_mm, pmdp, ptep);
	}

	ptep = pte_offset_kernel(pmdp, dst_addr);
	set_pte(ptep, pfn_pte(virt_to_pfn(dst), PAGE_KERNEL_EXEC));

	/*
	 * Load our new page tables. A strict BBM approach requires that we
	 * ensure that TLBs are free of any entries that may overlap with the
	 * global mappings we are about to install.
	 *
	 * For a real hibernate/resume cycle TTBR0 currently points to a zero
	 * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI
	 * runtime services), while for a userspace-driven test_resume cycle it
	 * points to userspace page tables (and we must point it at a zero page
	 * ourselves). Elsewhere we only (un)install the idmap with preemption
	 * disabled, so T0SZ should be as required regardless.
	 */
	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	write_sysreg(phys_to_ttbr(virt_to_phys(pgdp)), ttbr0_el1);
	isb();

	*phys_dst_addr = virt_to_phys((void *)dst);

out:
	return rc;
}
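Hibernate uses this to relocate its exit trampoline out of the kernel
text it is about to overwrite. The call in swsusp_arch_resume looks
roughly like the sketch below; the symbol names and argument order are
recalled from memory, so treat them as an assumption:

/* Approximate caller sketch: copy the hibernate_exit text into a safe
 * page and map it, executable, at its link-time address. */
exit_size = __hibernate_exit_text_end - __hibernate_exit_text_start;
rc = create_safe_exec_page(__hibernate_exit_text_start, exit_size,
			   (unsigned long)hibernate_exit,
			   &phys_hibernate_exit,
			   (void *)get_safe_page, GFP_ATOMIC);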