Example #1
/* This is called just after a mm has been created, but it has not
   been used yet.  We need to make sure that all of its pagetable pages
   are read-only so that the pagetable can be pinned. */
void xen_pgd_pin(pgd_t *pgd)
{
	struct multicall_space mcs;
	struct mmuext_op *op;

	xen_mc_batch();

	if (pgd_walk(pgd, pin_page, TASK_SIZE)) {
		/* re-enable interrupts for kmap_flush_unused */
		xen_mc_issue(0);
		kmap_flush_unused();
		xen_mc_batch();
	}

	mcs = __xen_mc_entry(sizeof(*op));
	op = mcs.args;

#ifdef CONFIG_X86_PAE
	op->cmd = MMUEXT_PIN_L3_TABLE;
#else
	op->cmd = MMUEXT_PIN_L2_TABLE;
#endif
	op->arg1.mfn = pfn_to_mfn(PFN_DOWN(__pa(pgd)));
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(0);
}
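
The pin_page callback passed to pgd_walk() above is not shown. The sketch below is a rough reconstruction under assumptions: the callback is assumed to receive each pagetable page plus UVMF flags, queue a read-only remapping on the current multicall batch, and return nonzero when an unpinned highmem page means kmaps must be flushed. The name and exact signature are assumptions, not verbatim source.

static int pin_page(struct page *page, unsigned flags)
{
	/* Hedged sketch: mark the page pinned and, for lowmem pages,
	   queue a hypercall remapping it read-only on the current batch. */
	unsigned was_pinned = test_and_set_bit(PG_pinned, &page->flags);
	int flush = 0;

	if (was_pinned)
		flush = 0;		/* already pinned, nothing to do */
	else if (PageHighMem(page))
		flush = 1;		/* unpinned highmem page: caller must flush kmaps */
	else {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		struct multicall_space mcs = __xen_mc_entry(0);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL_RO), flags);
	}

	return flush;
}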
Example #2
/* Release a pagetable's pages back to normal RW */
static void xen_pgd_unpin(pgd_t *pgd)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	xen_mc_batch();

	mcs = __xen_mc_entry(sizeof(*op));

	op = mcs.args;
	op->cmd = MMUEXT_UNPIN_TABLE;
	op->arg1.mfn = pfn_to_mfn(PFN_DOWN(__pa(pgd)));

	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	pgd_walk(pgd, unpin_page, TASK_SIZE);

	xen_mc_issue(0);
}
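
The unpin_page callback is likewise not shown; a plausible inverse of the pin_page sketch above, under the same assumed signature, clears the pinned bit and restores a normal writable mapping:

static int unpin_page(struct page *page, unsigned flags)
{
	/* Hedged sketch: restore a normal RW kernel mapping for a page
	   that was previously remapped read-only by pin_page. */
	unsigned was_pinned = test_and_clear_bit(PG_pinned, &page->flags);

	if (was_pinned && !PageHighMem(page)) {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		struct multicall_space mcs = __xen_mc_entry(0);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL), flags);
	}

	return 0;	/* unpinning never requires a kmap flush */
}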
Example #3
/* The init_mm pagetable is effectively pinned from the moment it is
   created, so only the per-page bookkeeping needs to be done here. */
void __init xen_mark_init_mm_pinned(void)
{
	pgd_walk(init_mm.pgd, mark_pinned, FIXADDR_TOP);
}
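
mark_pinned is not shown either. Because init_mm's pagetable is already read-only and pinned at this point, a minimal sketch, assuming the same callback signature as above, only records the state:

static __init int mark_pinned(struct page *page, unsigned flags)
{
	/* Hedged sketch: bookkeeping only, no hypercall needed. */
	set_bit(PG_pinned, &page->flags);
	return 0;
}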
Example #4
/* Undo __pgd_pin: unpin the pgd from the hypervisor, remap its
   pagetable pages back to normal RW, and clear the pinned flag. */
static void __pgd_unpin(pgd_t *pgd)
{
	xen_pgd_unpin(__pa(pgd));
	pgd_walk(pgd, PAGE_KERNEL);
	clear_bit(PG_pinned, &virt_to_page(pgd)->flags);
}
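
A caller usually wants to unpin only when the pgd is actually pinned. The guard below is a hypothetical helper built on the PG_pinned bit that __pgd_unpin() clears; the name pgd_test_and_unpin is an assumption:

static void pgd_test_and_unpin(pgd_t *pgd)
{
	/* Hypothetical: skip the unpin work for never-pinned pagetables. */
	if (test_bit(PG_pinned, &virt_to_page(pgd)->flags))
		__pgd_unpin(pgd);
}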
Example #5
/* Remap the pagetable's pages read-only, pin the pgd with the
   hypervisor, and record the pinned state in the pgd's page flags. */
static void __pgd_pin(pgd_t *pgd)
{
	pgd_walk(pgd, PAGE_KERNEL_RO);
	xen_pgd_pin(__pa(pgd));
	set_bit(PG_pinned, &virt_to_page(pgd)->flags);
}
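
As a usage illustration, pinning and unpinning are normally done per mm under its page table lock. The wrappers below are a hedged sketch; the names mm_pin and mm_unpin and the locking choice are assumptions, not verbatim source:

void mm_pin(struct mm_struct *mm)
{
	/* Sketch: serialize against other pagetable updates while the
	   whole pagetable is made read-only and pinned. */
	spin_lock(&mm->page_table_lock);
	__pgd_pin(mm->pgd);
	spin_unlock(&mm->page_table_lock);
}

void mm_unpin(struct mm_struct *mm)
{
	spin_lock(&mm->page_table_lock);
	__pgd_unpin(mm->pgd);
	spin_unlock(&mm->page_table_lock);
}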