Example #1
/*
 * Set the page permissions for a particular virtual address.  If the
 * address is a vmalloc mapping (or other non-linear mapping), then
 * find the linear mapping of the page and also set its protections to
 * match.
 */
static void set_aliased_prot(void *v, pgprot_t prot)
{
	int level;
	pte_t *ptep;
	pte_t pte;
	unsigned long pfn;
	struct page *page;

	ptep = lookup_address((unsigned long)v, &level);
	BUG_ON(ptep == NULL);

	pfn = pte_pfn(*ptep);
	page = pfn_to_page(pfn);

	pte = pfn_pte(pfn, prot);

	if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
		BUG();

	if (!PageHighMem(page)) {
		void *av = __va(PFN_PHYS(pfn));

		if (av != v)
			if (HYPERVISOR_update_va_mapping((unsigned long)av, pte, 0))
				BUG();
	} else
		kmap_flush_unused();
}
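A hedged sketch of a typical caller (illustrative only; in the Linux Xen port this helper is applied to LDT pages, and LDT_ENTRY_SIZE/PAGE_KERNEL_RO are the usual kernel definitions):

static void example_protect_ldt(struct desc_struct *ldt, unsigned int entries)
{
	const unsigned int per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
	unsigned int i;

	/* Write-protect every page backing the LDT, in all of its
	 * mappings, before handing it to the hypervisor. */
	for (i = 0; i < entries; i += per_page)
		set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
}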
Example #2
/*
 * Set the page permissions for a particular virtual address.  If the
 * address is a vmalloc mapping (or other non-linear mapping), then
 * find the linear mapping of the page and also set its protections to
 * match.
 */
static void set_aliased_prot(void *v, pgprot_t prot)
{
	int level;
	pte_t *ptep;
	pte_t pte;
	unsigned long pfn;
	struct page *page;
	unsigned char dummy;

	ptep = lookup_address((unsigned long)v, &level);
	BUG_ON(ptep == NULL);

	pfn = pte_pfn(*ptep);
	page = pfn_to_page(pfn);

	pte = pfn_pte(pfn, prot);

	/*
	 * Careful: update_va_mapping() will fail if the virtual address
	 * we're poking isn't populated in the page tables.  We don't
	 * need to worry about the direct map (that's always in the page
	 * tables), but we need to be careful about vmap space.  In
	 * particular, the top level page table can lazily propagate
	 * entries between processes, so if we've switched mms since we
	 * vmapped the target in the first place, we might not have the
	 * top-level page table entry populated.
	 *
	 * We disable preemption because we want the same mm active when
	 * we probe the target and when we issue the hypercall.  We'll
	 * have the same nominal mm, but if we're a kernel thread, lazy
	 * mm dropping could change our pgd.
	 *
	 * Out of an abundance of caution, this uses __get_user() to fault
	 * in the target address just in case there's some obscure case
	 * in which the target address isn't readable.
	 */

	preempt_disable();

	pagefault_disable();	/* Avoid warnings due to being atomic. */
	__get_user(dummy, (unsigned char __user __force *)v);
	pagefault_enable();

	if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
		BUG();

	if (!PageHighMem(page)) {
		void *av = __va(PFN_PHYS(pfn));

		if (av != v)
			if (HYPERVISOR_update_va_mapping((unsigned long)av, pte, 0))
				BUG();
	} else
		kmap_flush_unused();

	preempt_enable();
}
Example #3
/*
 * Add a mapping for the machine page at the given virtual address.
 */
static void
map_ma_at_va(maddr_t ma, native_ptr_t va, uint_t level)
{
	x86pte_t *ptep;
	x86pte_t pteval;

	pteval = ma | pte_bits;
	if (level > 0)
		pteval |= PT_PAGESIZE;
	if (va >= target_kernel_text && pge_support)
		pteval |= PT_GLOBAL;

	if (map_debug && ma != va)
		dboot_printf("mapping ma=0x%" PRIx64 " va=0x%" PRIx64
		    " pte=0x%" PRIx64 " l=%d\n",
		    (uint64_t)ma, (uint64_t)va, pteval, level);

#if defined(__xpv)
	/*
	 * see if we can avoid find_pte() on the hypervisor
	 */
	if (HYPERVISOR_update_va_mapping(va, pteval,
	    UVMF_INVLPG | UVMF_LOCAL) == 0)
		return;
#endif

	/*
	 * Find the pte that will map this address. This creates any
	 * missing intermediate level page tables
	 */
	ptep = find_pte(va, NULL, level, 0);

	/*
	 * When paravirtualized, we must use hypervisor calls to modify the
	 * PTE, since paging is active. On real hardware we just write to
	 * the pagetables which aren't in use yet.
	 */
#if defined(__xpv)
	ptep = ptep;	/* shut lint up */
	if (HYPERVISOR_update_va_mapping(va, pteval, UVMF_INVLPG | UVMF_LOCAL))
		dboot_panic("mmu_update failed-map_pa_at_va va=0x%" PRIx64
		    " l=%d ma=0x%" PRIx64 ", pte=0x%" PRIx64 "",
		    (uint64_t)va, level, (uint64_t)ma, pteval);
#else
	if (va < 1024 * 1024)
		pteval |= PT_NOCACHE;		/* for video RAM */
	if (pae_support)
		*ptep = pteval;
	else
		*((x86pte32_t *)ptep) = (x86pte32_t)pteval;
#endif
}
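A hedged call sketch (the wrapper and values are illustrative; pa_to_ma is the dboot helper used elsewhere in this file set):

static void example_map_one_page(paddr_t pa, native_ptr_t va)
{
	/* Level 0 selects the smallest (4 KB) page size. */
	map_ma_at_va(pa_to_ma(pa), va, 0);
}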
Example #4
/*
 * load_gdt for early boot, when the gdt is only mapped once
 */
static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
{
	unsigned long va = dtr->address;
	unsigned int size = dtr->size + 1;
	unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
	unsigned long frames[pages];
	int f;

	/*
	 * A GDT can be up to 64k in size, which corresponds to 8192
	 * 8-byte entries, or 16 4k pages.
	 */

	BUG_ON(size > 65536);
	BUG_ON(va & ~PAGE_MASK);

	for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
		pte_t pte;
		unsigned long pfn, mfn;

		pfn = virt_to_pfn(va);
		mfn = pfn_to_mfn(pfn);

		pte = pfn_pte(pfn, PAGE_KERNEL_RO);

		if (HYPERVISOR_update_va_mapping((unsigned long)va, pte, 0))
			BUG();

		frames[f] = mfn;
	}

	if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct)))
		BUG();
}
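A hedged usage sketch (boot_gdt and example_switch_gdt are assumptions): the caller hands in a desc_ptr whose size field is the table size minus one, per the x86 descriptor-table convention.

static struct desc_struct boot_gdt[GDT_ENTRIES] __page_aligned_bss;

static void __init example_switch_gdt(void)
{
	struct desc_ptr gdt_descr = {
		.address = (unsigned long)boot_gdt,
		.size    = sizeof(boot_gdt) - 1,	/* limit, not length */
	};

	xen_load_gdt_boot(&gdt_descr);
}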
Example #5
void grants_init(void)
{
	unsigned long frames[NR_GRANT_PAGES];

	gnttab_setup_table_t op;
	op.dom = DOMID_SELF;
	op.nr_frames = NR_GRANT_PAGES;
	set_xen_guest_handle(op.frame_list, frames);
	int rs = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &op, 1);
	if (rs < 0)
		fatal_error("grants_init: setup_table failed: %d\n", rs);

	for (int i = NR_GRANT_ENTRIES-1; i >= NR_RESERVED_ENTRIES; i--)
	{
		free_list[i] = free_entry;
		free_entry = i;
	}

	grant_entries = mm_alloc_pages(NR_GRANT_PAGES);
	if (grant_entries == 0)
		fatal_error("grants_init: grant entries page allocation failed\n");
	for (int i = 0; i < NR_GRANT_PAGES; i++)
	{
		unsigned long ma_grant_table = frames[i] << PAGE_SHIFT;
		rs = HYPERVISOR_update_va_mapping((unsigned long)grant_entries + i*PAGE_SIZE,
			__pte(ma_grant_table | 7), UVMF_INVLPG);
		if (rs < 0)
			fatal_error("grants_init: update mapping failed: %d\n", rs);
	}
}
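Once the table is mapped, entries can be handed out. A minimal sketch, assuming grant_entries points at v1 grant entries (flags/domid/frame) and reusing the free list built above; example_grant_page is an assumption, and the wmb() follows the usual publish-after-fill pattern:

static grant_ref_t example_grant_page(domid_t peer, unsigned long pfn)
{
	grant_ref_t ref = free_entry;		/* pop the free list */
	free_entry = free_list[ref];

	grant_entries[ref].domid = peer;
	grant_entries[ref].frame = pfn_to_mfn(pfn);
	wmb();					/* publish data before flags */
	grant_entries[ref].flags = GTF_permit_access;

	return ref;
}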
Example #6
int gnttab_post_map_adjust(const struct gnttab_map_grant_ref *map, unsigned int count)
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < count && rc == 0; ++i, ++map) {
		pte_t pte;

		if (!(map->flags & GNTMAP_host_map)
		    || !(map->flags & GNTMAP_application_map))
			continue;

#ifdef CONFIG_X86
		pte = __pte_ma((map->dev_bus_addr | _PAGE_PRESENT | _PAGE_USER
				| _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_NX
				| _PAGE_SPECIAL)
			       & __supported_pte_mask);
#else
#error Architecture not yet supported.
#endif
		if (!(map->flags & GNTMAP_readonly))
			pte = pte_mkwrite(pte);

		if (map->flags & GNTMAP_contains_pte) {
			mmu_update_t u;

			u.ptr = map->host_addr;
			u.val = __pte_val(pte);
			rc = HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
		} else
			rc = HYPERVISOR_update_va_mapping(map->host_addr, pte, 0);
	}

	return rc;
}
Example #7
/* Main kernel entry point, called by trampoline */
void start_kernel(start_info_t * start_info)
{
	/* Define hypervisor upcall entry points */
	HYPERVISOR_set_callbacks(
		FLAT_KERNEL_CS, (unsigned long)hypervisor_callback,
		FLAT_KERNEL_CS, (unsigned long)failsafe_callback);
	/* Map the shared info page */
	HYPERVISOR_update_va_mapping((unsigned long) shared_info,
			__pte(start_info->shared_info | 7),	/* present|rw|user */
			UVMF_INVLPG);
	/* Initialise the console */
	console_init(start_info);
	/* Write a message to check that it worked */
	console_write("Hello world!\n\r");
	console_write("Xen magic string: ");
	console_write(start_info->magic);
	console_write("\n\r");

	/* Set up the XenStore driver */
	xenstore_init(start_info);
	/* Test the store */
	xenstore_test();
	/* Flush the console buffer */
	console_flush();
	/* Exit, since we don't know how to do anything else */
}
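The excerpt assumes a page-aligned placeholder that gives the hypercall a virtual address to re-point at the real shared info frame; a minimal sketch of that declaration (an assumption, not part of the excerpt):

/* One page of BSS reserved as the mapping target for shared_info. */
uint8_t shared_info[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));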
Example #8
static void __meminit early_make_page_readonly(void *va, unsigned int feature)
{
	unsigned long addr, _va = (unsigned long)va;
	pte_t pte, *ptep;
	unsigned long *page = (unsigned long *) init_level4_pgt;

	BUG_ON(after_bootmem);

	if (xen_feature(feature))
		return;

	addr = (unsigned long) page[pgd_index(_va)];
	addr_to_page(addr, page);

	addr = page[pud_index(_va)];
	addr_to_page(addr, page);

	addr = page[pmd_index(_va)];
	addr_to_page(addr, page);

	ptep = (pte_t *) &page[pte_index(_va)];

	pte.pte = ptep->pte & ~_PAGE_RW;
	if (HYPERVISOR_update_va_mapping(_va, pte, 0))
		BUG();
}
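A hedged usage sketch (the wrapper is an assumption): boot code write-protects the initial page-table pages before pinning them, and the helper internally becomes a no-op when Xen advertises writable page tables.

static void __meminit example_protect_boot_pgt(void)
{
	/* Skipped internally when XENFEAT_writable_page_tables is set. */
	early_make_page_readonly(init_level4_pgt,
				 XENFEAT_writable_page_tables);
}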
Example #9
void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, pte_t pteval)
{
	ADD_STATS(set_pte_at, 1);
//	ADD_STATS(set_pte_at_pinned, xen_page_pinned(ptep));
	ADD_STATS(set_pte_at_current, mm == current->mm);
	ADD_STATS(set_pte_at_kernel, mm == &init_mm);

	if (mm == current->mm || mm == &init_mm) {
		if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
			struct multicall_space mcs;
			mcs = xen_mc_entry(0);

			MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
			ADD_STATS(set_pte_at_batched, 1);
			xen_mc_issue(PARAVIRT_LAZY_MMU);
			goto out;
		} else
			if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
				goto out;
	}
	xen_set_pte(ptep, pteval);

out:	return;
}
Example #10
void
suspend_gnttab(void)
{
    int i;

    for (i = 0; i < NR_GRANT_FRAMES; i++) {
        HYPERVISOR_update_va_mapping((unsigned long)(((char *)gnttab_table) + PAGE_SIZE*i),
                (pte_t){ 0 }, UVMF_INVLPG);
    }
}
Example #11
/*
 * This function takes a contiguous pfn range that needs to be identity mapped
 * and:
 *
 *  1) Finds a new range of pfns to use to remap based on E820 and remap_pfn.
 *  2) Calls the do_ function to actually do the mapping/remapping work.
 *
 * The goal is to not allocate additional memory but to remap the existing
 * pages. In the case of an error the underlying memory is simply released back
 * to Xen and not remapped.
 */
static unsigned long __init xen_set_identity_and_remap_chunk(
	unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
	unsigned long remap_pfn)
{
	unsigned long pfn;
	unsigned long i = 0;
	unsigned long n = end_pfn - start_pfn;

	if (remap_pfn == 0)
		remap_pfn = nr_pages;

	while (i < n) {
		unsigned long cur_pfn = start_pfn + i;
		unsigned long left = n - i;
		unsigned long size = left;
		unsigned long remap_range_size;

		/* Do not remap pages beyond the current allocation */
		if (cur_pfn >= nr_pages) {
			/* Identity map remaining pages */
			set_phys_range_identity(cur_pfn, cur_pfn + size);
			break;
		}
		if (cur_pfn + size > nr_pages)
			size = nr_pages - cur_pfn;

		remap_range_size = xen_find_pfn_range(&remap_pfn);
		if (!remap_range_size) {
			pr_warning("Unable to find available pfn range, not remapping identity pages\n");
			xen_set_identity_and_release_chunk(cur_pfn,
						cur_pfn + left, nr_pages);
			break;
		}
		/* Adjust size to fit in current e820 RAM region */
		if (size > remap_range_size)
			size = remap_range_size;

		xen_do_set_identity_and_remap_chunk(cur_pfn, size, remap_pfn);

		/* Update variables to reflect new mappings. */
		i += size;
		remap_pfn += size;
	}

	/*
	 * If the PFNs are currently mapped, the VA mapping also needs
	 * to be updated to be 1:1.
	 */
	for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
		(void)HYPERVISOR_update_va_mapping(
			(unsigned long)__va(pfn << PAGE_SHIFT),
			mfn_pte(pfn, PAGE_KERNEL_IO), 0);

	return remap_pfn;
}
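A hedged caller sketch (pfn ranges are illustrative, not from the excerpt): the real caller walks the E820 map and threads remap_pfn through successive calls, passing 0 the first time so the search starts at nr_pages.

static unsigned long __init example_remap_two_holes(unsigned long nr_pages)
{
	unsigned long remap_pfn = 0;

	/* Legacy VGA/BIOS hole below 1 MB. */
	remap_pfn = xen_set_identity_and_remap_chunk(0x0a0, 0x100,
						     nr_pages, remap_pfn);
	/* A later call continues from wherever the previous one stopped. */
	remap_pfn = xen_set_identity_and_remap_chunk(0xf00, 0x1000,
						     nr_pages, remap_pfn);
	return remap_pfn;
}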
Example #12
static inline void pgd_walk_set_prot(void *pt, pgprot_t flags)
{
	struct page *page = virt_to_page(pt);
	unsigned long pfn = page_to_pfn(page);

	if (PageHighMem(page))
		return;
	BUG_ON(HYPERVISOR_update_va_mapping(
		(unsigned long)__va(pfn << PAGE_SHIFT),
		pfn_pte(pfn, flags), 0));
}
Example #13
static
shared_info_t *map_shared_info(unsigned long pa)
{
	if ( HYPERVISOR_update_va_mapping(
		(unsigned long)shared_info, __pte(pa | 7), UVMF_INVLPG) )
	{
		printk("Failed to map shared_info!!\n");
		do_exit();
	}
	return (shared_info_t *)shared_info;
}
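Hedged usage (the wrapper is an assumption; Mini-OS does the equivalent in its bootstrap): the caller passes the machine address advertised in the start info page and keeps the returned pointer.

static void example_init_shared_info(start_info_t *si)
{
	HYPERVISOR_shared_info = map_shared_info(si->shared_info);
}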
Example #14
void pte_free(struct page *pte)
{
	unsigned long va = (unsigned long)__va(page_to_pfn(pte)<<PAGE_SHIFT);

	if (!pte_write(*virt_to_ptep(va)))
		BUG_ON(HYPERVISOR_update_va_mapping(
			va, pfn_pte(page_to_pfn(pte), PAGE_KERNEL), 0));

	ClearPageForeign(pte);
	init_page_count(pte);

	__free_page(pte);
}
Example #15
void xen_arch_pre_suspend(void)
{
	xen_start_info->store_mfn = mfn_to_pfn(xen_start_info->store_mfn);
	xen_start_info->console.domU.mfn =
		mfn_to_pfn(xen_start_info->console.domU.mfn);

	BUG_ON(!irqs_disabled());

	HYPERVISOR_shared_info = &xen_dummy_shared_info;
	if (HYPERVISOR_update_va_mapping(fix_to_virt(FIX_PARAVIRT_BOOTMAP),
					 __pte_ma(0), 0))
		BUG();
}
Example #16
void make_lowmem_page_readwrite(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;

	pte = lookup_address(address);
	BUG_ON(pte == NULL);

	ptev = pte_mkwrite(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}
Example #17
static
shared_info_t *map_shared_info(unsigned long pa)
{
	int rc;

	if ( (rc = HYPERVISOR_update_va_mapping(
		(unsigned long)_minios_shared_info, __pte(pa | 7), UVMF_INVLPG)) )
	{
		minios_printk("Failed to map shared_info!! rc=%d\n", rc);
		minios_do_exit();
	}
	return (shared_info_t *)_minios_shared_info;
}
Example #18
void make_lowmem_page_writable(void *va, unsigned int feature)
{
	pte_t *pte;
	int rc;

	if (xen_feature(feature))
		return;

	pte = virt_to_ptep(va);
	rc = HYPERVISOR_update_va_mapping(
		(unsigned long)va, pte_mkwrite(*pte), 0);
	BUG_ON(rc);
}
Example #19
void make_lowmem_page_readonly(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);

	ptev = pte_wrprotect(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}
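A hedged usage sketch (the wrapper is an assumption): because Xen insists that live page tables be read-only in every mapping, a freshly allocated PTE page is write-protected through its direct-map alias before being hooked into the paging hierarchy.

static void example_protect_new_pte_page(unsigned long pfn)
{
	/* Write-protect the lowmem alias of the new PTE page. */
	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
}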
Example #20
void make_lowmem_page_readwrite(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	if (pte == NULL)
		return;		/* vaddr missing */

	ptev = pte_mkwrite(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}
Example #21
static pgentry_t new_pt_page(unsigned long *pt_pfn,
	pgentry_t *higher_tab, unsigned int higher_off, int level)
{
	/*
	 * *pt_pfn is already mapped by the domain builder.  Remap it
	 * read-only (so it is usable as a page table/directory), point
	 * higher_tab[higher_off] at it, then advance *pt_pfn.
	 */
	pgentry_t prot_e, prot_t;
	prot_e = prot_t = 0;

	switch (level)
	{
	case L1_FRAME:
		prot_e = L1_PROT;
		prot_t = L2_PROT;
		break;
	case L2_FRAME:
		prot_e = L2_PROT;
		prot_t = L3_PROT;
		break;
#if defined(__x86_64__)
	case L3_FRAME:
		prot_e = L3_PROT;
		prot_t = L4_PROT;
		break;
#endif
	default:
		fatal_error("new_pt_page: bad level %d", level);
	}

	void *pt_va = pfn_to_virt(*pt_pfn);
	memset(pt_va, 0, PAGE_SIZE);	/* all entries start not-present */

	/* Page tables must never be writable once live, so drop _PAGE_RW
	 * from the linear mapping of the new table. */
	unsigned long pte0 = pfn_to_mfn(*pt_pfn) << PAGE_SHIFT | (prot_e & ~_PAGE_RW);
	HYPERVISOR_update_va_mapping((unsigned long)pt_va, __pte(pte0), UVMF_INVLPG);

	/* Hook the new table into its parent entry. */
	pgentry_t pte = (pfn_to_mfn(*pt_pfn) << PAGE_SHIFT) | prot_t;

	struct mmu_update mu;
	mu.ptr = (virt_to_mfn(higher_tab) << PAGE_SHIFT) + sizeof(pgentry_t)*higher_off;
	mu.val = pte;
	HYPERVISOR_mmu_update(&mu, 1, NULL, DOMID_SELF);

	(*pt_pfn)++;

	return pte;
}
Example #22
void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, pte_t pteval)
{
	if (mm == current->mm || mm == &init_mm) {
		if (xen_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
			struct multicall_space mcs;
			mcs = xen_mc_entry(0);

			MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
			xen_mc_issue(PARAVIRT_LAZY_MMU);
			return;
		} else
			if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
				return;
	}
	xen_set_pte(ptep, pteval);
}
Example #23
static void __make_page_writable(void *va)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t pte, *ptep;
	unsigned long addr = (unsigned long) va;

	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	ptep = pte_offset_kernel(pmd, addr);

	pte.pte = ptep->pte | _PAGE_RW;
	if (HYPERVISOR_update_va_mapping(addr, pte, 0))
		xen_l1_entry_update(ptep, pte); /* fallback */

	if ((addr >= VMALLOC_START) && (addr < VMALLOC_END))
		__make_page_writable(__va(pte_pfn(pte) << PAGE_SHIFT));
}
Example #24
void
resume_gnttab(void)
{
    int i;
    struct gnttab_setup_table setup;
    unsigned long frames[NR_GRANT_FRAMES];

    setup.dom = DOMID_SELF;
    setup.nr_frames = NR_GRANT_FRAMES;
    set_xen_guest_handle(setup.frame_list, frames);

    HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);

    for (i = 0; i < NR_GRANT_FRAMES; i++) {
        HYPERVISOR_update_va_mapping((unsigned long)(((char *)gnttab_table) + PAGE_SIZE*i),
                (pte_t){(frames[i] << PAGE_SHIFT) | L1_PROT}, UVMF_INVLPG);
    }
}
Example #25
/* Main kernel entry point, called by trampoline */
void start_kernel(start_info_t * start_info)
{
	/* Map the shared info page */
	HYPERVISOR_update_va_mapping((unsigned long) &shared_info, 
			__pte(start_info->shared_info | 7),
			UVMF_INVLPG);
	/* Set the pointer used in the bootstrap for reenabling
	 * event delivery after an upcall */
	HYPERVISOR_shared_info = &shared_info;
	/* Set up and unmask events */
	init_events();
	/* Initialise the console */
	console_init(start_info);
	/* Write a message to check that it worked */
	console_write("Hello world!\r\n");
	/* Loop, handling events */
	while(1)
	{
		HYPERVISOR_sched_op(SCHEDOP_block,0);
	}
}
Example #26
void make_page_writable(void *va, unsigned int feature)
{
	pte_t *pte;
	int rc;

	if (xen_feature(feature))
		return;

	pte = virt_to_ptep(va);
	rc = HYPERVISOR_update_va_mapping(
		(unsigned long)va, pte_mkwrite(*pte), 0);
	if (rc) /* fallback? */
		xen_l1_entry_update(pte, pte_mkwrite(*pte));
	if ((unsigned long)va >= (unsigned long)high_memory) {
		unsigned long pfn = pte_pfn(*pte); 
#ifdef CONFIG_HIGHMEM
		if (pfn < highstart_pfn)
#endif
			make_lowmem_page_writable(
				phys_to_virt(pfn << PAGE_SHIFT), feature);
	}
}
Example #27
paddr_t
make_ptable(x86pte_t *pteval, uint_t level)
{
	paddr_t new_table = (paddr_t)(uintptr_t)mem_alloc(MMU_PAGESIZE);

	if (level == top_level && level == 2)
		*pteval = pa_to_ma((uintptr_t)new_table) | PT_VALID;
	else
		*pteval = pa_to_ma((uintptr_t)new_table) | ptp_bits;

#ifdef __xpv
	/* Remove write permission to the new page table. */
	if (HYPERVISOR_update_va_mapping(new_table,
	    *pteval & ~(x86pte_t)PT_WRITABLE, UVMF_INVLPG | UVMF_LOCAL))
		dboot_panic("HYP_update_va_mapping error");
#endif

	if (map_debug)
		dboot_printf("new page table lvl=%d paddr=0x%lx ptp=0x%"
		    PRIx64 "\n", level, (ulong_t)new_table, *pteval);
	return (new_table);
}
Example #28
static int handle_cow(unsigned long addr)
{
    pgentry_t *tab = (pgentry_t *)start_info.pt_base;
    pgentry_t page;
    unsigned long new_page;
    int rc;

    page = tab[l4_table_offset(addr)];
    if (!(page & _PAGE_PRESENT))
        return 0;
    tab = pte_to_virt(page);

    page = tab[l3_table_offset(addr)];
    if (!(page & _PAGE_PRESENT))
        return 0;
    tab = pte_to_virt(page);

    page = tab[l2_table_offset(addr)];
    if (!(page & _PAGE_PRESENT))
        return 0;
    tab = pte_to_virt(page);

    page = tab[l1_table_offset(addr)];
    if (!(page & _PAGE_PRESENT))
        return 0;
    /* Only support CoW for the zero page. */
    if (PHYS_PFN(page) != mfn_zero)
        return 0;

    new_page = alloc_pages(0);
    memset((void *)new_page, 0, PAGE_SIZE);

    rc = HYPERVISOR_update_va_mapping(addr & PAGE_MASK,
            __pte(virt_to_mach(new_page) | L1_PROT), UVMF_INVLPG);
    if (!rc)
        return 1;

    printk("Map zero page to %lx failed: %d.\n", addr, rc);
    return 0;
}
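A hedged sketch of how a fault handler would consult this helper (the handler shape, read_cr2 and dump_regs are assumptions): write faults that handle_cow() resolves simply return and retry; everything else reports and stops.

void do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
    unsigned long addr = read_cr2();

    /* Bit 1 of error_code means a write access. */
    if ((error_code & 2) && handle_cow(addr))
        return;     /* fixed up; retry the faulting instruction */

    dump_regs(regs);
    do_exit();
}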
Example #29
/* Main kernel entry point, called by trampoline */
void start_kernel (start_info_t *start_info)
{
    sched_shutdown_t op;

    /* Map the shared info page */
    HYPERVISOR_update_va_mapping((unsigned long) &shared_info,
                                 __pte(start_info->shared_info | 7),
                                 UVMF_INVLPG);

    HYPERVISOR_shared_info = &shared_info;

    init_events ();
    console_init (start_info);
    console_write ("Just a hello!\r\n");

    snprintf (buf, sizeof (buf), "1: %d, 2: %x\r\n", 1234, 1234);
    console_write (buf);

    /* 	op.reason = SHUTDOWN_poweroff; */
    /* 	HYPERVISOR_sched_op (SCHEDOP_shutdown, &op); */

    while (1)
        HYPERVISOR_sched_op (SCHEDOP_block, 0);
}
Example #30
void make_page_readonly(void *va, unsigned int feature)
{
	pte_t *pte;
	int rc;

	if (xen_feature(feature))
		return;

	pte = virt_to_ptep(va);
	rc = HYPERVISOR_update_va_mapping(
		(unsigned long)va, pte_wrprotect(*pte), 0);
	if (rc) /* fallback? */
		xen_l1_entry_update(pte, pte_wrprotect(*pte));
	if ((unsigned long)va >= (unsigned long)high_memory) {
		unsigned long pfn = pte_pfn(*pte);
#ifdef CONFIG_HIGHMEM
		if (pfn >= highstart_pfn)
			kmap_flush_unused(); /* flush stale writable kmaps */
		else
#endif
			make_lowmem_page_readonly(
				phys_to_virt(pfn << PAGE_SHIFT), feature); 
	}
}