Example #1
File: p2m.c Project: DenisLug/mptcp
/*
 * Allocate new pmd(s). It is checked whether the old pmd is still in place.
 * If not, nothing is changed. This is okay as the only reason for allocating
 * a new pmd is to replace p2m_missing_pte or p2m_identity_pte by an individual
 * pmd. In case of PAE/x86-32 there are multiple pmds to allocate!
 */
static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *pte_pg)
{
	pte_t *ptechk;
	pte_t *pte_newpg[PMDS_PER_MID_PAGE];
	pmd_t *pmdp;
	unsigned int level;
	unsigned long flags;
	unsigned long vaddr;
	int i;

	/* Do all allocations first to bail out in error case. */
	for (i = 0; i < PMDS_PER_MID_PAGE; i++) {
		pte_newpg[i] = alloc_p2m_page();
		if (!pte_newpg[i]) {
			for (i--; i >= 0; i--)
				free_p2m_page(pte_newpg[i]);

			return NULL;
		}
	}

	vaddr = addr & ~(PMD_SIZE * PMDS_PER_MID_PAGE - 1);

	for (i = 0; i < PMDS_PER_MID_PAGE; i++) {
		copy_page(pte_newpg[i], pte_pg);
		paravirt_alloc_pte(&init_mm, __pa(pte_newpg[i]) >> PAGE_SHIFT);

		pmdp = lookup_pmd_address(vaddr);
		BUG_ON(!pmdp);

		spin_lock_irqsave(&p2m_update_lock, flags);

		ptechk = lookup_address(vaddr, &level);
		if (ptechk == pte_pg) {
			HYPERVISOR_shared_info->arch.p2m_generation++;
			wmb(); /* Tools are synchronizing via p2m_generation. */
			set_pmd(pmdp,
				__pmd(__pa(pte_newpg[i]) | _KERNPG_TABLE));
			wmb(); /* Tools are synchronizing via p2m_generation. */
			HYPERVISOR_shared_info->arch.p2m_generation++;
			pte_newpg[i] = NULL;
		}

		spin_unlock_irqrestore(&p2m_update_lock, flags);

		if (pte_newpg[i]) {
			paravirt_release_pte(__pa(pte_newpg[i]) >> PAGE_SHIFT);
			free_p2m_page(pte_newpg[i]);
		}

		vaddr += PMD_SIZE;
	}

	return lookup_address(addr, &level);
}
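The paired increments of p2m_generation around set_pmd() form a seqcount: the value is odd while an update is in flight and changes whenever a pmd is swapped. A reader-side sketch of how the tools side could synchronize against this (read_p2m_entry is a hypothetical name; only p2m_generation, xen_p2m_addr, and the barrier pairing are taken from the code above):

/* Hypothetical reader: retry while the counter is odd (update in
 * flight) or has changed since the entry was read. */
static unsigned long read_p2m_entry(unsigned long pfn)
{
	unsigned long gen, val;

	do {
		gen = HYPERVISOR_shared_info->arch.p2m_generation;
		rmb();		/* pairs with the writer's wmb() */
		val = xen_p2m_addr[pfn];
		rmb();
	} while ((gen & 1) ||
		 gen != HYPERVISOR_shared_info->arch.p2m_generation);

	return val;
}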
Example #2
bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
    pte_t *ptep;
    unsigned int level;

    /* don't track P2M changes in autotranslate guests */
    if (unlikely(xen_feature(XENFEAT_auto_translated_physmap)))
        return true;

    if (unlikely(pfn >= xen_p2m_size)) {
        BUG_ON(mfn != INVALID_P2M_ENTRY);
        return true;
    }

    if (likely(!xen_safe_write_ulong(xen_p2m_addr + pfn, mfn)))
        return true;

    ptep = lookup_address((unsigned long)(xen_p2m_addr + pfn), &level);
    BUG_ON(!ptep || level != PG_LEVEL_4K);

    if (pte_pfn(*ptep) == PFN_DOWN(__pa(p2m_missing)))
        return mfn == INVALID_P2M_ENTRY;

    if (pte_pfn(*ptep) == PFN_DOWN(__pa(p2m_identity)))
        return mfn == IDENTITY_FRAME(pfn);

    return false;
}
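A false return means the fast path faulted and the pfn is still backed by a shared missing/identity page, so a private leaf page has to be installed first. A sketch of the usual caller, assuming an alloc_p2m() helper that replaces the shared page with a writable one:

/* Sketch: retry the update after allocating a private p2m leaf page. */
bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
		if (!alloc_p2m(pfn))
			return false;
		return __set_phys_to_machine(pfn, mfn);
	}
	return true;
}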
Example #3
/**
 * NOTE:
 *  * the prototype of lookup_address() was changed in 2.6.25
 *  * lookup_address() was not exported until 2.6.27
 */
static void make_page_rw(unsigned long addr)
{
	unsigned int level;
	pte_t *pte = lookup_address(addr, &level);

	if (!pte)	/* guard against unmapped addresses */
		return;

	*pte = pte_mkwrite(*pte);
	flush_cache_all();
}
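Given the NOTE above, code that targets several kernel versions usually hides the prototype difference behind a shim; a sketch (lookup_address_compat is a hypothetical name, the version boundary comes from the note):

#include <linux/version.h>

static pte_t *lookup_address_compat(unsigned long addr, unsigned int *level)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25)
	/* Old one-argument prototype; *level is left untouched. */
	return lookup_address(addr);
#else
	return lookup_address(addr, level);
#endif
}

Note that before 2.6.27 a module cannot link against the symbol at all, so out-of-tree code on those kernels typically resolves the address by other means.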
Example #4
void
GAS_addresses_destroy (const struct GNUNET_PeerIdentity *peer,
                       const char *plugin_name, const void *plugin_addr,
                       size_t plugin_addr_len, uint32_t session_id)
{
  struct ATS_Address *aa;
  struct ATS_Address *old;

  if (GNUNET_NO == running)
    return;

  /* Get existing address */
  old = lookup_address (peer, plugin_name, plugin_addr, plugin_addr_len,
                       session_id, NULL, 0);
  if (old == NULL)
  {
    GNUNET_log (GNUNET_ERROR_TYPE_WARNING, "Tried to destroy unknown address for peer `%s' `%s' session id %u\n",
                GNUNET_i2s (peer), plugin_name, session_id);
    return;
  }


  GNUNET_break (0 < strlen (plugin_name));
  aa = create_address (peer, plugin_name, plugin_addr, plugin_addr_len, session_id);

  GNUNET_CONTAINER_multihashmap_get_multiple (addresses, &peer->hashPubKey,
                                              &destroy_by_session_id, aa);

  free_address (aa);
}
Example #5
/*
 * Build the parallel p2m_top_mfn and p2m_mid_mfn structures
 *
 * This is called both at boot time, and after resuming from suspend:
 * - At boot time we're called rather early, and must use alloc_bootmem*()
 *   to allocate memory.
 *
 * - After resume we're called from within stop_machine, but the mfn
 *   tree should already be completely allocated.
 */
void __ref xen_build_mfn_list_list(void)
{
    unsigned long pfn, mfn;
    pte_t *ptep;
    unsigned int level, topidx, mididx;
    unsigned long *mid_mfn_p;

    if (xen_feature(XENFEAT_auto_translated_physmap))
        return;

    /* Pre-initialize p2m_top_mfn to be completely missing */
    if (p2m_top_mfn == NULL) {
        p2m_mid_missing_mfn = alloc_p2m_page();
        p2m_mid_mfn_init(p2m_mid_missing_mfn, p2m_missing);

        p2m_top_mfn_p = alloc_p2m_page();
        p2m_top_mfn_p_init(p2m_top_mfn_p);

        p2m_top_mfn = alloc_p2m_page();
        p2m_top_mfn_init(p2m_top_mfn);
    } else {
        /* Reinitialise: mfns all change after migration */
        p2m_mid_mfn_init(p2m_mid_missing_mfn, p2m_missing);
    }

    for (pfn = 0; pfn < xen_max_p2m_pfn && pfn < MAX_P2M_PFN;
            pfn += P2M_PER_PAGE) {
        topidx = p2m_top_index(pfn);
        mididx = p2m_mid_index(pfn);

        mid_mfn_p = p2m_top_mfn_p[topidx];
        ptep = lookup_address((unsigned long)(xen_p2m_addr + pfn),
                              &level);
        BUG_ON(!ptep || level != PG_LEVEL_4K);
        mfn = pte_mfn(*ptep);
        ptep = (pte_t *)((unsigned long)ptep & ~(PAGE_SIZE - 1));

        /* Don't bother allocating any mfn mid levels if they're just
         * missing; update the stored mfn instead, since all could have
         * changed over a migrate.
         */
        if (ptep == p2m_missing_pte || ptep == p2m_identity_pte) {
            BUG_ON(mididx);
            BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
            p2m_top_mfn[topidx] = virt_to_mfn(p2m_mid_missing_mfn);
            pfn += (P2M_MID_PER_PAGE - 1) * P2M_PER_PAGE;
            continue;
        }

        if (mid_mfn_p == p2m_mid_missing_mfn) {
            mid_mfn_p = alloc_p2m_page();
            p2m_mid_mfn_init(mid_mfn_p, p2m_missing);

            p2m_top_mfn_p[topidx] = mid_mfn_p;
        }

        p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
        mid_mfn_p[mididx] = mfn;
    }
}
Example #6
static int __init
diamorphine_init(void)
{
	unsigned int level;

	sys_call_table = get_syscall_table_bf();
	if (!sys_call_table)
		return -1;

	pte = lookup_address((unsigned long)sys_call_table, &level);
	if (!pte)
		return -1;

	module_hide();
	tidy();

	orig_getdents = (orig_getdents_t)sys_call_table[__NR_getdents];
	orig_getdents64 = (orig_getdents64_t)sys_call_table[__NR_getdents64];
	orig_kill = (orig_kill_t)sys_call_table[__NR_kill];

	unprotect_memory();
	sys_call_table[__NR_getdents] = (unsigned long)hacked_getdents;
	sys_call_table[__NR_getdents64] = (unsigned long)hacked_getdents64;
	sys_call_table[__NR_kill] = (unsigned long)hacked_kill;
	protect_memory();

	return 0;
}
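pte here is a file-scope pointer saved for the unprotect_memory()/protect_memory() pair. A plausible sketch of those helpers, consistent with the lookup above (the bodies are an assumption, not shown in the original):

/* Assumed helpers: toggle _PAGE_RW on the saved syscall-table pte. */
static inline void unprotect_memory(void)
{
	pte->pte |= _PAGE_RW;	/* make the syscall table writable */
}

static inline void protect_memory(void)
{
	pte->pte &= ~_PAGE_RW;	/* restore read-only protection */
}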
Example #7
static int print_split(struct split_state *s)
{
	long i, expected, missed = 0;
	int err = 0;

	s->lpg = s->gpg = s->spg = s->exec = 0;
	s->min_exec = ~0UL;
	s->max_exec = 0;
	for (i = 0; i < max_pfn_mapped; ) {
		unsigned long addr = (unsigned long)__va(i << PAGE_SHIFT);
		unsigned int level;
		pte_t *pte;

		pte = lookup_address(addr, &level);
		if (!pte) {
			missed++;
			i++;
			continue;
		}

		if (level == PG_LEVEL_1G && sizeof(long) == 8) {
			s->gpg++;
			i += GPS/PAGE_SIZE;
		} else if (level == PG_LEVEL_2M) {
			if (!(pte_val(*pte) & _PAGE_PSE)) {
				printk(KERN_ERR
					"%lx level %d but not PSE %Lx\n",
					addr, level, (u64)pte_val(*pte));
				err = 1;
			}
			s->lpg++;
			i += LPS/PAGE_SIZE;
		} else {
			s->spg++;
			i++;
		}
		if (!(pte_val(*pte) & _PAGE_NX)) {
			s->exec++;
			if (addr < s->min_exec)
				s->min_exec = addr;
			if (addr > s->max_exec)
				s->max_exec = addr;
		}
	}
	if (print) {
		printk(KERN_INFO
			" 4k %lu large %lu gb %lu x %lu[%lx-%lx] miss %lu\n",
			s->spg, s->lpg, s->gpg, s->exec,
			s->min_exec != ~0UL ? s->min_exec : 0,
			s->max_exec, missed);
	}

	expected = (s->gpg*GPS + s->lpg*LPS)/PAGE_SIZE + s->spg + missed;
	if (expected != i) {
		printk(KERN_ERR "CPA max_pfn_mapped %lu but expected %lu\n",
			max_pfn_mapped, expected);
		return 1;
	}
	return err;
}
Example #8
File: p2m.c Project: DenisLug/mptcp
unsigned long get_phys_to_machine(unsigned long pfn)
{
	pte_t *ptep;
	unsigned int level;

	if (unlikely(pfn >= xen_p2m_size)) {
		if (pfn < xen_max_p2m_pfn)
			return xen_chk_extra_mem(pfn);

		return IDENTITY_FRAME(pfn);
	}

	ptep = lookup_address((unsigned long)(xen_p2m_addr + pfn), &level);
	BUG_ON(!ptep || level != PG_LEVEL_4K);

	/*
	 * The INVALID_P2M_ENTRY is filled in both p2m_*identity
	 * and in p2m_*missing, so returning the INVALID_P2M_ENTRY
	 * would be wrong.
	 */
	if (pte_pfn(*ptep) == PFN_DOWN(__pa(p2m_identity)))
		return IDENTITY_FRAME(pfn);

	return xen_p2m_addr[pfn];
}
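IDENTITY_FRAME() tags the returned value instead of handing back a raw pfn, which is how identity entries stay distinguishable from real mfns. A sketch of the encoding this code assumes (bit positions are an assumption based on the mainline Xen headers):

/* Assumed encoding: high bits of an entry mark special frame types. */
#define FOREIGN_FRAME_BIT	(1UL << (BITS_PER_LONG - 1))
#define IDENTITY_FRAME_BIT	(1UL << (BITS_PER_LONG - 2))
#define IDENTITY_FRAME(pfn)	((pfn) | IDENTITY_FRAME_BIT)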
Example #9
File: p2m.c Project: DenisLug/mptcp
bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	pte_t *ptep;
	unsigned int level;

	/* don't track P2M changes in autotranslate guests */
	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap)))
		return true;

	if (unlikely(pfn >= xen_p2m_size)) {
		BUG_ON(mfn != INVALID_P2M_ENTRY);
		return true;
	}

	/*
	 * The interface requires atomic updates on p2m elements.
	 * xen_safe_write_ulong() is using __put_user which does an atomic
	 * store via asm().
	 */
	if (likely(!xen_safe_write_ulong(xen_p2m_addr + pfn, mfn)))
		return true;

	ptep = lookup_address((unsigned long)(xen_p2m_addr + pfn), &level);
	BUG_ON(!ptep || level != PG_LEVEL_4K);

	if (pte_pfn(*ptep) == PFN_DOWN(__pa(p2m_missing)))
		return mfn == INVALID_P2M_ENTRY;

	if (pte_pfn(*ptep) == PFN_DOWN(__pa(p2m_identity)))
		return mfn == IDENTITY_FRAME(pfn);

	return false;
}
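The comment above depends on xen_safe_write_ulong() compiling to a single store that can fault without oopsing. A minimal sketch of one way to get that behaviour, assuming __put_user() provides the exception-table fixup the comment describes:

/* Sketch: a single faultable store; returns nonzero if it faults. */
static inline int xen_safe_write_ulong(unsigned long *addr, unsigned long val)
{
	return __put_user(val, (unsigned long __force __user *)addr);
}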
Example #10
/*
 * Set the page permissions for a particular virtual address.  If the
 * address is a vmalloc mapping (or other non-linear mapping), then
 * find the linear mapping of the page and also set its protections to
 * match.
 */
static void set_aliased_prot(void *v, pgprot_t prot)
{
	int level;
	pte_t *ptep;
	pte_t pte;
	unsigned long pfn;
	struct page *page;

	ptep = lookup_address((unsigned long)v, &level);
	BUG_ON(ptep == NULL);

	pfn = pte_pfn(*ptep);
	page = pfn_to_page(pfn);

	pte = pfn_pte(pfn, prot);

	if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
		BUG();

	if (!PageHighMem(page)) {
		void *av = __va(PFN_PHYS(pfn));

		if (av != v)
			if (HYPERVISOR_update_va_mapping((unsigned long)av, pte, 0))
				BUG();
	} else
		kmap_flush_unused();
}
Example #11
static void xen_load_gdt(const struct desc_ptr *dtr)
{
	unsigned long va = dtr->address;
	unsigned int size = dtr->size + 1;
	unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
	unsigned long frames[pages];
	int f;

	/* A GDT can be up to 64k in size, which corresponds to 8192
	   8-byte entries, or 16 4k pages. */

	BUG_ON(size > 65536);
	BUG_ON(va & ~PAGE_MASK);

	for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
		int level;
		pte_t *ptep = lookup_address(va, &level);
		unsigned long pfn, mfn;
		void *virt;

		BUG_ON(ptep == NULL);

		pfn = pte_pfn(*ptep);
		mfn = pfn_to_mfn(pfn);
		virt = __va(PFN_PHYS(pfn));

		frames[f] = mfn;

		make_lowmem_page_readonly((void *)va);
		make_lowmem_page_readonly(virt);
	}

	if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct)))
		BUG();
}
Example #12
static void make_page_ro(unsigned long addr)
{
	unsigned int level;
	pte_t *pte = lookup_address(addr, &level);

	if (!pte)	/* guard against unmapped addresses */
		return;

	*pte = pte_wrprotect(*pte);
	flush_cache_all();
}
Example #13
int make_rw(unsigned long address) {
	unsigned int level;
	pte_t *pte = lookup_address(address, &level);

	if (!pte)
		return -1;
	/* Set _PAGE_RW only if the page is not already writable. */
	if (!(pte->pte & _PAGE_RW))
		pte->pte |= _PAGE_RW;
	return 0;
}
Example #14
static void write_addr(u128 * target, u128 * inject)
{
    pte_t * ppt, pt;
    unsigned int level;

    ppt = lookup_address((unsigned long)target, &level);
    if (pte_write(*ppt) == 0) {
        /* Page is read-only: make it writable, patch, then restore. */
        pt = pte_mkwrite(*ppt);
        set_pte(ppt, pt);
        *target = *inject;
        pt = pte_wrprotect(*ppt);
        set_pte(ppt, pt);
    } else {
        *target = *inject;
    }
}
Example #15
void set_addr_ro(unsigned long addr) {

    unsigned int level;
    pte_t *pte = lookup_address(addr, &level);

    pte->pte = pte->pte & ~_PAGE_RW;

}
Example #16
void set_addr_rw(unsigned long addr) {

    unsigned int level;
    pte_t *pte = lookup_address(addr, &level);

    if (!(pte->pte & _PAGE_RW)) pte->pte |= _PAGE_RW;

}
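A typical call pattern for the two helpers above, temporarily unprotecting a read-only table (sys_call_table, __NR_kill, and hacked_kill are assumptions for illustration):

/* Illustrative only: flip the mapping writable, patch, flip it back. */
set_addr_rw((unsigned long)sys_call_table);
sys_call_table[__NR_kill] = (unsigned long)hacked_kill;
set_addr_ro((unsigned long)sys_call_table);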
Example #17
/*
 * Set the page permissions for a particular virtual address.  If the
 * address is a vmalloc mapping (or other non-linear mapping), then
 * find the linear mapping of the page and also set its protections to
 * match.
 */
static void set_aliased_prot(void *v, pgprot_t prot)
{
	int level;
	pte_t *ptep;
	pte_t pte;
	unsigned long pfn;
	struct page *page;
	unsigned char dummy;

	ptep = lookup_address((unsigned long)v, &level);
	BUG_ON(ptep == NULL);

	pfn = pte_pfn(*ptep);
	page = pfn_to_page(pfn);

	pte = pfn_pte(pfn, prot);

	/*
	 * Careful: update_va_mapping() will fail if the virtual address
	 * we're poking isn't populated in the page tables.  We don't
	 * need to worry about the direct map (that's always in the page
	 * tables), but we need to be careful about vmap space.  In
	 * particular, the top level page table can lazily propagate
	 * entries between processes, so if we've switched mms since we
	 * vmapped the target in the first place, we might not have the
	 * top-level page table entry populated.
	 *
	 * We disable preemption because we want the same mm active when
	 * we probe the target and when we issue the hypercall.  We'll
	 * have the same nominal mm, but if we're a kernel thread, lazy
	 * mm dropping could change our pgd.
	 *
	 * Out of an abundance of caution, this uses __get_user() to fault
	 * in the target address just in case there's some obscure case
	 * in which the target address isn't readable.
	 */

	preempt_disable();

	pagefault_disable();	/* Avoid warnings due to being atomic. */
	__get_user(dummy, (unsigned char __user __force *)v);
	pagefault_enable();

	if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
		BUG();

	if (!PageHighMem(page)) {
		void *av = __va(PFN_PHYS(pfn));

		if (av != v)
			if (HYPERVISOR_update_va_mapping((unsigned long)av, pte, 0))
				BUG();
	} else
		kmap_flush_unused();

	preempt_enable();
}
Example #18
xmaddr_t arbitrary_virt_to_machine(unsigned long address)
{
	pte_t *pte = lookup_address(address);
	unsigned offset = address & ~PAGE_MASK;	/* offset within the page */

	BUG_ON(pte == NULL);

	return XMADDR((pte_mfn(*pte) << PAGE_SHIFT) + offset);
}
Example #19
int set_page_rw(ptr_t address) {
    unsigned int level;
    pte_t *pte = lookup_address(address, &level);

    /* Set _PAGE_RW only if the page is not already writable. */
    if (!(pte->pte & _PAGE_RW)) {
        pte->pte |= _PAGE_RW;
    }

    return 0;
}
Example #20
static xen_pfn_t pvh_get_grant_pfn(int grant_idx)
{
	unsigned long vaddr;
	unsigned int level;
	pte_t *pte;

	vaddr = (unsigned long)(gnttab_shared.addr) + grant_idx * PAGE_SIZE;
	pte = lookup_address(vaddr, &level);
	BUG_ON(pte == NULL);
	return pte_mfn(*pte);
}
Example #21
/* Add an MFN override for a particular page */
static int m2p_add_override(unsigned long mfn, struct page *page,
			    struct gnttab_map_grant_ref *kmap_op)
{
	unsigned long flags;
	unsigned long pfn;
	unsigned long uninitialized_var(address);
	unsigned level;
	pte_t *ptep = NULL;

	pfn = page_to_pfn(page);
	if (!PageHighMem(page)) {
		address = (unsigned long)__va(pfn << PAGE_SHIFT);
		ptep = lookup_address(address, &level);
		if (WARN(ptep == NULL || level != PG_LEVEL_4K,
			 "m2p_add_override: pfn %lx not mapped", pfn))
			return -EINVAL;
	}

	if (kmap_op != NULL) {
		if (!PageHighMem(page)) {
			struct multicall_space mcs =
				xen_mc_entry(sizeof(*kmap_op));

			MULTI_grant_table_op(mcs.mc,
					GNTTABOP_map_grant_ref, kmap_op, 1);

			xen_mc_issue(PARAVIRT_LAZY_MMU);
		}
	}
	spin_lock_irqsave(&m2p_override_lock, flags);
	list_add(&page->lru,  &m2p_overrides[mfn_hash(mfn)]);
	spin_unlock_irqrestore(&m2p_override_lock, flags);

	/* p2m(m2p(mfn)) == mfn: the mfn is already present somewhere in
	 * this domain. Set the FOREIGN_FRAME_BIT in the p2m for the other
	 * pfn so that the following mfn_to_pfn(mfn) calls will return the
	 * pfn from the m2p_override (the backend pfn) instead.
	 * We need to do this because the pages shared by the frontend
	 * (xen-blkfront) can be already locked (lock_page, called by
	 * do_read_cache_page); when the userspace backend tries to use them
	 * with direct_IO, mfn_to_pfn returns the pfn of the frontend, so
	 * do_blockdev_direct_IO is going to try to lock the same pages
	 * again resulting in a deadlock.
	 * As a side effect get_user_pages_fast might not be safe on the
	 * frontend pages while they are being shared with the backend,
	 * because mfn_to_pfn (that ends up being called by GUPF) will
	 * return the backend pfn rather than the frontend pfn. */
	pfn = mfn_to_pfn_no_overrides(mfn);
	if (__pfn_to_mfn(pfn) == mfn)
		set_phys_to_machine(pfn, FOREIGN_FRAME(mfn));

	return 0;
}
Example #22
static int
__change_page_attr(struct page *page, pgprot_t prot)
{ 
	pte_t *kpte; 
	unsigned long address;
	struct page *kpte_page;

	BUG_ON(PageHighMem(page));
	address = (unsigned long)page_address(page);

	kpte = lookup_address(address);
	if (!kpte)
		return -EINVAL;
	kpte_page = virt_to_page(kpte);
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) { 
		if ((pte_val(*kpte) & _PAGE_PSE) == 0) { 
			set_pte_atomic(kpte, mk_pte(page, prot)); 
		} else {
			pgprot_t ref_prot;
			struct page *split;

			ref_prot =
			((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
				? PAGE_KERNEL_EXEC : PAGE_KERNEL;
			split = split_large_page(address, prot, ref_prot);
			if (!split)
				return -ENOMEM;
			set_pmd_pte(kpte,address,mk_pte(split, ref_prot));
			kpte_page = split;
		}
		page_private(kpte_page)++;
	} else if ((pte_val(*kpte) & _PAGE_PSE) == 0) { 
		set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
		BUG_ON(page_private(kpte_page) == 0);
		page_private(kpte_page)--;
	} else
		BUG();

	/*
	 * If the pte was reserved, it means it was created at boot
	 * time (not via split_large_page) and in turn we must not
	 * replace it with a largepage.
	 */
	if (!PageReserved(kpte_page)) {
		if (cpu_has_pse && (page_private(kpte_page) == 0)) {
			ClearPagePrivate(kpte_page);
			list_add(&kpte_page->lru, &df_list);
			revert_page(kpte_page, address);
		}
	}
	return 0;
} 
Example #23
void make_lowmem_page_readwrite(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;

	pte = lookup_address(address);
	BUG_ON(pte == NULL);

	ptev = pte_mkwrite(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}
Example #24
void set_addr_rw(unsigned long addr, bool *flag) {

    unsigned int level;
    pte_t *pte;

    /* *flag records whether this call changed the protection, so the
     * caller knows whether to restore read-only afterwards. */
    *flag = true;

    pte = lookup_address(addr, &level);

    if (pte->pte & _PAGE_RW) *flag = false;	/* already writable */
    else pte->pte |= _PAGE_RW;

}
Example #25
void set_addr_ro(unsigned long addr, bool flag) {

    unsigned int level;
    pte_t *pte;

    // only set back to readonly if it was readonly before
    if (flag) {
        pte = lookup_address(addr, &level);

        pte->pte = pte->pte & ~_PAGE_RW;
    }

}
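Examples 24 and 25 pair up: the flag records whether set_addr_rw() actually changed the protection, so set_addr_ro() restores read-only only in that case. A usage sketch (sys_call_table and the hook are assumptions for illustration):

bool was_ro;	/* set by set_addr_rw: true if the page had been read-only */

set_addr_rw((unsigned long)sys_call_table, &was_ro);
sys_call_table[__NR_kill] = (unsigned long)hacked_kill;
set_addr_ro((unsigned long)sys_call_table, was_ro);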
Example #26
void make_lowmem_page_readonly(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);

	ptev = pte_wrprotect(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}
Example #27
static int read_managers(node *n, const char *nodesfile, endpointid **outids, int *outcount)
{
  endpointid *managerids;
  int count;
  array *nodes = read_nodes(nodesfile);
  int i;

  *outids = NULL;
  *outcount = 0;

  if (NULL == nodes)
    return -1;

  count = array_count(nodes);
  managerids = (endpointid*)calloc(count,sizeof(endpointid));

  for (i = 0; i < count; i++) {
    char *nodename = array_item(nodes,i,char*);
    char *host = NULL;
    int port = 0;

    if (NULL == strchr(nodename,':')) {
      host = strdup(nodename);
      port = WORKER_PORT;
    }
    else if (0 > parse_address(nodename,&host,&port)) {
      fprintf(stderr,"Invalid node address: %s\n",nodename);
      array_free(nodes);
      free(managerids);
      return -1;
    }
    managerids[i].port = port;
    managerids[i].localid = MANAGER_ID;

    if (0 > lookup_address(host,&managerids[i].ip,NULL)) {
      fprintf(stderr,"%s: hostname lookup failed\n",host);
      free(host);
      array_free(nodes);
      free(managerids);
      return -1;
    }
    free(host);
  }

  *outids = managerids;
  *outcount = count;

  array_free(nodes);
  return 0;
}
Example #28
void make_lowmem_page_readwrite(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	if (pte == NULL)
		return;		/* vaddr missing */

	ptev = pte_mkwrite(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}
Example #29
static int
__change_page_attr(struct page *page, pgprot_t prot)
{ 
	pte_t *kpte; 
	unsigned long address;
	struct page *kpte_page;

	BUG_ON(PageHighMem(page));
	address = (unsigned long)page_address(page);

	kpte = lookup_address(address);
	if (!kpte)
		return -EINVAL;
	kpte_page = virt_to_page(kpte);
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) { 
		if ((pte_val(*kpte) & _PAGE_PSE) == 0) { 
			set_pte_atomic(kpte, mk_pte(page, prot)); 
		} else {
			struct page *split = split_large_page(address, prot); 
			if (!split)
				return -ENOMEM;
			set_pmd_pte(kpte,address,mk_pte(split, PAGE_KERNEL));
			kpte_page = split;
		}	
		get_page(kpte_page);
	} else if ((pte_val(*kpte) & _PAGE_PSE) == 0) { 
		set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
		__put_page(kpte_page);
	} else
		BUG();

	/*
	 * If the pte was reserved, it means it was created at boot
	 * time (not via split_large_page) and in turn we must not
	 * replace it with a largepage.
	 */
	if (!PageReserved(kpte_page)) {
		/* memleak and potential failed 2M page regeneration */
		BUG_ON(!page_count(kpte_page));

		if (cpu_has_pse && (page_count(kpte_page) == 1)) {
			list_add(&kpte_page->lru, &df_list);
			revert_page(kpte_page, address);
		}
	}
	return 0;
} 
Example #30
xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;
	pte_t *pte;
	unsigned offset;

	if (virt_addr_valid(vaddr))
		return virt_to_machine(vaddr);

	/* otherwise we have to do a (slower) full page-table walk */

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);
	offset = address & ~PAGE_MASK;
	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}