Example #1
/*
 * pgd3val: the value of init_mm.pgd[3] in a PV guest. It is optional.
 *          This is to assist debugging of modules in the guest. The kernel
 *          address space seems to always be mapped, but modules are not
 *          necessarily mapped in any arbitrary guest cr3 that we pick if
 *          pgd3val is 0. Modules should always be addressable if we use
 *          cr3 from init_mm. Since pgd3val is already a pgd value, i.e.
 *          cr3->pgd[3], we only need to do two-level lookups.
 *
 * NOTE: 4-level paging works for 32-bit PAE guests too because the CPU runs
 *       in IA-32e mode.
 * Returns: mfn for the given (PV guest) vaddr
 */
static unsigned long 
dbg_pv_va2mfn(dbgva_t vaddr, struct domain *dp, uint64_t pgd3val)
{
    l4_pgentry_t l4e, *l4t;
    l3_pgentry_t l3e, *l3t;
    l2_pgentry_t l2e, *l2t;
    l1_pgentry_t l1e, *l1t;
    unsigned long cr3 = (pgd3val ? pgd3val : dp->vcpu[0]->arch.cr3);
    unsigned long mfn = cr3 >> PAGE_SHIFT;

    DBGP2("vaddr:%lx domid:%d cr3:%lx pgd3:%lx\n", vaddr, dp->domain_id, 
          cr3, pgd3val);

    if ( pgd3val == 0 )
    {
        l4t = mfn_to_virt(mfn);
        l4e = l4t[l4_table_offset(vaddr)];
        mfn = l4e_get_pfn(l4e);
        DBGP2("l4t:%p l4to:%lx l4e:%lx mfn:%lx\n", l4t, 
              l4_table_offset(vaddr), l4e, mfn);
        if ( !(l4e_get_flags(l4e) & _PAGE_PRESENT) )
        {
            DBGP1("l4 PAGE not present. vaddr:%lx cr3:%lx\n", vaddr, cr3);
            return INVALID_MFN;
        }

        l3t = mfn_to_virt(mfn);
        l3e = l3t[l3_table_offset(vaddr)];
        mfn = l3e_get_pfn(l3e);
        DBGP2("l3t:%p l3to:%lx l3e:%lx mfn:%lx\n", l3t, 
              l3_table_offset(vaddr), l3e, mfn);
        if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) )
        {
            DBGP1("l3 PAGE not present. vaddr:%lx cr3:%lx\n", vaddr, cr3);
            return INVALID_MFN;
        }
    }

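    /* mfn now names the l2 table frame: taken from the l3 walk above, or
     * directly from the caller-supplied pgd3val. */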
    l2t = mfn_to_virt(mfn);
    l2e = l2t[l2_table_offset(vaddr)];
    mfn = l2e_get_pfn(l2e);
    DBGP2("l2t:%p l2to:%lx l2e:%lx mfn:%lx\n", l2t, l2_table_offset(vaddr),
          l2e, mfn);
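    /* A PSE entry here maps a 2M superpage rather than an l1 table, so it
     * is rejected along with non-present entries. */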
    if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) ||
         (l2e_get_flags(l2e) & _PAGE_PSE) )
    {
        DBGP1("l2 PAGE not present. vaddr:%lx cr3:%lx\n", vaddr, cr3);
        return INVALID_MFN;
    }
    l1t = mfn_to_virt(mfn);
    l1e = l1t[l1_table_offset(vaddr)];
    mfn = l1e_get_pfn(l1e);
    DBGP2("l1t:%p l1to:%lx l1e:%lx mfn:%lx\n", l1t, l1_table_offset(vaddr),
          l1e, mfn);

    return mfn_valid(mfn) ? mfn : INVALID_MFN;
}
Example #2
struct consfront_dev *xencons_ring_init(void)
{
	int err;
	struct consfront_dev *dev;

	if (!start_info.console.domU.evtchn)
		return 0;

	dev = malloc(sizeof(struct consfront_dev));
	if (!dev)
		return NULL;
	memset(dev, 0, sizeof(struct consfront_dev));
	dev->nodename = "device/console";
	dev->dom = 0;
	dev->backend = 0;
	dev->ring_ref = 0;

#ifdef HAVE_LIBC
	dev->fd = -1;
#endif
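	/* The domain builder supplies the console ring's machine frame and
	 * event channel in start_info; mfn_to_virt() turns the MFN into a
	 * directly usable pointer. */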
	dev->evtchn = start_info.console.domU.evtchn;
	dev->ring = (struct xencons_interface *) mfn_to_virt(start_info.console.domU.mfn);

	err = bind_evtchn(dev->evtchn, console_handle_input, dev);
	if (err <= 0) {
		printk("XEN console request chn bind failed %i\n", err);
		free(dev);
		return NULL;
	}
	unmask_evtchn(dev->evtchn);

	/* In case we have in-flight data after save/restore... */
	notify_daemon(dev);

	return dev;
}
Example #3
static void ixp_install_response(struct ixpfront_info *info, struct ixp_response *iresp)
{
	struct app_response aresp;
	char *rsp_ptr = NULL, *dst_ptr = NULL;
	int offset = 0, msg_size = 0;

	aresp.status = iresp->status;

	/* Copy the response into r_params.presp. */
	rsp_ptr = mfn_to_virt(pfn_to_mfn(info->shadow[iresp->id].frame[0]));

	if (rsp_ptr == NULL) {
		printk(KERN_ERR "mfn_to_virt returned NULL\n");
		return;
	}

	dst_ptr = info->shadow[iresp->id].r_params.presp;

	msg_size = ((struct des_request *)rsp_ptr)->msg_size;
	offset = sizeof(struct des_request) +
		 ((struct des_request *)rsp_ptr)->key_size +
		 ((struct des_request *)rsp_ptr)->iv_size;

	rsp_ptr += offset;

	memcpy(dst_ptr, rsp_ptr, msg_size);

	info->app_cb(info->shadow[iresp->id].r_params.callbk_tag, &aresp);
}
Example #4
static inline struct xencons_interface *xencons_interface(void)
{
    if (start_info.console.domU.evtchn)
        return mfn_to_virt(start_info.console.domU.mfn);
    else
        return NULL;
} 
Example #5
static inline struct xencons_interface *xencons_interface(void)
{
	if (console_pfn == ~0ul)
		return mfn_to_virt(xen_start_info->console.domU.mfn);
	else
		return __va(console_pfn << PAGE_SHIFT);
}
Example #6
int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
		    struct gnttab_map_grant_ref *kmap_ops,
		    struct page **pages, unsigned int count)
{
	int i, ret;
	pte_t *pte;
	unsigned long mfn;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
	if (ret)
		return ret;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return ret;

	for (i = 0; i < count; i++) {
		/* Do not add to override if the map failed. */
		if (map_ops[i].status)
			continue;

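		/* With GNTMAP_contains_pte, host_addr is the machine address of
		 * the PTE that received the mapping, so the MFN is read back
		 * from that PTE; otherwise dev_bus_addr holds the frame. */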
		if (map_ops[i].flags & GNTMAP_contains_pte) {
			pte = (pte_t *) (mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
				(map_ops[i].host_addr & ~PAGE_MASK));
			mfn = pte_mfn(*pte);
		} else {
			mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
		}
		ret = m2p_add_override(mfn, pages[i], kmap_ops ?
				       &kmap_ops[i] : NULL);
		if (ret)
			return ret;
	}

	return ret;
}
Example #7
CAMLprim value
caml_console_start_page(value v_unit)
{
    CAMLparam1(v_unit);
    CAMLlocal1(v_ret);
    unsigned char *page = mfn_to_virt(start_info.console.domU.mfn);
    v_ret = (value)page;
    CAMLreturn(v_ret);
}
Example #8
CAMLprim value
caml_xenstore_start_page(value v_unit)
{
    CAMLparam1(v_unit);
    CAMLlocal1(v_ret);
    unsigned char *page = mfn_to_virt(start_info.store_mfn);
    v_ret = (value)page;
    CAMLreturn(v_ret);
}
Example #9
static int xsd_kva_read(char *page, char **start, off_t off,
			int count, int *eof, void *data)
{
	int len;

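	/* Report the kernel virtual address of the xenstore page; this backs
	 * the xsd_kva entry under /proc/xen used by the xenstored daemon. */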
	len  = sprintf(page, "0x%p", mfn_to_virt(xen_start_info->store_mfn));
	*eof = 1;
	return len;
}
Example #10
CAMLprim value
caml_xenstore_start_page(value v_unit)
{
  CAMLparam1(v_unit);
  CAMLreturn(caml_ba_alloc_dims(CAML_BA_UINT8 | CAML_BA_C_LAYOUT,
                                1,
                                mfn_to_virt(start_info.store_mfn),
                                (long)PAGE_SIZE));
}
Example #11
int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
			    struct gnttab_map_grant_ref *kmap_ops,
			    struct page **pages, unsigned int count)
{
	int i, ret = 0;
	bool lazy = false;
	pte_t *pte;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	if (kmap_ops &&
	    !in_interrupt() &&
	    paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
		arch_enter_lazy_mmu_mode();
		lazy = true;
	}

	for (i = 0; i < count; i++) {
		unsigned long mfn, pfn;

		/* Do not add to override if the map failed. */
		if (map_ops[i].status)
			continue;

		if (map_ops[i].flags & GNTMAP_contains_pte) {
			pte = (pte_t *)(mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
				(map_ops[i].host_addr & ~PAGE_MASK));
			mfn = pte_mfn(*pte);
		} else {
			mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
		}
		pfn = page_to_pfn(pages[i]);

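		/* Stash the foreign MFN in page->private and the current p2m
		 * entry in page->index so the unmap path can restore the p2m. */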
		WARN_ON(PagePrivate(pages[i]));
		SetPagePrivate(pages[i]);
		set_page_private(pages[i], mfn);
		pages[i]->index = pfn_to_mfn(pfn);

		if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) {
			ret = -ENOMEM;
			goto out;
		}

		if (kmap_ops) {
			ret = m2p_add_override(mfn, pages[i], &kmap_ops[i]);
			if (ret)
				goto out;
		}
	}

out:
	if (lazy)
		arch_leave_lazy_mmu_mode();

	return ret;
}
Example #12
CAMLprim value
caml_xenstore_start_page(value v_unit)
{
  CAMLparam1(v_unit);
  CAMLlocal1(v_ret);
  intnat dims[] = { PAGE_SIZE };
  unsigned char *page = mfn_to_virt(start_info.store_mfn);
  v_ret = caml_ba_alloc(CAML_BA_UINT8 | CAML_BA_C_LAYOUT, 1, page, dims);
  CAMLreturn(v_ret);
}
Example #13
int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
                    struct gnttab_map_grant_ref *kmap_ops,
                    struct page **pages, unsigned int count)
{
    int i, ret;
    bool lazy = false;
    pte_t *pte;
    unsigned long mfn;

    ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
    if (ret)
        return ret;

    /* Retry eagain maps */
    for (i = 0; i < count; i++)
        if (map_ops[i].status == GNTST_eagain)
            gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, map_ops + i,
                                    &map_ops[i].status, __func__);

    if (xen_feature(XENFEAT_auto_translated_physmap))
        return ret;

    if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
        arch_enter_lazy_mmu_mode();
        lazy = true;
    }

    for (i = 0; i < count; i++) {
        /* Do not add to override if the map failed. */
        if (map_ops[i].status)
            continue;

        if (map_ops[i].flags & GNTMAP_contains_pte) {
            pte = (pte_t *) (mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
                             (map_ops[i].host_addr & ~PAGE_MASK));
            mfn = pte_mfn(*pte);
        } else {
            mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
        }
        ret = m2p_add_override(mfn, pages[i], kmap_ops ?
                               &kmap_ops[i] : NULL);
        if (ret)
            goto out;
    }

out:
    if (lazy)
        arch_leave_lazy_mmu_mode();

    return ret;
}
Example #14
static int __init xenbus_init(void)
{
	int err = 0;

	if (!xen_domain())
		return -ENODEV;

	if (xen_hvm_domain()) {
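		/* HVM guests discover the xenstore page and event channel via
		 * HVM params, and map the page with ioremap() rather than
		 * mfn_to_virt(). */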
		uint64_t v = 0;
		err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
		if (err)
			goto out_error;
		xen_store_evtchn = (int)v;
		err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
		if (err)
			goto out_error;
		xen_store_mfn = (unsigned long)v;
		xen_store_interface = ioremap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE);
	} else {
		xen_store_evtchn = xen_start_info->store_evtchn;
		xen_store_mfn = xen_start_info->store_mfn;
		if (xen_store_evtchn)
			xenstored_ready = 1;
		else {
			err = xenstored_local_init();
			if (err)
				goto out_error;
		}
		xen_store_interface = mfn_to_virt(xen_store_mfn);
	}

	/* Initialize the interface to xenstore. */
	err = xs_init();
	if (err) {
		printk(KERN_WARNING
		       "XENBUS: Error initializing xenstore comms: %i\n", err);
		goto out_error;
	}

#ifdef CONFIG_XEN_COMPAT_XENFS
	/*
	 * Create xenfs mountpoint in /proc for compatibility with
	 * utilities that expect to find "xenbus" under "/proc/xen".
	 */
	proc_mkdir("xen", NULL);
#endif

out_error:
	return err;
}
Example #15
/* Initialise xenbus. */
void init_xenbus(void)
{
    int err;
    DEBUG("init_xenbus called.\n");
    xenstore_buf = mfn_to_virt(start_info.store_mfn);
    create_thread("xenstore", xenbus_thread_func, NULL);
    DEBUG("buf at %p.\n", xenstore_buf);
    err = bind_evtchn(start_info.store_evtchn,
                      xenbus_evtchn_handler,
                      NULL);
    unmask_evtchn(start_info.store_evtchn);
    printk("xenbus initialised on irq %d mfn %#lx\n",
           err, start_info.store_mfn);
}
Example #16
int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
		    struct page **pages, unsigned int count)
{
	int i, ret;
	pte_t *pte;
	unsigned long mfn;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
	if (ret)
		return ret;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return ret;

	for (i = 0; i < count; i++) {
		/* Do not add to override if the map failed. */
		if (map_ops[i].status)
			continue;

		if (map_ops[i].flags & GNTMAP_contains_pte) {
			pte = (pte_t *) (mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
				(map_ops[i].host_addr & ~PAGE_MASK));
			mfn = pte_mfn(*pte);
		} else {
			/* If you really wanted to do this:
			 * mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
			 *
			 * The reason we do not implement it is b/c on the
			 * unmap path (gnttab_unmap_refs) we have no means of
			 * checking whether the page is !GNTMAP_contains_pte.
			 *
			 * That is, without some extra data structure to carry
			 * the struct page, bool clear_pte, and list_head next
			 * tuples and deal with allocation/deallocation, etc.
			 *
			 * The users of this API set the GNTMAP_contains_pte
			 * flag, so let's just return not supported until it
			 * becomes necessary to implement.
			 */
			return -EOPNOTSUPP;
		}
		ret = m2p_add_override(mfn, pages[i],
				       map_ops[i].flags & GNTMAP_contains_pte);
		if (ret)
			return ret;
	}

	return ret;
}
Example #17
int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
			    struct gnttab_map_grant_ref *kmap_ops,
			    struct page **pages, unsigned int count)
{
	int i, ret = 0;
	pte_t *pte;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	if (kmap_ops) {
		ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
						kmap_ops, count);
		if (ret)
			goto out;
	}

	for (i = 0; i < count; i++) {
		unsigned long mfn, pfn;

		/* Do not add to override if the map failed. */
		if (map_ops[i].status)
			continue;

		if (map_ops[i].flags & GNTMAP_contains_pte) {
			pte = (pte_t *)(mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
				(map_ops[i].host_addr & ~PAGE_MASK));
			mfn = pte_mfn(*pte);
		} else {
			mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
		}
		pfn = page_to_pfn(pages[i]);

		WARN(pfn_to_mfn(pfn) != INVALID_P2M_ENTRY, "page must be ballooned");

		if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) {
			ret = -ENOMEM;
			goto out;
		}
	}

out:
	return ret;
}
Example #18
static int __init xenbus_init(void)
{
    int err = 0;
    unsigned long page = 0;

    DPRINTK("");

    err = -ENODEV;
    if (!xen_domain())
        return err;

    /*
     * Domain0 doesn't have a store_evtchn or store_mfn yet.
     */
    if (xen_initial_domain()) {
        struct evtchn_alloc_unbound alloc_unbound;

        /* Allocate Xenstore page */
        page = get_zeroed_page(GFP_KERNEL);
        if (!page)
            goto out_error;

        xen_store_mfn = xen_start_info->store_mfn =
                            pfn_to_mfn(virt_to_phys((void *)page) >>
                                       PAGE_SHIFT);

        /* Next allocate a local port which xenstored can bind to */
        alloc_unbound.dom        = DOMID_SELF;
        alloc_unbound.remote_dom = 0;

        err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
                                          &alloc_unbound);
        if (err == -ENOSYS)
            goto out_error;

        BUG_ON(err);
        xen_store_evtchn = xen_start_info->store_evtchn =
                               alloc_unbound.port;

        xen_store_interface = mfn_to_virt(xen_store_mfn);
    } else {
        if (xen_hvm_domain()) {
Example #19
/*
 * Fully allocate the p2m structure for a given pfn.  We need to check
 * that both the top and mid levels are allocated, and make sure the
 * parallel mfn tree is kept in sync.  We may race with other cpus, so
 * the new pages are installed with cmpxchg; if we lose the race then
 * simply free the page we allocated and use the one that's there.
 */
static bool alloc_p2m(unsigned long pfn)
{
	unsigned topidx;
	unsigned long *top_mfn_p, *mid_mfn;
	pte_t *ptep, *pte_pg;
	unsigned int level;
	unsigned long flags;
	unsigned long addr = (unsigned long)(xen_p2m_addr + pfn);
	unsigned long p2m_pfn;

	ptep = lookup_address(addr, &level);
	BUG_ON(!ptep || level != PG_LEVEL_4K);
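	/* Round down to the start of the PTE page so it can be compared
	 * against the shared missing/identity tables below. */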
	pte_pg = (pte_t *)((unsigned long)ptep & ~(PAGE_SIZE - 1));

	if (pte_pg == p2m_missing_pte || pte_pg == p2m_identity_pte) {
		/* PMD level is missing, allocate a new one */
		ptep = alloc_p2m_pmd(addr, pte_pg);
		if (!ptep)
			return false;
	}

	if (p2m_top_mfn && pfn < MAX_P2M_PFN) {
		topidx = p2m_top_index(pfn);
		top_mfn_p = &p2m_top_mfn[topidx];
		mid_mfn = ACCESS_ONCE(p2m_top_mfn_p[topidx]);

		BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p);

		if (mid_mfn == p2m_mid_missing_mfn) {
			/* Separately check the mid mfn level */
			unsigned long missing_mfn;
			unsigned long mid_mfn_mfn;
			unsigned long old_mfn;

			mid_mfn = alloc_p2m_page();
			if (!mid_mfn)
				return false;

			p2m_mid_mfn_init(mid_mfn, p2m_missing);

			missing_mfn = virt_to_mfn(p2m_mid_missing_mfn);
			mid_mfn_mfn = virt_to_mfn(mid_mfn);
			old_mfn = cmpxchg(top_mfn_p, missing_mfn, mid_mfn_mfn);
			if (old_mfn != missing_mfn) {
				free_p2m_page(mid_mfn);
				mid_mfn = mfn_to_virt(old_mfn);
			} else {
				p2m_top_mfn_p[topidx] = mid_mfn;
			}
		}
	} else {
		mid_mfn = NULL;
	}

	p2m_pfn = pte_pfn(READ_ONCE(*ptep));
	if (p2m_pfn == PFN_DOWN(__pa(p2m_identity)) ||
	    p2m_pfn == PFN_DOWN(__pa(p2m_missing))) {
		/* p2m leaf page is missing */
		unsigned long *p2m;

		p2m = alloc_p2m_page();
		if (!p2m)
			return false;

		if (p2m_pfn == PFN_DOWN(__pa(p2m_missing)))
			p2m_init(p2m);
		else
			p2m_init_identity(p2m, pfn & ~(P2M_PER_PAGE - 1));

		spin_lock_irqsave(&p2m_update_lock, flags);

		if (pte_pfn(*ptep) == p2m_pfn) {
			HYPERVISOR_shared_info->arch.p2m_generation++;
			wmb(); /* Tools are synchronizing via p2m_generation. */
			set_pte(ptep,
				pfn_pte(PFN_DOWN(__pa(p2m)), PAGE_KERNEL));
			wmb(); /* Tools are synchronizing via p2m_generation. */
			HYPERVISOR_shared_info->arch.p2m_generation++;
			if (mid_mfn)
				mid_mfn[p2m_mid_index(pfn)] = virt_to_mfn(p2m);
			p2m = NULL;
		}

		spin_unlock_irqrestore(&p2m_update_lock, flags);

		if (p2m)
			free_p2m_page(p2m);
	}

	/* Expanded the p2m? */
	if (pfn > xen_p2m_last_pfn) {
		xen_p2m_last_pfn = pfn;
		HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn;
	}

	return true;
}
Example #20
static int __devinit xenbus_probe_init(void)
#endif
{
	int err = 0;
#if defined(CONFIG_XEN) || defined(MODULE)
	unsigned long page = 0;
#endif

	DPRINTK("");

	if (!is_running_on_xen())
		return -ENODEV;

	/* Register ourselves with the kernel bus subsystem */
	xenbus_frontend.error = bus_register(&xenbus_frontend.bus);
	if (xenbus_frontend.error)
		printk(KERN_WARNING
		       "XENBUS: Error registering frontend bus: %i\n",
		       xenbus_frontend.error);
	xenbus_backend_bus_register();

	/*
	 * Domain0 doesn't have a store_evtchn or store_mfn yet.
	 */
	if (is_initial_xendomain()) {
#if defined(CONFIG_XEN) || defined(MODULE)
		struct evtchn_alloc_unbound alloc_unbound;

		/* Allocate page. */
		page = get_zeroed_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;

		xen_store_mfn = xen_start_info->store_mfn =
			pfn_to_mfn(virt_to_phys((void *)page) >>
				   PAGE_SHIFT);

		/* Next allocate a local port which xenstored can bind to */
		alloc_unbound.dom        = DOMID_SELF;
		alloc_unbound.remote_dom = 0;

		err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
						  &alloc_unbound);
		if (err == -ENOSYS)
			goto err;
		BUG_ON(err);
		xen_store_evtchn = xen_start_info->store_evtchn =
			alloc_unbound.port;

#if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN_PRIVILEGED_GUEST)
		/* And finally publish the above info in /proc/xen */
		xsd_kva_intf = create_xen_proc_entry("xsd_kva", 0600);
		if (xsd_kva_intf) {
			memcpy(&xsd_kva_fops, xsd_kva_intf->proc_fops,
			       sizeof(xsd_kva_fops));
			xsd_kva_fops.mmap = xsd_kva_mmap;
			xsd_kva_intf->proc_fops = &xsd_kva_fops;
			xsd_kva_intf->read_proc = xsd_kva_read;
		}
		xsd_port_intf = create_xen_proc_entry("xsd_port", 0400);
		if (xsd_port_intf)
			xsd_port_intf->read_proc = xsd_port_read;
#endif
#else
		/* dom0 not yet supported */
#endif
		xen_store_interface = mfn_to_virt(xen_store_mfn);
	} else {
Example #21
static int __init pvh_load_kernel(struct domain *d, const module_t *image,
                                  unsigned long image_headroom,
                                  module_t *initrd, void *image_base,
                                  char *cmdline, paddr_t *entry,
                                  paddr_t *start_info_addr)
{
    void *image_start = image_base + image_headroom;
    unsigned long image_len = image->mod_end;
    struct elf_binary elf;
    struct elf_dom_parms parms;
    paddr_t last_addr;
    struct hvm_start_info start_info = { 0 };
    struct hvm_modlist_entry mod = { 0 };
    struct vcpu *v = d->vcpu[0];
    int rc;

    if ( (rc = bzimage_parse(image_base, &image_start, &image_len)) != 0 )
    {
        printk("Error trying to detect bz compressed kernel\n");
        return rc;
    }

    if ( (rc = elf_init(&elf, image_start, image_len)) != 0 )
    {
        printk("Unable to init ELF\n");
        return rc;
    }
#ifdef VERBOSE
    elf_set_verbose(&elf);
#endif
    elf_parse_binary(&elf);
    if ( (rc = elf_xen_parse(&elf, &parms)) != 0 )
    {
        printk("Unable to parse kernel for ELFNOTES\n");
        return rc;
    }

    if ( parms.phys_entry == UNSET_ADDR32 )
    {
        printk("Unable to find XEN_ELFNOTE_PHYS32_ENTRY address\n");
        return -EINVAL;
    }

    printk("OS: %s version: %s loader: %s bitness: %s\n", parms.guest_os,
           parms.guest_ver, parms.loader,
           elf_64bit(&elf) ? "64-bit" : "32-bit");

    /* Copy the OS image and free temporary buffer. */
    elf.dest_base = (void *)(parms.virt_kstart - parms.virt_base);
    elf.dest_size = parms.virt_kend - parms.virt_kstart;

    elf_set_vcpu(&elf, v);
    rc = elf_load_binary(&elf);
    if ( rc < 0 )
    {
        printk("Failed to load kernel: %d\n", rc);
        printk("Xen dom0 kernel broken ELF: %s\n", elf_check_broken(&elf));
        return rc;
    }

    last_addr = ROUNDUP(parms.virt_kend - parms.virt_base, PAGE_SIZE);

    if ( initrd != NULL )
    {
        rc = hvm_copy_to_guest_phys(last_addr, mfn_to_virt(initrd->mod_start),
                                    initrd->mod_end, v);
        if ( rc )
        {
            printk("Unable to copy initrd to guest\n");
            return rc;
        }

        mod.paddr = last_addr;
        mod.size = initrd->mod_end;
        last_addr += ROUNDUP(initrd->mod_end, PAGE_SIZE);
    }

    /* Free temporary buffers. */
    discard_initial_images();

    if ( cmdline != NULL )
    {
        rc = hvm_copy_to_guest_phys(last_addr, cmdline, strlen(cmdline) + 1, v);
        if ( rc )
        {
            printk("Unable to copy guest command line\n");
            return rc;
        }
        start_info.cmdline_paddr = last_addr;
        /*
         * Round up to 32/64 bits (depending on the guest kernel bitness) so
         * the modlist/start_info is aligned.
         */
        last_addr += ROUNDUP(strlen(cmdline) + 1, elf_64bit(&elf) ? 8 : 4);
    }
    if ( initrd != NULL )
    {
        rc = hvm_copy_to_guest_phys(last_addr, &mod, sizeof(mod), v);
        if ( rc )
        {
            printk("Unable to copy guest modules\n");
            return rc;
        }
        start_info.modlist_paddr = last_addr;
        start_info.nr_modules = 1;
        last_addr += sizeof(mod);
    }

    start_info.magic = XEN_HVM_START_MAGIC_VALUE;
    start_info.flags = SIF_PRIVILEGED | SIF_INITDOMAIN;
    rc = hvm_copy_to_guest_phys(last_addr, &start_info, sizeof(start_info), v);
    if ( rc )
    {
        printk("Unable to copy start info to guest\n");
        return rc;
    }

    *entry = parms.phys_entry;
    *start_info_addr = last_addr;

    return 0;
}
Example #22
static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size)
{
    paddr_t ram_start, ram_end, ram_size;
    paddr_t contig_start, contig_end;
    paddr_t s, e;
    unsigned long ram_pages;
    unsigned long heap_pages, xenheap_pages, domheap_pages;
    unsigned long dtb_pages;
    unsigned long boot_mfn_start, boot_mfn_end;
    int i;
    void *fdt;

    if ( !early_info.mem.nr_banks )
        early_panic("No memory bank");

    /*
     * We are going to accumulate two regions here.
     *
     * The first is the bounds of the initial memory region which is
     * contiguous with the first bank. For simplicity the xenheap is
     * always allocated from this region.
     *
     * The second is the complete bounds of the regions containing RAM
     * (ie. from the lowest RAM address to the highest), which
     * includes any holes.
     *
     * We also track the number of actual RAM pages (i.e. not counting
     * the holes).
     */
    ram_size  = early_info.mem.bank[0].size;

    contig_start = ram_start = early_info.mem.bank[0].start;
    contig_end   = ram_end = ram_start + ram_size;

    for ( i = 1; i < early_info.mem.nr_banks; i++ )
    {
        paddr_t bank_start = early_info.mem.bank[i].start;
        paddr_t bank_size = early_info.mem.bank[i].size;
        paddr_t bank_end = bank_start + bank_size;

        paddr_t new_ram_size = ram_size + bank_size;
        paddr_t new_ram_start = min(ram_start,bank_start);
        paddr_t new_ram_end = max(ram_end,bank_end);

        /*
         * If the new bank is contiguous with the initial contiguous
         * region then incorporate it into the contiguous region.
         *
         * Otherwise we allow non-contiguous regions so long as at
         * least half of the total RAM region actually contains
         * RAM. We actually fudge this slightly and require that
         * adding the current bank does not cause us to violate this
         * restriction.
         *
         * This restriction ensures that the frametable (which is not
         * currently sparse) does not consume all available RAM.
         */
        if ( bank_start == contig_end )
            contig_end = bank_end;
        else if ( bank_end == contig_start )
            contig_start = bank_start;
        else if ( 2 * new_ram_size < new_ram_end - new_ram_start )
            /* Would create memory map which is too sparse, so stop here. */
            break;

        ram_size = new_ram_size;
        ram_start = new_ram_start;
        ram_end = new_ram_end;
    }

    if ( i != early_info.mem.nr_banks )
    {
        early_printk("WARNING: only using %d out of %d memory banks\n",
                     i, early_info.mem.nr_banks);
        early_info.mem.nr_banks = i;
    }

    total_pages = ram_pages = ram_size >> PAGE_SHIFT;

    /*
     * Locate the xenheap using these constraints:
     *
     *  - must be 32 MiB aligned
     *  - must not include Xen itself or the boot modules
     *  - must be at most 1/8 the total RAM in the system
     *  - must be at least 128M
     *
     * We try to allocate the largest xenheap possible within these
     * constraints.
     */
    heap_pages = ram_pages;
    xenheap_pages = (heap_pages/8 + 0x1fffUL) & ~0x1fffUL;
    xenheap_pages = max(xenheap_pages, 128UL<<(20-PAGE_SHIFT));

    do
    {
        /* xenheap is always in the initial contiguous region */
        e = consider_modules(contig_start, contig_end,
                             pfn_to_paddr(xenheap_pages),
                             32<<20, 0);
        if ( e )
            break;

        xenheap_pages >>= 1;
    } while ( xenheap_pages > 128<<(20-PAGE_SHIFT) );

    if ( ! e )
        early_panic("Not not enough space for xenheap");

    domheap_pages = heap_pages - xenheap_pages;

    early_printk("Xen heap: %"PRIpaddr"-%"PRIpaddr" (%lu pages)\n",
                 e - (pfn_to_paddr(xenheap_pages)), e,
                 xenheap_pages);
    early_printk("Dom heap: %lu pages\n", domheap_pages);

    setup_xenheap_mappings((e >> PAGE_SHIFT) - xenheap_pages, xenheap_pages);

    /*
     * Need a single mapped page for populating bootmem_region_list
     * and enough mapped pages for copying the DTB.
     */
    dtb_pages = (dtb_size + PAGE_SIZE-1) >> PAGE_SHIFT;
    boot_mfn_start = xenheap_mfn_end - dtb_pages - 1;
    boot_mfn_end = xenheap_mfn_end;

    init_boot_pages(pfn_to_paddr(boot_mfn_start), pfn_to_paddr(boot_mfn_end));

    /* Copy the DTB. */
    fdt = mfn_to_virt(alloc_boot_pages(dtb_pages, 1));
    copy_from_paddr(fdt, dtb_paddr, dtb_size, BUFFERABLE);
    device_tree_flattened = fdt;

    /* Add non-xenheap memory */
    for ( i = 0; i < early_info.mem.nr_banks; i++ )
    {
        paddr_t bank_start = early_info.mem.bank[i].start;
        paddr_t bank_end = bank_start + early_info.mem.bank[i].size;

        s = bank_start;
        while ( s < bank_end )
        {
            paddr_t n = bank_end;

            e = next_module(s, &n);

            if ( e == ~(paddr_t)0 )
            {
                e = n = ram_end;
            }

            /*
             * Module in a RAM bank other than the one we are dealing
             * with here.
             */
            if ( e > bank_end )
                e = bank_end;

            /* Avoid the xenheap */
            if ( s < pfn_to_paddr(xenheap_mfn_start+xenheap_pages)
                 && pfn_to_paddr(xenheap_mfn_start) < e )
            {
                e = pfn_to_paddr(xenheap_mfn_start);
                n = pfn_to_paddr(xenheap_mfn_start+xenheap_pages);
            }

            dt_unreserved_regions(s, e, init_boot_pages, 0);

            s = n;
        }
    }

    /* Frame table covers all of RAM region, including holes */
    setup_frametable_mappings(ram_start, ram_end);
    max_page = PFN_DOWN(ram_end);

    /* Add xenheap memory that was not already added to the boot
       allocator. */
    init_xenheap_pages(pfn_to_paddr(xenheap_mfn_start),
                       pfn_to_paddr(boot_mfn_start));

    end_boot_allocator();
}
Example #23
static int __init xenbus_init(void)
{
	int err = 0;
	uint64_t v = 0;
	xen_store_domain_type = XS_UNKNOWN;

	if (!xen_domain())
		return -ENODEV;

	xenbus_ring_ops_init();

	if (xen_pv_domain())
		xen_store_domain_type = XS_PV;
	if (xen_hvm_domain())
		xen_store_domain_type = XS_HVM;
	if (xen_hvm_domain() && xen_initial_domain())
		xen_store_domain_type = XS_LOCAL;
	if (xen_pv_domain() && !xen_start_info->store_evtchn)
		xen_store_domain_type = XS_LOCAL;
	if (xen_pv_domain() && xen_start_info->store_evtchn)
		xenstored_ready = 1;

	switch (xen_store_domain_type) {
	case XS_LOCAL:
		err = xenstored_local_init();
		if (err)
			goto out_error;
		xen_store_interface = mfn_to_virt(xen_store_mfn);
		break;
	case XS_PV:
		xen_store_evtchn = xen_start_info->store_evtchn;
		xen_store_mfn = xen_start_info->store_mfn;
		xen_store_interface = mfn_to_virt(xen_store_mfn);
		break;
	case XS_HVM:
		err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
		if (err)
			goto out_error;
		xen_store_evtchn = (int)v;
		err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
		if (err)
			goto out_error;
		xen_store_mfn = (unsigned long)v;
		xen_store_interface =
			xen_remap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE);
		break;
	default:
		pr_warn("Xenstore state unknown\n");
		break;
	}

	/* Initialize the interface to xenstore. */
	err = xs_init();
	if (err) {
		pr_warn("Error initializing xenstore comms: %i\n", err);
		goto out_error;
	}

#ifdef CONFIG_XEN_COMPAT_XENFS
	/*
	 * Create xenfs mountpoint in /proc for compatibility with
	 * utilities that expect to find "xenbus" under "/proc/xen".
	 */
	proc_mkdir("xen", NULL);
#endif

out_error:
	return err;
}
Example #24
static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size)
{
    paddr_t ram_start, ram_end, ram_size;
    paddr_t s, e;
    unsigned long ram_pages;
    unsigned long heap_pages, xenheap_pages, domheap_pages;
    unsigned long dtb_pages;
    unsigned long boot_mfn_start, boot_mfn_end;
    int i;
    void *fdt;

    if ( !bootinfo.mem.nr_banks )
        panic("No memory bank");

    init_pdx();

    ram_start = bootinfo.mem.bank[0].start;
    ram_size  = bootinfo.mem.bank[0].size;
    ram_end   = ram_start + ram_size;

    for ( i = 1; i < bootinfo.mem.nr_banks; i++ )
    {
        paddr_t bank_start = bootinfo.mem.bank[i].start;
        paddr_t bank_size = bootinfo.mem.bank[i].size;
        paddr_t bank_end = bank_start + bank_size;

        ram_size  = ram_size + bank_size;
        ram_start = min(ram_start,bank_start);
        ram_end   = max(ram_end,bank_end);
    }

    total_pages = ram_pages = ram_size >> PAGE_SHIFT;

    /*
     * If the user has not requested otherwise via the command line
     * then locate the xenheap using these constraints:
     *
     *  - must be 32 MiB aligned
     *  - must not include Xen itself or the boot modules
     *  - must be at most 1GB or 1/32 the total RAM in the system if less
     *  - must be at least 32M
     *
     * We try to allocate the largest xenheap possible within these
     * constraints.
     */
    heap_pages = ram_pages;
    if ( opt_xenheap_megabytes )
        xenheap_pages = opt_xenheap_megabytes << (20-PAGE_SHIFT);
    else
    {
        xenheap_pages = (heap_pages/32 + 0x1fffUL) & ~0x1fffUL;
        xenheap_pages = max(xenheap_pages, 32UL<<(20-PAGE_SHIFT));
        xenheap_pages = min(xenheap_pages, 1UL<<(30-PAGE_SHIFT));
    }

    do
    {
        e = consider_modules(ram_start, ram_end,
                             pfn_to_paddr(xenheap_pages),
                             32<<20, 0);
        if ( e )
            break;

        xenheap_pages >>= 1;
    } while ( !opt_xenheap_megabytes && xenheap_pages > 32<<(20-PAGE_SHIFT) );

    if ( ! e )
        panic("Not not enough space for xenheap");

    domheap_pages = heap_pages - xenheap_pages;

    printk("Xen heap: %"PRIpaddr"-%"PRIpaddr" (%lu pages%s)\n",
           e - (pfn_to_paddr(xenheap_pages)), e, xenheap_pages,
           opt_xenheap_megabytes ? ", from command-line" : "");
    printk("Dom heap: %lu pages\n", domheap_pages);

    setup_xenheap_mappings((e >> PAGE_SHIFT) - xenheap_pages, xenheap_pages);

    /*
     * Need a single mapped page for populating bootmem_region_list
     * and enough mapped pages for copying the DTB.
     */
    dtb_pages = (dtb_size + PAGE_SIZE-1) >> PAGE_SHIFT;
    boot_mfn_start = xenheap_mfn_end - dtb_pages - 1;
    boot_mfn_end = xenheap_mfn_end;

    init_boot_pages(pfn_to_paddr(boot_mfn_start), pfn_to_paddr(boot_mfn_end));

    /* Copy the DTB. */
    fdt = mfn_to_virt(alloc_boot_pages(dtb_pages, 1));
    copy_from_paddr(fdt, dtb_paddr, dtb_size);
    device_tree_flattened = fdt;

    /* Add non-xenheap memory */
    for ( i = 0; i < bootinfo.mem.nr_banks; i++ )
    {
        paddr_t bank_start = bootinfo.mem.bank[i].start;
        paddr_t bank_end = bank_start + bootinfo.mem.bank[i].size;

        s = bank_start;
        while ( s < bank_end )
        {
            paddr_t n = bank_end;

            e = next_module(s, &n);

            if ( e == ~(paddr_t)0 )
            {
                e = n = ram_end;
            }

            /*
             * Module in a RAM bank other than the one we are dealing
             * with here.
             */
            if ( e > bank_end )
                e = bank_end;

            /* Avoid the xenheap */
            if ( s < pfn_to_paddr(xenheap_mfn_start+xenheap_pages)
                 && pfn_to_paddr(xenheap_mfn_start) < e )
            {
                e = pfn_to_paddr(xenheap_mfn_start);
                n = pfn_to_paddr(xenheap_mfn_start+xenheap_pages);
            }

            dt_unreserved_regions(s, e, init_boot_pages, 0);

            s = n;
        }
    }

    /* Frame table covers all of RAM region, including holes */
    setup_frametable_mappings(ram_start, ram_end);
    max_page = PFN_DOWN(ram_end);

    /* Add xenheap memory that was not already added to the boot
       allocator. */
    init_xenheap_pages(pfn_to_paddr(xenheap_mfn_start),
                       pfn_to_paddr(boot_mfn_start));
}
Example #25
    /* Add xenheap memory that was not already added to the boot
       allocator. */
    init_xenheap_pages(pfn_to_paddr(xenheap_mfn_start),
                       pfn_to_paddr(boot_mfn_start));
}
#else /* CONFIG_ARM_64 */
static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size)
{
    paddr_t ram_start = ~0;
    paddr_t ram_end = 0;
    paddr_t ram_size = 0;
    int bank;
    unsigned long dtb_pages;
    void *fdt;

    init_pdx();

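    /* On 64-bit Xen the xenheap covers all RAM, so each bank is mapped
     * into the xenheap as it is discovered below. */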
    total_pages = 0;
    for ( bank = 0 ; bank < bootinfo.mem.nr_banks; bank++ )
    {
        paddr_t bank_start = bootinfo.mem.bank[bank].start;
        paddr_t bank_size = bootinfo.mem.bank[bank].size;
        paddr_t bank_end = bank_start + bank_size;
        paddr_t s, e;

        ram_size = ram_size + bank_size;
        ram_start = min(ram_start,bank_start);
        ram_end = max(ram_end,bank_end);

        setup_xenheap_mappings(bank_start>>PAGE_SHIFT, bank_size>>PAGE_SHIFT);

        s = bank_start;
        while ( s < bank_end )
        {
            paddr_t n = bank_end;

            e = next_module(s, &n);

            if ( e == ~(paddr_t)0 )
            {
                e = n = bank_end;
            }

            if ( e > bank_end )
                e = bank_end;

            xenheap_mfn_end = e;

            dt_unreserved_regions(s, e, init_boot_pages, 0);
            s = n;
        }
    }

    total_pages += ram_size >> PAGE_SHIFT;

    xenheap_virt_end = XENHEAP_VIRT_START + ram_end - ram_start;
    xenheap_mfn_start = ram_start >> PAGE_SHIFT;
    xenheap_mfn_end = ram_end >> PAGE_SHIFT;

    /*
     * Need enough mapped pages for copying the DTB.
     */
    dtb_pages = (dtb_size + PAGE_SIZE-1) >> PAGE_SHIFT;

    /* Copy the DTB. */
    fdt = mfn_to_virt(alloc_boot_pages(dtb_pages, 1));
    copy_from_paddr(fdt, dtb_paddr, dtb_size);
    device_tree_flattened = fdt;

    setup_frametable_mappings(ram_start, ram_end);
    max_page = PFN_DOWN(ram_end);
}
Example #26
static int __init xenbus_probe_init(void)
{
	int err = 0;

	DPRINTK("");

	err = -ENODEV;
	if (!xen_domain())
		goto out_error;

	/* Register ourselves with the kernel bus subsystem */
	err = bus_register(&xenbus_frontend.bus);
	if (err)
		goto out_error;

	err = xenbus_backend_bus_register();
	if (err)
		goto out_unreg_front;

	/*
	 * Domain0 doesn't have a store_evtchn or store_mfn yet.
	 */
	if (xen_initial_domain()) {
		/* dom0 not yet supported */
	} else {
		xenstored_ready = 1;
		xen_store_evtchn = xen_start_info->store_evtchn;
		xen_store_mfn = xen_start_info->store_mfn;
	}
	xen_store_interface = mfn_to_virt(xen_store_mfn);

	/* Initialize the interface to xenstore. */
	err = xs_init();
	if (err) {
		printk(KERN_WARNING
		       "XENBUS: Error initializing xenstore comms: %i\n", err);
		goto out_unreg_back;
	}

	if (!xen_initial_domain())
		xenbus_probe(NULL);

#ifdef CONFIG_XEN_COMPAT_XENFS
	/*
	 * Create xenfs mountpoint in /proc for compatibility with
	 * utilities that expect to find "xenbus" under "/proc/xen".
	 */
	proc_mkdir("xen", NULL);
#endif

	return 0;

  out_unreg_back:
	xenbus_backend_bus_unregister();

  out_unreg_front:
	bus_unregister(&xenbus_frontend.bus);

  out_error:
	return err;
}
Example #27
    /* Add xenheap memory that was not already added to the boot
       allocator. */
    init_xenheap_pages(pfn_to_paddr(xenheap_mfn_start),
                       pfn_to_paddr(boot_mfn_start));

    end_boot_allocator();
}
#else /* CONFIG_ARM_64 */
static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size)
{
    paddr_t ram_start = ~0;
    paddr_t ram_end = 0;
    paddr_t ram_size = 0;
    int bank;
    unsigned long dtb_pages;
    void *fdt;

    total_pages = 0;
    for ( bank = 0 ; bank < early_info.mem.nr_banks; bank++ )
    {
        paddr_t bank_start = early_info.mem.bank[bank].start;
        paddr_t bank_size = early_info.mem.bank[bank].size;
        paddr_t bank_end = bank_start + bank_size;
        paddr_t s, e;

        paddr_t new_ram_size = ram_size + bank_size;
        paddr_t new_ram_start = min(ram_start,bank_start);
        paddr_t new_ram_end = max(ram_end,bank_end);

        /*
         * We allow non-contiguous regions so long as at least half of
         * the total RAM region actually contains RAM. We actually
         * fudge this slightly and require that adding the current
         * bank does not cause us to violate this restriction.
         *
         * This restriction ensures that the frametable (which is not
         * currently sparse) does not consume all available RAM.
         */
        if ( bank > 0 && 2 * new_ram_size < new_ram_end - new_ram_start )
            /* Would create memory map which is too sparse, so stop here. */
            break;

        ram_start = new_ram_start;
        ram_end = new_ram_end;
        ram_size = new_ram_size;

        setup_xenheap_mappings(bank_start>>PAGE_SHIFT, bank_size>>PAGE_SHIFT);

        s = bank_start;
        while ( s < bank_end )
        {
            paddr_t n = bank_end;

            e = next_module(s, &n);

            if ( e == ~(paddr_t)0 )
            {
                e = n = bank_end;
            }

            if ( e > bank_end )
                e = bank_end;

            xenheap_mfn_end = e;

            dt_unreserved_regions(s, e, init_boot_pages, 0);
            s = n;
        }
    }

    if ( bank != early_info.mem.nr_banks )
    {
        early_printk("WARNING: only using %d out of %d memory banks\n",
                     bank, early_info.mem.nr_banks);
        early_info.mem.nr_banks = bank;
    }

    total_pages += ram_size >> PAGE_SHIFT;

    xenheap_virt_end = XENHEAP_VIRT_START + ram_end - ram_start;
    xenheap_mfn_start = ram_start >> PAGE_SHIFT;
    xenheap_mfn_end = ram_end >> PAGE_SHIFT;
    xenheap_max_mfn(xenheap_mfn_end);

    /*
     * Need enough mapped pages for copying the DTB.
     */
    dtb_pages = (dtb_size + PAGE_SIZE-1) >> PAGE_SHIFT;

    /* Copy the DTB. */
    fdt = mfn_to_virt(alloc_boot_pages(dtb_pages, 1));
    copy_from_paddr(fdt, dtb_paddr, dtb_size, BUFFERABLE);
    device_tree_flattened = fdt;

    setup_frametable_mappings(ram_start, ram_end);
    max_page = PFN_DOWN(ram_end);

    end_boot_allocator();
}
Example #28
static __cpuinit int
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
{
	struct vcpu_guest_context *ctxt;
	struct desc_struct *gdt;
	unsigned long gdt_mfn;

	if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
		return 0;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (ctxt == NULL)
		return -ENOMEM;

	gdt = get_cpu_gdt_table(cpu);

	ctxt->flags = VGCF_IN_KERNEL;
	ctxt->user_regs.ds = __USER_DS;
	ctxt->user_regs.es = __USER_DS;
	ctxt->user_regs.ss = __KERNEL_DS;
#ifdef CONFIG_X86_32
	ctxt->user_regs.fs = __KERNEL_PERCPU;
	ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
#else
	ctxt->gs_base_kernel = per_cpu_offset(cpu);
#endif
	ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
	ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */

	memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

	xen_copy_trap_info(ctxt->trap_ctxt);

	ctxt->ldt_ents = 0;

	BUG_ON((unsigned long)gdt & ~PAGE_MASK);

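	/*
	 * Xen requires the GDT to live in read-only memory: make both the
	 * kernel's mapping of it and its linear-address alias read-only.
	 */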
	gdt_mfn = arbitrary_virt_to_mfn(gdt);
	make_lowmem_page_readonly(gdt);
	make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));

	ctxt->gdt_frames[0] = gdt_mfn;
	ctxt->gdt_ents      = GDT_ENTRIES;

	ctxt->user_regs.cs = __KERNEL_CS;
	ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);

	ctxt->kernel_ss = __KERNEL_DS;
	ctxt->kernel_sp = idle->thread.sp0;

#ifdef CONFIG_X86_32
	ctxt->event_callback_cs     = __KERNEL_CS;
	ctxt->failsafe_callback_cs  = __KERNEL_CS;
#endif
	ctxt->event_callback_eip    = (unsigned long)xen_hypervisor_callback;
	ctxt->failsafe_callback_eip = (unsigned long)xen_failsafe_callback;

	per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
	ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));

	if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
		BUG();

	kfree(ctxt);
	return 0;
}
Example #29
static inline struct xencons_interface *xencons_interface(void)
{
	return mfn_to_virt(xen_start_info->console.domU.mfn);
}
Example #30
static int
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
{
	struct vcpu_guest_context *ctxt;
	struct desc_struct *gdt;
	unsigned long gdt_mfn;

	/* used to tell cpu_init() that it can proceed with initialization */
	cpumask_set_cpu(cpu, cpu_callout_mask);
	if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
		return 0;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (ctxt == NULL)
		return -ENOMEM;

	gdt = get_cpu_gdt_rw(cpu);

#ifdef CONFIG_X86_32
	ctxt->user_regs.fs = __KERNEL_PERCPU;
	ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
#endif
	memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

	/*
	 * Bring up the CPU in cpu_bringup_and_idle() with the stack
	 * pointing just below where pt_regs would be if it were a normal
	 * kernel entry.
	 */
	ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
	ctxt->flags = VGCF_IN_KERNEL;
	ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
	ctxt->user_regs.ds = __USER_DS;
	ctxt->user_regs.es = __USER_DS;
	ctxt->user_regs.ss = __KERNEL_DS;
	ctxt->user_regs.cs = __KERNEL_CS;
	ctxt->user_regs.esp = (unsigned long)task_pt_regs(idle);

	xen_copy_trap_info(ctxt->trap_ctxt);

	ctxt->ldt_ents = 0;

	BUG_ON((unsigned long)gdt & ~PAGE_MASK);

	gdt_mfn = arbitrary_virt_to_mfn(gdt);
	make_lowmem_page_readonly(gdt);
	make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));

	ctxt->gdt_frames[0] = gdt_mfn;
	ctxt->gdt_ents      = GDT_ENTRIES;

	/*
	 * Set SS:SP that Xen will use when entering guest kernel mode
	 * from guest user mode.  Subsequent calls to load_sp0() can
	 * change this value.
	 */
	ctxt->kernel_ss = __KERNEL_DS;
	ctxt->kernel_sp = task_top_of_stack(idle);

#ifdef CONFIG_X86_32
	ctxt->event_callback_cs     = __KERNEL_CS;
	ctxt->failsafe_callback_cs  = __KERNEL_CS;
#else
	ctxt->gs_base_kernel = per_cpu_offset(cpu);
#endif
	ctxt->event_callback_eip    =
		(unsigned long)xen_hypervisor_callback;
	ctxt->failsafe_callback_eip =
		(unsigned long)xen_failsafe_callback;
	per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);

	ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_gfn(swapper_pg_dir));
	if (HYPERVISOR_vcpu_op(VCPUOP_initialise, xen_vcpu_nr(cpu), ctxt))
		BUG();

	kfree(ctxt);
	return 0;
}