Example #1
0
int get_dom_info(xen_interface_t *xen, const char *input, domid_t *domID,
        char **name) {

    uint32_t _domID = ~0;
    char *_name = NULL;

    sscanf(input, "%u", &_domID);

    if (_domID == ~0) {
        _name = strdup(input);
        libxl_name_to_domid(xen->xl_ctx, input, &_domID);
        if (!_domID || _domID == ~0) {
            printf("Domain is not running, failed to get domID from name!\n");
            free(_name);
            return -1;
        }
    } else {

        xc_dominfo_t info = { 0 };
        if ( 1 == xc_domain_getinfo(xen->xc, _domID, 1, &info)
            && info.domid == _domID)
        {
            _name = libxl_domid_to_name(xen->xl_ctx, _domID);
        } else {
            _domID = ~0;
        }
    }

    *name = _name;
    *domID = (domid_t)_domID;

    return 1;
}
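
A minimal caller sketch (not part of the original snippet), assuming the xen_interface_t already holds open xc and libxl handles; get_dom_info() accepts either a numeric domID string or a domain name, and lookup_example() is a hypothetical name:

/* Hypothetical usage of get_dom_info(); free(name) is safe even when
 * no name was resolved. */
static void lookup_example(xen_interface_t *xen, const char *input) {
    domid_t domID = 0;
    char *name = NULL;

    if (1 == get_dom_info(xen, input, &domID, &name) && domID != (domid_t)~0)
        printf("Resolved '%s' to domID %u (name: %s)\n",
               input, domID, name ? name : "<unknown>");

    free(name);
}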
Example #2
0
void print_sharing_info(xen_interface_t *xen, domid_t domID) {

    xc_dominfo_t info = { 0 };

    if (1 == xc_domain_getinfo(xen->xc, domID, 1, &info) && info.domid == domID)
        printf("Shared memory pages: %lu\n", info.nr_shared_pages);
    else
        printf("Failed to get info for domain %u\n", domID);
}
Example #3
0
void xen_pause(xen_interface_t *xen, domid_t domID) {
    xc_dominfo_t info = { 0 };

    if (1 == xc_domain_getinfo(xen->xc, domID, 1, &info) && info.domid == domID && !info.paused)
        xc_domain_pause(xen->xc, domID);

}
Example #4
0
long xc_get_tot_pages(xc_interface *xch, uint32_t domid)
{
    xc_dominfo_t info;
    if ( (xc_domain_getinfo(xch, domid, 1, &info) != 1) ||
         (info.domid != domid) )
        return -1;
    return info.nr_pages;
}
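
A hedged caller sketch for this helper; report_tot_pages() is a hypothetical name, and an already-open xc_interface handle plus <stdio.h> are assumed:

/* Hypothetical helper: print a domain's current page count;
 * xc_get_tot_pages() returns -1 when the lookup fails. */
static void report_tot_pages(xc_interface *xch, uint32_t domid)
{
    long pages = xc_get_tot_pages(xch, domid);

    if (pages < 0)
        fprintf(stderr, "could not query page count for domain %u\n", domid);
    else
        printf("domain %u owns %ld pages\n", domid, pages);
}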
Example #5
0
/**
 *  Returns a non-zero value if this slot is occupied by a left-over domain.
 *
 */
static int slot_occupied_by_dead_domain(int slot)
{
    int ret;
    xc_dominfo_t info;
    char * reported_domid;

    struct domain * d;

    //If we've been passed the special slot -1, vacuously return false;
    //this is an indication that a domain has no real slot (and thus can't
    //occupy a slot).
    if(slot == -1) {
        return 0;
    }

    d = domain_with_slot(slot);

    //If no domain occupies this slot, it can't be occupied by a dead one.
    if(!d) {
        return 0;
    }


    //Ask xen for information about the domain...
    ret = xc_domain_getinfo(xc_handle, d->domid, 1, &info);

    //If the domain doesn't exist, according to Xen, we've stumbled
    //upon a left-over dead record!
    if(ret != 1) {
        return 1;
    }

    //Otherwise, we'll have to employ an ugly heuristic to see if
    //the toolstack has tried (and failed!) to destroy the VM.
    //
    //If you see a way to improve this heuristic, by all means, do!
    //Current heuristic:
    //
    //- If the domain is marked as /dying/, we've started the process
    //  of killing it; and
    //- If the domain's record has been purged from the Xenstore, its
    //  cleanup is either done or no longer possible.
    //
    //If both conditions are met, we've found a dead-in-all-but-name-VM.
    //
    reported_domid = xenstore_dom_read(d->domid, "domid");

    //If we've obtained a domid from the xenstore, this domain still has
    //a xenstore record, and likely is not yet dead.
    if(reported_domid) {
      free(reported_domid);
      return 0;
    }

    //Otherwise, we'll return the dying status-- as this will tell us
    //if the domain is all-but-dead, as defined above.
    return info.dying;
}
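
A sketch of how a caller might use this predicate to sweep slots; NUM_SLOTS and release_slot() are assumed names standing in for whatever the surrounding toolstack provides, not part of the original code:

/* Hypothetical sweep: walk every slot and reclaim those still held by
 * left-over dead domains. */
static void reclaim_dead_slots(void)
{
    int slot;

    for (slot = 0; slot < NUM_SLOTS; slot++) {
        if (slot_occupied_by_dead_domain(slot))
            release_slot(slot);
    }
}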
Example #6
0
static int modify_returncode(xc_interface *xch, uint32_t domid)
{
    vcpu_guest_context_any_t ctxt;
    xc_dominfo_t info;
    xen_capabilities_info_t caps;
    struct domain_info_context _dinfo = {};
    struct domain_info_context *dinfo = &_dinfo;
    int rc;

    if ( xc_domain_getinfo(xch, domid, 1, &info) != 1 ||
         info.domid != domid )
    {
        PERROR("Could not get domain info");
        return -1;
    }

    if ( !info.shutdown || (info.shutdown_reason != SHUTDOWN_suspend) )
    {
        ERROR("Dom %d not suspended: (shutdown %d, reason %d)", domid,
              info.shutdown, info.shutdown_reason);
        errno = EINVAL;
        return -1;
    }

    if ( info.hvm )
    {
        /* HVM guests without PV drivers have no return code to modify. */
        uint64_t irq = 0;
        xc_hvm_param_get(xch, domid, HVM_PARAM_CALLBACK_IRQ, &irq);
        if ( !irq )
            return 0;

        /* HVM guests have host address width. */
        if ( xc_version(xch, XENVER_capabilities, &caps) != 0 )
        {
            PERROR("Could not get Xen capabilities");
            return -1;
        }
        dinfo->guest_width = strstr(caps, "x86_64") ? 8 : 4;
    }
    else
    {
        /* Probe PV guest address width. */
        if ( xc_domain_get_guest_width(xch, domid, &dinfo->guest_width) )
            return -1;
    }

    if ( (rc = xc_vcpu_getcontext(xch, domid, 0, &ctxt)) != 0 )
        return rc;

    SET_FIELD(&ctxt, user_regs.eax, 1, dinfo->guest_width);

    if ( (rc = xc_vcpu_setcontext(xch, domid, 0, &ctxt)) != 0 )
        return rc;

    return 0;
}
Example #7
0
// give index of this domain in the qos data array
int indexof(int domid)
{
  int idx;
  xc_dominfo_t dominfo[NDOMAINS];
  int xc_handle, ndomains;
  extern void qos_kill_thread(int domid);
  
  if (domid < 0) {	// shouldn't happen
    printf("bad domain id: %d\r\n", domid);
    return 0;
  }

  for (idx=0; idx<NDOMAINS; idx++)
    if ( (new_qos->domain_info[idx].id == domid) && new_qos->domain_info[idx].in_use)
      return idx;

  // not found, make a new entry
  for (idx=0; idx<NDOMAINS; idx++)
    if (new_qos->domain_info[idx].in_use == 0) {
      global_init_domain(domid, idx);
      return idx;
    }

  // call domaininfo hypercall to try and garbage collect unused entries
  xc_handle = xc_interface_open();
  ndomains = xc_domain_getinfo(xc_handle, 0, NDOMAINS, dominfo);
  xc_interface_close(xc_handle);

  // for each domain in our data, look for it in the system dominfo structure
  // and purge the domain's data from our state if it does not exist in the
  // dominfo structure
  for (idx=0; idx<NDOMAINS; idx++) {
    int domid = new_qos->domain_info[idx].id;
    int jdx;
    
    for (jdx=0; jdx<ndomains; jdx++) {
      if (dominfo[jdx].domid == domid)
	break;
    }
    if (jdx == ndomains)        // we didn't find domid in the dominfo struct
      if (domid != IDLE_DOMAIN_ID) // exception for idle domain, which is not
	                           // contained in dominfo
	qos_kill_thread(domid);	// purge our stale data
  }
  
  // look again for a free slot
  for (idx=0; idx<NDOMAINS; idx++)
    if (new_qos->domain_info[idx].in_use == 0) {
      global_init_domain(domid, idx);
      return idx;
    }

  // still no space found, so bail
  fprintf(stderr, "out of space in domain table, increase NDOMAINS\r\n");
  exit(2);
}
Example #8
0
int is_guest_dead(xa_instance_t *xai){
  if (xc_domain_getinfo(xai->xc_handle,xai->domain_id,1,&(xai->info)) != 1){
    return 1;
  }
  if((xai->info.crashed == 1) || (xai->info.dying == 1)){
    return 1;
  }
  
  return 0;
}
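
A hedged polling sketch built on is_guest_dead(); the one-second interval and the wait_for_guest_death() name are illustrative only:

/* Hypothetical: block until the guest is reported crashed, dying or
 * gone, polling once per second (assumes <unistd.h> for sleep()). */
void wait_for_guest_death(xa_instance_t *xai) {
    while (!is_guest_dead(xai))
        sleep(1);
}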
Example #9
0
/* Set the RTDS scheduling parameters of vcpu(s) */
static int sched_rtds_vcpu_set(libxl__gc *gc, uint32_t domid,
                               const libxl_vcpu_sched_params *scinfo)
{
    int r, rc;
    int i;
    uint16_t max_vcpuid;
    xc_dominfo_t info;
    struct xen_domctl_schedparam_vcpu *vcpus;

    r = xc_domain_getinfo(CTX->xch, domid, 1, &info);
    if (r < 0) {
        LOGED(ERROR, domid, "Getting domain info");
        rc = ERROR_FAIL;
        goto out;
    }
    max_vcpuid = info.max_vcpu_id;

    if (scinfo->num_vcpus <= 0) {
        rc = ERROR_INVAL;
        goto out;
    }
    for (i = 0; i < scinfo->num_vcpus; i++) {
        if (scinfo->vcpus[i].vcpuid < 0 ||
            scinfo->vcpus[i].vcpuid > max_vcpuid) {
            LOGD(ERROR, domid, "Invalid VCPU %d: valid range is [0, %d]",
                        scinfo->vcpus[i].vcpuid, max_vcpuid);
            rc = ERROR_INVAL;
            goto out;
        }
        rc = sched_rtds_validate_params(gc, scinfo->vcpus[i].period,
                                        scinfo->vcpus[i].budget);
        if (rc) {
            rc = ERROR_INVAL;
            goto out;
        }
    }
    GCNEW_ARRAY(vcpus, scinfo->num_vcpus);
    for (i = 0; i < scinfo->num_vcpus; i++) {
        vcpus[i].vcpuid = scinfo->vcpus[i].vcpuid;
        vcpus[i].u.rtds.period = scinfo->vcpus[i].period;
        vcpus[i].u.rtds.budget = scinfo->vcpus[i].budget;
    }

    r = xc_sched_rtds_vcpu_set(CTX->xch, domid,
                               vcpus, scinfo->num_vcpus);
    if (r != 0) {
        LOGED(ERROR, domid, "Setting vcpu sched rtds");
        rc = ERROR_FAIL;
        goto out;
    }
    rc = 0;
out:
    return rc;
}
Example #10
0
void xen_unpause(xen_interface_t *xen, domid_t domID) {
    do {
        xc_dominfo_t info = { 0 };

        if (1 == xc_domain_getinfo(xen->xc, domID, 1, &info) && info.domid == domID && info.paused)
            xc_domain_unpause(xen->xc, domID);
        else
            break;

    } while (1);
}
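
Examples #3 and #10 are typically used as a pair around some inspection step; a minimal sketch of that pattern (inspect_paused() is a hypothetical name):

/* Hypothetical: pause the domain, do some work while it is quiesced,
 * then drain the pause back out with xen_unpause(). */
static void inspect_paused(xen_interface_t *xen, domid_t domID) {
    xen_pause(xen, domID);
    /* ... inspect guest state here while it is quiesced ... */
    xen_unpause(xen, domID);
}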
Example #11
0
/* Get the RTDS scheduling parameters of vcpu(s) */
static int sched_rtds_vcpu_get(libxl__gc *gc, uint32_t domid,
                               libxl_vcpu_sched_params *scinfo)
{
    uint32_t num_vcpus;
    int i, r, rc;
    xc_dominfo_t info;
    struct xen_domctl_schedparam_vcpu *vcpus;

    r = xc_domain_getinfo(CTX->xch, domid, 1, &info);
    if (r < 0) {
        LOGED(ERROR, domid, "Getting domain info");
        rc = ERROR_FAIL;
        goto out;
    }

    if (scinfo->num_vcpus <= 0) {
        rc = ERROR_INVAL;
        goto out;
    } else {
        num_vcpus = scinfo->num_vcpus;
        GCNEW_ARRAY(vcpus, num_vcpus);
        for (i = 0; i < num_vcpus; i++) {
            if (scinfo->vcpus[i].vcpuid < 0 ||
                scinfo->vcpus[i].vcpuid > info.max_vcpu_id) {
                LOGD(ERROR, domid, "VCPU index is out of range, "
                            "valid values are within range from 0 to %d",
                            info.max_vcpu_id);
                rc = ERROR_INVAL;
                goto out;
            }
            vcpus[i].vcpuid = scinfo->vcpus[i].vcpuid;
        }
    }

    r = xc_sched_rtds_vcpu_get(CTX->xch, domid, vcpus, num_vcpus);
    if (r != 0) {
        LOGED(ERROR, domid, "Getting vcpu sched rtds");
        rc = ERROR_FAIL;
        goto out;
    }
    scinfo->sched = LIBXL_SCHEDULER_RTDS;
    for (i = 0; i < num_vcpus; i++) {
        scinfo->vcpus[i].period = vcpus[i].u.rtds.period;
        scinfo->vcpus[i].budget = vcpus[i].u.rtds.budget;
        scinfo->vcpus[i].vcpuid = vcpus[i].vcpuid;
    }
    rc = 0;
out:
    return rc;
}
Example #12
0
static int modify_returncode(int xc_handle, uint32_t domid)
{
    vcpu_guest_context_any_t ctxt;
    xc_dominfo_t info;
    xen_capabilities_info_t caps;
    struct domain_info_context _dinfo = {};
    struct domain_info_context *dinfo = &_dinfo;
    int rc;

    if ( xc_domain_getinfo(xc_handle, domid, 1, &info) != 1 )
    {
        PERROR("Could not get domain info");
        return -1;
    }

    if ( info.hvm )
    {
        /* HVM guests without PV drivers have no return code to modify. */
        unsigned long irq = 0;
        xc_get_hvm_param(xc_handle, domid, HVM_PARAM_CALLBACK_IRQ, &irq);
        if ( !irq )
            return 0;

        /* HVM guests have host address width. */
        if ( xc_version(xc_handle, XENVER_capabilities, &caps) != 0 )
        {
            PERROR("Could not get Xen capabilities\n");
            return -1;
        }
        dinfo->guest_width = strstr(caps, "x86_64") ? 8 : 4;
    }
    else
    {
        /* Probe PV guest address width. */
        dinfo->guest_width = pv_guest_width(xc_handle, domid);
        if ( dinfo->guest_width < 0 )
            return -1;
    }

    if ( (rc = xc_vcpu_getcontext(xc_handle, domid, 0, &ctxt)) != 0 )
        return rc;

    SET_FIELD(&ctxt, user_regs.eax, 1);

    if ( (rc = xc_vcpu_setcontext(xc_handle, domid, 0, &ctxt)) != 0 )
        return rc;

    return 0;
}
Example #13
0
/* returns -1 on error or death, 0 if domain is running, 1 if suspended */
static int check_shutdown(checkpoint_state* s) {
    unsigned int count;
    int xsfd;
    char **vec;
    char buf[16];
    xc_dominfo_t info;

    xsfd = xs_fileno(s->xsh);

    /* loop on watch if it fires for another domain */
    while (1) {
	if (pollfd(s, xsfd) < 0)
	    return -1;

	vec = xs_read_watch(s->xsh, &count);
	if (s->watching_shutdown == 1) {
	    s->watching_shutdown = 2;
	    return 0;
	}
	if (!vec) {
	    fprintf(stderr, "empty watch fired\n");
	    continue;
	}
	snprintf(buf, sizeof(buf), "%d", s->domid);
	if (!strcmp(vec[XS_WATCH_TOKEN], buf))
	    break;
    }

    if (xc_domain_getinfo(s->xch, s->domid, 1, &info) != 1
	|| info.domid != s->domid) {
	snprintf(errbuf, sizeof(errbuf),
		 "error getting info for domain %u", s->domid);
	s->errstr = errbuf;
	return -1;
    }
    if (!info.shutdown) {
	snprintf(errbuf, sizeof(errbuf),
		 "domain %u not shut down", s->domid);
	s->errstr = errbuf;
	return 0;
    }

    if (info.shutdown_reason != SHUTDOWN_suspend)
	return -1;

    return 1;
}
Example #14
0
/* Set the RTDS scheduling parameters of all vcpus of a domain */
static int sched_rtds_vcpu_set_all(libxl__gc *gc, uint32_t domid,
                                   const libxl_vcpu_sched_params *scinfo)
{
    int r, rc;
    int i;
    uint16_t max_vcpuid;
    xc_dominfo_t info;
    struct xen_domctl_schedparam_vcpu *vcpus;
    uint32_t num_vcpus;

    r = xc_domain_getinfo(CTX->xch, domid, 1, &info);
    if (r < 0) {
        LOGED(ERROR, domid, "Getting domain info");
        rc = ERROR_FAIL;
        goto out;
    }
    max_vcpuid = info.max_vcpu_id;

    if (scinfo->num_vcpus != 1) {
        rc = ERROR_INVAL;
        goto out;
    }
    if (sched_rtds_validate_params(gc, scinfo->vcpus[0].period,
                                   scinfo->vcpus[0].budget)) {
        rc = ERROR_INVAL;
        goto out;
    }
    num_vcpus = max_vcpuid + 1;
    GCNEW_ARRAY(vcpus, num_vcpus);
    for (i = 0; i < num_vcpus; i++) {
        vcpus[i].vcpuid = i;
        vcpus[i].u.rtds.period = scinfo->vcpus[0].period;
        vcpus[i].u.rtds.budget = scinfo->vcpus[0].budget;
    }

    r = xc_sched_rtds_vcpu_set(CTX->xch, domid,
                               vcpus, num_vcpus);
    if (r != 0) {
        LOGED(ERROR, domid, "Setting vcpu sched rtds");
        rc = ERROR_FAIL;
        goto out;
    }
    rc = 0;
out:
    return rc;
}
Example #15
0
/* Get the RTDS scheduling parameters of all vcpus of a domain */
static int sched_rtds_vcpu_get_all(libxl__gc *gc, uint32_t domid,
                                   libxl_vcpu_sched_params *scinfo)
{
    uint32_t num_vcpus;
    int i, r, rc;
    xc_dominfo_t info;
    struct xen_domctl_schedparam_vcpu *vcpus;

    r = xc_domain_getinfo(CTX->xch, domid, 1, &info);
    if (r < 0) {
        LOGED(ERROR, domid, "Getting domain info");
        rc = ERROR_FAIL;
        goto out;
    }

    if (scinfo->num_vcpus > 0) {
        rc = ERROR_INVAL;
        goto out;
    } else {
        num_vcpus = info.max_vcpu_id + 1;
        GCNEW_ARRAY(vcpus, num_vcpus);
        for (i = 0; i < num_vcpus; i++)
            vcpus[i].vcpuid = i;
    }

    r = xc_sched_rtds_vcpu_get(CTX->xch, domid, vcpus, num_vcpus);
    if (r != 0) {
        LOGED(ERROR, domid, "Getting vcpu sched rtds");
        rc = ERROR_FAIL;
        goto out;
    }
    scinfo->sched = LIBXL_SCHEDULER_RTDS;
    scinfo->num_vcpus = num_vcpus;
    scinfo->vcpus = libxl__calloc(NOGC, num_vcpus,
                                  sizeof(libxl_sched_params));

    for (i = 0; i < num_vcpus; i++) {
        scinfo->vcpus[i].period = vcpus[i].u.rtds.period;
        scinfo->vcpus[i].budget = vcpus[i].u.rtds.budget;
        scinfo->vcpus[i].vcpuid = vcpus[i].vcpuid;
    }
    rc = 0;
out:
    return rc;
}
Example #16
0
void handle_low_mem(void)
{
    xc_dominfo_t  dom0_info;
    xc_physinfo_t info;
    unsigned long long free_pages, dom0_pages, diff, dom0_target;
    char data[BUFSZ], error[BUFSZ];

    if (xc_physinfo(xch, &info) < 0)
    {
        perror("Getting physinfo failed");
        return;
    }

    free_pages = (unsigned long long) info.free_pages;
    printf("Available free pages: 0x%llx:%llux\n",
            free_pages, free_pages);

    /* Don't do anything if we have more than the threshold free */
    if ( free_pages >= THRESHOLD_PG )
        return;
    diff = THRESHOLD_PG - free_pages; 

    if (xc_domain_getinfo(xch, 0, 1, &dom0_info) < 1)
    {
        perror("Failed to get dom0 info");
        return;
    }

    dom0_pages = (unsigned long long) dom0_info.nr_pages;
    printf("Dom0 pages: 0x%llx:%llu\n", dom0_pages, dom0_pages);
    dom0_target = dom0_pages - diff;
    if (dom0_target <= DOM0_FLOOR_PG)
        return;

    printf("Shooting for dom0 target 0x%llx:%llu\n", 
            dom0_target, dom0_target);

    snprintf(data, BUFSZ, "%llu", dom0_target);
    if (!xs_write(xs_handle, XBT_NULL, 
            "/local/domain/0/memory/target", data, strlen(data)))
    {
        snprintf(error, BUFSZ,"Failed to write target %s to xenstore", data);
        perror(error);
    }
}
Example #17
0
static int
xc_ia64_pv_recv_context_ver_three(int xc_handle, int io_fd, uint32_t dom,
                                  unsigned long shared_info_frame,
                                  struct xen_ia64_p2m_table *p2m_table,
                                  unsigned int store_evtchn,
                                  unsigned long *store_mfn,
                                  unsigned int console_evtchn,
                                  unsigned long *console_mfn)
{
    int rc = -1;
    xc_dominfo_t info;
    unsigned int i;
    
    /* vcpu map */
    uint64_t *vcpumap = NULL;
    
    if (xc_domain_getinfo(xc_handle, dom, 1, &info) != 1) {
        ERROR("Could not get domain info");
        return -1;
    }
    rc = xc_ia64_recv_vcpumap(&info, io_fd, &vcpumap);
    if (rc != 0)
        goto out;

    /* vcpu context */
    for (i = 0; i <= info.max_vcpu_id; i++) {
        if (!__test_bit(i, vcpumap))
            continue;

        rc = xc_ia64_pv_recv_vcpu_context(xc_handle, io_fd, dom, i);
        if (rc != 0)
            goto out;
    }    

    /* shared_info */
    rc = xc_ia64_pv_recv_shared_info(xc_handle, io_fd, dom, shared_info_frame,
                                     p2m_table, store_evtchn, store_mfn,
                                     console_evtchn, console_mfn);
 out:
    if (vcpumap != NULL)
        free(vcpumap);
    return rc;
}
Example #18
0
bool XenDriver::cpuCount( unsigned int &count ) const throw()
{
	xc_dominfo_t info;

	StatsCollector::instance().incStat( "xcDomainInfo" );

	if ( xc_domain_getinfo( xci_, domain_, 1, &info ) != 1 ) {

		if ( logHelper_ )
			logHelper_->error( std::string( "xc_domain_getinfo() failed: " ) + strerror( errno ) );

		return false;
	}

	count = info.max_vcpu_id + 1;
	// count = info.nr_online_vcpus;

	return true;
}
Example #19
0
static void
domain_read_is_pv_domain(struct domain *d)
{
    xc_dominfo_t info;
    int ret;

    d->is_pv_domain = 0;

    ret = xc_domain_getinfo(xc_handle, d->domid, 1, &info);
    if (ret != 1) {
        warning("xc_domain_getinfo() failed (%s).", strerror(errno));
        return;
    }
    if (info.domid != (uint32_t) d->domid) {
        warning("xc_domain_getinfo() reports a different domid (input:%d vs getinfo:%d).", d->domid, info.domid);
        return;
    }

    d->is_pv_domain = !info.hvm;
}
Example #20
0
static int check_domain(xc_interface *xch)
{
    xc_dominfo_t info;
    uint32_t dom;
    int ret;

    dom = 1;
    while ( (ret = xc_domain_getinfo(xch, dom, 1, &info)) == 1 )
    {
        if ( info.xenstore )
            return 1;
        dom = info.domid + 1;
    }
    if ( ret < 0 && errno != ESRCH )
    {
        fprintf(stderr, "xc_domain_getinfo failed\n");
        return ret;
    }

    return 0;
}
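
A hedged sketch of one way a caller might wait for a domain with the xenstore flag to appear using check_domain(); the retry loop, one-second interval, and wait_for_xenstore_domain() name are illustrative, not taken from the original code:

/* Hypothetical: poll until a xenstore domain shows up, bailing out if
 * the getinfo scan itself fails (assumes <unistd.h> for sleep()). */
static int wait_for_xenstore_domain(xc_interface *xch)
{
    int ret;

    while ((ret = check_domain(xch)) == 0)
        sleep(1);

    return ret < 0 ? -1 : 0;
}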
Example #21
0
unsigned long xc_translate_foreign_address(int xc_handle, uint32_t dom,
                                           int vcpu, unsigned long long virt)
{
    xc_dominfo_t dominfo;
    uint64_t paddr, mask, pte = 0;
    int size, level, pt_levels = 2;
    void *map;

    if (xc_domain_getinfo(xc_handle, dom, 1, &dominfo) != 1 
        || dominfo.domid != dom)
        return 0;

    /* What kind of paging are we dealing with? */
    if (dominfo.hvm) {
        struct hvm_hw_cpu ctx;
        if (xc_domain_hvm_getcontext_partial(xc_handle, dom,
                                             HVM_SAVE_CODE(CPU), vcpu,
                                             &ctx, sizeof ctx) != 0)
            return 0;
        if (!(ctx.cr0 & CR0_PG))
            return virt >> PAGE_SHIFT;
        pt_levels = (ctx.msr_efer&EFER_LMA) ? 4 : (ctx.cr4&CR4_PAE) ? 3 : 2;
        paddr = ctx.cr3 & ((pt_levels == 3) ? ~0x1full : ~0xfffull);
    } else {
Example #22
0
long xc_get_tot_pages(xc_interface *xch, uint32_t domid)
{
    xc_dominfo_t info;
    return (xc_domain_getinfo(xch, domid, 1, &info) != 1) ?
        -1 : info.nr_pages;
}
Example #23
0
/* given a xa_instance_t struct with the xc_handle and the
 * domain_id filled in, this function will fill in the rest
 * of the values using queries to libxc. */
int helper_init (xa_instance_t *instance)
{
    int ret = XA_SUCCESS;
    uint32_t local_offset = 0;
    unsigned char *memory = NULL;

    if (XA_MODE_XEN == instance->mode){
#ifdef ENABLE_XEN
        /* init instance->m.xen.xc_handle */
        if (xc_domain_getinfo(
                instance->m.xen.xc_handle, instance->m.xen.domain_id,
                1, &(instance->m.xen.info)
            ) != 1){
            fprintf(stderr, "ERROR: Failed to get domain info\n");
            ret = xa_report_error(instance, 0, XA_ECRITICAL);
            if (XA_FAILURE == ret) goto error_exit;
        }
        xa_dbprint("--got domain info.\n");

        /* find the version of xen that we are running */
        init_xen_version(instance);
#endif /* ENABLE_XEN */
    }

    /* read in configure file information */
    if (read_config_file(instance) == XA_FAILURE){
        ret = xa_report_error(instance, 0, XA_EMINOR);
        if (XA_FAILURE == ret) goto error_exit;
    }
    
    /* determine the page sizes and layout for target OS */
    if (XA_MODE_XEN == instance->mode){
#ifdef ENABLE_XEN
        if (get_page_info_xen(instance) == XA_FAILURE){
            fprintf(stderr, "ERROR: memory layout not supported\n");
            ret = xa_report_error(instance, 0, XA_ECRITICAL);
            if (XA_FAILURE == ret) goto error_exit;
        }
#endif /* ENABLE_XEN */
    }
    else{
        /*TODO add memory layout discovery here for file */
        instance->hvm = 1; /* assume nonvirt image or hvm image for now */
        instance->pae = 0; /* assume no pae for now */
    }
    xa_dbprint("--got memory layout.\n");

    /* setup the correct page offset size for the target OS */
    init_page_offset(instance);

    if (XA_MODE_XEN == instance->mode){
#ifdef ENABLE_XEN
        /* init instance->hvm */
        instance->hvm = xa_ishvm(instance->m.xen.domain_id);
#ifdef XA_DEBUG
        if (instance->hvm){
            xa_dbprint("**set instance->hvm to true (HVM).\n");
        }
        else{
            xa_dbprint("**set instance->hvm to false (PV).\n");
        }
#endif /* XA_DEBUG */
#endif /* ENABLE_XEN */
    }

    /* get the memory size */
    if (get_memory_size(instance) == XA_FAILURE){
        fprintf(stderr, "ERROR: Failed to get memory size.\n");
        ret = xa_report_error(instance, 0, XA_ECRITICAL);
        if (XA_FAILURE == ret) goto error_exit;
    }

    /* setup OS specific stuff */
    if (instance->os_type == XA_OS_LINUX){
        ret = linux_init(instance);
    }
    else if (instance->os_type == XA_OS_WINDOWS){
        ret = windows_init(instance);
    }

error_exit:
    return ret;
}
Example #24
0
int xc_dom_boot_image(struct xc_dom_image *dom)
{
    DECLARE_HYPERCALL_BUFFER(vcpu_guest_context_any_t, ctxt);
    xc_dominfo_t info;
    int rc;

    ctxt = xc_hypercall_buffer_alloc(dom->xch, ctxt, sizeof(*ctxt));
    if ( ctxt == NULL )
        return -1;

    DOMPRINTF_CALLED(dom->xch);

    /* misc stuff*/
    if ( (rc = arch_setup_bootearly(dom)) != 0 )
        return rc;

    /* collect some info */
    rc = xc_domain_getinfo(dom->xch, dom->guest_domid, 1, &info);
    if ( rc < 0 )
    {
        xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
                     "%s: getdomaininfo failed (rc=%d)", __FUNCTION__, rc);
        return rc;
    }
    if ( rc == 0 || info.domid != dom->guest_domid )
    {
        xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
                     "%s: Huh? No domains found (nr_domains=%d) "
                     "or domid mismatch (%d != %d)", __FUNCTION__,
                     rc, info.domid, dom->guest_domid);
        return -1;
    }
    dom->shared_info_mfn = info.shared_info_frame;

    /* sanity checks */
    if ( !xc_dom_compat_check(dom) )
        return -1;

    /* initial mm setup */
    if ( (rc = xc_dom_update_guest_p2m(dom)) != 0 )
        return rc;
    if ( dom->arch_hooks->setup_pgtables )
        if ( (rc = dom->arch_hooks->setup_pgtables(dom)) != 0 )
            return rc;

    if ( (rc = clear_page(dom, dom->console_pfn)) != 0 )
        return rc;
    if ( (rc = clear_page(dom, dom->xenstore_pfn)) != 0 )
        return rc;

    /* start info page */
    if ( dom->arch_hooks->start_info )
        dom->arch_hooks->start_info(dom);

    /* hypercall page */
    if ( (rc = setup_hypercall_page(dom)) != 0 )
        return rc;
    xc_dom_log_memory_footprint(dom);

    /* misc x86 stuff */
    if ( (rc = arch_setup_bootlate(dom)) != 0 )
        return rc;

    /* let the vm run */
    memset(ctxt, 0, sizeof(*ctxt));
    if ( (rc = dom->arch_hooks->vcpu(dom, ctxt)) != 0 )
        return rc;
    xc_dom_unmap_all(dom);
    rc = launch_vm(dom->xch, dom->guest_domid, ctxt);

    xc_hypercall_buffer_free(dom->xch, ctxt);
    return rc;
}
Example #25
0
/* open a checkpoint session to guest domid */
int checkpoint_open(checkpoint_state* s, unsigned int domid)
{
    xc_dominfo_t dominfo;
    unsigned long pvirq;

    s->domid = domid;

    s->xch = xc_interface_open();
    if (s->xch < 0) {
       s->errstr = "could not open control interface (are you root?)";

       return -1;
    }

    s->xsh = xs_daemon_open();
    if (!s->xsh) {
       checkpoint_close(s);
       s->errstr = "could not open xenstore handle";

       return -1;
    }

    s->xce = xc_evtchn_open();
    if (s->xce < 0) {
       checkpoint_close(s);
       s->errstr = "could not open event channel handle";

       return -1;
    }

    if (xc_domain_getinfo(s->xch, s->domid, 1, &dominfo) < 0) {
       checkpoint_close(s);
       s->errstr = "could not get domain info";

       return -1;
    }
    if (dominfo.hvm) {
       if (xc_get_hvm_param(s->xch, s->domid, HVM_PARAM_CALLBACK_IRQ, &pvirq)) {
           checkpoint_close(s);
           s->errstr = "could not get HVM callback IRQ";

           return -1;
       }
       s->domtype = pvirq ? dt_pvhvm : dt_hvm;
    } else
       s->domtype = dt_pv;

    if (setup_shutdown_watch(s) < 0) {
       checkpoint_close(s);

       return -1;
    }

    if (s->domtype == dt_pv) {
	if (setup_suspend_evtchn(s) < 0) {
	    fprintf(stderr, "WARNING: suspend event channel unavailable, "
		    "falling back to slow xenstore signalling\n");
	}
    } else if (s->domtype == dt_pvhvm) {
       checkpoint_close(s);
       s->errstr = "PV-on-HVM is unsupported";

       return -1;
    }

    return 0;
}
Example #26
0
int main(int argc, char **argv)
{
	struct termios attr;
	int domid;
	char *sopt = "hn:";
	int ch;
	unsigned int num = 0;
	int opt_ind=0;
	struct option lopt[] = {
		{ "type",     1, 0, 't' },
		{ "num",     1, 0, 'n' },
		{ "help",    0, 0, 'h' },
		{ 0 },

	};
	char *dom_path = NULL, *path = NULL;
	int spty, xsfd;
	struct xs_handle *xs;
	char *end;
	console_type type = CONSOLE_INVAL;

	while((ch = getopt_long(argc, argv, sopt, lopt, &opt_ind)) != -1) {
		switch(ch) {
		case 'h':
			usage(argv[0]);
			exit(0);
			break;
		case 'n':
			num = atoi(optarg);
			break;
		case 't':
			if (!strcmp(optarg, "serial"))
				type = CONSOLE_SERIAL;
			else if (!strcmp(optarg, "pv"))
				type = CONSOLE_PV;
			else {
				fprintf(stderr, "Invalid type argument\n");
				fprintf(stderr, "Console types supported are: serial, pv\n");
				exit(EINVAL);
			}
			break;
		default:
			fprintf(stderr, "Invalid argument\n");
			fprintf(stderr, "Try `%s --help' for more information.\n", 
					argv[0]);
			exit(EINVAL);
		}
	}

	if (optind >= argc) {
		fprintf(stderr, "DOMID should be specified\n");
		fprintf(stderr, "Try `%s --help' for more information.\n",
			argv[0]);
		exit(EINVAL);
	}
	domid = strtol(argv[optind], &end, 10);
	if (end && *end) {
		fprintf(stderr, "Invalid DOMID `%s'\n", argv[optind]);
		fprintf(stderr, "Try `%s --help' for more information.\n",
			argv[0]);
		exit(EINVAL);
	}

	xs = xs_daemon_open();
	if (xs == NULL) {
		err(errno, "Could not contact XenStore");
	}

	signal(SIGTERM, sighandler);

	dom_path = xs_get_domain_path(xs, domid);
	if (dom_path == NULL)
		err(errno, "xs_get_domain_path()");
	if (type == CONSOLE_INVAL) {
		xc_dominfo_t xcinfo;
		xc_interface *xc_handle = xc_interface_open(0,0,0);
		if (xc_handle == NULL)
			err(errno, "Could not open xc interface");
		if (xc_domain_getinfo(xc_handle, domid, 1, &xcinfo) != 1 ||
		    xcinfo.domid != (uint32_t)domid)
			err(errno, "Failed to get domain info");
		/* default to pv console for pv guests and serial for hvm guests */
		if (xcinfo.hvm)
			type = CONSOLE_SERIAL;
		else
			type = CONSOLE_PV;
		xc_interface_close(xc_handle);
	}
	path = malloc(strlen(dom_path) + strlen("/device/console/0/tty") + 5);
	if (path == NULL)
		err(ENOMEM, "malloc");
	if (type == CONSOLE_SERIAL)
		snprintf(path, strlen(dom_path) + strlen("/serial/0/tty") + 5, "%s/serial/%d/tty", dom_path, num);
	else {
		if (num == 0)
			snprintf(path, strlen(dom_path) + strlen("/console/tty") + 1, "%s/console/tty", dom_path);
		else
			snprintf(path, strlen(dom_path) + strlen("/device/console/%d/tty") + 5, "%s/device/console/%d/tty", dom_path, num);
	}

	/* FIXME consoled currently does not assume domain-0 doesn't have a
	   console which is good when we break domain-0 up.  To keep us
	   user friendly, we'll bail out here since no data will ever show
	   up on domain-0. */
	if (domid == 0) {
		fprintf(stderr, "Can't specify Domain-0\n");
		exit(EINVAL);
	}

	/* Set a watch on this domain's console pty */
	if (!xs_watch(xs, path, ""))
		err(errno, "Can't set watch for console pty");
	xsfd = xs_fileno(xs);

	/* Wait a little bit for tty to appear.  There is a race
	   condition that occurs after xend creates a domain.  This code
	   might be running before consoled has noticed the new domain
	   and setup a pty for it. */ 
        spty = get_pty_fd(xs, path, 5);
	if (spty == -1) {
		err(errno, "Could not read tty from store");
	}

	init_term(spty, &attr);
	init_term(STDIN_FILENO, &attr);
	console_loop(spty, xs, path);
	restore_term(STDIN_FILENO, &attr);

	free(path);
	free(dom_path);
	return 0;
}
Example #27
0
bool XenDomainWatcher::waitForDomainsOrTimeout( std::list<DomainInfo> &domains, int ms )
{
	struct pollfd fd;
	bool ret = false;

	fd.revents = 0;
	fd.fd = xs_fileno( xsh_ );
	fd.events = POLLIN | POLLERR;

	int rc = poll( &fd, 1, ms );

	domains.clear();

	if ( rc == 0 )
		return false; // timeout

	if ( fd.revents & POLLIN ) {

		unsigned int num;
		char **vec = xs_read_watch( xsh_, &num );

		if ( vec && introduceToken_ == vec[XS_WATCH_TOKEN] ) {

			int domid = 1;
			xc_dominfo_t dominfo;
			int err = -1;

			while ( ( err = xc_domain_getinfo( xci_, domid, 1, &dominfo ) ) == 1 ) {
				domid = dominfo.domid + 1;

				if ( xs_is_domain_introduced( xsh_, dominfo.domid ) ) {

					// New domain
					if ( domIds_.find( dominfo.domid ) == domIds_.end() ) {

						domIds_.insert( dominfo.domid );

						std::stringstream ss;
						ss << "/local/domain/" << dominfo.domid << "/name";

						std::string path = ss.str();

						errno = 0;
						char *name = static_cast<char *>(
						        xs_read_timeout( xsh_, XBT_NULL, path.c_str(), NULL, 1 ) );

						if ( !name && errno && logHelper_ )
							logHelper_->error( std::string( "xs_read() error reading " ) +
							                   ss.str() + ": " + strerror( errno ) );

						if ( name ) { // domain running or new domain w name set

							ss.str( "" );
							ss << "/local/domain/" << dominfo.domid << "/console/tty";
							path = ss.str();

							DomainInfo domain;

							errno = 0;
							void *console = xs_read_timeout( xsh_, XBT_NULL, path.c_str(),
							                                 NULL, 1 );

							if ( !console && errno && logHelper_ )
								logHelper_->error(
								        std::string( "xs_read() error reading " ) +
								        ss.str() + ": " + strerror( errno ) );

							if ( console ) {
								free( console );
								domain.isAlreadyRunning = true;
							}
							else {
								domain.isAlreadyRunning = false;
							}

							domain.name = name;
							free( name );

							domains.push_back( domain );
							ret = true;
						}
						else { // new domain, name not yet set
							ss.str( "" );
							ss << "dom" << dominfo.domid;
							xs_watch( xsh_, path.c_str(), ss.str().c_str() );
						}
					}
				}
			}

			if ( err == -1 && ( errno == EACCES || errno == EPERM ) )
				throw Exception( "access denied for xc_domain_getinfo()" );
		}

		if ( vec && releaseToken_ == vec[XS_WATCH_TOKEN] ) {

			int domid = 1;
			xc_dominfo_t dominfo;

			while ( xc_domain_getinfo( xci_, domid, 1, &dominfo ) == 1 ) {
				domid = dominfo.domid + 1;

				if ( !xs_is_domain_introduced( xsh_, dominfo.domid ) )
					domIds_.erase( dominfo.domid );
			}
		}

		if ( vec && !strncmp( vec[XS_WATCH_TOKEN], "dom", 3 ) ) {

			int domid = 1;
			if ( sscanf( vec[XS_WATCH_TOKEN], "dom%u", &domid ) == 1 ) {

				char *name = static_cast<char *>(
				        xs_read_timeout( xsh_, XBT_NULL, vec[XS_WATCH_PATH], NULL, 1 ) );

				if ( name ) {
					DomainInfo domain;
					domain.isAlreadyRunning = false;
					domain.name = name;
					free( name );

					domains.push_back( domain );
					xs_unwatch( xsh_, vec[XS_WATCH_PATH], vec[XS_WATCH_TOKEN] );

					ret = true;
				}
			}
		}

		free( vec );
	}

	return ret;
}
Example #28
0
static int xc_domain_resume_any(xc_interface *xch, uint32_t domid)
{
    DECLARE_DOMCTL;
    xc_dominfo_t info;
    int i, rc = -1;
#if defined(__i386__) || defined(__x86_64__)
    struct domain_info_context _dinfo = { .guest_width = 0,
                                          .p2m_size = 0 };
    struct domain_info_context *dinfo = &_dinfo;
    unsigned long mfn;
    vcpu_guest_context_any_t ctxt;
    start_info_t *start_info;
    shared_info_t *shinfo = NULL;
    xen_pfn_t *p2m_frame_list_list = NULL;
    xen_pfn_t *p2m_frame_list = NULL;
    xen_pfn_t *p2m = NULL;
#endif

    if ( xc_domain_getinfo(xch, domid, 1, &info) != 1 )
    {
        PERROR("Could not get domain info");
        return rc;
    }

    /*
     * (x86 only) Rewrite store_mfn and console_mfn back to MFN (from PFN).
     */
#if defined(__i386__) || defined(__x86_64__)
    if ( info.hvm )
    {
        ERROR("Cannot resume uncooperative HVM guests");
        return rc;
    }

    if ( xc_domain_get_guest_width(xch, domid, &dinfo->guest_width) != 0 )
    {
        PERROR("Could not get domain width");
        return rc;
    }
    if ( dinfo->guest_width != sizeof(long) )
    {
        ERROR("Cannot resume uncooperative cross-address-size guests");
        return rc;
    }

    /* Map the shared info frame */
    shinfo = xc_map_foreign_range(xch, domid, PAGE_SIZE,
                                  PROT_READ, info.shared_info_frame);
    if ( shinfo == NULL )
    {
        ERROR("Couldn't map shared info");
        goto out;
    }

    dinfo->p2m_size = shinfo->arch.max_pfn;

    p2m_frame_list_list =
        xc_map_foreign_range(xch, domid, PAGE_SIZE, PROT_READ,
                             shinfo->arch.pfn_to_mfn_frame_list_list);
    if ( p2m_frame_list_list == NULL )
    {
        ERROR("Couldn't map p2m_frame_list_list");
        goto out;
    }

    p2m_frame_list = xc_map_foreign_pages(xch, domid, PROT_READ,
                                          p2m_frame_list_list,
                                          P2M_FLL_ENTRIES);
    if ( p2m_frame_list == NULL )
    {
        ERROR("Couldn't map p2m_frame_list");
        goto out;
    }

    /* Map all the frames of the pfn->mfn table. For migrate to succeed,
       the guest must not change which frames are used for this purpose.
       (It's not clear why it would want to change them, and we'll be OK
       from a safety POV anyhow.) */
    p2m = xc_map_foreign_pages(xch, domid, PROT_READ,
                               p2m_frame_list,
                               P2M_FL_ENTRIES);
    if ( p2m == NULL )
    {
        ERROR("Couldn't map p2m table");
        goto out;
    }

    if ( xc_vcpu_getcontext(xch, domid, 0, &ctxt) )
    {
        ERROR("Could not get vcpu context");
        goto out;
    }

    mfn = GET_FIELD(&ctxt, user_regs.edx, dinfo->guest_width);

    start_info = xc_map_foreign_range(xch, domid, PAGE_SIZE,
                                      PROT_READ | PROT_WRITE, mfn);
    if ( start_info == NULL )
    {
        ERROR("Couldn't map start_info");
        goto out;
    }

    start_info->store_mfn        = p2m[start_info->store_mfn];
    start_info->console.domU.mfn = p2m[start_info->console.domU.mfn];

    munmap(start_info, PAGE_SIZE);
#endif /* defined(__i386__) || defined(__x86_64__) */

    /* Reset all secondary CPU states. */
    for ( i = 1; i <= info.max_vcpu_id; i++ )
        if ( xc_vcpu_setcontext(xch, domid, i, NULL) != 0 )
        {
            ERROR("Couldn't reset vcpu state");
            goto out;
        }

    /* Ready to resume domain execution now. */
    domctl.cmd = XEN_DOMCTL_resumedomain;
    domctl.domain = domid;
    rc = do_domctl(xch, &domctl);

out:
#if defined(__i386__) || defined(__x86_64__)
    if (p2m)
        munmap(p2m, P2M_FL_ENTRIES*PAGE_SIZE);
    if (p2m_frame_list)
        munmap(p2m_frame_list, P2M_FLL_ENTRIES*PAGE_SIZE);
    if (p2m_frame_list_list)
        munmap(p2m_frame_list_list, PAGE_SIZE);
    if (shinfo)
        munmap(shinfo, PAGE_SIZE);
#endif

    return rc;
}

/*
 * Resume execution of a domain after suspend shutdown.
 * This can happen in one of two ways:
 *  1. Resume with special return code.
 *  2. Reset guest environment so it believes it is resumed in a new
 *     domain context.
 * (2) should be used only for guests which cannot handle the special
 * new return code. (1) is always safe (but slower).
 */
int xc_domain_resume(xc_interface *xch, uint32_t domid, int fast)
{
    return (fast
            ? xc_domain_resume_cooperative(xch, domid)
            : xc_domain_resume_any(xch, domid));
}
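
A minimal caller sketch for xc_domain_resume(); the handle open/close uses the current libxc signature, resume_domain() is a hypothetical name, and passing fast=1 assumes the guest can handle the special return code described in the comment above:

/* Hypothetical: resume a suspended domain via the fast path; callers
 * would pass fast=0 for guests that cannot handle the special return
 * code. */
static int resume_domain(uint32_t domid)
{
    xc_interface *xch = xc_interface_open(NULL, NULL, 0);
    int rc;

    if (!xch)
        return -1;

    rc = xc_domain_resume(xch, domid, 1 /* fast */);
    xc_interface_close(xch);
    return rc;
}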
Example #29
0
static int
xc_ia64_hvm_recv_context(int xc_handle, int io_fd, uint32_t dom,
                         unsigned long shared_info_frame,
                         struct xen_ia64_p2m_table *p2m_table,
                         unsigned int store_evtchn, unsigned long *store_mfn,
                         unsigned int console_evtchn,
                         unsigned long *console_mfn)
{
    int rc = -1;
    xc_dominfo_t info;
    unsigned int i;
    
    /* cpumap */
    uint64_t *vcpumap = NULL;

    /* HVM: magic frames for ioreqs and xenstore comms */
    const int hvm_params[] = {
        HVM_PARAM_STORE_PFN,
        HVM_PARAM_IOREQ_PFN,
        HVM_PARAM_BUFIOREQ_PFN,
        HVM_PARAM_BUFPIOREQ_PFN,
    };
    const int NR_PARAMS = sizeof(hvm_params) / sizeof(hvm_params[0]);
    /* ioreq_pfn, bufioreq_pfn, store_pfn */
    uint64_t magic_pfns[NR_PARAMS];

    /* HVM: a buffer for holding HVM context */
    uint64_t rec_size = 0;
    uint8_t *hvm_buf = NULL;

    /* Read shared info.  */
    if (xc_ia64_recv_shared_info(xc_handle, io_fd, dom, shared_info_frame,
                                 NULL))
        goto out;

    /* vcpu map */
    if (xc_domain_getinfo(xc_handle, dom, 1, &info) != 1) {
        ERROR("Could not get domain info");
        goto out;
    }
    if (xc_ia64_recv_vcpumap(&info, io_fd, &vcpumap))
        goto out;
    
    /* vcpu context */
    for (i = 0; i <= info.max_vcpu_id; i++) {
        /* A copy of the CPU context of the guest. */
        vcpu_guest_context_any_t ctxt_any;

        if (!__test_bit(i, vcpumap))
            continue;

        if (xc_ia64_recv_vcpu_context(xc_handle, io_fd, dom, i, &ctxt_any))
            goto out;

        /* system context of vcpu is received as hvm context. */
    }    

    /* Set HVM-specific parameters */
    if (read_exact(io_fd, magic_pfns, sizeof(magic_pfns))) {
        ERROR("error reading magic page addresses");
        goto out;
    }

    /* These comms pages need to be zeroed at the start of day */
    for (i = 0; i < NR_PARAMS; i++) {
        rc = xc_clear_domain_page(xc_handle, dom, magic_pfns[i]);
        if (rc != 0) {
            ERROR("error zeroing magic pages: %i", rc);
            goto out;
        }
        rc = xc_set_hvm_param(xc_handle, dom, hvm_params[i], magic_pfns[i]);
        if (rc != 0) {
            ERROR("error setting HVM params: %i", rc);
            goto out;
        }
    }
    rc = xc_set_hvm_param(xc_handle, dom,
                          HVM_PARAM_STORE_EVTCHN, store_evtchn);
    if (rc != 0) {
        ERROR("error setting HVM params: %i", rc);
        goto out;
    }
    rc = -1;
    *store_mfn = magic_pfns[0];

    /* Read HVM context */
    if (read_exact(io_fd, &rec_size, sizeof(rec_size))) {
        ERROR("error read hvm context size!\n");
        goto out;
    }

    hvm_buf = malloc(rec_size);
    if (hvm_buf == NULL) {
        ERROR("memory alloc for hvm context buffer failed");
        errno = ENOMEM;
        goto out;
    }

    if (read_exact(io_fd, hvm_buf, rec_size)) {
        ERROR("error loading the HVM context");
        goto out;
    }

    rc = xc_domain_hvm_setcontext(xc_handle, dom, hvm_buf, rec_size);
    if (rc != 0) {
        ERROR("error setting the HVM context");
        goto out;
    }
       
    rc = 0;

out:
    if (vcpumap != NULL)
        free(vcpumap);
    if (hvm_buf != NULL)
        free(hvm_buf);
    return rc;
}
Example #30
0
int main(int argc, char **argv) {

    if (argc < 3) {
        printf("Usage: %s origin-domID clone-domID\n", argv[0]);
        return 1;
    }

    domid_t origin = atoi(argv[1]), clone = atoi(argv[2]);

    xc_interface *xc = xc_interface_open(0, 0, 0);
    vcpu_guest_context_any_t vcpu_context;

    int hvm_context_size;
    uint8_t *hvm_context;

    if (xc == NULL) {
        fprintf(stderr, "xc_interface_open() failed!\n");
        return 1;
    }

    if (xc_vcpu_getcontext(xc, origin, 0, &vcpu_context)) {
        printf("Failed to get the VCPU context of domain %u\n", origin);
        return 1;
    }

    printf("Setting VCPU context of clone\n");

    if (xc_vcpu_setcontext(xc, clone, 0, &vcpu_context)) {
        printf("Failed to set the VCPU context of domain %u\n", clone);
    }

    /*printf("Setting HVM parameters of clone\n");

    int hvm_param_copy = 0;
    while (hvm_param_copy < HVM_PARAM_COUNT) {
        unsigned long value = 0;
        xc_get_hvm_param(xc, origin, hvm_params[hvm_param_copy], &value);
        if (value) {
            switch (hvm_params[hvm_param_copy]) {
            case HVM_PARAM_CONSOLE_PFN:
            case HVM_PARAM_IOREQ_PFN:
            case HVM_PARAM_BUFIOREQ_PFN:
            case HVM_PARAM_STORE_PFN:
                break;
            default:
                printf("Setting HVM param %i with value %lu\n",
                    hvm_params[hvm_param_copy], value);

                xc_set_hvm_param(xc, clone, hvm_params[hvm_param_copy], value);
            break;
            }
        }
        hvm_param_copy++;
    }*/

    hvm_context_size = xc_domain_hvm_getcontext(xc, origin, NULL, 0);
    if (hvm_context_size <= 0) {
        printf("HVM context size <= 0. Not an HVM domain?\n");
        return 1;
    }

    hvm_context = malloc(hvm_context_size * sizeof(uint8_t));
    if (hvm_context == NULL) {
        printf("Failed to allocate HVM context buffer.\n");
        return 1;
    }

    if (xc_domain_hvm_getcontext(xc, origin, hvm_context, hvm_context_size)
            <= 0) {
        printf("Failed to get HVM context.\n");
        return 1;
    }

    xc_dominfo_t info = { 0 };
    if (1 != xc_domain_getinfo(xc, origin, 1, &info) || info.domid != origin) {
        printf("Failed to get info for domain %u\n", origin);
        return 1;
    }
    int page = xc_domain_maximum_gpfn(xc, origin) + 1;
    printf("Sharing memory.. Origin domain has %lu kb ram and %i pages.\n",
            info.max_memkb, page);

    xc_memshr_control(xc, origin, 1);
    xc_memshr_control(xc, clone, 1);

    uint64_t shandle, chandle;
    int shared = 0;
    while (page >= 0) {
        page--;
        if (xc_memshr_nominate_gfn(xc, origin, page, &shandle)) {
            continue;
        }
        if (xc_memshr_nominate_gfn(xc, clone, page, &chandle)) {
            continue;
        }

        if (xc_memshr_share_gfns(xc, origin, page, shandle, clone, page,
                chandle))
            continue;

        shared++;
    }

    printf("Shared %i pages\n", shared);

    /*hvm_param_copy = 0;
    while (hvm_param_copy < HVM_PARAM_COUNT) {
        unsigned long value = 0;
        switch(hvm_params[hvm_param_copy]) {
            case HVM_PARAM_CONSOLE_PFN:
            case HVM_PARAM_IOREQ_PFN:
            case HVM_PARAM_BUFIOREQ_PFN:
            case HVM_PARAM_STORE_PFN:
                xc_get_hvm_param(xc, origin, hvm_params[hvm_param_copy], &value);
                if (value) {
                    printf("Setting HVM param %i with value %lu\n",
                        hvm_params[hvm_param_copy], value);

                    xc_clear_domain_page(xc, clone, hvm_params[hvm_param_copy]);
                    xc_set_hvm_param(xc, clone, hvm_params[hvm_param_copy], value);
                }
            break;
        }

        hvm_param_copy++;
    }*/

    /*printf("Setting HVM context of clone\n");

    if (xc_domain_hvm_setcontext(xc, clone, hvm_context, hvm_context_size)) {
        printf("Failed to set HVM context.\n");
        return 1;
    }*/

    xc_interface_close(xc);
    return 0;
}