Example #1
int xc_monitor_write_ctrlreg(xc_interface *xch, domid_t domain_id,
                             uint16_t index, bool enable, bool sync,
                             bool onchangeonly)
{
    DECLARE_DOMCTL;

    domctl.cmd = XEN_DOMCTL_monitor_op;
    domctl.domain = domain_id;
    domctl.u.monitor_op.op = enable ? XEN_DOMCTL_MONITOR_OP_ENABLE
                                    : XEN_DOMCTL_MONITOR_OP_DISABLE;
    domctl.u.monitor_op.event = XEN_DOMCTL_MONITOR_EVENT_WRITE_CTRLREG;
    domctl.u.monitor_op.u.mov_to_cr.index = index;
    domctl.u.monitor_op.u.mov_to_cr.sync = sync;
    domctl.u.monitor_op.u.mov_to_cr.onchangeonly = onchangeonly;

    return do_domctl(xch, &domctl);
}
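A hedged usage sketch for the wrapper above, assuming an open xc_interface handle and the VM_EVENT_X86_CR3 index constant from Xen's public vm_event.h: enable synchronous CR3 write monitoring that only reports writes which change the register's value.

    if ( xc_monitor_write_ctrlreg(xch, domain_id, VM_EVENT_X86_CR3,
                                  true /* enable */, true /* sync */,
                                  true /* onchangeonly */) )
        perror("xc_monitor_write_ctrlreg");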
Example #2
int xc_mem_event_control(int xc_handle, domid_t domain_id, unsigned int op,
                         unsigned int mode, void *shared_page,
                         void *ring_page, unsigned long gfn)
{
    DECLARE_DOMCTL;

    domctl.cmd = XEN_DOMCTL_mem_event_op;
    domctl.domain = domain_id;
    domctl.u.mem_event_op.op = op;
    domctl.u.mem_event_op.mode = mode;

    domctl.u.mem_event_op.shared_addr = (unsigned long)shared_page;
    domctl.u.mem_event_op.ring_addr = (unsigned long)ring_page;

    domctl.u.mem_event_op.gfn = gfn;
    
    return do_domctl(xc_handle, &domctl);
}
Example #3
int xc_vcpu_getinfo(int xc_handle,
                    uint32_t domid,
                    uint32_t vcpu,
                    xc_vcpuinfo_t *info)
{
    int rc;
    DECLARE_DOMCTL;

    domctl.cmd = XEN_DOMCTL_getvcpuinfo;
    domctl.domain = (domid_t)domid;
    domctl.u.getvcpuinfo.vcpu   = (uint16_t)vcpu;

    rc = do_domctl(xc_handle, &domctl);

    memcpy(info, &domctl.u.getvcpuinfo, sizeof(*info));

    return rc;
}
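A hedged caller sketch (xc_handle and domid are assumed to be valid): query vCPU 0 and report whether it is online and which physical CPU it last ran on.

    xc_vcpuinfo_t vinfo;

    if ( xc_vcpu_getinfo(xc_handle, domid, 0, &vinfo) == 0 )
        printf("vcpu0 is %s, last ran on pcpu %u\n",
               vinfo.online ? "online" : "offline", vinfo.cpu);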
Example #4
File: xc_rt.c Project: CPFL/xen
int xc_sched_rtds_domain_set(xc_interface *xch,
                           uint32_t domid,
                           struct xen_domctl_sched_rtds *sdom)
{
    int rc;
    DECLARE_DOMCTL;

    domctl.cmd = XEN_DOMCTL_scheduler_op;
    domctl.domain = (domid_t) domid;
    domctl.u.scheduler_op.sched_id = XEN_SCHEDULER_RTDS;
    domctl.u.scheduler_op.cmd = XEN_DOMCTL_SCHEDOP_putinfo;
    domctl.u.scheduler_op.u.rtds.period = sdom->period;
    domctl.u.scheduler_op.u.rtds.budget = sdom->budget;

    rc = do_domctl(xch, &domctl);

    return rc;
}
int xc_alloc_real_mode_area(int xc_handle,
                            uint32_t domain,
                            unsigned int log)
{
    DECLARE_DOMCTL;
    int err;

    domctl.cmd = XEN_DOMCTL_real_mode_area;
    domctl.domain = (domid_t)domain;
    domctl.u.real_mode_area.log = log;

    err = do_domctl(xc_handle, &domctl);

    if (err)
        DPRINTF("Failed real mode area allocation for dom %u (log %u)\n",
                domain, log);

    return err;
}
Example #6
File: xc_rt.c Project: CPFL/xen
int xc_sched_rtds_domain_get(xc_interface *xch,
                           uint32_t domid,
                           struct xen_domctl_sched_rtds *sdom)
{
    int rc;
    DECLARE_DOMCTL;

    domctl.cmd = XEN_DOMCTL_scheduler_op;
    domctl.domain = (domid_t) domid;
    domctl.u.scheduler_op.sched_id = XEN_SCHEDULER_RTDS;
    domctl.u.scheduler_op.cmd = XEN_DOMCTL_SCHEDOP_getinfo;

    rc = do_domctl(xch, &domctl);

    if ( rc == 0 )
        *sdom = domctl.u.scheduler_op.u.rtds;

    return rc;
}
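A hedged read-modify-write sketch combining this getter with the setter from Example #4 (RTDS period and budget are in microseconds):

    struct xen_domctl_sched_rtds params;

    if ( xc_sched_rtds_domain_get(xch, domid, &params) == 0 )
    {
        params.budget = params.period / 2;   /* illustrative: 50% utilisation */
        if ( xc_sched_rtds_domain_set(xch, domid, &params) )
            perror("xc_sched_rtds_domain_set");
    }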
Example #7
int xc_domain_memory_mapping(
    int xc_handle,
    uint32_t domid,
    unsigned long first_gfn,
    unsigned long first_mfn,
    unsigned long nr_mfns,
    uint32_t add_mapping)
{
    DECLARE_DOMCTL;

    domctl.cmd = XEN_DOMCTL_memory_mapping;
    domctl.domain = domid;
    domctl.u.memory_mapping.first_gfn = first_gfn;
    domctl.u.memory_mapping.first_mfn = first_mfn;
    domctl.u.memory_mapping.nr_mfns = nr_mfns;
    domctl.u.memory_mapping.add_mapping = add_mapping;

    return do_domctl(xc_handle, &domctl);
}
Example #8
int
xc_sched_credit2_domain_set(
    xc_interface *xch,
    uint32_t domid,
    struct xen_domctl_sched_credit2 *sdom)
{
    DECLARE_DOMCTL;

    domctl.cmd = XEN_DOMCTL_scheduler_op;
    domctl.domain = (domid_t) domid;
    domctl.u.scheduler_op.sched_id = XEN_SCHEDULER_CREDIT2;
    domctl.u.scheduler_op.cmd = XEN_DOMCTL_SCHEDOP_putinfo;
    domctl.u.scheduler_op.u.credit2 = *sdom;

    if ( do_domctl(xch, &domctl) )
        return -1;

    return 0;
}
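A minimal caller sketch; the weight value is illustrative (credit2 gives a domain CPU time in proportion to its weight):

    struct xen_domctl_sched_credit2 sdom = { .weight = 512 };

    if ( xc_sched_credit2_domain_set(xch, domid, &sdom) )
        perror("xc_sched_credit2_domain_set");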
Example #9
int xc_domain_ioport_mapping(
    int xc_handle,
    uint32_t domid,
    uint32_t first_gport,
    uint32_t first_mport,
    uint32_t nr_ports,
    uint32_t add_mapping)
{
    DECLARE_DOMCTL;

    domctl.cmd = XEN_DOMCTL_ioport_mapping;
    domctl.domain = domid;
    domctl.u.ioport_mapping.first_gport = first_gport;
    domctl.u.ioport_mapping.first_mport = first_mport;
    domctl.u.ioport_mapping.nr_ports = nr_ports;
    domctl.u.ioport_mapping.add_mapping = add_mapping;

    return do_domctl(xc_handle, &domctl);
}
Example #10
int xc_psr_get_domain_data(xc_interface *xch, uint32_t domid,
                           xc_psr_type type, uint32_t target,
                           uint64_t *data)
{
    int rc;
    DECLARE_DOMCTL;
    uint32_t cmd;

    switch ( type )
    {
    case XC_PSR_CAT_L3_CBM:
        cmd = XEN_DOMCTL_PSR_GET_L3_CBM;
        break;
    case XC_PSR_CAT_L3_CBM_CODE:
        cmd = XEN_DOMCTL_PSR_GET_L3_CODE;
        break;
    case XC_PSR_CAT_L3_CBM_DATA:
        cmd = XEN_DOMCTL_PSR_GET_L3_DATA;
        break;
    case XC_PSR_CAT_L2_CBM:
        cmd = XEN_DOMCTL_PSR_GET_L2_CBM;
        break;
    case XC_PSR_MBA_THRTL:
        cmd = XEN_DOMCTL_PSR_GET_MBA_THRTL;
        break;
    default:
        errno = EINVAL;
        return -1;
    }

    domctl.cmd = XEN_DOMCTL_psr_alloc;
    domctl.domain = domid;
    domctl.u.psr_alloc.cmd = cmd;
    domctl.u.psr_alloc.target = target;

    rc = do_domctl(xch, &domctl);

    if ( !rc )
        *data = domctl.u.psr_alloc.data;

    return rc;
}
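A hedged usage sketch, assuming <inttypes.h> for PRIx64 and that target selects the socket for CAT allocations: read the domain's L3 cache bitmask on socket 0.

    uint64_t cbm;

    if ( xc_psr_get_domain_data(xch, domid, XC_PSR_CAT_L3_CBM, 0, &cbm) == 0 )
        printf("L3 CBM on socket 0: %#" PRIx64 "\n", cbm);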
Example #11
int xc_domain_create(xc_interface *xch,
                     uint32_t ssidref,
                     xen_domain_handle_t handle,
                     uint32_t flags,
                     uint32_t *pdomid)
{
    int err;
    DECLARE_DOMCTL;

    domctl.cmd = XEN_DOMCTL_createdomain;
    domctl.domain = (domid_t)*pdomid;
    domctl.u.createdomain.ssidref = ssidref;
    domctl.u.createdomain.flags   = flags;
    memcpy(domctl.u.createdomain.handle, handle, sizeof(xen_domain_handle_t));
    if ( (err = do_domctl(xch, &domctl)) != 0 )
        return err;

    *pdomid = (uint16_t)domctl.domain;
    return 0;
}
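A hedged caller sketch for this (older) five-argument interface: passing *pdomid as 0 asks Xen to pick the first free domid, which is written back on success.

    xen_domain_handle_t handle = { 0 };   /* all-zero UUID, illustrative */
    uint32_t domid = 0;

    if ( xc_domain_create(xch, 0 /* ssidref */, handle, 0 /* flags */, &domid) == 0 )
        printf("created domain %u\n", domid);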
Example #12
int xc_vcpu_setaffinity(xc_interface *xch,
                        uint32_t domid,
                        int vcpu,
                        xc_cpumap_t cpumap)
{
    DECLARE_DOMCTL;
    DECLARE_HYPERCALL_BUFFER(uint8_t, local);
    int ret = -1;
    int cpusize;

    cpusize = xc_get_cpumap_size(xch);
    if (!cpusize)
    {
        PERROR("Could not get number of cpus");
        goto out;
    }

    local = xc_hypercall_buffer_alloc(xch, local, cpusize);
    if ( local == NULL )
    {
        PERROR("Could not allocate memory for setvcpuaffinity domctl hypercall");
        goto out;
    }

    domctl.cmd = XEN_DOMCTL_setvcpuaffinity;
    domctl.domain = (domid_t)domid;
    domctl.u.vcpuaffinity.vcpu    = vcpu;

    memcpy(local, cpumap, cpusize);

    set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);

    domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8;

    ret = do_domctl(xch, &domctl);

    xc_hypercall_buffer_free(xch, local);

 out:
    return ret;
}
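A hedged caller sketch, assuming the libxc helper xc_cpumap_alloc() (which returns a zeroed byte map of xc_get_cpumap_size() bytes): pin vCPU 0 to physical CPU 2.

    xc_cpumap_t cpumap = xc_cpumap_alloc(xch);

    if ( cpumap )
    {
        cpumap[2 / 8] |= 1 << (2 % 8);   /* set the bit for pCPU 2 */
        if ( xc_vcpu_setaffinity(xch, domid, 0, cpumap) )
            perror("xc_vcpu_setaffinity");
        free(cpumap);
    }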
Example #13
int xc_memshr_nominate_gref(xc_interface *xch,
                            uint32_t domid,
                            grant_ref_t gref,
                            uint64_t *handle)
{
    DECLARE_DOMCTL;
    struct xen_domctl_mem_sharing_op *op;
    int ret;

    domctl.cmd = XEN_DOMCTL_mem_sharing_op;
    domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION;
    domctl.domain = (domid_t)domid;
    op = &(domctl.u.mem_sharing_op);
    op->op = XEN_DOMCTL_MEM_EVENT_OP_SHARING_NOMINATE_GREF;
    op->u.nominate.u.grant_ref = gref;

    ret = do_domctl(xch, &domctl);
    if ( !ret )
        *handle = op->u.nominate.handle;

    return ret;
}
Example #14
int xc_vcpu_getcontext(int xc_handle,
                       uint32_t domid,
                       uint32_t vcpu,
                       vcpu_guest_context_any_t *ctxt)
{
    int rc;
    DECLARE_DOMCTL;
    size_t sz = sizeof(vcpu_guest_context_any_t);

    domctl.cmd = XEN_DOMCTL_getvcpucontext;
    domctl.domain = (domid_t)domid;
    domctl.u.vcpucontext.vcpu   = (uint16_t)vcpu;
    set_xen_guest_handle(domctl.u.vcpucontext.ctxt, &ctxt->c);

    
    if ( (rc = lock_pages(ctxt, sz)) != 0 )
        return rc;
    rc = do_domctl(xc_handle, &domctl);
    unlock_pages(ctxt, sz);

    return rc;
}
static int setup_hypercall_page(struct xc_dom_image *dom)
{
    DECLARE_DOMCTL;
    xen_pfn_t pfn;
    int rc;

    if ( dom->parms.virt_hypercall == -1 )
        return 0;
    pfn = (dom->parms.virt_hypercall - dom->parms.virt_base)
        >> XC_DOM_PAGE_SHIFT(dom);

    xc_dom_printf("%s: vaddr=0x%" PRIx64 " pfn=0x%" PRIpfn "\n", __FUNCTION__,
                  dom->parms.virt_hypercall, pfn);
    domctl.cmd = XEN_DOMCTL_hypercall_init;
    domctl.domain = dom->guest_domid;
    domctl.u.hypercall_init.gmfn = xc_dom_p2m_guest(dom, pfn);
    rc = do_domctl(dom->guest_xc, &domctl);
    if ( rc != 0 )
        xc_dom_panic(XC_INTERNAL_ERROR, "%s: HYPERCALL_INIT failed (rc=%d)\n",
                     __FUNCTION__, rc);
    return rc;
}
Example #16
/* set info to hvm guest for restore */
int xc_domain_hvm_setcontext(int xc_handle,
                             uint32_t domid,
                             uint8_t *ctxt_buf,
                             uint32_t size)
{
    int ret;
    DECLARE_DOMCTL;

    domctl.cmd = XEN_DOMCTL_sethvmcontext;
    domctl.domain = domid;
    domctl.u.hvmcontext.size = size;
    set_xen_guest_handle(domctl.u.hvmcontext.buffer, ctxt_buf);

    if ( (ret = lock_pages(ctxt_buf, size)) != 0 )
        return ret;

    ret = do_domctl(xc_handle, &domctl);

    unlock_pages(ctxt_buf, size);

    return ret;
}
Example #17
int xc_vcpu_setaffinity(int xc_handle,
                        uint32_t domid,
                        int vcpu,
                        uint64_t *cpumap, int cpusize)
{
    DECLARE_DOMCTL;
    int ret = -1;
    uint8_t *local = malloc(cpusize); 

    if(local == NULL)
    {
        PERROR("Could not alloc memory for Xen hypercall");
        goto out;
    }
    domctl.cmd = XEN_DOMCTL_setvcpuaffinity;
    domctl.domain = (domid_t)domid;
    domctl.u.vcpuaffinity.vcpu    = vcpu;

    bitmap_64_to_byte(local, cpumap, cpusize * 8);

    set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);

    domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8;
    
    if ( lock_pages(local, cpusize) != 0 )
    {
        PERROR("Could not lock memory for Xen hypercall");
        goto out;
    }

    ret = do_domctl(xc_handle, &domctl);

    unlock_pages(local, cpusize);

 out:
    free(local);
    return ret;
}
Example #18
int xc_sched_rt_domain_set(xc_interface *xch,
                           uint32_t domid,
                           struct xen_domctl_sched_rt_params *sdom)
{
    int rc;
    DECLARE_DOMCTL;

    domctl.cmd = XEN_DOMCTL_scheduler_op;
    domctl.domain = (domid_t) domid;
    domctl.u.scheduler_op.sched_id = XEN_SCHEDULER_RT_DS;
    domctl.u.scheduler_op.cmd = XEN_DOMCTL_SCHEDOP_putinfo;
    domctl.u.scheduler_op.u.rt.vcpu_index = sdom->index;
    domctl.u.scheduler_op.u.rt.period = sdom->period;
    domctl.u.scheduler_op.u.rt.budget_low = sdom->budget_low;
    domctl.u.scheduler_op.u.rt.budget_high = sdom->budget_high;
    domctl.u.scheduler_op.u.rt.deadline = sdom->deadline;
    domctl.u.scheduler_op.u.rt.criticality_vcpu= sdom->criticality_vcpu;
    domctl.u.scheduler_op.u.rt.offl_flag = sdom->offl_flag;

    rc = do_domctl(xch, &domctl);

    return rc;
}
Example #19
static int
xc_ia64_get_pfn_list(int xc_handle, uint32_t domid, xen_pfn_t *pfn_buf,
                     unsigned int start_page, unsigned int nr_pages)
{
    DECLARE_DOMCTL;
    int ret;

    domctl.cmd = XEN_DOMCTL_getmemlist;
    domctl.domain = (domid_t)domid;
    domctl.u.getmemlist.max_pfns = nr_pages;
    domctl.u.getmemlist.start_pfn = start_page;
    domctl.u.getmemlist.num_pfns = 0;
    set_xen_guest_handle(domctl.u.getmemlist.buffer, pfn_buf);

    if (lock_pages(pfn_buf, nr_pages * sizeof(xen_pfn_t)) != 0) {
        PERROR("Could not lock pfn list buffer");
        return -1;
    }
    ret = do_domctl(xc_handle, &domctl);
    unlock_pages(pfn_buf, nr_pages * sizeof(xen_pfn_t));

    return ret < 0 ? -1 : nr_pages;
}
Example #20
int xc_monitor_get_capabilities(xc_interface *xch, domid_t domain_id,
                                uint32_t *capabilities)
{
    int rc;
    DECLARE_DOMCTL;

    if ( !capabilities )
    {
        errno = EINVAL;
        return -1;
    }

    domctl.cmd = XEN_DOMCTL_monitor_op;
    domctl.domain = domain_id;
    domctl.u.monitor_op.op = XEN_DOMCTL_MONITOR_OP_GET_CAPABILITIES;

    rc = do_domctl(xch, &domctl);
    if ( rc )
        return rc;

    *capabilities = domctl.u.monitor_op.event;
    return 0;
}
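Since the result is copied out of domctl.u.monitor_op.event, the capabilities value is a bitmask over the XEN_DOMCTL_MONITOR_EVENT_* constants; a hedged caller-side check might look like this:

    uint32_t caps;

    if ( xc_monitor_get_capabilities(xch, domain_id, &caps) == 0 &&
         (caps & (1U << XEN_DOMCTL_MONITOR_EVENT_WRITE_CTRLREG)) )
        printf("write-ctrlreg monitoring is available\n");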
Example #21
/* set info to hvm guest for restore */
int xc_domain_hvm_setcontext(xc_interface *xch,
                             uint32_t domid,
                             uint8_t *ctxt_buf,
                             uint32_t size)
{
    int ret;
    DECLARE_DOMCTL;
    DECLARE_HYPERCALL_BOUNCE(ctxt_buf, size, XC_HYPERCALL_BUFFER_BOUNCE_IN);

    if ( xc_hypercall_bounce_pre(xch, ctxt_buf) )
        return -1;

    domctl.cmd = XEN_DOMCTL_sethvmcontext;
    domctl.domain = domid;
    domctl.u.hvmcontext.size = size;
    set_xen_guest_handle(domctl.u.hvmcontext.buffer, ctxt_buf);

    ret = do_domctl(xch, &domctl);

    xc_hypercall_bounce_post(xch, ctxt_buf);

    return ret;
}
Example #22
int xc_vcpu_getcontext(xc_interface *xch,
                       uint32_t domid,
                       uint32_t vcpu,
                       vcpu_guest_context_any_t *ctxt)
{
    int rc;
    DECLARE_DOMCTL;
    DECLARE_HYPERCALL_BOUNCE(ctxt, sizeof(vcpu_guest_context_any_t), XC_HYPERCALL_BUFFER_BOUNCE_OUT);

    if ( xc_hypercall_bounce_pre(xch, ctxt) )
        return -1;

    domctl.cmd = XEN_DOMCTL_getvcpucontext;
    domctl.domain = (domid_t)domid;
    domctl.u.vcpucontext.vcpu   = (uint16_t)vcpu;
    set_xen_guest_handle(domctl.u.vcpucontext.ctxt, ctxt);

    rc = do_domctl(xch, &domctl);

    xc_hypercall_bounce_post(xch, ctxt);

    return rc;
}
Example #23
int xc_shadow_control(xc_interface *xch,
                      uint32_t domid,
                      unsigned int sop,
                      xc_hypercall_buffer_t *dirty_bitmap,
                      unsigned long pages,
                      unsigned long *mb,
                      uint32_t mode,
                      xc_shadow_op_stats_t *stats)
{
    int rc;
    DECLARE_DOMCTL;
    DECLARE_HYPERCALL_BUFFER_ARGUMENT(dirty_bitmap);

    memset(&domctl, 0, sizeof(domctl));

    domctl.cmd = XEN_DOMCTL_shadow_op;
    domctl.domain = (domid_t)domid;
    domctl.u.shadow_op.op     = sop;
    domctl.u.shadow_op.pages  = pages;
    domctl.u.shadow_op.mb     = mb ? *mb : 0;
    domctl.u.shadow_op.mode   = mode;
    if (dirty_bitmap != NULL)
        set_xen_guest_handle(domctl.u.shadow_op.dirty_bitmap,
                                dirty_bitmap);

    rc = do_domctl(xch, &domctl);

    if ( stats )
        memcpy(stats, &domctl.u.shadow_op.stats,
               sizeof(xc_shadow_op_stats_t));
    
    if ( mb ) 
        *mb = domctl.u.shadow_op.mb;

    return (rc == 0) ? domctl.u.shadow_op.pages : rc;
}
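A minimal sketch using the wrapper above: enabling log-dirty mode needs no bitmap, size, or statistics arguments.

    if ( xc_shadow_control(xch, domid, XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY,
                           NULL, 0, NULL, 0, NULL) < 0 )
        perror("Could not enable log-dirty mode");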
Example #24
int xc_domctl(xc_interface *xch, struct xen_domctl *domctl)
{
    return do_domctl(xch, domctl);
}
Example #25
int xc_domain_getinfo(xc_interface *xch,
                      uint32_t first_domid,
                      unsigned int max_doms,
                      xc_dominfo_t *info)
{
    unsigned int nr_doms;
    uint32_t next_domid = first_domid;
    DECLARE_DOMCTL;
    int rc = 0;

    memset(info, 0, max_doms*sizeof(xc_dominfo_t));

    for ( nr_doms = 0; nr_doms < max_doms; nr_doms++ )
    {
        domctl.cmd = XEN_DOMCTL_getdomaininfo;
        domctl.domain = (domid_t)next_domid;
        if ( (rc = do_domctl(xch, &domctl)) < 0 )
            break;
        info->domid      = (uint16_t)domctl.domain;

        info->dying    = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_dying);
        info->shutdown = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_shutdown);
        info->paused   = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_paused);
        info->blocked  = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_blocked);
        info->running  = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_running);
        info->hvm      = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_hvm_guest);
        info->debugged = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_debugged);

        info->shutdown_reason =
            (domctl.u.getdomaininfo.flags>>XEN_DOMINF_shutdownshift) &
            XEN_DOMINF_shutdownmask;

        if ( info->shutdown && (info->shutdown_reason == SHUTDOWN_crash) )
        {
            info->shutdown = 0;
            info->crashed  = 1;
        }

        info->ssidref  = domctl.u.getdomaininfo.ssidref;
        info->nr_pages = domctl.u.getdomaininfo.tot_pages;
        info->nr_shared_pages = domctl.u.getdomaininfo.shr_pages;
        info->nr_paged_pages = domctl.u.getdomaininfo.paged_pages;
        info->max_memkb = domctl.u.getdomaininfo.max_pages << (PAGE_SHIFT-10);
        info->shared_info_frame = domctl.u.getdomaininfo.shared_info_frame;
        info->cpu_time = domctl.u.getdomaininfo.cpu_time;
        info->nr_online_vcpus = domctl.u.getdomaininfo.nr_online_vcpus;
        info->max_vcpu_id = domctl.u.getdomaininfo.max_vcpu_id;
        info->cpupool = domctl.u.getdomaininfo.cpupool;

        memcpy(info->handle, domctl.u.getdomaininfo.handle,
               sizeof(xen_domain_handle_t));

        next_domid = (uint16_t)domctl.domain + 1;
        info++;
    }

    if ( nr_doms == 0 )
        return rc;

    return nr_doms;
}
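A hedged enumeration sketch (the batch size of 32 is arbitrary): walk all domains, restarting each batch at the last domid seen plus one.

    xc_dominfo_t dominfo[32];
    uint32_t next = 0;
    int i, n;

    while ( (n = xc_domain_getinfo(xch, next, 32, dominfo)) > 0 )
    {
        for ( i = 0; i < n; i++ )
            printf("dom %u: %lu pages\n", dominfo[i].domid, dominfo[i].nr_pages);
        next = dominfo[n - 1].domid + 1;
    }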
Example #26
static int xc_domain_resume_any(xc_interface *xch, uint32_t domid)
{
    DECLARE_DOMCTL;
    xc_dominfo_t info;
    int i, rc = -1;
#if defined(__i386__) || defined(__x86_64__)
    struct domain_info_context _dinfo = { .guest_width = 0,
                                          .p2m_size = 0 };
    struct domain_info_context *dinfo = &_dinfo;
    unsigned long mfn;
    vcpu_guest_context_any_t ctxt;
    start_info_t *start_info;
    shared_info_t *shinfo = NULL;
    xen_pfn_t *p2m_frame_list_list = NULL;
    xen_pfn_t *p2m_frame_list = NULL;
    xen_pfn_t *p2m = NULL;
#endif

    if ( xc_domain_getinfo(xch, domid, 1, &info) != 1 )
    {
        PERROR("Could not get domain info");
        return rc;
    }

    /*
     * (x86 only) Rewrite store_mfn and console_mfn back to MFN (from PFN).
     */
#if defined(__i386__) || defined(__x86_64__)
    if ( info.hvm )
    {
        ERROR("Cannot resume uncooperative HVM guests");
        return rc;
    }

    if ( xc_domain_get_guest_width(xch, domid, &dinfo->guest_width) != 0 )
    {
        PERROR("Could not get domain width");
        return rc;
    }
    if ( dinfo->guest_width != sizeof(long) )
    {
        ERROR("Cannot resume uncooperative cross-address-size guests");
        return rc;
    }

    /* Map the shared info frame */
    shinfo = xc_map_foreign_range(xch, domid, PAGE_SIZE,
                                  PROT_READ, info.shared_info_frame);
    if ( shinfo == NULL )
    {
        ERROR("Couldn't map shared info");
        goto out;
    }

    dinfo->p2m_size = shinfo->arch.max_pfn;

    p2m_frame_list_list =
        xc_map_foreign_range(xch, domid, PAGE_SIZE, PROT_READ,
                             shinfo->arch.pfn_to_mfn_frame_list_list);
    if ( p2m_frame_list_list == NULL )
    {
        ERROR("Couldn't map p2m_frame_list_list");
        goto out;
    }

    p2m_frame_list = xc_map_foreign_pages(xch, domid, PROT_READ,
                                          p2m_frame_list_list,
                                          P2M_FLL_ENTRIES);
    if ( p2m_frame_list == NULL )
    {
        ERROR("Couldn't map p2m_frame_list");
        goto out;
    }

    /* Map all the frames of the pfn->mfn table. For migrate to succeed,
       the guest must not change which frames are used for this purpose.
       (It's not clear why it would want to change them, and we'll be OK
       from a safety POV anyhow.) */
    p2m = xc_map_foreign_pages(xch, domid, PROT_READ,
                               p2m_frame_list,
                               P2M_FL_ENTRIES);
    if ( p2m == NULL )
    {
        ERROR("Couldn't map p2m table");
        goto out;
    }

    if ( xc_vcpu_getcontext(xch, domid, 0, &ctxt) )
    {
        ERROR("Could not get vcpu context");
        goto out;
    }

    mfn = GET_FIELD(&ctxt, user_regs.edx, dinfo->guest_width);

    start_info = xc_map_foreign_range(xch, domid, PAGE_SIZE,
                                      PROT_READ | PROT_WRITE, mfn);
    if ( start_info == NULL )
    {
        ERROR("Couldn't map start_info");
        goto out;
    }

    start_info->store_mfn        = p2m[start_info->store_mfn];
    start_info->console.domU.mfn = p2m[start_info->console.domU.mfn];

    munmap(start_info, PAGE_SIZE);
#endif /* defined(__i386__) || defined(__x86_64__) */

    /* Reset all secondary CPU states. */
    for ( i = 1; i <= info.max_vcpu_id; i++ )
        if ( xc_vcpu_setcontext(xch, domid, i, NULL) != 0 )
        {
            ERROR("Couldn't reset vcpu state");
            goto out;
        }

    /* Ready to resume domain execution now. */
    domctl.cmd = XEN_DOMCTL_resumedomain;
    domctl.domain = domid;
    rc = do_domctl(xch, &domctl);

out:
#if defined(__i386__) || defined(__x86_64__)
    if (p2m)
        munmap(p2m, P2M_FL_ENTRIES*PAGE_SIZE);
    if (p2m_frame_list)
        munmap(p2m_frame_list, P2M_FLL_ENTRIES*PAGE_SIZE);
    if (p2m_frame_list_list)
        munmap(p2m_frame_list_list, PAGE_SIZE);
    if (shinfo)
        munmap(shinfo, PAGE_SIZE);
#endif

    return rc;
}

/*
 * Resume execution of a domain after suspend shutdown.
 * This can happen in one of two ways:
 *  1. Resume with special return code.
 *  2. Reset guest environment so it believes it is resumed in a new
 *     domain context.
 * (2) should be used only for guests which cannot handle the special
 * new return code. (1) is always safe (but slower).
 */
int xc_domain_resume(xc_interface *xch, uint32_t domid, int fast)
{
    return (fast
            ? xc_domain_resume_cooperative(xch, domid)
            : xc_domain_resume_any(xch, domid));
}
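A hedged caller sketch of the wrapper: fast != 0 takes xc_domain_resume_cooperative() (presumably the special-return-code path), while fast == 0 takes the environment-reset path implemented by xc_domain_resume_any() above.

    if ( xc_domain_resume(xch, domid, 0 /* reset guest environment */) )
        perror("xc_domain_resume");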
Example #27
int xc_domctl(int xc_handle, struct xen_domctl *domctl)
{
    return do_domctl(xc_handle, domctl);
}
int xc_dom_boot_image(struct xc_dom_image *dom)
{
    DECLARE_DOMCTL;
    void *ctxt;
    int rc;

    xc_dom_printf("%s: called\n", __FUNCTION__);

    /* misc ia64 stuff*/
    if ( (rc = arch_setup_bootearly(dom)) != 0 )
        return rc;

    /* collect some info */
    domctl.cmd = XEN_DOMCTL_getdomaininfo;
    domctl.domain = dom->guest_domid;
    rc = do_domctl(dom->guest_xc, &domctl);
    if ( rc != 0 )
    {
        xc_dom_panic(XC_INTERNAL_ERROR,
                     "%s: getdomaininfo failed (rc=%d)\n", __FUNCTION__, rc);
        return rc;
    }
    if ( domctl.domain != dom->guest_domid )
    {
        xc_dom_panic(XC_INTERNAL_ERROR,
                     "%s: Huh? domid mismatch (%d != %d)\n", __FUNCTION__,
                     domctl.domain, dom->guest_domid);
        return -1;
    }
    dom->shared_info_mfn = domctl.u.getdomaininfo.shared_info_frame;

    /* sanity checks */
    if ( !xc_dom_compat_check(dom) )
        return -1;

    /* initial mm setup */
    if ( (rc = xc_dom_update_guest_p2m(dom)) != 0 )
        return rc;
    if ( dom->arch_hooks->setup_pgtables )
        if ( (rc = dom->arch_hooks->setup_pgtables(dom)) != 0 )
            return rc;

    if ( (rc = clear_page(dom, dom->console_pfn)) != 0 )
        return rc;
    if ( (rc = clear_page(dom, dom->xenstore_pfn)) != 0 )
        return rc;

    /* start info page */
    if ( dom->arch_hooks->start_info )
        dom->arch_hooks->start_info(dom);

    /* hypercall page */
    if ( (rc = setup_hypercall_page(dom)) != 0 )
        return rc;
    xc_dom_log_memory_footprint(dom);

    /* misc x86 stuff */
    if ( (rc = arch_setup_bootlate(dom)) != 0 )
        return rc;

    /* let the vm run */
    ctxt = xc_dom_malloc(dom, PAGE_SIZE * 2 /* FIXME */ );
    if ( ctxt == NULL )
        return -1;
    memset(ctxt, 0, PAGE_SIZE * 2);
    if ( (rc = dom->arch_hooks->vcpu(dom, ctxt)) != 0 )
        return rc;
    xc_dom_unmap_all(dom);
    rc = launch_vm(dom->guest_xc, dom->guest_domid, ctxt);

    return rc;
}