Example #1
File: viridian.c Project: djs55/xen
void viridian_domain_deinit(struct domain *d)
{
    struct vcpu *v;

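    /* Tear down the per-vCPU APIC-assist state for the whole domain. */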
    for_each_vcpu ( d, v )
        teardown_apic_assist(v);
}
Example #2
static uint32_t ioapic_get_delivery_bitmask(
    struct hvm_hw_vioapic *vioapic, uint16_t dest, uint8_t dest_mode)
{
    uint32_t mask = 0;
    struct vcpu *v;

    HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "dest %d dest_mode %d",
                dest, dest_mode);

    if ( dest_mode == 0 ) /* Physical mode. */
    {
        if ( dest == 0xFF ) /* Broadcast. */
        {
            for_each_vcpu ( vioapic_domain(vioapic), v )
                mask |= 1 << v->vcpu_id;
            goto out;
        }

        for_each_vcpu ( vioapic_domain(vioapic), v )
        {
            if ( VLAPIC_ID(vcpu_vlapic(v)) == dest )
            {
                mask = 1 << v->vcpu_id;
                break;
            }
        }
    }
    else if ( dest != 0 ) /* Logical mode, MDA non-zero. */
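    {
        /*
         * Assumed completion: the example was truncated at this point. The
         * logical-mode match below follows the vlapic interface.
         */
        for_each_vcpu ( vioapic_domain(vioapic), v )
            if ( vlapic_match_logical_addr(vcpu_vlapic(v), dest) )
                mask |= 1 << v->vcpu_id;
    }

 out:
    return mask;
}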
Example #3
static void domain_shutdown_finalise(void)
{
    struct domain *d;
    struct vcpu *v;

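    /* Claim the domain this CPU previously queued for shutdown. */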
    d = domain_shuttingdown[smp_processor_id()];
    domain_shuttingdown[smp_processor_id()] = NULL;

    BUG_ON(d == NULL);
    BUG_ON(d == current->domain);

    LOCK_BIGLOCK(d);

    /* Make sure that every vcpu is descheduled before we finalise. */
    for_each_vcpu ( d, v )
        vcpu_sleep_sync(v);
    BUG_ON(!cpus_empty(d->domain_dirty_cpumask));

    sync_pagetable_state(d);

    /* Don't set DOMF_shutdown until execution contexts are sync'ed. */
    if ( !test_and_set_bit(_DOMF_shutdown, &d->domain_flags) )
        send_guest_virq(dom0->vcpu[0], VIRQ_DOM_EXC);

    UNLOCK_BIGLOCK(d);

    put_domain(d);
}
Example #4
File: schedule.c Project: robhoes/xen
int sched_move_domain(struct domain *d, struct cpupool *c)
{
    struct vcpu *v;
    unsigned int new_p;
    void **vcpu_priv;
    void *domdata;
    void *vcpudata;
    struct scheduler *old_ops;
    void *old_domdata;

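    /* Ask the target cpupool's scheduler for new per-domain data. */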
    domdata = SCHED_OP(c->sched, alloc_domdata, d);
    if ( domdata == NULL )
        return -ENOMEM;

    vcpu_priv = xzalloc_array(void *, d->max_vcpus);
    if ( vcpu_priv == NULL )
    {
        SCHED_OP(c->sched, free_domdata, domdata);
        return -ENOMEM;
    }

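    /* Allocate per-vCPU scheduler data, unwinding all of it on failure. */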
    for_each_vcpu ( d, v )
    {
        vcpu_priv[v->vcpu_id] = SCHED_OP(c->sched, alloc_vdata, v, domdata);
        if ( vcpu_priv[v->vcpu_id] == NULL )
        {
            for_each_vcpu ( d, v )
                xfree(vcpu_priv[v->vcpu_id]);
            xfree(vcpu_priv);
            SCHED_OP(c->sched, free_domdata, domdata);
            return -ENOMEM;
        }
    }
Example #5
void domain_unpause(struct domain *d)
{
    struct vcpu *v;

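    /* Wake all vCPUs once the last pause reference is dropped. */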
    if ( atomic_dec_and_test(&d->pause_count) )
        for_each_vcpu( d, v )
            vcpu_wake(v);
}
Example #6
void domain_pause(struct domain *d)
{
    struct vcpu *v;

    ASSERT(d != current->domain);

    atomic_inc(&d->pause_count);

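    /* Synchronously deschedule every vCPU of the domain. */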
    for_each_vcpu( d, v )
        vcpu_sleep_sync(v);
}
Example #7
void domain_pause_for_debugger(void)
{
    struct domain *d = current->domain;
    struct vcpu *v;

    atomic_inc(&d->pause_count);
    if ( test_and_set_bool(d->is_paused_by_controller) )
        domain_unpause(d); /* race-free atomic_dec(&d->pause_count) */

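    /* Put the vCPUs to sleep without waiting for them to deschedule. */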
    for_each_vcpu ( d, v )
        vcpu_sleep_nosync(v);

    send_guest_global_virq(dom0, VIRQ_DEBUGGER);
}
Example #8
void domain_kill(struct domain *d)
{
    struct vcpu *v;

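    /* Pause the domain; teardown runs only on the first kill. */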
    domain_pause(d);
    if ( !test_and_set_bit(_DOMF_dying, &d->domain_flags) )
    {
        for_each_vcpu(d, v)
            sched_rem_domain(v);
        gnttab_release_mappings(d);
        domain_relinquish_resources(d);
        put_domain(d);

        send_guest_virq(dom0->vcpu[0], VIRQ_DOM_EXC);
    }
}
Example #9
File: save.c Project: 0day-ci/xen
size_t hvm_save_size(struct domain *d) 
{
    struct vcpu *v;
    size_t sz;
    int i;
    
    /* Basic overhead for header and footer */
    sz = (2 * sizeof (struct hvm_save_descriptor)) + HVM_SAVE_LENGTH(HEADER);

    /* Plus space for each thing we will be saving */
    for ( i = 0; i <= HVM_SAVE_CODE_MAX; i++ )
    {
        if ( hvm_sr_handlers[i].kind == HVMSR_PER_VCPU )
        {
            for_each_vcpu(d, v)
                sz += hvm_sr_handlers[i].size;
        }
        else
            sz += hvm_sr_handlers[i].size;
    }

    return sz;
}
Example #10
void domain_update_node_affinity(struct domain *d)
{
    cpumask_t cpumask = CPU_MASK_NONE;
    nodemask_t nodemask = NODE_MASK_NONE;
    struct vcpu *v;
    unsigned int node;

    spin_lock(&d->node_affinity_lock);

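    /* Accumulate the CPU affinities of all vCPUs. */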
    for_each_vcpu ( d, v )
        cpus_or(cpumask, cpumask, v->cpu_affinity);

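    /* Any node whose CPUs intersect that mask joins the affinity set. */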
    for_each_online_node ( node )
        if ( cpus_intersects(node_to_cpumask(node), cpumask) )
            node_set(node, nodemask);

    d->node_affinity = nodemask;
    spin_unlock(&d->node_affinity_lock);
}
Example #11
File: monitor.c Project: TressaOrg/xen
int arch_monitor_domctl_event(struct domain *d,
                              struct xen_domctl_monitor_op *mop)
{
    struct arch_domain *ad = &d->arch;
    bool_t requested_status = (XEN_DOMCTL_MONITOR_OP_ENABLE == mop->op);

    switch ( mop->event )
    {
    case XEN_DOMCTL_MONITOR_EVENT_WRITE_CTRLREG:
    {
        unsigned int ctrlreg_bitmask;
        bool_t old_status;

        /* sanity check: avoid left-shift undefined behavior */
        if ( unlikely(mop->u.mov_to_cr.index > 31) )
            return -EINVAL;

        ctrlreg_bitmask = monitor_ctrlreg_bitmask(mop->u.mov_to_cr.index);
        old_status = !!(ad->monitor.write_ctrlreg_enabled & ctrlreg_bitmask);

        if ( unlikely(old_status == requested_status) )
            return -EEXIST;

        domain_pause(d);

        if ( mop->u.mov_to_cr.sync )
            ad->monitor.write_ctrlreg_sync |= ctrlreg_bitmask;
        else
            ad->monitor.write_ctrlreg_sync &= ~ctrlreg_bitmask;

        if ( mop->u.mov_to_cr.onchangeonly )
            ad->monitor.write_ctrlreg_onchangeonly |= ctrlreg_bitmask;
        else
            ad->monitor.write_ctrlreg_onchangeonly &= ~ctrlreg_bitmask;

        if ( requested_status )
            ad->monitor.write_ctrlreg_enabled |= ctrlreg_bitmask;
        else
            ad->monitor.write_ctrlreg_enabled &= ~ctrlreg_bitmask;

        if ( VM_EVENT_X86_CR3 == mop->u.mov_to_cr.index )
        {
            struct vcpu *v;
            /* Latches new CR3 mask through CR0 code. */
            for_each_vcpu ( d, v )
                hvm_update_guest_cr(v, 0);
        }

        domain_unpause(d);

        break;
    }

    case XEN_DOMCTL_MONITOR_EVENT_MOV_TO_MSR:
    {
        bool_t old_status;
        int rc;
        u32 msr = mop->u.mov_to_msr.msr;

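        /* Pause the domain while the MSR monitoring state is updated. */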
        domain_pause(d);

        old_status = monitored_msr(d, msr);

        if ( unlikely(old_status == requested_status) )
        {
            domain_unpause(d);
            return -EEXIST;
        }

        if ( requested_status )
            rc = monitor_enable_msr(d, msr);
        else
            rc = monitor_disable_msr(d, msr);

        domain_unpause(d);

        return rc;
    }

    case XEN_DOMCTL_MONITOR_EVENT_SINGLESTEP:
    {
        bool_t old_status = ad->monitor.singlestep_enabled;

        if ( unlikely(old_status == requested_status) )
            return -EEXIST;

        domain_pause(d);
        ad->monitor.singlestep_enabled = requested_status;
        domain_unpause(d);
        break;
    }

    case XEN_DOMCTL_MONITOR_EVENT_SOFTWARE_BREAKPOINT:
    {
        bool_t old_status = ad->monitor.software_breakpoint_enabled;

        if ( unlikely(old_status == requested_status) )
            return -EEXIST;

        domain_pause(d);
        ad->monitor.software_breakpoint_enabled = requested_status;
        domain_unpause(d);
        break;
    }

    case XEN_DOMCTL_MONITOR_EVENT_DEBUG_EXCEPTION:
    {
        bool_t old_status = ad->monitor.debug_exception_enabled;

        if ( unlikely(old_status == requested_status) )
            return -EEXIST;

        domain_pause(d);
        ad->monitor.debug_exception_enabled = requested_status;
        ad->monitor.debug_exception_sync = requested_status ?
                                            mop->u.debug_exception.sync :
                                            0;
        domain_unpause(d);
        break;
    }

    case XEN_DOMCTL_MONITOR_EVENT_CPUID:
    {
        bool_t old_status = ad->monitor.cpuid_enabled;

        if ( unlikely(old_status == requested_status) )
            return -EEXIST;

        domain_pause(d);
        ad->monitor.cpuid_enabled = requested_status;
        domain_unpause(d);
        break;
    }

    default:
        /*
         * Should not be reached unless arch_monitor_get_capabilities() is
         * not properly implemented.
         */
        ASSERT_UNREACHABLE();
        return -EOPNOTSUPP;
    }

    return 0;
}
Example #12
File: monitor.c Project: lwhibernate/xen
int monitor_domctl(struct domain *d, struct xen_domctl_monitor_op *mop)
{
    int rc;
    struct arch_domain *ad = &d->arch;
    uint32_t capabilities = get_capabilities(d);

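    /* XSM permission check before any monitor state is touched. */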
    rc = xsm_vm_event_control(XSM_PRIV, d, mop->op, mop->event);
    if ( rc )
        return rc;

    switch ( mop->op )
    {
    case XEN_DOMCTL_MONITOR_OP_GET_CAPABILITIES:
        mop->event = capabilities;
        return 0;

    case XEN_DOMCTL_MONITOR_OP_EMULATE_EACH_REP:
        d->arch.mem_access_emulate_each_rep = !!mop->event;
        return 0;
    }

    /*
     * Sanity check
     */
    if ( mop->op != XEN_DOMCTL_MONITOR_OP_ENABLE &&
         mop->op != XEN_DOMCTL_MONITOR_OP_DISABLE )
        return -EOPNOTSUPP;

    /* Check if event type is available. */
    if ( !(capabilities & (1 << mop->event)) )
        return -EOPNOTSUPP;

    switch ( mop->event )
    {
    case XEN_DOMCTL_MONITOR_EVENT_WRITE_CTRLREG:
    {
        unsigned int ctrlreg_bitmask =
            monitor_ctrlreg_bitmask(mop->u.mov_to_cr.index);
        bool_t status =
            !!(ad->monitor.write_ctrlreg_enabled & ctrlreg_bitmask);
        struct vcpu *v;

        rc = status_check(mop, status);
        if ( rc )
            return rc;

        if ( mop->u.mov_to_cr.sync )
            ad->monitor.write_ctrlreg_sync |= ctrlreg_bitmask;
        else
            ad->monitor.write_ctrlreg_sync &= ~ctrlreg_bitmask;

        if ( mop->u.mov_to_cr.onchangeonly )
            ad->monitor.write_ctrlreg_onchangeonly |= ctrlreg_bitmask;
        else
            ad->monitor.write_ctrlreg_onchangeonly &= ~ctrlreg_bitmask;

        domain_pause(d);

        if ( !status )
            ad->monitor.write_ctrlreg_enabled |= ctrlreg_bitmask;
        else
            ad->monitor.write_ctrlreg_enabled &= ~ctrlreg_bitmask;

        domain_unpause(d);

        if ( mop->u.mov_to_cr.index == VM_EVENT_X86_CR3 )
            /* Latches new CR3 mask through CR0 code */
            for_each_vcpu ( d, v )
                hvm_update_guest_cr(v, 0);

        break;
    }

    case XEN_DOMCTL_MONITOR_EVENT_MOV_TO_MSR:
    {
        bool_t status = ad->monitor.mov_to_msr_enabled;

        rc = status_check(mop, status);
        if ( rc )
            return rc;

        if ( mop->op == XEN_DOMCTL_MONITOR_OP_ENABLE &&
             mop->u.mov_to_msr.extended_capture )
        {
            if ( hvm_enable_msr_exit_interception(d) )
                ad->monitor.mov_to_msr_extended = 1;
            else
                return -EOPNOTSUPP;
        }
        else
            ad->monitor.mov_to_msr_extended = 0;

        domain_pause(d);
        ad->monitor.mov_to_msr_enabled = !status;
        domain_unpause(d);
        break;
    }

    case XEN_DOMCTL_MONITOR_EVENT_SINGLESTEP:
    {
        bool_t status = ad->monitor.singlestep_enabled;

        rc = status_check(mop, status);
        if ( rc )
            return rc;

        domain_pause(d);
        ad->monitor.singlestep_enabled = !status;
        domain_unpause(d);
        break;
    }

    case XEN_DOMCTL_MONITOR_EVENT_SOFTWARE_BREAKPOINT:
    {
        bool_t status = ad->monitor.software_breakpoint_enabled;

        rc = status_check(mop, status);
        if ( rc )
            return rc;

        domain_pause(d);
        ad->monitor.software_breakpoint_enabled = !status;
        domain_unpause(d);
        break;
    }

    case XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST:
    {
        bool_t status = ad->monitor.guest_request_enabled;

        rc = status_check(mop, status);
        if ( rc )
            return rc;

        ad->monitor.guest_request_sync = mop->u.guest_request.sync;

        domain_pause(d);
        ad->monitor.guest_request_enabled = !status;
        domain_unpause(d);
        break;
    }

    default:
        return -EOPNOTSUPP;

    }

    return 0;
}
Example #13
File: domctl.c Project: xen-project/xen
static void update_domain_cpuid_info(struct domain *d,
                                     const xen_domctl_cpuid_t *ctl)
{
    switch ( ctl->input[0] )
    {
    case 0: {
        union {
            typeof(boot_cpu_data.x86_vendor_id) str;
            struct {
                uint32_t ebx, edx, ecx;
            } reg;
        } vendor_id = {
            .reg = {
                .ebx = ctl->ebx,
                .edx = ctl->edx,
                .ecx = ctl->ecx
            }
        };
        int old_vendor = d->arch.x86_vendor;

        d->arch.x86_vendor = get_cpu_vendor(vendor_id.str, gcv_guest);

        if ( is_hvm_domain(d) && (d->arch.x86_vendor != old_vendor) )
        {
            struct vcpu *v;

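            /* The guest vendor changed: update each vCPU's HVM state. */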
            for_each_vcpu( d, v )
                hvm_update_guest_vendor(v);
        }

        break;
    }

    case 1:
        d->arch.x86 = (ctl->eax >> 8) & 0xf;
        if ( d->arch.x86 == 0xf )
            d->arch.x86 += (ctl->eax >> 20) & 0xff;
        d->arch.x86_model = (ctl->eax >> 4) & 0xf;
        if ( d->arch.x86 >= 0x6 )
            d->arch.x86_model |= (ctl->eax >> 12) & 0xf0;

        if ( is_pv_domain(d) && ((levelling_caps & LCAP_1cd) == LCAP_1cd) )
        {
            uint64_t mask = cpuidmask_defaults._1cd;
            uint32_t ecx = ctl->ecx & pv_featureset[FEATURESET_1c];
            uint32_t edx = ctl->edx & pv_featureset[FEATURESET_1d];

            /*
             * Must expose the host's HTT and X2APIC values so a guest using native
             * CPUID can correctly interpret other leaves which cannot be
             * masked.
             */
            if ( cpu_has_x2apic )
                ecx |= cpufeat_mask(X86_FEATURE_X2APIC);
            if ( cpu_has_htt )
                edx |= cpufeat_mask(X86_FEATURE_HTT);

            switch ( boot_cpu_data.x86_vendor )
            {
            case X86_VENDOR_INTEL:
                /*
                 * Intel masking MSRs are documented as AND masks.
                 * Experimentally, they are applied after OSXSAVE and APIC
                 * are fast-forwarded from real hardware state.
                 */
                mask &= ((uint64_t)edx << 32) | ecx;

                if ( ecx & cpufeat_mask(X86_FEATURE_XSAVE) )
                    ecx = cpufeat_mask(X86_FEATURE_OSXSAVE);
                else
                    ecx = 0;
                edx = cpufeat_mask(X86_FEATURE_APIC);

                mask |= ((uint64_t)edx << 32) | ecx;
                break;

            case X86_VENDOR_AMD:
                mask &= ((uint64_t)ecx << 32) | edx;

                /*
                 * AMD masking MSRs are documented as overrides.
                 * Experimentally, fast-forwarding of the OSXSAVE and APIC
                 * bits from real hardware state only occurs if the MSR has
                 * the respective bits set.
                 */
                if ( ecx & cpufeat_mask(X86_FEATURE_XSAVE) )
                    ecx = cpufeat_mask(X86_FEATURE_OSXSAVE);
                else
                    ecx = 0;
                edx = cpufeat_mask(X86_FEATURE_APIC);

                mask |= ((uint64_t)ecx << 32) | edx;
                break;
            }

            d->arch.pv_domain.cpuidmasks->_1cd = mask;
        }
        break;

    case 6:
        if ( is_pv_domain(d) && ((levelling_caps & LCAP_6c) == LCAP_6c) )
        {
            uint64_t mask = cpuidmask_defaults._6c;

            if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
                mask &= (~0ULL << 32) | ctl->ecx;

            d->arch.pv_domain.cpuidmasks->_6c = mask;
        }
        break;

    case 7:
        if ( ctl->input[1] != 0 )
            break;

        if ( is_pv_domain(d) && ((levelling_caps & LCAP_7ab0) == LCAP_7ab0) )
        {
            uint64_t mask = cpuidmask_defaults._7ab0;
            uint32_t eax = ctl->eax;
            uint32_t ebx = ctl->ebx & pv_featureset[FEATURESET_7b0];

            if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
                mask &= ((uint64_t)eax << 32) | ebx;

            d->arch.pv_domain.cpuidmasks->_7ab0 = mask;
        }
        break;

    case 0xd:
        if ( ctl->input[1] != 1 )
            break;

        if ( is_pv_domain(d) && ((levelling_caps & LCAP_Da1) == LCAP_Da1) )
        {
            uint64_t mask = cpuidmask_defaults.Da1;
            uint32_t eax = ctl->eax & pv_featureset[FEATURESET_Da1];

            if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
                mask &= (~0ULL << 32) | eax;

            d->arch.pv_domain.cpuidmasks->Da1 = mask;
        }
        break;

    case 0x80000001:
        if ( is_pv_domain(d) && ((levelling_caps & LCAP_e1cd) == LCAP_e1cd) )
        {
            uint64_t mask = cpuidmask_defaults.e1cd;
            uint32_t ecx = ctl->ecx & pv_featureset[FEATURESET_e1c];
            uint32_t edx = ctl->edx & pv_featureset[FEATURESET_e1d];

            /*
             * Must expose the host's CMP_LEGACY value so a guest using native
             * CPUID can correctly interpret other leaves which cannot be
             * masked.
             */
            if ( cpu_has_cmp_legacy )
                ecx |= cpufeat_mask(X86_FEATURE_CMP_LEGACY);

            /* If not emulating AMD, clear the duplicated features in e1d. */
            if ( d->arch.x86_vendor != X86_VENDOR_AMD )
                edx &= ~CPUID_COMMON_1D_FEATURES;

            switch ( boot_cpu_data.x86_vendor )
            {
            case X86_VENDOR_INTEL:
                mask &= ((uint64_t)edx << 32) | ecx;
                break;

            case X86_VENDOR_AMD:
                mask &= ((uint64_t)ecx << 32) | edx;

                /*
                 * Fast-forward bits - Must be set in the masking MSR for
                 * fast-forwarding to occur in hardware.
                 */
                ecx = 0;
                edx = cpufeat_mask(X86_FEATURE_APIC);

                mask |= ((uint64_t)ecx << 32) | edx;
                break;
            }

            d->arch.pv_domain.cpuidmasks->e1cd = mask;
        }
        break;
    }
}
Example #14
static int alloc_xenoprof_struct(
    struct domain *d, int max_samples, int is_passive)
{
    struct vcpu *v;
    int nvcpu, npages, bufsize, max_bufsize;
    unsigned max_max_samples;
    int i;

    d->xenoprof = xmalloc(struct xenoprof);

    if ( d->xenoprof == NULL )
    {
        printk("alloc_xenoprof_struct(): memory allocation failed\n");
        return -ENOMEM;
    }

    memset(d->xenoprof, 0, sizeof(*d->xenoprof));

    d->xenoprof->vcpu = xmalloc_array(struct xenoprof_vcpu, d->max_vcpus);
    if ( d->xenoprof->vcpu == NULL )
    {
        xfree(d->xenoprof);
        d->xenoprof = NULL;
        printk("alloc_xenoprof_struct(): vcpu array allocation failed\n");
        return -ENOMEM;
    }

    memset(d->xenoprof->vcpu, 0, d->max_vcpus * sizeof(*d->xenoprof->vcpu));

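    /* Count the vCPUs that currently exist in the domain. */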
    nvcpu = 0;
    for_each_vcpu ( d, v )
        nvcpu++;

    bufsize = sizeof(struct xenoprof_buf);
    i = sizeof(struct event_log);
#ifdef CONFIG_COMPAT
    d->xenoprof->is_compat = is_pv_32on64_domain(is_passive ? dom0 : d);
    if ( XENOPROF_COMPAT(d->xenoprof) )
    {
        bufsize = sizeof(struct compat_oprof_buf);
        i = sizeof(struct compat_event_log);
    }
#endif

    /* reduce max_samples if necessary to limit pages allocated */
    max_bufsize = (MAX_OPROF_SHARED_PAGES * PAGE_SIZE) / nvcpu;
    max_max_samples = ( (max_bufsize - bufsize) / i ) + 1;
    if ( (unsigned)max_samples > max_max_samples )
        max_samples = max_max_samples;

    bufsize += (max_samples - 1) * i;
    npages = (nvcpu * bufsize - 1) / PAGE_SIZE + 1;

    d->xenoprof->rawbuf = alloc_xenheap_pages(get_order_from_pages(npages), 0);
    if ( d->xenoprof->rawbuf == NULL )
    {
        xfree(d->xenoprof->vcpu); /* also free the vcpu array allocated above */
        xfree(d->xenoprof);
        d->xenoprof = NULL;
        return -ENOMEM;
    }

    d->xenoprof->npages = npages;
    d->xenoprof->nbuf = nvcpu;
    d->xenoprof->bufsize = bufsize;
    d->xenoprof->domain_ready = 0;
    d->xenoprof->domain_type = XENOPROF_DOMAIN_IGNORED;

    /* Update buffer pointers for active vcpus */
    i = 0;
    for_each_vcpu ( d, v )
    {
        xenoprof_buf_t *buf = (xenoprof_buf_t *)
            &d->xenoprof->rawbuf[i * bufsize];

        d->xenoprof->vcpu[v->vcpu_id].event_size = max_samples;
        d->xenoprof->vcpu[v->vcpu_id].buffer = buf;
        xenoprof_buf(d, buf, event_size) = max_samples;
        xenoprof_buf(d, buf, vcpu_id) = v->vcpu_id;

        i++;
        /* in the unlikely case that the number of active vcpus changes */
        if ( i >= nvcpu )
            break;
    }
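
    return 0; /* assumed completion: the example was truncated here */
}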