Esempio n. 1
0
void domain_resume(struct domain *d)
{
    struct vcpu *v;

    /*
     * Keep the whole domain paused while we clear the shutdown state:
     * several code paths assert that shutdown status cannot be reset
     * underneath them.
     */
    domain_pause(d);

    spin_lock(&d->shutdown_lock);

    d->is_shutting_down = 0;
    d->is_shut_down = 0;
    d->shutdown_code = -1;

    /* Release every vcpu that was parked by the shutdown path. */
    for_each_vcpu ( d, v )
    {
        if ( v->paused_for_shutdown )
            vcpu_unpause(v);
        v->paused_for_shutdown = 0;
    }

    spin_unlock(&d->shutdown_lock);

    domain_unpause(d);
}
Esempio n. 2
0
static void thaw_domains(void)
{
    struct domain *d;

    rcu_read_lock(&domlist_read_lock);
    for_each_domain ( d )
        domain_unpause(d);
    rcu_read_unlock(&domlist_read_lock);
}
Esempio n. 3
0
/*
 * Unpause every domain in the system, restoring each domain's vcpu
 * affinity first.  The domain list is walked under RCU.
 */
static void thaw_domains(void)
{
    struct domain *d;

    rcu_read_lock(&domlist_read_lock);

    for_each_domain ( d )
    {
        /* Affinity must be restored before the domain runs again. */
        restore_vcpu_affinity(d);
        domain_unpause(d);
    }

    rcu_read_unlock(&domlist_read_lock);
}
Esempio n. 4
0
/*
 * Pause the currently-running domain on behalf of a debugger and notify
 * the debugger (via dom0's VIRQ_DEBUGGER) that it has stopped.
 *
 * The pause reference is taken as a "controller pause": exactly one such
 * reference may be outstanding, tracked by d->is_paused_by_controller.
 */
void domain_pause_for_debugger(void)
{
    struct domain *d = current->domain;
    struct vcpu *v;

    /*
     * Take a pause reference unconditionally, then claim the controller
     * flag.  If the flag was already set a controller pause reference is
     * already held, so drop the one we just took.  Doing it in this
     * order keeps the count/flag pair consistent without a lock.
     */
    atomic_inc(&d->pause_count);
    if ( test_and_set_bool(d->is_paused_by_controller) )
        domain_unpause(d); /* race-free atomic_dec(&d->pause_count) */

    /* Deschedule all vcpus asynchronously (no wait for them to stop). */
    for_each_vcpu ( d, v )
        vcpu_sleep_nosync(v);

    send_guest_global_virq(dom0, VIRQ_DEBUGGER);
}
Esempio n. 5
0
/*
 * Unpause every domain in the system.  Dom0 is handled specially: its
 * vcpus are unpaused individually, skipping the vcpu executing this
 * code.  The domain list is walked under RCU.
 */
static void thaw_domains(void)
{
    struct domain *d;
    struct vcpu *v;

    rcu_read_lock(&domlist_read_lock);

    for_each_domain ( d )
    {
        if ( d->domain_id == 0 )
        {
            /* Don't unpause ourselves. */
            for_each_vcpu ( d, v )
            {
                if ( v != current )
                    vcpu_unpause(v);
            }
        }
        else
            domain_unpause(d);
    }

    rcu_read_unlock(&domlist_read_lock);
}
Esempio n. 6
0
/*
 * Enable or disable a single architectural monitor event for @d, as
 * requested by @mop (op is XEN_DOMCTL_MONITOR_OP_ENABLE or _DISABLE).
 *
 * Returns 0 on success, -EINVAL for an out-of-range control register
 * index, -EEXIST when the event is already in the requested state, the
 * result of the MSR enable/disable helpers for the MSR event, or
 * -EOPNOTSUPP for an event this path does not know about.
 */
int arch_monitor_domctl_event(struct domain *d,
                              struct xen_domctl_monitor_op *mop)
{
    struct arch_domain *ad = &d->arch;
    bool_t requested_status = (XEN_DOMCTL_MONITOR_OP_ENABLE == mop->op);

    switch ( mop->event )
    {
    case XEN_DOMCTL_MONITOR_EVENT_WRITE_CTRLREG:
    {
        unsigned int ctrlreg_bitmask;
        bool_t old_status;

        /* sanity check: avoid left-shift undefined behavior */
        if ( unlikely(mop->u.mov_to_cr.index > 31) )
            return -EINVAL;

        ctrlreg_bitmask = monitor_ctrlreg_bitmask(mop->u.mov_to_cr.index);
        old_status = !!(ad->monitor.write_ctrlreg_enabled & ctrlreg_bitmask);

        if ( unlikely(old_status == requested_status) )
            return -EEXIST;

        /* Pause the domain while its monitor settings are changed. */
        domain_pause(d);

        if ( mop->u.mov_to_cr.sync )
            ad->monitor.write_ctrlreg_sync |= ctrlreg_bitmask;
        else
            ad->monitor.write_ctrlreg_sync &= ~ctrlreg_bitmask;

        if ( mop->u.mov_to_cr.onchangeonly )
            ad->monitor.write_ctrlreg_onchangeonly |= ctrlreg_bitmask;
        else
            ad->monitor.write_ctrlreg_onchangeonly &= ~ctrlreg_bitmask;

        if ( requested_status )
            ad->monitor.write_ctrlreg_enabled |= ctrlreg_bitmask;
        else
            ad->monitor.write_ctrlreg_enabled &= ~ctrlreg_bitmask;

        if ( VM_EVENT_X86_CR3 == mop->u.mov_to_cr.index )
        {
            struct vcpu *v;
            /* Latches new CR3 mask through CR0 code. */
            for_each_vcpu ( d, v )
                hvm_update_guest_cr(v, 0);
        }

        domain_unpause(d);

        break;
    }

    case XEN_DOMCTL_MONITOR_EVENT_MOV_TO_MSR:
    {
        bool_t old_status;
        int rc;
        u32 msr = mop->u.mov_to_msr.msr;

        /* Pause before reading the status so it cannot change under us. */
        domain_pause(d);

        old_status = monitored_msr(d, msr);

        if ( unlikely(old_status == requested_status) )
        {
            domain_unpause(d);
            return -EEXIST;
        }

        if ( requested_status )
            rc = monitor_enable_msr(d, msr);
        else
            rc = monitor_disable_msr(d, msr);

        domain_unpause(d);

        return rc;
    }

    case XEN_DOMCTL_MONITOR_EVENT_SINGLESTEP:
    {
        bool_t old_status = ad->monitor.singlestep_enabled;

        if ( unlikely(old_status == requested_status) )
            return -EEXIST;

        /* Flip the flag with the domain paused. */
        domain_pause(d);
        ad->monitor.singlestep_enabled = requested_status;
        domain_unpause(d);
        break;
    }

    case XEN_DOMCTL_MONITOR_EVENT_SOFTWARE_BREAKPOINT:
    {
        bool_t old_status = ad->monitor.software_breakpoint_enabled;

        if ( unlikely(old_status == requested_status) )
            return -EEXIST;

        domain_pause(d);
        ad->monitor.software_breakpoint_enabled = requested_status;
        domain_unpause(d);
        break;
    }

    case XEN_DOMCTL_MONITOR_EVENT_DEBUG_EXCEPTION:
    {
        bool_t old_status = ad->monitor.debug_exception_enabled;

        if ( unlikely(old_status == requested_status) )
            return -EEXIST;

        domain_pause(d);
        ad->monitor.debug_exception_enabled = requested_status;
        /* The sync setting is only meaningful while the event is enabled. */
        ad->monitor.debug_exception_sync = requested_status ?
                                            mop->u.debug_exception.sync :
                                            0;
        domain_unpause(d);
        break;
    }

    case XEN_DOMCTL_MONITOR_EVENT_CPUID:
    {
        bool_t old_status = ad->monitor.cpuid_enabled;

        if ( unlikely(old_status == requested_status) )
            return -EEXIST;

        domain_pause(d);
        ad->monitor.cpuid_enabled = requested_status;
        domain_unpause(d);
        break;
    }

    default:
        /*
         * Should not be reached unless arch_monitor_get_capabilities() is
         * not properly implemented.
         */
        ASSERT_UNREACHABLE();
        return -EOPNOTSUPP;
    }

    return 0;
}
Esempio n. 7
0
int monitor_domctl(struct domain *d, struct xen_domctl_monitor_op *mop)
{
    int rc;
    struct arch_domain *ad = &d->arch;
    uint32_t capabilities = get_capabilities(d);

    rc = xsm_vm_event_control(XSM_PRIV, d, mop->op, mop->event);
    if ( rc )
        return rc;

    switch ( mop->op )
    {
    case XEN_DOMCTL_MONITOR_OP_GET_CAPABILITIES:
        mop->event = capabilities;
        return 0;

    case XEN_DOMCTL_MONITOR_OP_EMULATE_EACH_REP:
        d->arch.mem_access_emulate_each_rep = !!mop->event;
        return 0;
    }

    /*
     * Sanity check
     */
    if ( mop->op != XEN_DOMCTL_MONITOR_OP_ENABLE &&
         mop->op != XEN_DOMCTL_MONITOR_OP_DISABLE )
        return -EOPNOTSUPP;

    /* Check if event type is available. */
    if ( !(capabilities & (1 << mop->event)) )
        return -EOPNOTSUPP;

    switch ( mop->event )
    {
    case XEN_DOMCTL_MONITOR_EVENT_WRITE_CTRLREG:
    {
        unsigned int ctrlreg_bitmask =
            monitor_ctrlreg_bitmask(mop->u.mov_to_cr.index);
        bool_t status =
            !!(ad->monitor.write_ctrlreg_enabled & ctrlreg_bitmask);
        struct vcpu *v;

        rc = status_check(mop, status);
        if ( rc )
            return rc;

        if ( mop->u.mov_to_cr.sync )
            ad->monitor.write_ctrlreg_sync |= ctrlreg_bitmask;
        else
            ad->monitor.write_ctrlreg_sync &= ~ctrlreg_bitmask;

        if ( mop->u.mov_to_cr.onchangeonly )
            ad->monitor.write_ctrlreg_onchangeonly |= ctrlreg_bitmask;
        else
            ad->monitor.write_ctrlreg_onchangeonly &= ~ctrlreg_bitmask;

        domain_pause(d);

        if ( !status )
            ad->monitor.write_ctrlreg_enabled |= ctrlreg_bitmask;
        else
            ad->monitor.write_ctrlreg_enabled &= ~ctrlreg_bitmask;

        domain_unpause(d);

        if ( mop->u.mov_to_cr.index == VM_EVENT_X86_CR3 )
            /* Latches new CR3 mask through CR0 code */
            for_each_vcpu ( d, v )
                hvm_update_guest_cr(v, 0);

        break;
    }

    case XEN_DOMCTL_MONITOR_EVENT_MOV_TO_MSR:
    {
        bool_t status = ad->monitor.mov_to_msr_enabled;

        rc = status_check(mop, status);
        if ( rc )
            return rc;

        if ( mop->op == XEN_DOMCTL_MONITOR_OP_ENABLE &&
             mop->u.mov_to_msr.extended_capture )
        {
            if ( hvm_enable_msr_exit_interception(d) )
                ad->monitor.mov_to_msr_extended = 1;
            else
                return -EOPNOTSUPP;
        } else
            ad->monitor.mov_to_msr_extended = 0;

        domain_pause(d);
        ad->monitor.mov_to_msr_enabled = !status;
        domain_unpause(d);
        break;
    }

    case XEN_DOMCTL_MONITOR_EVENT_SINGLESTEP:
    {
        bool_t status = ad->monitor.singlestep_enabled;

        rc = status_check(mop, status);
        if ( rc )
            return rc;

        domain_pause(d);
        ad->monitor.singlestep_enabled = !status;
        domain_unpause(d);
        break;
    }

    case XEN_DOMCTL_MONITOR_EVENT_SOFTWARE_BREAKPOINT:
    {
        bool_t status = ad->monitor.software_breakpoint_enabled;

        rc = status_check(mop, status);
        if ( rc )
            return rc;

        domain_pause(d);
        ad->monitor.software_breakpoint_enabled = !status;
        domain_unpause(d);
        break;
    }

    case XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST:
    {
        bool_t status = ad->monitor.guest_request_enabled;

        rc = status_check(mop, status);
        if ( rc )
            return rc;

        ad->monitor.guest_request_sync = mop->u.guest_request.sync;

        domain_pause(d);
        ad->monitor.guest_request_enabled = !status;
        domain_unpause(d);
        break;
    }

    default:
        return -EOPNOTSUPP;

    };

    return 0;
}
Esempio n. 8
0
/* Drop the controller pause reference on @d, if one is currently held. */
void domain_unpause_by_systemcontroller(struct domain *d)
{
    if ( !test_and_clear_bool(d->is_paused_by_controller) )
        return;

    domain_unpause(d);
}