void domain_resume(struct domain *d)
{
    struct vcpu *v;

    /*
     * Some code paths assume that shutdown status does not get reset under
     * their feet (e.g., some assertions make this assumption).
     */
    domain_pause(d);

    spin_lock(&d->shutdown_lock);

    d->is_shutting_down = d->is_shut_down = 0;
    d->shutdown_code = -1;

    for_each_vcpu ( d, v )
    {
        if ( v->paused_for_shutdown )
            vcpu_unpause(v);
        v->paused_for_shutdown = 0;
    }

    spin_unlock(&d->shutdown_lock);

    domain_unpause(d);
}
static void freeze_domains(void)
{
    struct domain *d;

    rcu_read_lock(&domlist_read_lock);
    /*
     * Note that we iterate in order of domain-id. Hence we will pause dom0
     * first which is required for correctness (as only dom0 can add domains
     * to the domain list). Otherwise we could miss concurrently-created
     * domains.
     */
    for_each_domain ( d )
        domain_pause(d);
    rcu_read_unlock(&domlist_read_lock);
}
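The pause reference taken on every domain by freeze_domains() has to be dropped again on the resume path. The snippet below is a minimal sketch of what such a counterpart could look like; thaw_domains() here is a hypothetical name mirroring the loop above, not the verbatim resume-path code.

/* Hypothetical counterpart to freeze_domains(): drop the pause reference
 * taken on every domain once the system is running again. */
static void thaw_domains(void)
{
    struct domain *d;

    rcu_read_lock(&domlist_read_lock);
    for_each_domain ( d )
        domain_unpause(d);
    rcu_read_unlock(&domlist_read_lock);
}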
void domain_kill(struct domain *d)
{
    struct vcpu *v;

    domain_pause(d);

    /* Tear down domain state only once, on the first kill request. */
    if ( !test_and_set_bit(_DOMF_dying, &d->domain_flags) )
    {
        for_each_vcpu ( d, v )
            sched_rem_domain(v);
        gnttab_release_mappings(d);
        domain_relinquish_resources(d);
        put_domain(d);

        send_guest_virq(dom0->vcpu[0], VIRQ_DOM_EXC);
    }
}
static void freeze_domains(void)
{
    struct domain *d;
    struct vcpu *v;

    rcu_read_lock(&domlist_read_lock);
    for_each_domain ( d )
    {
        switch ( d->domain_id )
        {
        case 0:
            /*
             * Pause dom0's vCPUs individually, but never the vCPU this code
             * is running on, as that would deadlock the freeze.
             */
            for_each_vcpu ( d, v )
                if ( v != current )
                    vcpu_pause(v);
            break;
        default:
            domain_pause(d);
            break;
        }
    }
    rcu_read_unlock(&domlist_read_lock);
}
int domain_kill(struct domain *d)
{
    int rc = 0;

    if ( d == current->domain )
        return -EINVAL;

    /* Protected by domctl_lock. */
    switch ( d->is_dying )
    {
    case DOMDYING_alive:
        domain_pause(d);
        d->is_dying = DOMDYING_dying;
        spin_barrier(&d->domain_lock);
        evtchn_destroy(d);
        gnttab_release_mappings(d);
        tmem_destroy(d->tmem);
        d->tmem = NULL;
        /* fallthrough */
    case DOMDYING_dying:
        rc = domain_relinquish_resources(d);
        if ( rc != 0 )
        {
            BUG_ON(rc != -EAGAIN);
            break;
        }
        d->is_dying = DOMDYING_dead;
        put_domain(d);
        send_guest_global_virq(dom0, VIRQ_DOM_EXC);
        /* fallthrough */
    case DOMDYING_dead:
        break;
    }

    return rc;
}
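domain_kill() deliberately returns -EAGAIN when domain_relinquish_resources() needs to be restarted, so its caller is expected to reissue the operation until the domain reaches DOMDYING_dead. The sketch below shows that contract with a hypothetical retry_domain_kill() wrapper; the real domctl path instead returns to the toolstack via a hypercall continuation rather than spinning in the hypervisor.

/* Hypothetical caller sketch: keep retrying domain_kill() while it asks
 * for a restart via -EAGAIN. */
static int retry_domain_kill(struct domain *d)
{
    int rc;

    do {
        rc = domain_kill(d);
    } while ( rc == -EAGAIN );

    return rc;
}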
int arch_monitor_domctl_event(struct domain *d,
                              struct xen_domctl_monitor_op *mop)
{
    struct arch_domain *ad = &d->arch;
    bool_t requested_status = (XEN_DOMCTL_MONITOR_OP_ENABLE == mop->op);

    switch ( mop->event )
    {
    case XEN_DOMCTL_MONITOR_EVENT_WRITE_CTRLREG:
    {
        unsigned int ctrlreg_bitmask;
        bool_t old_status;

        /* sanity check: avoid left-shift undefined behavior */
        if ( unlikely(mop->u.mov_to_cr.index > 31) )
            return -EINVAL;

        ctrlreg_bitmask = monitor_ctrlreg_bitmask(mop->u.mov_to_cr.index);

        old_status = !!(ad->monitor.write_ctrlreg_enabled & ctrlreg_bitmask);

        if ( unlikely(old_status == requested_status) )
            return -EEXIST;

        domain_pause(d);

        if ( mop->u.mov_to_cr.sync )
            ad->monitor.write_ctrlreg_sync |= ctrlreg_bitmask;
        else
            ad->monitor.write_ctrlreg_sync &= ~ctrlreg_bitmask;

        if ( mop->u.mov_to_cr.onchangeonly )
            ad->monitor.write_ctrlreg_onchangeonly |= ctrlreg_bitmask;
        else
            ad->monitor.write_ctrlreg_onchangeonly &= ~ctrlreg_bitmask;

        if ( requested_status )
            ad->monitor.write_ctrlreg_enabled |= ctrlreg_bitmask;
        else
            ad->monitor.write_ctrlreg_enabled &= ~ctrlreg_bitmask;

        if ( VM_EVENT_X86_CR3 == mop->u.mov_to_cr.index )
        {
            struct vcpu *v;

            /* Latches new CR3 mask through CR0 code. */
            for_each_vcpu ( d, v )
                hvm_update_guest_cr(v, 0);
        }

        domain_unpause(d);

        break;
    }

    case XEN_DOMCTL_MONITOR_EVENT_MOV_TO_MSR:
    {
        bool_t old_status;
        int rc;
        u32 msr = mop->u.mov_to_msr.msr;

        domain_pause(d);

        old_status = monitored_msr(d, msr);

        if ( unlikely(old_status == requested_status) )
        {
            domain_unpause(d);
            return -EEXIST;
        }

        if ( requested_status )
            rc = monitor_enable_msr(d, msr);
        else
            rc = monitor_disable_msr(d, msr);

        domain_unpause(d);

        return rc;
    }

    case XEN_DOMCTL_MONITOR_EVENT_SINGLESTEP:
    {
        bool_t old_status = ad->monitor.singlestep_enabled;

        if ( unlikely(old_status == requested_status) )
            return -EEXIST;

        domain_pause(d);
        ad->monitor.singlestep_enabled = requested_status;
        domain_unpause(d);
        break;
    }

    case XEN_DOMCTL_MONITOR_EVENT_SOFTWARE_BREAKPOINT:
    {
        bool_t old_status = ad->monitor.software_breakpoint_enabled;

        if ( unlikely(old_status == requested_status) )
            return -EEXIST;

        domain_pause(d);
        ad->monitor.software_breakpoint_enabled = requested_status;
        domain_unpause(d);
        break;
    }

    case XEN_DOMCTL_MONITOR_EVENT_DEBUG_EXCEPTION:
    {
        bool_t old_status = ad->monitor.debug_exception_enabled;

        if ( unlikely(old_status == requested_status) )
            return -EEXIST;

        domain_pause(d);
        ad->monitor.debug_exception_enabled = requested_status;
        ad->monitor.debug_exception_sync = requested_status ?
                                            mop->u.debug_exception.sync : 0;
        domain_unpause(d);
        break;
    }

    case XEN_DOMCTL_MONITOR_EVENT_CPUID:
    {
        bool_t old_status = ad->monitor.cpuid_enabled;

        if ( unlikely(old_status == requested_status) )
            return -EEXIST;

        domain_pause(d);
        ad->monitor.cpuid_enabled = requested_status;
        domain_unpause(d);
        break;
    }

    default:
        /*
         * Should not be reached unless arch_monitor_get_capabilities() is
         * not properly implemented.
         */
        ASSERT_UNREACHABLE();
        return -EOPNOTSUPP;
    }

    return 0;
}
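Both monitor variants use monitor_ctrlreg_bitmask() to map a control-register index onto one bit of the per-domain enable masks. Its definition is not included above; the following is a plausible sketch of its shape, which would also explain why the WRITE_CTRLREG case rejects indices above 31 before shifting.

/* Plausible shape of the helper: one enable bit per control-register
 * index, hence the index-range check before use. */
#define monitor_ctrlreg_bitmask(ctrlreg_index) (1U << (ctrlreg_index))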
int monitor_domctl(struct domain *d, struct xen_domctl_monitor_op *mop)
{
    int rc;
    struct arch_domain *ad = &d->arch;
    uint32_t capabilities = get_capabilities(d);

    rc = xsm_vm_event_control(XSM_PRIV, d, mop->op, mop->event);
    if ( rc )
        return rc;

    switch ( mop->op )
    {
    case XEN_DOMCTL_MONITOR_OP_GET_CAPABILITIES:
        mop->event = capabilities;
        return 0;

    case XEN_DOMCTL_MONITOR_OP_EMULATE_EACH_REP:
        d->arch.mem_access_emulate_each_rep = !!mop->event;
        return 0;
    }

    /* Sanity check */
    if ( mop->op != XEN_DOMCTL_MONITOR_OP_ENABLE &&
         mop->op != XEN_DOMCTL_MONITOR_OP_DISABLE )
        return -EOPNOTSUPP;

    /* Check if event type is available. */
    if ( !(capabilities & (1 << mop->event)) )
        return -EOPNOTSUPP;

    switch ( mop->event )
    {
    case XEN_DOMCTL_MONITOR_EVENT_WRITE_CTRLREG:
    {
        unsigned int ctrlreg_bitmask =
            monitor_ctrlreg_bitmask(mop->u.mov_to_cr.index);
        bool_t status =
            !!(ad->monitor.write_ctrlreg_enabled & ctrlreg_bitmask);
        struct vcpu *v;

        rc = status_check(mop, status);
        if ( rc )
            return rc;

        if ( mop->u.mov_to_cr.sync )
            ad->monitor.write_ctrlreg_sync |= ctrlreg_bitmask;
        else
            ad->monitor.write_ctrlreg_sync &= ~ctrlreg_bitmask;

        if ( mop->u.mov_to_cr.onchangeonly )
            ad->monitor.write_ctrlreg_onchangeonly |= ctrlreg_bitmask;
        else
            ad->monitor.write_ctrlreg_onchangeonly &= ~ctrlreg_bitmask;

        domain_pause(d);
        if ( !status )
            ad->monitor.write_ctrlreg_enabled |= ctrlreg_bitmask;
        else
            ad->monitor.write_ctrlreg_enabled &= ~ctrlreg_bitmask;
        domain_unpause(d);

        if ( mop->u.mov_to_cr.index == VM_EVENT_X86_CR3 )
            /* Latches new CR3 mask through CR0 code */
            for_each_vcpu ( d, v )
                hvm_update_guest_cr(v, 0);

        break;
    }

    case XEN_DOMCTL_MONITOR_EVENT_MOV_TO_MSR:
    {
        bool_t status = ad->monitor.mov_to_msr_enabled;

        rc = status_check(mop, status);
        if ( rc )
            return rc;

        if ( mop->op == XEN_DOMCTL_MONITOR_OP_ENABLE &&
             mop->u.mov_to_msr.extended_capture )
        {
            if ( hvm_enable_msr_exit_interception(d) )
                ad->monitor.mov_to_msr_extended = 1;
            else
                return -EOPNOTSUPP;
        }
        else
            ad->monitor.mov_to_msr_extended = 0;

        domain_pause(d);
        ad->monitor.mov_to_msr_enabled = !status;
        domain_unpause(d);
        break;
    }

    case XEN_DOMCTL_MONITOR_EVENT_SINGLESTEP:
    {
        bool_t status = ad->monitor.singlestep_enabled;

        rc = status_check(mop, status);
        if ( rc )
            return rc;

        domain_pause(d);
        ad->monitor.singlestep_enabled = !status;
        domain_unpause(d);
        break;
    }

    case XEN_DOMCTL_MONITOR_EVENT_SOFTWARE_BREAKPOINT:
    {
        bool_t status = ad->monitor.software_breakpoint_enabled;

        rc = status_check(mop, status);
        if ( rc )
            return rc;

        domain_pause(d);
        ad->monitor.software_breakpoint_enabled = !status;
        domain_unpause(d);
        break;
    }

    case XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST:
    {
        bool_t status = ad->monitor.guest_request_enabled;

        rc = status_check(mop, status);
        if ( rc )
            return rc;

        ad->monitor.guest_request_sync = mop->u.guest_request.sync;

        domain_pause(d);
        ad->monitor.guest_request_enabled = !status;
        domain_unpause(d);
        break;
    }

    default:
        return -EOPNOTSUPP;
    }

    return 0;
}
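monitor_domctl() leans on a status_check() helper that is not shown above. Judging from how it is called, it presumably just rejects enable/disable requests that would not change the current state. The following is a plausible sketch of that helper under this assumption, not the verbatim Xen code.

/* Plausible shape of status_check(): report -EEXIST when the requested
 * enable/disable already matches the current status. */
static int status_check(struct xen_domctl_monitor_op *mop, bool_t status)
{
    bool_t requested_status = (mop->op == XEN_DOMCTL_MONITOR_OP_ENABLE);

    if ( status == requested_status )
        return -EEXIST;

    return 0;
}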
void domain_pause_by_systemcontroller(struct domain *d)
{
    domain_pause(d);
    /* Record at most one controller pause: if the controller had already
     * paused this domain, drop the extra pause reference again. */
    if ( test_and_set_bool(d->is_paused_by_controller) )
        domain_unpause(d);
}
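The matching unpause side would need to clear the flag and drop the pause reference only if the controller had actually paused the domain. Below is a minimal sketch of such a counterpart, assuming the same test_and_*_bool helpers; it is illustrative rather than the exact in-tree function.

/* Sketch of the matching unpause: only drop the pause reference if the
 * controller-pause flag was actually set. */
void domain_unpause_by_systemcontroller(struct domain *d)
{
    if ( test_and_clear_bool(d->is_paused_by_controller) )
        domain_unpause(d);
}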