    }
#ifdef CONFIG_HAS_MEM_SHARING
    /*
     * NOTE(review): tail of the enclosing cleanup function (its start is
     * outside this chunk).  Tear down the mem-sharing ring, if one was
     * ever set up, before the rest of the vm_event state goes away.
     */
    if ( d->vm_event->share.ring_page )
    {
        /* Wake/destroy any waiters on the sharing ring first ... */
        destroy_waitqueue_head(&d->vm_event->share.wq);
        /* ... then unmap the ring; failure is ignorable during teardown. */
        (void)vm_event_disable(d, &d->vm_event->share);
    }
#endif
}

/*
 * vm_event_domctl() - entry point for XEN_DOMCTL vm_event operations on
 * domain @d.  @vec selects the ring (mode) and operation; @u_domctl is the
 * guest handle for copy-back.  Returns 0 on success or -errno.
 *
 * NOTE(review): only the XSM check and the early sanity exits are visible
 * in this chunk; the per-mode dispatch continues past it.
 */
int vm_event_domctl(struct domain *d, xen_domctl_vm_event_op_t *vec,
                    XEN_GUEST_HANDLE_PARAM(void) u_domctl)
{
    int rc;

    /* Security check first: may the caller drive vm_event on this domain? */
    rc = xsm_vm_event_control(XSM_PRIV, d, vec->mode, vec->op);
    if ( rc )
        return rc;

    /*
     * A domain cannot set up vm_event on itself: enabling/disabling rings
     * requires pausing the target, and a domain cannot domain_pause()
     * itself.
     */
    if ( unlikely(d == current->domain) ) /* no domain_pause() */
    {
        gdprintk(XENLOG_INFO,
                 "Tried to do a memory event op on itself.\n");
        return -EINVAL;
    }

    /* Dying domain: nothing useful to do; report success and bail. */
    if ( unlikely(d->is_dying) )
    {
        gdprintk(XENLOG_INFO, "Ignoring memory event op on dying domain %u\n",
                 d->domain_id);
        return 0;
    }
int monitor_domctl(struct domain *d, struct xen_domctl_monitor_op *mop) { int rc; struct arch_domain *ad = &d->arch; uint32_t capabilities = get_capabilities(d); rc = xsm_vm_event_control(XSM_PRIV, d, mop->op, mop->event); if ( rc ) return rc; switch ( mop->op ) { case XEN_DOMCTL_MONITOR_OP_GET_CAPABILITIES: mop->event = capabilities; return 0; case XEN_DOMCTL_MONITOR_OP_EMULATE_EACH_REP: d->arch.mem_access_emulate_each_rep = !!mop->event; return 0; } /* * Sanity check */ if ( mop->op != XEN_DOMCTL_MONITOR_OP_ENABLE && mop->op != XEN_DOMCTL_MONITOR_OP_DISABLE ) return -EOPNOTSUPP; /* Check if event type is available. */ if ( !(capabilities & (1 << mop->event)) ) return -EOPNOTSUPP; switch ( mop->event ) { case XEN_DOMCTL_MONITOR_EVENT_WRITE_CTRLREG: { unsigned int ctrlreg_bitmask = monitor_ctrlreg_bitmask(mop->u.mov_to_cr.index); bool_t status = !!(ad->monitor.write_ctrlreg_enabled & ctrlreg_bitmask); struct vcpu *v; rc = status_check(mop, status); if ( rc ) return rc; if ( mop->u.mov_to_cr.sync ) ad->monitor.write_ctrlreg_sync |= ctrlreg_bitmask; else ad->monitor.write_ctrlreg_sync &= ~ctrlreg_bitmask; if ( mop->u.mov_to_cr.onchangeonly ) ad->monitor.write_ctrlreg_onchangeonly |= ctrlreg_bitmask; else ad->monitor.write_ctrlreg_onchangeonly &= ~ctrlreg_bitmask; domain_pause(d); if ( !status ) ad->monitor.write_ctrlreg_enabled |= ctrlreg_bitmask; else ad->monitor.write_ctrlreg_enabled &= ~ctrlreg_bitmask; domain_unpause(d); if ( mop->u.mov_to_cr.index == VM_EVENT_X86_CR3 ) /* Latches new CR3 mask through CR0 code */ for_each_vcpu ( d, v ) hvm_update_guest_cr(v, 0); break; } case XEN_DOMCTL_MONITOR_EVENT_MOV_TO_MSR: { bool_t status = ad->monitor.mov_to_msr_enabled; rc = status_check(mop, status); if ( rc ) return rc; if ( mop->op == XEN_DOMCTL_MONITOR_OP_ENABLE && mop->u.mov_to_msr.extended_capture ) { if ( hvm_enable_msr_exit_interception(d) ) ad->monitor.mov_to_msr_extended = 1; else return -EOPNOTSUPP; } else ad->monitor.mov_to_msr_extended = 0; domain_pause(d); 
ad->monitor.mov_to_msr_enabled = !status; domain_unpause(d); break; } case XEN_DOMCTL_MONITOR_EVENT_SINGLESTEP: { bool_t status = ad->monitor.singlestep_enabled; rc = status_check(mop, status); if ( rc ) return rc; domain_pause(d); ad->monitor.singlestep_enabled = !status; domain_unpause(d); break; } case XEN_DOMCTL_MONITOR_EVENT_SOFTWARE_BREAKPOINT: { bool_t status = ad->monitor.software_breakpoint_enabled; rc = status_check(mop, status); if ( rc ) return rc; domain_pause(d); ad->monitor.software_breakpoint_enabled = !status; domain_unpause(d); break; } case XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST: { bool_t status = ad->monitor.guest_request_enabled; rc = status_check(mop, status); if ( rc ) return rc; ad->monitor.guest_request_sync = mop->u.guest_request.sync; domain_pause(d); ad->monitor.guest_request_enabled = !status; domain_unpause(d); break; } default: return -EOPNOTSUPP; }; return 0; }