/*
 * Inject a virtual MCE (vMCE) into a guest:
 *  - for Intel MCE, broadcast the vMCE to all vcpus
 *  - for AMD MCE, only inject the vMCE into vcpu0
 *
 * @d:    domain into which the vMCE is injected
 * @vcpu: -1 (VMCE_INJECT_BROADCAST) to broadcast the vMCE to all vcpus,
 *        or >= 0 to inject the vMCE into that single vcpu
 */
int inject_vmce(struct domain *d, int vcpu)
{
    struct vcpu *v;
    int ret = -ESRCH;

    for_each_vcpu ( d, v )
    {
        if ( vcpu != VMCE_INJECT_BROADCAST && vcpu != v->vcpu_id )
            continue;

        /* Only inject if the guest can actually handle a machine check. */
        if ( (has_hvm_container_domain(d) ||
              guest_has_trap_callback(d, v->vcpu_id, TRAP_machine_check)) &&
             !test_and_set_bool(v->mce_pending) )
        {
            mce_printk(MCE_VERBOSE, "MCE: inject vMCE to d%d:v%d\n",
                       d->domain_id, v->vcpu_id);
            vcpu_kick(v);
            ret = 0;
        }
        else
        {
            mce_printk(MCE_QUIET, "Failed to inject vMCE to d%d:v%d\n",
                       d->domain_id, v->vcpu_id);
            ret = -EBUSY;
            break;
        }

        if ( vcpu != VMCE_INJECT_BROADCAST )
            break;
    }

    return ret;
}
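/*
 * A minimal usage sketch of inject_vmce() following the header comment
 * above, not code taken from the tree: broadcast the vMCE for an
 * Intel-style MCE, or target a single vcpu (e.g. vcpu0 for AMD). The
 * caller name and its parameters are hypothetical.
 */
static void example_deliver_vmce(struct domain *d, int broadcast, int target)
{
    int rc = inject_vmce(d, broadcast ? VMCE_INJECT_BROADCAST : target);

    if ( rc )
        mce_printk(MCE_QUIET, "vMCE injection for d%d failed: %d\n",
                   d->domain_id, rc);
}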
void hvm_assert_evtchn_irq(struct vcpu *v)
{
    if ( unlikely(in_irq() || !local_irq_is_enabled()) )
    {
        tasklet_schedule(&v->arch.hvm_vcpu.assert_evtchn_irq_tasklet);
        return;
    }

    if ( is_hvm_pv_evtchn_vcpu(v) )
        vcpu_kick(v);
    else if ( v->vcpu_id == 0 )
        hvm_set_callback_irq_level(v);
}
static void ioapic_inj_irq(
    struct hvm_hw_vioapic *vioapic,
    struct vlapic *target,
    uint8_t vector,
    uint8_t trig_mode,
    uint8_t delivery_mode)
{
    HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "irq %d trig %d deliv %d",
                vector, trig_mode, delivery_mode);

    ASSERT((delivery_mode == dest_Fixed) ||
           (delivery_mode == dest_LowestPrio));

    if ( vlapic_set_irq(target, vector, trig_mode) )
        vcpu_kick(vlapic_vcpu(target));
}
static void vmsi_inj_irq(
    struct domain *d,
    struct vlapic *target,
    uint8_t vector,
    uint8_t trig_mode,
    uint8_t delivery_mode)
{
    HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "vmsi_inj_irq "
                "irq %d trig %d delivery mode %d\n",
                vector, trig_mode, delivery_mode);

    switch ( delivery_mode )
    {
    case dest_Fixed:
    case dest_LowestPrio:
        if ( vlapic_set_irq(target, vector, trig_mode) )
            vcpu_kick(vlapic_vcpu(target));
        break;
    default:
        gdprintk(XENLOG_WARNING, "unsupported delivery mode %d\n",
                 delivery_mode);
        break;
    }
}