int vmsi_deliver(struct domain *d, int pirq) { struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci; uint32_t flags = hvm_irq_dpci->mirq[pirq].gmsi.gflags; int vector = hvm_irq_dpci->mirq[pirq].gmsi.gvec; uint8_t dest = (uint8_t)flags; uint8_t dest_mode = !!(flags & VMSI_DM_MASK); uint8_t delivery_mode = (flags & VMSI_DELIV_MASK) >> GLFAGS_SHIFT_DELIV_MODE; uint8_t trig_mode = (flags & VMSI_TRIG_MODE) >> GLFAGS_SHIFT_TRG_MODE; struct vlapic *target; struct vcpu *v; HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "msi: dest=%x dest_mode=%x delivery_mode=%x " "vector=%x trig_mode=%x\n", dest, dest_mode, delivery_mode, vector, trig_mode); if ( !( hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_GUEST_MSI ) ) { gdprintk(XENLOG_WARNING, "pirq %x not msi \n", pirq); return 0; } switch ( delivery_mode ) { case dest_LowestPrio: { target = vlapic_lowest_prio(d, NULL, 0, dest, dest_mode); if ( target != NULL ) vmsi_inj_irq(d, target, vector, trig_mode, delivery_mode); else HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "null round robin: " "vector=%x delivery_mode=%x\n", vector, dest_LowestPrio); break; } case dest_Fixed: case dest_ExtINT: { for_each_vcpu ( d, v ) if ( vlapic_match_dest(vcpu_vlapic(v), NULL, 0, dest, dest_mode) ) vmsi_inj_irq(d, vcpu_vlapic(v), vector, trig_mode, delivery_mode); break; } case dest_SMI: case dest_NMI: case dest_INIT: case dest__reserved_2: default: gdprintk(XENLOG_WARNING, "Unsupported delivery mode %d\n", delivery_mode); break; } return 1; }
/*
 * MMIO read handler for the virtual IOAPIC window.
 *
 * Only the register-select and window registers are readable; all
 * other offsets read as zero.
 */
static unsigned long vioapic_read(
    struct vcpu *v, unsigned long addr, unsigned long length)
{
    struct hvm_hw_vioapic *vioapic = domain_vioapic(v->domain);
    uint32_t result;

    HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "addr %lx", addr);

    /* The IOAPIC decodes only the low byte of the address. */
    addr &= 0xff;

    if ( addr == VIOAPIC_REG_SELECT )
        result = vioapic->ioregsel;
    else if ( addr == VIOAPIC_REG_WINDOW )
        result = vioapic_read_indirect(vioapic, addr, length);
    else
        result = 0;

    return result;
}
/*
 * Compute a bitmap of target vCPUs (bit position == vcpu_id) for an
 * IOAPIC interrupt with the given destination field and mode.
 *
 * NOTE(review): this definition appears truncated in this chunk — the
 * logical-mode body after the final "else if" and the "out:" label
 * targeted by the goto below are not visible here.
 */
static uint32_t ioapic_get_delivery_bitmask(
    struct hvm_hw_vioapic *vioapic, uint16_t dest, uint8_t dest_mode)
{
    uint32_t mask = 0;
    struct vcpu *v;

    HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "dest %d dest_mode %d",
                dest, dest_mode);

    if ( dest_mode == 0 ) /* Physical mode. */
    {
        if ( dest == 0xFF ) /* Broadcast. */
        {
            /* Physical broadcast targets every vCPU in the domain. */
            for_each_vcpu ( vioapic_domain(vioapic), v )
                mask |= 1 << v->vcpu_id;
            goto out;
        }

        /* Physical unicast: at most one vCPU's LAPIC ID can match. */
        for_each_vcpu ( vioapic_domain(vioapic), v )
        {
            if ( VLAPIC_ID(vcpu_vlapic(v)) == dest )
            {
                mask = 1 << v->vcpu_id;
                break;
            }
        }
    }
    else if ( dest != 0 ) /* Logical mode, MDA non-zero. */
/*
 * Inject a single IOAPIC-sourced interrupt into @target's LAPIC and
 * kick the owning vCPU if the IRR bit was newly set.
 *
 * Only Fixed and LowestPrio delivery modes are valid at this point;
 * the caller is responsible for having filtered out everything else.
 */
static void ioapic_inj_irq(
    struct hvm_hw_vioapic *vioapic,
    struct vlapic *target,
    uint8_t vector,
    uint8_t trig_mode,
    uint8_t delivery_mode)
{
    HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "irq %d trig %d deliv %d",
                vector, trig_mode, delivery_mode);

    ASSERT((delivery_mode == dest_Fixed) ||
           (delivery_mode == dest_LowestPrio));

    /* Nothing to do unless the interrupt was newly latched. */
    if ( !vlapic_set_irq(target, vector, trig_mode) )
        return;

    vcpu_kick(vlapic_vcpu(target));
}
/*
 * Latch an MSI-sourced interrupt into @target's LAPIC.
 *
 * Callers must only pass Fixed or LowestPrio delivery modes; any other
 * value indicates a logic error upstream and is fatal.
 */
static void vmsi_inj_irq(
    struct vlapic *target,
    uint8_t vector,
    uint8_t trig_mode,
    uint8_t delivery_mode)
{
    HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "vmsi_inj_irq: vec %02x trig %d dm %d\n",
                vector, trig_mode, delivery_mode);

    if ( (delivery_mode != dest_Fixed) && (delivery_mode != dest_LowestPrio) )
        BUG();

    vlapic_set_irq(target, vector, trig_mode);
}
void vmsi_deliver_pirq(struct domain *d, const struct hvm_pirq_dpci *pirq_dpci) { uint32_t flags = pirq_dpci->gmsi.gflags; int vector = pirq_dpci->gmsi.gvec; uint8_t dest = (uint8_t)flags; uint8_t dest_mode = !!(flags & VMSI_DM_MASK); uint8_t delivery_mode = (flags & VMSI_DELIV_MASK) >> GFLAGS_SHIFT_DELIV_MODE; uint8_t trig_mode = (flags&VMSI_TRIG_MODE) >> GFLAGS_SHIFT_TRG_MODE; HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "msi: dest=%x dest_mode=%x delivery_mode=%x " "vector=%x trig_mode=%x\n", dest, dest_mode, delivery_mode, vector, trig_mode); ASSERT(pirq_dpci->flags & HVM_IRQ_DPCI_GUEST_MSI); vmsi_deliver(d, vector, dest, dest_mode, delivery_mode, trig_mode); }
static void vioapic_write_indirect( struct hvm_hw_vioapic *vioapic, unsigned long addr, unsigned long length, unsigned long val) { switch ( vioapic->ioregsel ) { case VIOAPIC_REG_VERSION: /* Writes are ignored. */ break; #if !VIOAPIC_IS_IOSAPIC case VIOAPIC_REG_APIC_ID: vioapic->id = (val >> 24) & 0xf; break; case VIOAPIC_REG_ARB_ID: break; #endif default: { uint32_t redir_index = (vioapic->ioregsel - 0x10) >> 1; HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "change redir index %x val %lx", redir_index, val); if ( redir_index >= VIOAPIC_NUM_PINS ) { gdprintk(XENLOG_WARNING, "vioapic_write_indirect " "error register %x\n", vioapic->ioregsel); break; } vioapic_write_redirent( vioapic, redir_index, vioapic->ioregsel&1, val); break; } } }
/*
 * Deliver an MSI with the given attributes into domain @d.
 *
 * Returns 0 on success, -ESRCH when lowest-priority arbitration finds
 * no candidate vCPU, and -EINVAL for any unsupported delivery mode.
 */
int vmsi_deliver(
    struct domain *d, int vector,
    uint8_t dest, uint8_t dest_mode,
    uint8_t delivery_mode, uint8_t trig_mode)
{
    struct vlapic *lapic;
    struct vcpu *vcpu;

    if ( delivery_mode == dest_LowestPrio )
    {
        lapic = vlapic_lowest_prio(d, NULL, 0, dest, dest_mode);
        if ( lapic == NULL )
        {
            HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
                        "null MSI round robin: vector=%02x\n", vector);
            return -ESRCH;
        }
        vmsi_inj_irq(lapic, vector, trig_mode, delivery_mode);
        return 0;
    }

    if ( delivery_mode == dest_Fixed )
    {
        /* Fixed mode may target several vCPUs; inject into each match. */
        for_each_vcpu ( d, vcpu )
            if ( vlapic_match_dest(vcpu_vlapic(vcpu), NULL, 0,
                                   dest, dest_mode) )
                vmsi_inj_irq(vcpu_vlapic(vcpu), vector,
                             trig_mode, delivery_mode);
        return 0;
    }

    printk(XENLOG_G_WARNING
           "%pv: Unsupported MSI delivery mode %d for Dom%d\n",
           current, delivery_mode, d->domain_id);
    return -EINVAL;
}
/*
 * Latch an MSI-sourced interrupt into @target's LAPIC and kick the
 * owning vCPU if the IRR bit was newly set.
 *
 * @d is unused here but retained for signature compatibility with
 * existing callers.  Delivery modes other than Fixed/LowestPrio are
 * logged and dropped.
 */
static void vmsi_inj_irq(
    struct domain *d,
    struct vlapic *target,
    uint8_t vector,
    uint8_t trig_mode,
    uint8_t delivery_mode)
{
    HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "vmsi_inj_irq "
                "irq %d trig %d delive mode %d\n",
                vector, trig_mode, delivery_mode);

    if ( (delivery_mode != dest_Fixed) && (delivery_mode != dest_LowestPrio) )
    {
        gdprintk(XENLOG_WARNING, "error delivery mode %d\n", delivery_mode);
        return;
    }

    if ( vlapic_set_irq(target, vector, trig_mode) )
        vcpu_kick(vlapic_vcpu(target));
}