static int physdev_hvm_map_pirq(
    struct domain *d, int type, int *index, int *pirq)
{
    int ret = 0;

    spin_lock(&d->event_lock);
    switch ( type )
    {
    case MAP_PIRQ_TYPE_GSI: {
        const struct hvm_irq_dpci *hvm_irq_dpci;
        unsigned int machine_gsi = 0;

        if ( *index < 0 || *index >= NR_HVM_IRQS )
        {
            ret = -EINVAL;
            break;
        }

        /* find the machine gsi corresponding to the
         * emulated gsi */
        hvm_irq_dpci = domain_get_irq_dpci(d);
        if ( hvm_irq_dpci )
        {
            const struct hvm_girq_dpci_mapping *girq;

            BUILD_BUG_ON(ARRAY_SIZE(hvm_irq_dpci->girq) < NR_HVM_IRQS);
            list_for_each_entry ( girq, &hvm_irq_dpci->girq[*index], list )
                machine_gsi = girq->machine_gsi;
        }
        /* found one, this mean we are dealing with a pt device */
        if ( machine_gsi )
        {
            *index = domain_pirq_to_irq(d, machine_gsi);
            *pirq = machine_gsi;
            ret = (*pirq > 0) ? 0 : *pirq;
        }
        /* we didn't find any, this means we are dealing
         * with an emulated device */
        else
        {
            if ( *pirq < 0 )
                *pirq = get_free_pirq(d, type);
            ret = map_domain_emuirq_pirq(d, *pirq, *index);
        }
        break;
    }

    default:
        ret = -EINVAL;
        dprintk(XENLOG_G_WARNING, "map type %d not supported yet\n", type);
        break;
    }

    spin_unlock(&d->event_lock);

    return ret;
}
static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
{
    struct evtchn *chn;
    struct domain *d = current->domain;
    struct vcpu   *v = d->vcpu[0];
    struct pirq   *info;
    int            port, pirq = bind->pirq;
    long           rc;

    if ( (pirq < 0) || (pirq >= d->nr_pirqs) )
        return -EINVAL;

    if ( !is_hvm_domain(d) && !irq_access_permitted(d, pirq) )
        return -EPERM;

    spin_lock(&d->event_lock);

    if ( pirq_to_evtchn(d, pirq) != 0 )
        ERROR_EXIT(-EEXIST);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);

    chn = evtchn_from_port(d, port);

    info = pirq_get_info(d, pirq);
    if ( !info )
        ERROR_EXIT(-ENOMEM);
    info->evtchn = port;
    rc = (!is_hvm_domain(d)
          ? pirq_guest_bind(v, info,
                            !!(bind->flags & BIND_PIRQ__WILL_SHARE))
          : 0);
    if ( rc != 0 )
    {
        info->evtchn = 0;
        pirq_cleanup_check(info, d);
        goto out;
    }

    chn->state = ECS_PIRQ;
    chn->u.pirq.irq = pirq;
    link_pirq_port(port, chn, v);

    bind->port = port;

#ifdef CONFIG_X86
    if ( is_hvm_domain(d) && domain_pirq_to_irq(d, pirq) > 0 )
        map_domain_emuirq_pirq(d, pirq, IRQ_PT);
#endif

 out:
    spin_unlock(&d->event_lock);

    return rc;
}
static int physdev_hvm_map_pirq(
    struct domain *d, struct physdev_map_pirq *map)
{
    int pirq, ret = 0;

    spin_lock(&d->event_lock);
    switch ( map->type )
    {
    case MAP_PIRQ_TYPE_GSI: {
        struct hvm_irq_dpci *hvm_irq_dpci;
        struct hvm_girq_dpci_mapping *girq;
        uint32_t machine_gsi = 0;

        /* find the machine gsi corresponding to the
         * emulated gsi */
        hvm_irq_dpci = domain_get_irq_dpci(d);
        if ( hvm_irq_dpci )
        {
            list_for_each_entry ( girq,
                                  &hvm_irq_dpci->girq[map->index],
                                  list )
                machine_gsi = girq->machine_gsi;
        }
        /* found one, this mean we are dealing with a pt device */
        if ( machine_gsi )
        {
            map->index = domain_pirq_to_irq(d, machine_gsi);
            pirq = machine_gsi;
            ret = (pirq > 0) ? 0 : pirq;
        }
        /* we didn't find any, this means we are dealing
         * with an emulated device */
        else
        {
            pirq = map->pirq;
            if ( pirq < 0 )
                pirq = get_free_pirq(d, map->type, map->index);
            ret = map_domain_emuirq_pirq(d, pirq, map->index);
        }
        map->pirq = pirq;
        break;
    }

    default:
        ret = -EINVAL;
        dprintk(XENLOG_G_WARNING, "map type %d not supported yet\n", map->type);
        break;
    }

    spin_unlock(&d->event_lock);

    return ret;
}
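Both physdev_hvm_map_pirq() variants above are reached through the PHYSDEVOP_map_pirq hypercall. As a rough illustration of the caller side, the following is a minimal sketch assuming Xen's public struct physdev_map_pirq layout and a Linux-style HYPERVISOR_physdev_op() wrapper; the helper name is made up for this example:

/* Linux-guest include paths assumed for this sketch. */
#include <xen/interface/physdev.h>
#include <asm/xen/hypercall.h>

/* Hedged sketch: map emulated GSI `gsi` to a pirq for the calling HVM
 * domain via PHYSDEVOP_map_pirq.  Returns the pirq on success, a
 * negative error code otherwise. */
static int map_gsi_to_pirq(int gsi)
{
    struct physdev_map_pirq map = {
        .domid = DOMID_SELF,
        .type  = MAP_PIRQ_TYPE_GSI,
        .index = gsi,
        .pirq  = -1,               /* negative: ask Xen to pick a free pirq */
    };
    int rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map);

    return rc ? rc : map.pirq;     /* the chosen pirq comes back in map.pirq */
}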
static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
{
    struct evtchn *chn;
    struct domain *d = current->domain;
    struct vcpu   *v = d->vcpu[0];
    int            port, pirq = bind->pirq;
    long           rc;

    if ( (pirq < 0) || (pirq >= d->nr_pirqs) )
        return -EINVAL;

    if ( !is_hvm_domain(d) && !irq_access_permitted(d, pirq) )
        return -EPERM;

    spin_lock(&d->event_lock);

    if ( d->pirq_to_evtchn[pirq] != 0 )
        ERROR_EXIT(-EEXIST);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);

    chn = evtchn_from_port(d, port);

    d->pirq_to_evtchn[pirq] = port;
    rc = (!is_hvm_domain(d)
          ? pirq_guest_bind(v, pirq,
                            !!(bind->flags & BIND_PIRQ__WILL_SHARE))
          : 0);
    if ( rc != 0 )
    {
        d->pirq_to_evtchn[pirq] = 0;
        goto out;
    }

    chn->state = ECS_PIRQ;
    chn->u.pirq.irq = pirq;
    link_pirq_port(port, chn, v);

    bind->port = port;

    if ( is_hvm_domain(d) && domain_pirq_to_irq(d, pirq) > 0 )
        map_domain_emuirq_pirq(d, pirq, IRQ_PT);

 out:
    spin_unlock(&d->event_lock);

    return rc;
}
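A pirq mapped this way is then bound to an event channel with EVTCHNOP_bind_pirq, which is what the evtchn_bind_pirq() variants above implement on the hypervisor side. A caller-side sketch, again assuming a Linux-style HYPERVISOR_event_channel_op() wrapper and an illustrative helper name:

/* Linux-guest include path assumed for this sketch. */
#include <xen/interface/event_channel.h>
#include <asm/xen/hypercall.h>

/* Hedged sketch: bind an already-mapped pirq to a fresh event-channel
 * port.  Returns the port on success, a negative error code otherwise. */
static int bind_pirq_to_port(uint32_t pirq, bool will_share)
{
    struct evtchn_bind_pirq bind = {
        .pirq  = pirq,
        .flags = will_share ? BIND_PIRQ__WILL_SHARE : 0,
    };
    int rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind);

    return rc ? rc : bind.port;    /* the allocated port comes back in bind.port */
}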
int hvm_inject_msi(struct domain *d, uint64_t addr, uint32_t data)
{
    uint32_t tmp = (uint32_t) addr;
    uint8_t dest = (tmp & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
    uint8_t dest_mode = !!(tmp & MSI_ADDR_DESTMODE_MASK);
    uint8_t delivery_mode = (data & MSI_DATA_DELIVERY_MODE_MASK)
        >> MSI_DATA_DELIVERY_MODE_SHIFT;
    uint8_t trig_mode = (data & MSI_DATA_TRIGGER_MASK)
        >> MSI_DATA_TRIGGER_SHIFT;
    uint8_t vector = data & MSI_DATA_VECTOR_MASK;

    if ( !vector )
    {
        int pirq = ((addr >> 32) & 0xffffff00) | dest;

        if ( pirq > 0 )
        {
            struct pirq *info = pirq_info(d, pirq);

            /* if it is the first time, allocate the pirq */
            if ( !info || info->arch.hvm.emuirq == IRQ_UNBOUND )
            {
                int rc;

                spin_lock(&d->event_lock);
                rc = map_domain_emuirq_pirq(d, pirq, IRQ_MSI_EMU);
                spin_unlock(&d->event_lock);
                if ( rc )
                    return rc;
                info = pirq_info(d, pirq);
                if ( !info )
                    return -EBUSY;
            }
            else if ( info->arch.hvm.emuirq != IRQ_MSI_EMU )
                return -EINVAL;
            send_guest_pirq(d, info);
            return 0;
        }
        return -ERANGE;
    }
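hvm_inject_msi() only takes the pirq path when the MSI data word carries a zero vector; the pirq number is then recovered from the upper 32 bits of the address combined with the destination-ID field. The following is a hedged sketch of the matching encoding, derived purely from the decoder above, with an illustrative function name and the standard x86 MSI address window assumed:

#include <stdint.h>

/* Hedged sketch: build an (addr, data) pair that the decoder above will
 * turn back into `pirq`.  Bits 8 and up of the pirq land in addr[63:40];
 * the low 8 bits go into the MSI destination-ID field (addr[19:12]). */
static void encode_pirq_as_msi(uint32_t pirq, uint64_t *addr, uint32_t *data)
{
    uint64_t hi   = (uint64_t)(pirq & 0xffffff00) << 32;
    uint32_t dest = pirq & 0xff;

    *addr = hi | 0xfee00000u | ((uint64_t)dest << 12); /* 0xfeexxxxx MSI window */
    *data = 0;                     /* vector == 0 selects the pirq path */
}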