static int physdev_hvm_map_pirq(
    struct domain *d, int type, int *index, int *pirq)
{
    int ret = 0;

    spin_lock(&d->event_lock);
    switch ( type )
    {
    case MAP_PIRQ_TYPE_GSI: {
        const struct hvm_irq_dpci *hvm_irq_dpci;
        unsigned int machine_gsi = 0;

        if ( *index < 0 || *index >= NR_HVM_IRQS )
        {
            ret = -EINVAL;
            break;
        }

        /* find the machine gsi corresponding to the
         * emulated gsi */
        hvm_irq_dpci = domain_get_irq_dpci(d);
        if ( hvm_irq_dpci )
        {
            const struct hvm_girq_dpci_mapping *girq;

            BUILD_BUG_ON(ARRAY_SIZE(hvm_irq_dpci->girq) < NR_HVM_IRQS);
            list_for_each_entry ( girq,
                                  &hvm_irq_dpci->girq[*index],
                                  list )
                machine_gsi = girq->machine_gsi;
        }
        /* found one, this means we are dealing with a pt device */
        if ( machine_gsi )
        {
            *index = domain_pirq_to_irq(d, machine_gsi);
            *pirq = machine_gsi;
            ret = (*pirq > 0) ? 0 : *pirq;
        }
        /* we didn't find any, this means we are dealing
         * with an emulated device */
        else
        {
            if ( *pirq < 0 )
                *pirq = get_free_pirq(d, type);
            ret = map_domain_emuirq_pirq(d, *pirq, *index);
        }
        break;
    }

    default:
        ret = -EINVAL;
        dprintk(XENLOG_G_WARNING, "map type %d not supported yet\n", type);
        break;
    }

    spin_unlock(&d->event_lock);
    return ret;
}
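Note that the list_for_each_entry loop above has no early exit: if several girq mappings hang off the same emulated GSI, machine_gsi ends up holding the value stored by the last entry on the list. A minimal, self-contained illustration of that "walk the whole list, keep the last value" behaviour, using a hypothetical plain-C node type rather than Xen's intrusive list:

#include <stdio.h>

struct node {
    unsigned int machine_gsi;
    struct node *next;
};

int main(void)
{
    struct node c = { .machine_gsi = 30, .next = NULL };
    struct node b = { .machine_gsi = 20, .next = &c };
    struct node a = { .machine_gsi = 10, .next = &b };
    unsigned int machine_gsi = 0;

    /* Like list_for_each_entry: visit every node; the variable keeps
     * whatever the last iteration stored, i.e. the final entry's GSI. */
    for ( struct node *n = &a; n; n = n->next )
        machine_gsi = n->machine_gsi;

    printf("machine_gsi = %u\n", machine_gsi); /* prints 30 */
    return 0;
}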
static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
{
    struct evtchn *chn;
    struct domain *d = current->domain;
    struct vcpu   *v = d->vcpu[0];
    struct pirq   *info;
    int            port, pirq = bind->pirq;
    long           rc;

    if ( (pirq < 0) || (pirq >= d->nr_pirqs) )
        return -EINVAL;

    if ( !is_hvm_domain(d) && !irq_access_permitted(d, pirq) )
        return -EPERM;

    spin_lock(&d->event_lock);

    if ( pirq_to_evtchn(d, pirq) != 0 )
        ERROR_EXIT(-EEXIST);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);

    chn = evtchn_from_port(d, port);

    info = pirq_get_info(d, pirq);
    if ( !info )
        ERROR_EXIT(-ENOMEM);
    info->evtchn = port;
    rc = (!is_hvm_domain(d)
          ? pirq_guest_bind(v, info,
                            !!(bind->flags & BIND_PIRQ__WILL_SHARE))
          : 0);
    if ( rc != 0 )
    {
        info->evtchn = 0;
        pirq_cleanup_check(info, d);
        goto out;
    }

    chn->state  = ECS_PIRQ;
    chn->u.pirq.irq = pirq;
    link_pirq_port(port, chn, v);
    bind->port = port;

#ifdef CONFIG_X86
    if ( is_hvm_domain(d) && domain_pirq_to_irq(d, pirq) > 0 )
        map_domain_emuirq_pirq(d, pirq, IRQ_PT);
#endif

 out:
    spin_unlock(&d->event_lock);

    return rc;
}
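ERROR_EXIT is defined elsewhere in common/event_channel.c: it records the error in the local rc and jumps to the out label, so every failure path still releases d->event_lock. A sketch of the pattern follows; the exact log wording is from memory and may differ between versions:

#define ERROR_EXIT(_errno)                                      \
    do {                                                        \
        gdprintk(XENLOG_WARNING,                                \
                 "EVTCHNOP failure: error %d\n", (_errno));     \
        rc = (_errno);                                          \
        goto out;                                               \
    } while ( 0 )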
static int physdev_hvm_map_pirq(
    struct domain *d, struct physdev_map_pirq *map)
{
    int pirq, ret = 0;

    spin_lock(&d->event_lock);
    switch ( map->type )
    {
    case MAP_PIRQ_TYPE_GSI: {
        struct hvm_irq_dpci *hvm_irq_dpci;
        struct hvm_girq_dpci_mapping *girq;
        uint32_t machine_gsi = 0;

        /* find the machine gsi corresponding to the
         * emulated gsi */
        hvm_irq_dpci = domain_get_irq_dpci(d);
        if ( hvm_irq_dpci )
        {
            list_for_each_entry ( girq,
                                  &hvm_irq_dpci->girq[map->index],
                                  list )
                machine_gsi = girq->machine_gsi;
        }
        /* found one, this means we are dealing with a pt device */
        if ( machine_gsi )
        {
            map->index = domain_pirq_to_irq(d, machine_gsi);
            pirq = machine_gsi;
            ret = (pirq > 0) ? 0 : pirq;
        }
        /* we didn't find any, this means we are dealing
         * with an emulated device */
        else
        {
            pirq = map->pirq;
            if ( pirq < 0 )
                pirq = get_free_pirq(d, map->type, map->index);
            ret = map_domain_emuirq_pirq(d, pirq, map->index);
        }
        map->pirq = pirq;
        break;
    }

    default:
        ret = -EINVAL;
        dprintk(XENLOG_G_WARNING, "map type %d not supported yet\n",
                map->type);
        break;
    }

    spin_unlock(&d->event_lock);
    return ret;
}
static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
{
    struct evtchn *chn;
    struct domain *d = current->domain;
    struct vcpu   *v = d->vcpu[0];
    int            port, pirq = bind->pirq;
    long           rc;

    if ( (pirq < 0) || (pirq >= d->nr_pirqs) )
        return -EINVAL;

    if ( !is_hvm_domain(d) && !irq_access_permitted(d, pirq) )
        return -EPERM;

    spin_lock(&d->event_lock);

    if ( d->pirq_to_evtchn[pirq] != 0 )
        ERROR_EXIT(-EEXIST);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);

    chn = evtchn_from_port(d, port);

    d->pirq_to_evtchn[pirq] = port;
    rc = (!is_hvm_domain(d)
          ? pirq_guest_bind(v, pirq,
                            !!(bind->flags & BIND_PIRQ__WILL_SHARE))
          : 0);
    if ( rc != 0 )
    {
        d->pirq_to_evtchn[pirq] = 0;
        goto out;
    }

    chn->state  = ECS_PIRQ;
    chn->u.pirq.irq = pirq;
    link_pirq_port(port, chn, v);
    bind->port = port;

    if ( is_hvm_domain(d) && domain_pirq_to_irq(d, pirq) > 0 )
        map_domain_emuirq_pirq(d, pirq, IRQ_PT);

 out:
    spin_unlock(&d->event_lock);

    return rc;
}
int physdev_unmap_pirq(domid_t domid, int pirq)
{
    struct domain *d;
    int ret;

    ret = rcu_lock_target_domain_by_id(domid, &d);
    if ( ret )
        return ret;

    if ( is_hvm_domain(d) )
    {
        spin_lock(&d->event_lock);
        if ( domain_pirq_to_emuirq(d, pirq) != IRQ_UNBOUND )
            ret = unmap_domain_pirq_emuirq(d, pirq);
        spin_unlock(&d->event_lock);
        if ( domid == DOMID_SELF || ret )
            goto free_domain;
    }

    ret = -EPERM;
    if ( !IS_PRIV_FOR(current->domain, d) )
        goto free_domain;

    ret = xsm_irq_permission(d, domain_pirq_to_irq(d, pirq), 0);
    if ( ret )
        goto free_domain;

    spin_lock(&pcidevs_lock);
    spin_lock(&d->event_lock);
    ret = unmap_domain_pirq(d, pirq);
    spin_unlock(&d->event_lock);
    spin_unlock(&pcidevs_lock);

 free_domain:
    rcu_unlock_domain(d);
    return ret;
}
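For context, a hedged caller-side sketch of how a guest kernel reaches this path through the PHYSDEVOP_unmap_pirq hypercall. struct physdev_unmap_pirq comes from Xen's public physdev.h; HYPERVISOR_physdev_op is the usual guest-side wrapper, and the header paths below are assumptions:

#include <xen/interface/physdev.h>   /* assumed header path */
#include <asm/xen/hypercall.h>       /* assumed: HYPERVISOR_physdev_op() */

/* Tear down a previously mapped pirq for the calling domain. */
static int unmap_pirq(int pirq)
{
    struct physdev_unmap_pirq unmap = {
        .domid = DOMID_SELF,
        .pirq  = pirq,
    };

    return HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap);
}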
int physdev_map_pirq(domid_t domid, int type, int *index, int *pirq_p,
                     struct msi_info *msi)
{
    struct domain *d = current->domain;
    int pirq, irq, ret = 0;
    void *map_data = NULL;

    if ( domid == DOMID_SELF && is_hvm_domain(d) )
    {
        /*
         * Only makes sense for vector-based callback, else HVM-IRQ logic
         * calls back into itself and deadlocks on hvm_domain.irq_lock.
         */
        if ( !is_hvm_pv_evtchn_domain(d) )
            return -EINVAL;

        return physdev_hvm_map_pirq(d, type, index, pirq_p);
    }

    d = rcu_lock_domain_by_any_id(domid);
    if ( d == NULL )
        return -ESRCH;

    ret = xsm_map_domain_pirq(XSM_TARGET, d);
    if ( ret )
        goto free_domain;

    /* Verify or get irq. */
    switch ( type )
    {
    case MAP_PIRQ_TYPE_GSI:
        if ( *index < 0 || *index >= nr_irqs_gsi )
        {
            dprintk(XENLOG_G_ERR, "dom%d: map invalid irq %d\n",
                    d->domain_id, *index);
            ret = -EINVAL;
            goto free_domain;
        }

        irq = domain_pirq_to_irq(current->domain, *index);
        if ( irq <= 0 )
        {
            if ( is_hardware_domain(current->domain) )
                irq = *index;
            else
            {
                dprintk(XENLOG_G_ERR, "dom%d: map pirq with incorrect irq!\n",
                        d->domain_id);
                ret = -EINVAL;
                goto free_domain;
            }
        }
        break;

    case MAP_PIRQ_TYPE_MSI:
        if ( !msi->table_base )
            msi->entry_nr = 1;
        irq = *index;
        if ( irq == -1 )
        /* Deliberate case label inside the if: MULTI_MSI always
         * allocates a fresh irq, MSI only when none was supplied. */
    case MAP_PIRQ_TYPE_MULTI_MSI:
            irq = create_irq(NUMA_NO_NODE);

        if ( irq < nr_irqs_gsi || irq >= nr_irqs )
        {
            dprintk(XENLOG_G_ERR, "dom%d: can't create irq for msi!\n",
                    d->domain_id);
            ret = -EINVAL;
            goto free_domain;
        }

        msi->irq = irq;
        map_data = msi;
        break;

    default:
        dprintk(XENLOG_G_ERR, "dom%d: wrong map_pirq type %x\n",
                d->domain_id, type);
        ret = -EINVAL;
        goto free_domain;
    }

    spin_lock(&pcidevs_lock);
    /* Verify or get pirq. */
    spin_lock(&d->event_lock);
    pirq = domain_irq_to_pirq(d, irq);
    if ( *pirq_p < 0 )
    {
        if ( pirq )
        {
            dprintk(XENLOG_G_ERR, "dom%d: %d:%d already mapped to %d\n",
                    d->domain_id, *index, *pirq_p, pirq);
            if ( pirq < 0 )
            {
                ret = -EBUSY;
                goto done;
            }
        }
        else if ( type == MAP_PIRQ_TYPE_MULTI_MSI )
        {
            if ( msi->entry_nr <= 0 || msi->entry_nr > 32 )
                ret = -EDOM;
            else if ( msi->entry_nr != 1 && !iommu_intremap )
                ret = -EOPNOTSUPP;
            else
            {
                while ( msi->entry_nr & (msi->entry_nr - 1) )
                    msi->entry_nr += msi->entry_nr & -msi->entry_nr;
                pirq = get_free_pirqs(d, msi->entry_nr);
                if ( pirq < 0 )
                {
                    while ( (msi->entry_nr >>= 1) > 1 )
                        if ( get_free_pirqs(d, msi->entry_nr) > 0 )
                            break;
                    dprintk(XENLOG_G_ERR,
                            "dom%d: no block of %d free pirqs\n",
                            d->domain_id, msi->entry_nr << 1);
                    ret = pirq;
                }
            }
            if ( ret < 0 )
                goto done;
        }
        else
        {
            pirq = get_free_pirq(d, type);
            if ( pirq < 0 )
            {
                dprintk(XENLOG_G_ERR, "dom%d: no free pirq\n", d->domain_id);
                ret = pirq;
                goto done;
            }
        }
    }
    else
    {
        if ( pirq && pirq != *pirq_p )
        {
            dprintk(XENLOG_G_ERR, "dom%d: pirq %d conflicts with irq %d\n",
                    d->domain_id, *index, *pirq_p);
            ret = -EEXIST;
            goto done;
        }
        else
            pirq = *pirq_p;
    }

    ret = map_domain_pirq(d, pirq, irq, type, map_data);
    if ( ret == 0 )
        *pirq_p = pirq;

 done:
    spin_unlock(&d->event_lock);
    spin_unlock(&pcidevs_lock);
    if ( ret != 0 )
        switch ( type )
        {
        case MAP_PIRQ_TYPE_MSI:
            if ( *index == -1 )
        case MAP_PIRQ_TYPE_MULTI_MSI:
                destroy_irq(irq);
            break;
        }
 free_domain:
    rcu_unlock_domain(d);
    return ret;
}
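The multi-MSI branch rounds msi->entry_nr up to a power of two by repeatedly adding the lowest set bit (v & -v) until at most one bit remains set (i.e. v & (v - 1) == 0), since MSI vector blocks must be power-of-two sized. A standalone demonstration of that rounding loop:

#include <stdio.h>

/* Round v up to the next power of two by repeatedly adding its lowest
 * set bit (v & -v); once only one bit is set, v & (v - 1) is zero. */
static unsigned int round_up_pow2(unsigned int v)
{
    while ( v & (v - 1) )
        v += v & -v;
    return v;
}

int main(void)
{
    for ( unsigned int v = 1; v <= 9; v++ )
        printf("%u -> %u\n", v, round_up_pow2(v)); /* 3->4, 5->8, 9->16 */
    return 0;
}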
static long __evtchn_close(struct domain *d1, int port1)
{
    struct domain *d2 = NULL;
    struct vcpu   *v;
    struct evtchn *chn1, *chn2;
    int            port2;
    long           rc = 0;

 again:
    spin_lock(&d1->event_lock);

    if ( !port_is_valid(d1, port1) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn1 = evtchn_from_port(d1, port1);

    /* Guest cannot close a Xen-attached event channel. */
    if ( unlikely(consumer_is_xen(chn1)) )
    {
        rc = -EINVAL;
        goto out;
    }

    switch ( chn1->state )
    {
    case ECS_FREE:
    case ECS_RESERVED:
        rc = -EINVAL;
        goto out;

    case ECS_UNBOUND:
        break;

    case ECS_PIRQ: {
        struct pirq *pirq = pirq_info(d1, chn1->u.pirq.irq);

        if ( !pirq )
            break;
        if ( !is_hvm_domain(d1) )
            pirq_guest_unbind(d1, pirq);
        pirq->evtchn = 0;
        pirq_cleanup_check(pirq, d1);
        unlink_pirq_port(chn1, d1->vcpu[chn1->notify_vcpu_id]);
#ifdef CONFIG_X86
        if ( is_hvm_domain(d1) && domain_pirq_to_irq(d1, pirq->pirq) > 0 )
            unmap_domain_pirq_emuirq(d1, pirq->pirq);
#endif
        break;
    }

    case ECS_VIRQ:
        for_each_vcpu ( d1, v )
        {
            if ( v->virq_to_evtchn[chn1->u.virq] != port1 )
                continue;
            v->virq_to_evtchn[chn1->u.virq] = 0;
            spin_barrier(&v->virq_lock);
        }
        break;

    case ECS_IPI:
        break;

    case ECS_INTERDOMAIN:
        if ( d2 == NULL )
        {
            d2 = chn1->u.interdomain.remote_dom;

            /* If we unlock d1 then we could lose d2. Must get a reference. */
            if ( unlikely(!get_domain(d2)) )
                BUG();

            if ( d1 < d2 )
            {
                spin_lock(&d2->event_lock);
            }
            else if ( d1 != d2 )
            {
                spin_unlock(&d1->event_lock);
                spin_lock(&d2->event_lock);
                goto again;
            }
        }
        else if ( d2 != chn1->u.interdomain.remote_dom )
        {
            /*
             * We can only get here if the port was closed and re-bound after
             * unlocking d1 but before locking d2 above. We could retry but
             * it is easier to return the same error as if we had seen the
             * port in ECS_CLOSED. It must have passed through that state for
             * us to end up here, so it's a valid error to return.
             */
            rc = -EINVAL;
            goto out;
        }

        port2 = chn1->u.interdomain.remote_port;
        BUG_ON(!port_is_valid(d2, port2));

        chn2 = evtchn_from_port(d2, port2);
        BUG_ON(chn2->state != ECS_INTERDOMAIN);
        BUG_ON(chn2->u.interdomain.remote_dom != d1);

        chn2->state = ECS_UNBOUND;
        chn2->u.unbound.remote_domid = d1->domain_id;
        break;

    default:
        BUG();
    }

    /* Clear pending event to avoid unexpected behavior on re-bind. */
    clear_bit(port1, &shared_info(d1, evtchn_pending));

    /* Reset binding to vcpu0 when the channel is freed. */
    chn1->state          = ECS_FREE;
    chn1->notify_vcpu_id = 0;

    xsm_evtchn_close_post(chn1);

 out:
    if ( d2 != NULL )
    {
        if ( d1 != d2 )
            spin_unlock(&d2->event_lock);
        put_domain(d2);
    }

    spin_unlock(&d1->event_lock);

    return rc;
}
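The ECS_INTERDOMAIN arm orders the two event locks by domain pointer address: d2's lock is taken directly only when d1 < d2; otherwise d1's lock is dropped, d2's is taken, and the whole switch is retried from again so that state read under the old lock is revalidated. A minimal pthread sketch of the same back-off-and-retry discipline, with a hypothetical lock_both helper standing in for the event locks:

#include <pthread.h>

struct obj { pthread_mutex_t lock; };

/* Take both locks in address order, backing off and retrying when the
 * first lock acquired turns out to be the higher-addressed one.
 * The caller unlocks both when done. */
static void lock_both(struct obj *a, struct obj *b)
{
    int b_held = 0;

 again:
    pthread_mutex_lock(&a->lock);
    if ( !b_held && a != b )
    {
        if ( a < b )
            pthread_mutex_lock(&b->lock);   /* already in address order */
        else
        {
            pthread_mutex_unlock(&a->lock); /* wrong order: back off */
            pthread_mutex_lock(&b->lock);
            b_held = 1;
            goto again;                     /* re-take a; any state read
                                             * earlier must be revalidated */
        }
    }
}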
int physdev_map_pirq(domid_t domid, int type, int *index, int *pirq_p,
                     struct msi_info *msi)
{
    struct domain *d = current->domain;
    int pirq, irq, ret = 0;
    void *map_data = NULL;

    if ( domid == DOMID_SELF && is_hvm_domain(d) )
    {
        /*
         * Only makes sense for vector-based callback, else HVM-IRQ logic
         * calls back into itself and deadlocks on hvm_domain.irq_lock.
         */
        if ( !is_hvm_pv_evtchn_domain(d) )
            return -EINVAL;

        return physdev_hvm_map_pirq(d, type, index, pirq_p);
    }

    d = rcu_lock_domain_by_any_id(domid);
    if ( d == NULL )
        return -ESRCH;

    ret = xsm_map_domain_pirq(XSM_TARGET, d);
    if ( ret )
        goto free_domain;

    /* Verify or get irq. */
    switch ( type )
    {
    case MAP_PIRQ_TYPE_GSI:
        if ( *index < 0 || *index >= nr_irqs_gsi )
        {
            dprintk(XENLOG_G_ERR, "dom%d: map invalid irq %d\n",
                    d->domain_id, *index);
            ret = -EINVAL;
            goto free_domain;
        }

        irq = domain_pirq_to_irq(current->domain, *index);
        if ( irq <= 0 )
        {
            if ( is_hardware_domain(current->domain) )
                irq = *index;
            else
            {
                dprintk(XENLOG_G_ERR, "dom%d: map pirq with incorrect irq!\n",
                        d->domain_id);
                ret = -EINVAL;
                goto free_domain;
            }
        }
        break;

    case MAP_PIRQ_TYPE_MSI:
        irq = *index;
        if ( irq == -1 )
            irq = create_irq(NUMA_NO_NODE);

        if ( irq < nr_irqs_gsi || irq >= nr_irqs )
        {
            dprintk(XENLOG_G_ERR, "dom%d: can't create irq for msi!\n",
                    d->domain_id);
            ret = -EINVAL;
            goto free_domain;
        }

        msi->irq = irq;
        map_data = msi;
        break;

    default:
        dprintk(XENLOG_G_ERR, "dom%d: wrong map_pirq type %x\n",
                d->domain_id, type);
        ret = -EINVAL;
        goto free_domain;
    }

    spin_lock(&pcidevs_lock);
    /* Verify or get pirq. */
    spin_lock(&d->event_lock);
    pirq = domain_irq_to_pirq(d, irq);
    if ( *pirq_p < 0 )
    {
        if ( pirq )
        {
            dprintk(XENLOG_G_ERR, "dom%d: %d:%d already mapped to %d\n",
                    d->domain_id, *index, *pirq_p, pirq);
            if ( pirq < 0 )
            {
                ret = -EBUSY;
                goto done;
            }
        }
        else
        {
            pirq = get_free_pirq(d, type);
            if ( pirq < 0 )
            {
                dprintk(XENLOG_G_ERR, "dom%d: no free pirq\n", d->domain_id);
                ret = pirq;
                goto done;
            }
        }
    }
    else
    {
        if ( pirq && pirq != *pirq_p )
        {
            dprintk(XENLOG_G_ERR, "dom%d: pirq %d conflicts with irq %d\n",
                    d->domain_id, *index, *pirq_p);
            ret = -EEXIST;
            goto done;
        }
        else
            pirq = *pirq_p;
    }

    ret = map_domain_pirq(d, pirq, irq, type, map_data);
    if ( ret == 0 )
        *pirq_p = pirq;

 done:
    spin_unlock(&d->event_lock);
    spin_unlock(&pcidevs_lock);
    if ( (ret != 0) && (type == MAP_PIRQ_TYPE_MSI) && (*index == -1) )
        destroy_irq(irq);
 free_domain:
    rcu_unlock_domain(d);
    return ret;
}
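A hedged caller-side sketch of the matching PHYSDEVOP_map_pirq invocation for the GSI case: passing pirq = -1 takes the *pirq_p < 0 path above, so Xen allocates a free pirq and returns it in map.pirq. The struct layout follows Xen's public physdev.h; the header paths are assumptions:

#include <xen/interface/physdev.h>   /* assumed header path */
#include <asm/xen/hypercall.h>       /* assumed: HYPERVISOR_physdev_op() */

/* Map a GSI onto a Xen-chosen pirq for the calling domain. */
static int map_gsi_to_pirq(int gsi, int *pirq_out)
{
    struct physdev_map_pirq map = {
        .domid = DOMID_SELF,
        .type  = MAP_PIRQ_TYPE_GSI,
        .index = gsi,
        .pirq  = -1,          /* *pirq_p < 0: let Xen pick a free pirq */
    };
    int rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map);

    if ( rc == 0 )
        *pirq_out = map.pirq; /* filled in on the map_domain_pirq() path */
    return rc;
}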
static int physdev_map_pirq(struct physdev_map_pirq *map)
{
    struct domain *d;
    int pirq, irq, ret = 0;
    struct msi_info _msi;
    void *map_data = NULL;

    ret = rcu_lock_target_domain_by_id(map->domid, &d);
    if ( ret )
        return ret;

    if ( map->domid == DOMID_SELF && is_hvm_domain(d) )
    {
        ret = physdev_hvm_map_pirq(d, map);
        goto free_domain;
    }

    if ( !IS_PRIV_FOR(current->domain, d) )
    {
        ret = -EPERM;
        goto free_domain;
    }

    /* Verify or get irq. */
    switch ( map->type )
    {
    case MAP_PIRQ_TYPE_GSI:
        if ( map->index < 0 || map->index >= nr_irqs_gsi )
        {
            dprintk(XENLOG_G_ERR, "dom%d: map invalid irq %d\n",
                    d->domain_id, map->index);
            ret = -EINVAL;
            goto free_domain;
        }

        irq = domain_pirq_to_irq(current->domain, map->index);
        if ( irq <= 0 )
        {
            if ( IS_PRIV(current->domain) )
                irq = map->index;
            else
            {
                dprintk(XENLOG_G_ERR, "dom%d: map pirq with incorrect irq!\n",
                        d->domain_id);
                ret = -EINVAL;
                goto free_domain;
            }
        }
        break;

    case MAP_PIRQ_TYPE_MSI:
        irq = map->index;
        if ( irq == -1 )
            irq = create_irq();

        if ( irq < 0 || irq >= nr_irqs )
        {
            dprintk(XENLOG_G_ERR, "dom%d: can't create irq for msi!\n",
                    d->domain_id);
            ret = -EINVAL;
            goto free_domain;
        }

        _msi.bus = map->bus;
        _msi.devfn = map->devfn;
        _msi.entry_nr = map->entry_nr;
        _msi.table_base = map->table_base;
        _msi.irq = irq;
        map_data = &_msi;
        break;

    default:
        dprintk(XENLOG_G_ERR, "dom%d: wrong map_pirq type %x\n",
                d->domain_id, map->type);
        ret = -EINVAL;
        goto free_domain;
    }

    spin_lock(&pcidevs_lock);
    /* Verify or get pirq. */
    spin_lock(&d->event_lock);
    pirq = domain_irq_to_pirq(d, irq);
    if ( map->pirq < 0 )
    {
        if ( pirq )
        {
            dprintk(XENLOG_G_ERR, "dom%d: %d:%d already mapped to %d\n",
                    d->domain_id, map->index, map->pirq, pirq);
            if ( pirq < 0 )
            {
                ret = -EBUSY;
                goto done;
            }
        }
        else
        {
            pirq = get_free_pirq(d, map->type, map->index);
            if ( pirq < 0 )
            {
                dprintk(XENLOG_G_ERR, "dom%d: no free pirq\n", d->domain_id);
                ret = pirq;
                goto done;
            }
        }
    }
    else
    {
        if ( pirq && pirq != map->pirq )
        {
            dprintk(XENLOG_G_ERR, "dom%d: pirq %d conflicts with irq %d\n",
                    d->domain_id, map->index, map->pirq);
            ret = -EEXIST;
            goto done;
        }
        else
            pirq = map->pirq;
    }

    ret = map_domain_pirq(d, pirq, irq, map->type, map_data);
    if ( ret == 0 )
        map->pirq = pirq;

 done:
    spin_unlock(&d->event_lock);
    spin_unlock(&pcidevs_lock);
    if ( (ret != 0) && (map->type == MAP_PIRQ_TYPE_MSI) && (map->index == -1) )
        destroy_irq(irq);
 free_domain:
    rcu_unlock_domain(d);
    return ret;
}