int physdev_unmap_pirq(domid_t domid, int pirq)
{
    struct domain *d;
    int ret;

    d = rcu_lock_domain_by_any_id(domid);
    if ( d == NULL )
        return -ESRCH;

    ret = xsm_unmap_domain_pirq(XSM_TARGET, d);
    if ( ret )
        goto free_domain;

    if ( is_hvm_domain(d) )
    {
        spin_lock(&d->event_lock);
        if ( domain_pirq_to_emuirq(d, pirq) != IRQ_UNBOUND )
            ret = unmap_domain_pirq_emuirq(d, pirq);
        spin_unlock(&d->event_lock);
        if ( domid == DOMID_SELF || ret )
            goto free_domain;
    }

    spin_lock(&pcidevs_lock);
    spin_lock(&d->event_lock);
    ret = unmap_domain_pirq(d, pirq);
    spin_unlock(&d->event_lock);
    spin_unlock(&pcidevs_lock);

 free_domain:
    rcu_unlock_domain(d);
    return ret;
}
int do_mem_event_op(int op, uint32_t domain, void *arg)
{
    int ret;
    struct domain *d;

    ret = rcu_lock_live_remote_domain_by_id(domain, &d);
    if ( ret )
        return ret;

    ret = xsm_mem_event_op(XSM_TARGET, d, op);
    if ( ret )
        goto out;

    switch (op)
    {
    case XENMEM_paging_op:
        ret = mem_paging_memop(d, (xen_mem_event_op_t *) arg);
        break;
    case XENMEM_access_op:
        ret = mem_access_memop(d, (xen_mem_event_op_t *) arg);
        break;
    case XENMEM_sharing_op:
        ret = mem_sharing_memop(d, (xen_mem_sharing_op_t *) arg);
        break;
    default:
        ret = -ENOSYS;
    }

 out:
    rcu_unlock_domain(d);
    return ret;
}
int do_mem_event_op(int op, uint32_t domain, void *arg)
{
    int ret;
    struct domain *d;

    d = get_mem_event_op_target(domain, &ret);
    if ( !d )
        return ret;

    switch (op)
    {
    case XENMEM_paging_op:
        ret = mem_paging_memop(d, (xen_mem_event_op_t *) arg);
        break;
    case XENMEM_access_op:
        ret = mem_access_memop(d, (xen_mem_event_op_t *) arg);
        break;
    case XENMEM_sharing_op:
        ret = mem_sharing_memop(d, (xen_mem_sharing_op_t *) arg);
        break;
    default:
        ret = -ENOSYS;
    }

    rcu_unlock_domain(d);
    return ret;
}
static int physdev_unmap_pirq(struct physdev_unmap_pirq *unmap)
{
    struct domain *d;
    int ret;

    ret = rcu_lock_target_domain_by_id(unmap->domid, &d);
    if ( ret )
        return ret;

    if ( is_hvm_domain(d) )
    {
        spin_lock(&d->event_lock);
        if ( domain_pirq_to_emuirq(d, unmap->pirq) != IRQ_UNBOUND )
            ret = unmap_domain_pirq_emuirq(d, unmap->pirq);
        spin_unlock(&d->event_lock);
        if ( unmap->domid == DOMID_SELF || ret )
            goto free_domain;
    }

    ret = -EPERM;
    if ( !IS_PRIV_FOR(current->domain, d) )
        goto free_domain;

    spin_lock(&pcidevs_lock);
    spin_lock(&d->event_lock);
    ret = unmap_domain_pirq(d, unmap->pirq);
    spin_unlock(&d->event_lock);
    spin_unlock(&pcidevs_lock);

 free_domain:
    rcu_unlock_domain(d);
    return ret;
}
int do_mem_event_op(int op, uint32_t domain, void *arg)
{
    int ret;
    struct domain *d;

    ret = rcu_lock_live_remote_domain_by_id(domain, &d);
    if ( ret )
        return ret;

    ret = xsm_mem_event_op(XSM_DM_PRIV, d, op);
    if ( ret )
        goto out;

    switch (op)
    {
#ifdef HAS_MEM_PAGING
    case XENMEM_paging_op:
        ret = mem_paging_memop(d, (xen_mem_event_op_t *) arg);
        break;
#endif
#ifdef HAS_MEM_SHARING
    case XENMEM_sharing_op:
        ret = mem_sharing_memop(d, (xen_mem_sharing_op_t *) arg);
        break;
#endif
    default:
        ret = -ENOSYS;
    }

 out:
    rcu_unlock_domain(d);
    return ret;
}
static int physdev_unmap_pirq(struct physdev_unmap_pirq *unmap)
{
    struct domain *d;
    int ret;

    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( unmap->domid == DOMID_SELF )
        d = rcu_lock_domain(current->domain);
    else
        d = rcu_lock_domain_by_id(unmap->domid);

    if ( d == NULL )
        return -ESRCH;

    spin_lock(&pcidevs_lock);
    spin_lock(&d->event_lock);
    ret = unmap_domain_pirq(d, unmap->pirq);
    spin_unlock(&d->event_lock);
    spin_unlock(&pcidevs_lock);

    rcu_unlock_domain(d);
    return ret;
}
static long evtchn_alloc_unbound(evtchn_alloc_unbound_t *alloc)
{
    struct evtchn *chn;
    struct domain *d;
    int port;
    domid_t dom = alloc->dom;
    long rc;

    rc = rcu_lock_target_domain_by_id(dom, &d);
    if ( rc )
        return rc;

    spin_lock(&d->event_lock);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT_DOM(port, d);
    chn = evtchn_from_port(d, port);

    rc = xsm_evtchn_unbound(d, chn, alloc->remote_dom);
    if ( rc )
        goto out;

    chn->state = ECS_UNBOUND;
    if ( (chn->u.unbound.remote_domid = alloc->remote_dom) == DOMID_SELF )
        chn->u.unbound.remote_domid = current->domain->domain_id;

    alloc->port = port;

 out:
    spin_unlock(&d->event_lock);
    rcu_unlock_domain(d);

    return rc;
}
static int flask_relabel_domain(struct xen_flask_relabel *arg)
{
    int rc;
    struct domain *d;
    struct domain_security_struct *csec = current->domain->ssid;
    struct domain_security_struct *dsec;
    struct avc_audit_data ad;
    AVC_AUDIT_DATA_INIT(&ad, NONE);

    d = rcu_lock_domain_by_any_id(arg->domid);
    if ( d == NULL )
        return -ESRCH;

    ad.sdom = current->domain;
    ad.tdom = d;
    dsec = d->ssid;

    if ( arg->domid == DOMID_SELF )
    {
        rc = avc_has_perm(dsec->sid, arg->sid, SECCLASS_DOMAIN2,
                          DOMAIN2__RELABELSELF, &ad);
        if ( rc )
            goto out;
    }
    else
    {
        rc = avc_has_perm(csec->sid, dsec->sid, SECCLASS_DOMAIN2,
                          DOMAIN2__RELABELFROM, &ad);
        if ( rc )
            goto out;

        rc = avc_has_perm(csec->sid, arg->sid, SECCLASS_DOMAIN2,
                          DOMAIN2__RELABELTO, &ad);
        if ( rc )
            goto out;
    }

    rc = avc_has_perm(dsec->sid, arg->sid, SECCLASS_DOMAIN,
                      DOMAIN__TRANSITION, &ad);
    if ( rc )
        goto out;

    dsec->sid = arg->sid;
    dsec->self_sid = arg->sid;
    security_transition_sid(dsec->sid, dsec->sid, SECCLASS_DOMAIN,
                            &dsec->self_sid);
    if ( d->target )
    {
        struct domain_security_struct *tsec = d->target->ssid;
        security_transition_sid(tsec->sid, dsec->sid, SECCLASS_DOMAIN,
                                &dsec->target_sid);
    }

 out:
    rcu_unlock_domain(d);
    return rc;
}
/*
 * Like rcu_lock_target_domain_by_id(), but the target must be a domain other
 * than the caller, and the caller must be privileged for it (IS_PRIV_FOR);
 * otherwise -EPERM is returned and no reference is held.
 */
int rcu_lock_remote_target_domain_by_id(domid_t dom, struct domain **d)
{
    if ( (*d = rcu_lock_domain_by_id(dom)) == NULL )
        return -ESRCH;

    if ( (*d == current->domain) || !IS_PRIV_FOR(current->domain, *d) )
    {
        rcu_unlock_domain(*d);
        return -EPERM;
    }

    return 0;
}
static inline int is_free_domid(domid_t dom)
{
    struct domain *d;

    if ( dom >= DOMID_FIRST_RESERVED )
        return 0;

    if ( (d = rcu_lock_domain_by_id(dom)) == NULL )
        return 1;

    rcu_unlock_domain(d);
    return 0;
}
static int late_hwdom_init(struct domain *d)
{
#ifdef CONFIG_LATE_HWDOM
    struct domain *dom0;
    int rv;

    if ( d != hardware_domain || d->domain_id == 0 )
        return 0;

    rv = xsm_init_hardware_domain(XSM_HOOK, d);
    if ( rv )
        return rv;

    printk("Initialising hardware domain %d\n", hardware_domid);

    dom0 = rcu_lock_domain_by_id(0);
    ASSERT(dom0 != NULL);
    /*
     * Hardware resource ranges for domain 0 have been set up from
     * various sources intended to restrict the hardware domain's
     * access.  Apply these ranges to the actual hardware domain.
     *
     * Because the lists are being swapped, a side effect of this
     * operation is that Domain 0's rangesets are cleared.  Since
     * domain 0 should not be accessing the hardware when it constructs
     * a hardware domain, this should not be a problem.  Both lists
     * may be modified after this hypercall returns if a more complex
     * device model is desired.
     */
    rangeset_swap(d->irq_caps, dom0->irq_caps);
    rangeset_swap(d->iomem_caps, dom0->iomem_caps);
#ifdef CONFIG_X86
    rangeset_swap(d->arch.ioport_caps, dom0->arch.ioport_caps);
#endif

    rcu_unlock_domain(dom0);

    iommu_hwdom_init(d);

    return rv;
#else
    return 0;
#endif
}
int rcu_lock_target_domain_by_id(domid_t dom, struct domain **d)
{
    if ( dom == DOMID_SELF )
    {
        *d = rcu_lock_current_domain();
        return 0;
    }

    if ( (*d = rcu_lock_domain_by_id(dom)) == NULL )
        return -ESRCH;

    if ( !IS_PRIV_FOR(current->domain, *d) )
    {
        rcu_unlock_domain(*d);
        return -EPERM;
    }

    return 0;
}
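/*
 * A minimal caller sketch, not taken from the Xen tree: it illustrates the
 * contract of rcu_lock_target_domain_by_id() as exercised by handlers such
 * as evtchn_reset() below.  On success the caller holds an RCU reference on
 * *d and must release it with rcu_unlock_domain(); on failure (-ESRCH or
 * -EPERM) nothing is held.  The handler name and the elided body are
 * hypothetical placeholders.
 */
static long example_target_domain_op(domid_t dom)
{
    struct domain *d;
    long rc;

    rc = rcu_lock_target_domain_by_id(dom, &d);
    if ( rc )
        return rc;          /* -ESRCH or -EPERM: no reference to drop */

    /* ... operate on d while the RCU reference is held ... */

    rcu_unlock_domain(d);
    return 0;
}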
struct domain *get_mem_event_op_target(uint32_t domain, int *rc)
{
    struct domain *d;

    /* Get the target domain */
    *rc = rcu_lock_remote_target_domain_by_id(domain, &d);
    if ( *rc != 0 )
        return NULL;

    /* Not dying? */
    if ( d->is_dying )
    {
        rcu_unlock_domain(d);
        *rc = -EINVAL;
        return NULL;
    }

    return d;
}
static long evtchn_reset(evtchn_reset_t *r)
{
    domid_t dom = r->dom;
    struct domain *d;
    int i, rc;

    rc = rcu_lock_target_domain_by_id(dom, &d);
    if ( rc )
        return rc;

    rc = xsm_evtchn_reset(current->domain, d);
    if ( rc )
        goto out;

    for ( i = 0; port_is_valid(d, i); i++ )
        (void)__evtchn_close(d, i);

    rc = 0;

 out:
    rcu_unlock_domain(d);
    return rc;
}
/*
 * Returns 0 if TLB flush / invalidate required by caller.
 * va will indicate the address to be invalidated.
 *
 * addr is _either_ a host virtual address, or the address of the pte to
 * update, as indicated by the GNTMAP_contains_pte flag.
 */
static void
__gnttab_map_grant_ref(
    struct gnttab_map_grant_ref *op)
{
    struct domain *ld, *rd;
    struct vcpu *led;
    int handle;
    unsigned long frame = 0;
    int rc = GNTST_okay;
    unsigned int cache_flags;
    struct active_grant_entry *act;
    struct grant_mapping *mt;
    grant_entry_t *sha;
    union grant_combo scombo, prev_scombo, new_scombo;

    /*
     * We bound the number of times we retry CMPXCHG on memory locations that
     * we share with a guest OS. The reason is that the guest can modify that
     * location at a higher rate than we can read-modify-CMPXCHG, so the guest
     * could cause us to livelock. There are a few cases where it is valid for
     * the guest to race our updates (e.g., to change the GTF_readonly flag),
     * so we allow a few retries before failing.
     */
    int retries = 0;

    led = current;
    ld = led->domain;

    if ( unlikely((op->flags & (GNTMAP_device_map|GNTMAP_host_map)) == 0) )
    {
        gdprintk(XENLOG_INFO, "Bad flags in grant map op (%x).\n", op->flags);
        op->status = GNTST_bad_gntref;
        return;
    }

    if ( unlikely((rd = rcu_lock_domain_by_id(op->dom)) == NULL) )
    {
        gdprintk(XENLOG_INFO, "Could not find domain %d\n", op->dom);
        op->status = GNTST_bad_domain;
        return;
    }

    rc = xsm_grant_mapref(ld, rd, op->flags);
    if ( rc )
    {
        rcu_unlock_domain(rd);
        op->status = GNTST_permission_denied;
        return;
    }

    if ( unlikely((handle = get_maptrack_handle(ld->grant_table)) == -1) )
    {
        rcu_unlock_domain(rd);
        gdprintk(XENLOG_INFO, "Failed to obtain maptrack handle.\n");
        op->status = GNTST_no_device_space;
        return;
    }

    spin_lock(&rd->grant_table->lock);

    /* Bounds check on the grant ref */
    if ( unlikely(op->ref >= nr_grant_entries(rd->grant_table)))
        PIN_FAIL(unlock_out, GNTST_bad_gntref, "Bad ref (%d).\n", op->ref);

    act = &active_entry(rd->grant_table, op->ref);
    sha = &shared_entry(rd->grant_table, op->ref);

    /* If already pinned, check the active domid and avoid refcnt overflow. */
    if ( act->pin &&
         ((act->domid != ld->domain_id) ||
          (act->pin & 0x80808080U) != 0) )
        PIN_FAIL(unlock_out, GNTST_general_error,
                 "Bad domain (%d != %d), or risk of counter overflow %08x\n",
                 act->domid, ld->domain_id, act->pin);

    if ( !act->pin ||
         (!(op->flags & GNTMAP_readonly) &&
          !(act->pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask))) )
    {
        scombo.word = *(u32 *)&sha->flags;

        /*
         * This loop attempts to set the access (reading/writing) flags
         * in the grant table entry.  It tries a cmpxchg on the field
         * up to five times, and then fails under the assumption that
         * the guest is misbehaving.
         */
        for ( ; ; )
        {
            /* If not already pinned, check the grant domid and type. */
            if ( !act->pin &&
                 (((scombo.shorts.flags & GTF_type_mask) !=
                   GTF_permit_access) ||
                  (scombo.shorts.domid != ld->domain_id)) )
                PIN_FAIL(unlock_out, GNTST_general_error,
                         "Bad flags (%x) or dom (%d). (expected dom %d)\n",
                         scombo.shorts.flags, scombo.shorts.domid,
                         ld->domain_id);

            new_scombo = scombo;
            new_scombo.shorts.flags |= GTF_reading;

            if ( !(op->flags & GNTMAP_readonly) )
            {
                new_scombo.shorts.flags |= GTF_writing;
                if ( unlikely(scombo.shorts.flags & GTF_readonly) )
                    PIN_FAIL(unlock_out, GNTST_general_error,
                             "Attempt to write-pin a r/o grant entry.\n");
            }

            prev_scombo.word = cmpxchg((u32 *)&sha->flags,
                                       scombo.word, new_scombo.word);
            if ( likely(prev_scombo.word == scombo.word) )
                break;

            if ( retries++ == 4 )
                PIN_FAIL(unlock_out, GNTST_general_error,
                         "Shared grant entry is unstable.\n");

            scombo = prev_scombo;
        }

        if ( !act->pin )
        {
            act->domid = scombo.shorts.domid;
            act->frame = gmfn_to_mfn(rd, sha->frame);
        }
    }

    if ( op->flags & GNTMAP_device_map )
        act->pin += (op->flags & GNTMAP_readonly) ?
            GNTPIN_devr_inc : GNTPIN_devw_inc;
    if ( op->flags & GNTMAP_host_map )
        act->pin += (op->flags & GNTMAP_readonly) ?
            GNTPIN_hstr_inc : GNTPIN_hstw_inc;

    frame = act->frame;

    cache_flags = (sha->flags & (GTF_PAT | GTF_PWT | GTF_PCD) );

    spin_unlock(&rd->grant_table->lock);

    if ( is_iomem_page(frame) )
    {
        if ( !iomem_access_permitted(rd, frame, frame) )
        {
            gdprintk(XENLOG_WARNING,
                     "Iomem mapping not permitted %lx (domain %d)\n",
                     frame, rd->domain_id);
            rc = GNTST_general_error;
            goto undo_out;
        }

        rc = create_grant_host_mapping(
            op->host_addr, frame, op->flags, cache_flags);
        if ( rc != GNTST_okay )
            goto undo_out;
    }
    else
    {
        if ( unlikely(!mfn_valid(frame)) ||
             unlikely(!(gnttab_host_mapping_get_page_type(op, ld, rd) ?
                        get_page_and_type(mfn_to_page(frame), rd,
                                          PGT_writable_page) :
                        get_page(mfn_to_page(frame), rd))) )
        {
            if ( !rd->is_dying )
                gdprintk(XENLOG_WARNING, "Could not pin grant frame %lx\n",
                         frame);
            rc = GNTST_general_error;
            goto undo_out;
        }

        if ( op->flags & GNTMAP_host_map )
        {
            rc = create_grant_host_mapping(op->host_addr, frame, op->flags, 0);
            if ( rc != GNTST_okay )
            {
                if ( gnttab_host_mapping_get_page_type(op, ld, rd) )
                    put_page_type(mfn_to_page(frame));
                put_page(mfn_to_page(frame));
                goto undo_out;
            }

            if ( op->flags & GNTMAP_device_map )
            {
                (void)get_page(mfn_to_page(frame), rd);
                if ( !(op->flags & GNTMAP_readonly) )
                    get_page_type(mfn_to_page(frame), PGT_writable_page);
            }
        }
    }

    TRACE_1D(TRC_MEM_PAGE_GRANT_MAP, op->dom);

    mt = &maptrack_entry(ld->grant_table, handle);
    mt->domid = op->dom;
    mt->ref   = op->ref;
    mt->flags = op->flags;

    op->dev_bus_addr = (u64)frame << PAGE_SHIFT;
    op->handle       = handle;
    op->status       = GNTST_okay;

    rcu_unlock_domain(rd);
    return;

 undo_out:
    spin_lock(&rd->grant_table->lock);

    act = &active_entry(rd->grant_table, op->ref);
    sha = &shared_entry(rd->grant_table, op->ref);

    if ( op->flags & GNTMAP_device_map )
        act->pin -= (op->flags & GNTMAP_readonly) ?
            GNTPIN_devr_inc : GNTPIN_devw_inc;
    if ( op->flags & GNTMAP_host_map )
        act->pin -= (op->flags & GNTMAP_readonly) ?
            GNTPIN_hstr_inc : GNTPIN_hstw_inc;

    if ( !(op->flags & GNTMAP_readonly) &&
         !(act->pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) )
        gnttab_clear_flag(_GTF_writing, &sha->flags);

    if ( !act->pin )
        gnttab_clear_flag(_GTF_reading, &sha->flags);

 unlock_out:
    spin_unlock(&rd->grant_table->lock);
    op->status = rc;
    put_maptrack_handle(ld->grant_table, handle);
    rcu_unlock_domain(rd);
}
static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
{
    struct evtchn *lchn, *rchn;
    struct domain *ld = current->domain, *rd;
    int lport, rport = bind->remote_port;
    domid_t rdom = bind->remote_dom;
    long rc;

    if ( rdom == DOMID_SELF )
        rdom = current->domain->domain_id;

    if ( (rd = rcu_lock_domain_by_id(rdom)) == NULL )
        return -ESRCH;

    /* Avoid deadlock by first acquiring lock of domain with smaller id. */
    if ( ld < rd )
    {
        spin_lock(&ld->evtchn_lock);
        spin_lock(&rd->evtchn_lock);
    }
    else
    {
        if ( ld != rd )
            spin_lock(&rd->evtchn_lock);
        spin_lock(&ld->evtchn_lock);
    }

    if ( (lport = get_free_port(ld)) < 0 )
        ERROR_EXIT(lport);
    lchn = evtchn_from_port(ld, lport);

    if ( !port_is_valid(rd, rport) )
        ERROR_EXIT_DOM(-EINVAL, rd);
    rchn = evtchn_from_port(rd, rport);
    if ( (rchn->state != ECS_UNBOUND) ||
         (rchn->u.unbound.remote_domid != ld->domain_id) )
        ERROR_EXIT_DOM(-EINVAL, rd);

    rc = xsm_evtchn_interdomain(ld, lchn, rd, rchn);
    if ( rc )
        goto out;

    lchn->u.interdomain.remote_dom  = rd;
    lchn->u.interdomain.remote_port = (u16)rport;
    lchn->state                     = ECS_INTERDOMAIN;

    rchn->u.interdomain.remote_dom  = ld;
    rchn->u.interdomain.remote_port = (u16)lport;
    rchn->state                     = ECS_INTERDOMAIN;

    /*
     * We may have lost notifications on the remote unbound port. Fix that up
     * here by conservatively always setting a notification on the local port.
     */
    evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);

    bind->local_port = lport;

 out:
    spin_unlock(&ld->evtchn_lock);
    if ( ld != rd )
        spin_unlock(&rd->evtchn_lock);

    rcu_unlock_domain(rd);

    return rc;
}
static int physdev_map_pirq(struct physdev_map_pirq *map)
{
    struct domain *d;
    int vector, pirq, ret = 0;
    struct msi_info _msi;
    void *map_data = NULL;

    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( !map )
        return -EINVAL;

    if ( map->domid == DOMID_SELF )
        d = rcu_lock_domain(current->domain);
    else
        d = rcu_lock_domain_by_id(map->domid);

    if ( d == NULL )
    {
        ret = -ESRCH;
        goto free_domain;
    }

    /* Verify or get vector. */
    switch ( map->type )
    {
        case MAP_PIRQ_TYPE_GSI:
            if ( map->index < 0 || map->index >= NR_IRQS )
            {
                dprintk(XENLOG_G_ERR, "dom%d: map invalid irq %d\n",
                        d->domain_id, map->index);
                ret = -EINVAL;
                goto free_domain;
            }
            vector = domain_irq_to_vector(current->domain, map->index);
            if ( !vector )
            {
                dprintk(XENLOG_G_ERR, "dom%d: map irq with no vector %d\n",
                        d->domain_id, vector);
                ret = -EINVAL;
                goto free_domain;
            }
            break;

        case MAP_PIRQ_TYPE_MSI:
            vector = map->index;
            if ( vector == -1 )
                vector = assign_irq_vector(AUTO_ASSIGN_IRQ);

            if ( vector < 0 || vector >= NR_VECTORS )
            {
                dprintk(XENLOG_G_ERR, "dom%d: map irq with wrong vector %d\n",
                        d->domain_id, vector);
                ret = -EINVAL;
                goto free_domain;
            }

            _msi.bus = map->bus;
            _msi.devfn = map->devfn;
            _msi.entry_nr = map->entry_nr;
            _msi.table_base = map->table_base;
            _msi.vector = vector;
            map_data = &_msi;
            break;

        default:
            dprintk(XENLOG_G_ERR, "dom%d: wrong map_pirq type %x\n",
                    d->domain_id, map->type);
            ret = -EINVAL;
            goto free_domain;
    }

    spin_lock(&pcidevs_lock);
    /* Verify or get pirq. */
    spin_lock(&d->event_lock);
    pirq = domain_vector_to_irq(d, vector);
    if ( map->pirq < 0 )
    {
        if ( pirq )
        {
            dprintk(XENLOG_G_ERR, "dom%d: %d:%d already mapped to %d\n",
                    d->domain_id, map->index, map->pirq, pirq);
            if ( pirq < 0 )
            {
                ret = -EBUSY;
                goto done;
            }
        }
        else
        {
            pirq = get_free_pirq(d, map->type, map->index);
            if ( pirq < 0 )
            {
                dprintk(XENLOG_G_ERR, "dom%d: no free pirq\n", d->domain_id);
                ret = pirq;
                goto done;
            }
        }
    }
    else
    {
        if ( pirq && pirq != map->pirq )
        {
            dprintk(XENLOG_G_ERR, "dom%d: vector %d conflicts with irq %d\n",
                    d->domain_id, map->index, map->pirq);
            ret = -EEXIST;
            goto done;
        }
        else
            pirq = map->pirq;
    }

    ret = map_domain_pirq(d, pirq, vector, map->type, map_data);
    if ( ret == 0 )
        map->pirq = pirq;

 done:
    spin_unlock(&d->event_lock);
    spin_unlock(&pcidevs_lock);
    if ( (ret != 0) && (map->type == MAP_PIRQ_TYPE_MSI) && (map->index == -1) )
        free_irq_vector(vector);

 free_domain:
    rcu_unlock_domain(d);
    return ret;
}
static int physdev_map_pirq(struct physdev_map_pirq *map)
{
    struct domain *d;
    int pirq, irq, ret = 0;
    struct msi_info _msi;
    void *map_data = NULL;

    ret = rcu_lock_target_domain_by_id(map->domid, &d);
    if ( ret )
        return ret;

    if ( map->domid == DOMID_SELF && is_hvm_domain(d) )
    {
        ret = physdev_hvm_map_pirq(d, map);
        goto free_domain;
    }

    if ( !IS_PRIV_FOR(current->domain, d) )
    {
        ret = -EPERM;
        goto free_domain;
    }

    /* Verify or get irq. */
    switch ( map->type )
    {
        case MAP_PIRQ_TYPE_GSI:
            if ( map->index < 0 || map->index >= nr_irqs_gsi )
            {
                dprintk(XENLOG_G_ERR, "dom%d: map invalid irq %d\n",
                        d->domain_id, map->index);
                ret = -EINVAL;
                goto free_domain;
            }

            irq = domain_pirq_to_irq(current->domain, map->index);
            if ( irq <= 0 )
            {
                if ( IS_PRIV(current->domain) )
                    irq = map->index;
                else
                {
                    dprintk(XENLOG_G_ERR, "dom%d: map pirq with incorrect irq!\n",
                            d->domain_id);
                    ret = -EINVAL;
                    goto free_domain;
                }
            }
            break;

        case MAP_PIRQ_TYPE_MSI:
            irq = map->index;
            if ( irq == -1 )
                irq = create_irq();

            if ( irq < 0 || irq >= nr_irqs )
            {
                dprintk(XENLOG_G_ERR, "dom%d: can't create irq for msi!\n",
                        d->domain_id);
                ret = -EINVAL;
                goto free_domain;
            }

            _msi.bus = map->bus;
            _msi.devfn = map->devfn;
            _msi.entry_nr = map->entry_nr;
            _msi.table_base = map->table_base;
            _msi.irq = irq;
            map_data = &_msi;
            break;

        default:
            dprintk(XENLOG_G_ERR, "dom%d: wrong map_pirq type %x\n",
                    d->domain_id, map->type);
            ret = -EINVAL;
            goto free_domain;
    }

    spin_lock(&pcidevs_lock);
    /* Verify or get pirq. */
    spin_lock(&d->event_lock);
    pirq = domain_irq_to_pirq(d, irq);
    if ( map->pirq < 0 )
    {
        if ( pirq )
        {
            dprintk(XENLOG_G_ERR, "dom%d: %d:%d already mapped to %d\n",
                    d->domain_id, map->index, map->pirq, pirq);
            if ( pirq < 0 )
            {
                ret = -EBUSY;
                goto done;
            }
        }
        else
        {
            pirq = get_free_pirq(d, map->type, map->index);
            if ( pirq < 0 )
            {
                dprintk(XENLOG_G_ERR, "dom%d: no free pirq\n", d->domain_id);
                ret = pirq;
                goto done;
            }
        }
    }
    else
    {
        if ( pirq && pirq != map->pirq )
        {
            dprintk(XENLOG_G_ERR, "dom%d: pirq %d conflicts with irq %d\n",
                    d->domain_id, map->index, map->pirq);
            ret = -EEXIST;
            goto done;
        }
        else
            pirq = map->pirq;
    }

    ret = map_domain_pirq(d, pirq, irq, map->type, map_data);
    if ( ret == 0 )
        map->pirq = pirq;

 done:
    spin_unlock(&d->event_lock);
    spin_unlock(&pcidevs_lock);
    if ( (ret != 0) && (map->type == MAP_PIRQ_TYPE_MSI) && (map->index == -1) )
        destroy_irq(irq);

 free_domain:
    rcu_unlock_domain(d);
    return ret;
}
static int xenmem_add_to_physmap_one(
    struct domain *d,
    uint16_t space,
    domid_t foreign_domid,
    unsigned long idx,
    xen_pfn_t gpfn)
{
    unsigned long mfn = 0;
    int rc;

    switch ( space )
    {
    case XENMAPSPACE_grant_table:
        spin_lock(&d->grant_table->lock);

        if ( d->grant_table->gt_version == 0 )
            d->grant_table->gt_version = 1;

        if ( d->grant_table->gt_version == 2 &&
                (idx & XENMAPIDX_grant_table_status) )
        {
            idx &= ~XENMAPIDX_grant_table_status;
            if ( idx < nr_status_frames(d->grant_table) )
                mfn = virt_to_mfn(d->grant_table->status[idx]);
        }
        else
        {
            if ( (idx >= nr_grant_frames(d->grant_table)) &&
                    (idx < max_nr_grant_frames) )
                gnttab_grow_table(d, idx + 1);

            if ( idx < nr_grant_frames(d->grant_table) )
                mfn = virt_to_mfn(d->grant_table->shared_raw[idx]);
        }

        d->arch.grant_table_gpfn[idx] = gpfn;

        spin_unlock(&d->grant_table->lock);
        break;
    case XENMAPSPACE_shared_info:
        if ( idx == 0 )
            mfn = virt_to_mfn(d->shared_info);
        break;
    case XENMAPSPACE_gmfn_foreign:
    {
        paddr_t maddr;
        struct domain *od;

        rc = rcu_lock_target_domain_by_id(foreign_domid, &od);
        if ( rc < 0 )
            return rc;

        maddr = p2m_lookup(od, idx << PAGE_SHIFT);
        if ( maddr == INVALID_PADDR )
        {
            dump_p2m_lookup(od, idx << PAGE_SHIFT);
            rcu_unlock_domain(od);
            return -EINVAL;
        }

        mfn = maddr >> PAGE_SHIFT;

        rcu_unlock_domain(od);
        break;
    }
    default:
        return -ENOSYS;
    }

    domain_lock(d);

    /* Map at new location. */
    rc = guest_physmap_add_page(d, gpfn, mfn, 0);

    domain_unlock(d);

    return rc;
}
static long evtchn_status(evtchn_status_t *status)
{
    struct domain *d;
    domid_t dom = status->dom;
    int port = status->port;
    struct evtchn *chn;
    long rc = 0;

    rc = rcu_lock_target_domain_by_id(dom, &d);
    if ( rc )
        return rc;

    spin_lock(&d->event_lock);

    if ( !port_is_valid(d, port) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn = evtchn_from_port(d, port);

    rc = xsm_evtchn_status(d, chn);
    if ( rc )
        goto out;

    switch ( chn->state )
    {
    case ECS_FREE:
    case ECS_RESERVED:
        status->status = EVTCHNSTAT_closed;
        break;
    case ECS_UNBOUND:
        status->status = EVTCHNSTAT_unbound;
        status->u.unbound.dom = chn->u.unbound.remote_domid;
        break;
    case ECS_INTERDOMAIN:
        status->status = EVTCHNSTAT_interdomain;
        status->u.interdomain.dom  = chn->u.interdomain.remote_dom->domain_id;
        status->u.interdomain.port = chn->u.interdomain.remote_port;
        break;
    case ECS_PIRQ:
        status->status = EVTCHNSTAT_pirq;
        status->u.pirq = chn->u.pirq.irq;
        break;
    case ECS_VIRQ:
        status->status = EVTCHNSTAT_virq;
        status->u.virq = chn->u.virq;
        break;
    case ECS_IPI:
        status->status = EVTCHNSTAT_ipi;
        break;
    default:
        BUG();
    }

    status->vcpu = chn->notify_vcpu_id;

 out:
    spin_unlock(&d->event_lock);
    rcu_unlock_domain(d);

    return rc;
}
int physdev_map_pirq(domid_t domid, int type, int *index, int *pirq_p,
                     struct msi_info *msi)
{
    struct domain *d = current->domain;
    int pirq, irq, ret = 0;
    void *map_data = NULL;

    if ( domid == DOMID_SELF && is_hvm_domain(d) )
    {
        /*
         * Only makes sense for vector-based callback, else HVM-IRQ logic
         * calls back into itself and deadlocks on hvm_domain.irq_lock.
         */
        if ( !is_hvm_pv_evtchn_domain(d) )
            return -EINVAL;

        return physdev_hvm_map_pirq(d, type, index, pirq_p);
    }

    d = rcu_lock_domain_by_any_id(domid);
    if ( d == NULL )
        return -ESRCH;

    ret = xsm_map_domain_pirq(XSM_TARGET, d);
    if ( ret )
        goto free_domain;

    /* Verify or get irq. */
    switch ( type )
    {
    case MAP_PIRQ_TYPE_GSI:
        if ( *index < 0 || *index >= nr_irqs_gsi )
        {
            dprintk(XENLOG_G_ERR, "dom%d: map invalid irq %d\n",
                    d->domain_id, *index);
            ret = -EINVAL;
            goto free_domain;
        }

        irq = domain_pirq_to_irq(current->domain, *index);
        if ( irq <= 0 )
        {
            if ( is_hardware_domain(current->domain) )
                irq = *index;
            else
            {
                dprintk(XENLOG_G_ERR, "dom%d: map pirq with incorrect irq!\n",
                        d->domain_id);
                ret = -EINVAL;
                goto free_domain;
            }
        }
        break;

    case MAP_PIRQ_TYPE_MSI:
        irq = *index;
        if ( irq == -1 )
            irq = create_irq(NUMA_NO_NODE);

        if ( irq < nr_irqs_gsi || irq >= nr_irqs )
        {
            dprintk(XENLOG_G_ERR, "dom%d: can't create irq for msi!\n",
                    d->domain_id);
            ret = -EINVAL;
            goto free_domain;
        }

        msi->irq = irq;
        map_data = msi;
        break;

    default:
        dprintk(XENLOG_G_ERR, "dom%d: wrong map_pirq type %x\n",
                d->domain_id, type);
        ret = -EINVAL;
        goto free_domain;
    }

    spin_lock(&pcidevs_lock);
    /* Verify or get pirq. */
    spin_lock(&d->event_lock);
    pirq = domain_irq_to_pirq(d, irq);
    if ( *pirq_p < 0 )
    {
        if ( pirq )
        {
            dprintk(XENLOG_G_ERR, "dom%d: %d:%d already mapped to %d\n",
                    d->domain_id, *index, *pirq_p, pirq);
            if ( pirq < 0 )
            {
                ret = -EBUSY;
                goto done;
            }
        }
        else
        {
            pirq = get_free_pirq(d, type);
            if ( pirq < 0 )
            {
                dprintk(XENLOG_G_ERR, "dom%d: no free pirq\n", d->domain_id);
                ret = pirq;
                goto done;
            }
        }
    }
    else
    {
        if ( pirq && pirq != *pirq_p )
        {
            dprintk(XENLOG_G_ERR, "dom%d: pirq %d conflicts with irq %d\n",
                    d->domain_id, *index, *pirq_p);
            ret = -EEXIST;
            goto done;
        }
        else
            pirq = *pirq_p;
    }

    ret = map_domain_pirq(d, pirq, irq, type, map_data);
    if ( ret == 0 )
        *pirq_p = pirq;

 done:
    spin_unlock(&d->event_lock);
    spin_unlock(&pcidevs_lock);
    if ( (ret != 0) && (type == MAP_PIRQ_TYPE_MSI) && (*index == -1) )
        destroy_irq(irq);

 free_domain:
    rcu_unlock_domain(d);
    return ret;
}