int set_global_virq_handler(struct domain *d, uint32_t virq)
{
    struct domain *old;

    if (virq >= NR_VIRQS)
        return -EINVAL;
    if (!virq_is_global(virq))
        return -EINVAL;

    if (global_virq_handlers[virq] == d)
        return 0;

    if (unlikely(!get_domain(d)))
        return -EINVAL;

    spin_lock(&global_virq_handlers_lock);
    old = global_virq_handlers[virq];
    global_virq_handlers[virq] = d;
    spin_unlock(&global_virq_handlers_lock);

    if (old != NULL)
        put_domain(old);

    return 0;
}
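/*
 * Companion sketch (not part of the code above): how the dispatch side
 * might consume global_virq_handlers[]. The global_virq_handler() helper
 * and the dom0 fallback are assumptions for illustration; only
 * send_guest_global_virq() and the array itself come from the
 * surrounding code. The lockless read is tolerable in a sketch because
 * the registered handler holds a domain reference.
 */
static struct domain *global_virq_handler(uint32_t virq)
{
    /* Fall back to dom0 when no domain has claimed this global VIRQ. */
    return global_virq_handlers[virq] ?: dom0;
}

void send_global_virq(uint32_t virq)
{
    ASSERT(virq < NR_VIRQS && virq_is_global(virq));
    send_guest_global_virq(global_virq_handler(virq), virq);
}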
static void domain_shutdown_finalise(void)
{
    struct domain *d;
    struct vcpu *v;

    d = domain_shuttingdown[smp_processor_id()];
    domain_shuttingdown[smp_processor_id()] = NULL;

    BUG_ON(d == NULL);
    BUG_ON(d == current->domain);

    LOCK_BIGLOCK(d);

    /* Make sure that every vcpu is descheduled before we finalise. */
    for_each_vcpu ( d, v )
        vcpu_sleep_sync(v);
    BUG_ON(!cpus_empty(d->domain_dirty_cpumask));

    sync_pagetable_state(d);

    /* Don't set DOMF_shutdown until execution contexts are sync'ed. */
    if ( !test_and_set_bit(_DOMF_shutdown, &d->domain_flags) )
        send_guest_virq(dom0->vcpu[0], VIRQ_DOM_EXC);

    UNLOCK_BIGLOCK(d);

    put_domain(d);
}
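/*
 * Sketch of the producer side this finaliser pairs with, reconstructed
 * from the function above rather than quoted: the shutdown path takes
 * the domain reference that domain_shutdown_finalise() drops via
 * put_domain(), parks the domain in the per-CPU slot, and defers the
 * synchronous descheduling to softirq context. The names
 * get_knownalive_domain(), vcpu_sleep_nosync(), shutdown_code and
 * DOMAIN_SHUTDOWN_FINALISE_SOFTIRQ are assumptions here.
 */
void domain_shutdown(u8 reason)
{
    struct domain *d = current->domain;
    struct vcpu *v;

    d->shutdown_code = reason;

    /* Put every vcpu to sleep, but don't wait (inter-vcpu deadlock risk). */
    for_each_vcpu ( d, v )
        vcpu_sleep_nosync(v);

    get_knownalive_domain(d);  /* reference dropped by the finaliser */
    domain_shuttingdown[smp_processor_id()] = d;
    raise_softirq(DOMAIN_SHUTDOWN_FINALISE_SOFTIRQ);
}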
static inline int is_free_domid(domid_t dom)
{
    struct domain *d;

    if ( dom >= DOMID_FIRST_RESERVED )
        return 0;

    if ( (d = find_domain_by_id(dom)) == NULL )
        return 1;

    put_domain(d);
    return 0;
}
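/*
 * Usage sketch, assuming this mirrors the domain-creation path: scan for
 * an unused domid starting just past the last one handed out, wrapping
 * before DOMID_FIRST_RESERVED. The alloc_domid() wrapper is a
 * hypothetical helper for illustration.
 */
static domid_t alloc_domid(void)
{
    static domid_t rover;
    domid_t dom;

    for ( dom = rover + 1; dom != rover; dom++ )
    {
        if ( dom == DOMID_FIRST_RESERVED )
            dom = 0;              /* wrap around */
        if ( is_free_domid(dom) )
            break;
    }

    if ( dom == rover )
        return DOMID_INVALID;     /* every id is in use */

    rover = dom;
    return dom;
}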
/* Complete domain destroy after RCU readers are not holding old references. */
static void complete_domain_destroy(struct rcu_head *head)
{
    struct domain *d = container_of(head, struct domain, rcu);
    struct vcpu *v;
    int i;

    for ( i = d->max_vcpus - 1; i >= 0; i-- )
    {
        if ( (v = d->vcpu[i]) == NULL )
            continue;
        tasklet_kill(&v->continue_hypercall_tasklet);
        vcpu_destroy(v);
        sched_destroy_vcpu(v);
        destroy_waitqueue_vcpu(v);
    }

    grant_table_destroy(d);

    arch_domain_destroy(d);

    watchdog_domain_destroy(d);

    rangeset_domain_destroy(d);

    cpupool_rm_domain(d);

    sched_destroy_domain(d);

    /* Free page used by xen oprofile buffer. */
    free_xenoprof_pages(d);

    xfree(d->mem_event);

    for ( i = d->max_vcpus - 1; i >= 0; i-- )
        if ( (v = d->vcpu[i]) != NULL )
            free_vcpu_struct(v);

    if ( d->target != NULL )
        put_domain(d->target);

    evtchn_destroy_final(d);

    xfree(d->pirq_mask);
    xfree(d->pirq_to_evtchn);

    xsm_free_security_domain(d);
    free_domain_struct(d);

    send_guest_global_virq(dom0, VIRQ_DOM_EXC);
}
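/*
 * Sketch of how this callback gets scheduled, assuming the final
 * reference drop funnels into a domain_destroy() of roughly this shape.
 * Deferring the free with call_rcu() guarantees that no RCU reader of
 * the domain lists can still hold a pointer to the struct domain by the
 * time complete_domain_destroy() runs.
 */
void domain_destroy(struct domain *d)
{
    BUG_ON(!d->is_dying);

    /* Unlinking from the domain list and hash goes here (omitted). */

    call_rcu(&d->rcu, complete_domain_destroy);
}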
void domain_kill(struct domain *d)
{
    struct vcpu *v;

    domain_pause(d);
    if ( !test_and_set_bit(_DOMF_dying, &d->domain_flags) )
    {
        for_each_vcpu(d, v)
            sched_rem_domain(v);
        gnttab_release_mappings(d);
        domain_relinquish_resources(d);
        put_domain(d);

        send_guest_virq(dom0->vcpu[0], VIRQ_DOM_EXC);
    }
}
static void clear_global_virq_handlers(struct domain *d)
{
    uint32_t virq;
    int put_count = 0;

    spin_lock(&global_virq_handlers_lock);

    for (virq = 0; virq < NR_VIRQS; virq++)
    {
        if (global_virq_handlers[virq] == d)
        {
            global_virq_handlers[virq] = NULL;
            put_count++;
        }
    }

    spin_unlock(&global_virq_handlers_lock);

    while (put_count)
    {
        put_domain(d);
        put_count--;
    }
}
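/*
 * Call-site sketch (assumption): run during domain teardown so a dying
 * handler domain releases every global VIRQ it claimed, along with the
 * references set_global_virq_handler() took on it. evtchn_destroy()
 * itself appears in the domain_kill() path below; its body here is
 * elided.
 */
void evtchn_destroy(struct domain *d)
{
    /* ... close all of d's event channels (omitted) ... */
    clear_global_virq_handlers(d);
}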
int domain_kill(struct domain *d)
{
    int rc = 0;

    if ( d == current->domain )
        return -EINVAL;

    /* Protected by domctl_lock. */
    switch ( d->is_dying )
    {
    case DOMDYING_alive:
        domain_pause(d);
        d->is_dying = DOMDYING_dying;
        spin_barrier(&d->domain_lock);
        evtchn_destroy(d);
        gnttab_release_mappings(d);
        tmem_destroy(d->tmem);
        d->tmem = NULL;
        /* fallthrough */
    case DOMDYING_dying:
        rc = domain_relinquish_resources(d);
        if ( rc != 0 )
        {
            BUG_ON(rc != -EAGAIN);
            break;
        }
        d->is_dying = DOMDYING_dead;
        put_domain(d);
        send_guest_global_virq(dom0, VIRQ_DOM_EXC);
        /* fallthrough */
    case DOMDYING_dead:
        break;
    }

    return rc;
}
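/*
 * Caller sketch, assuming the destroydomain domctl: -EAGAIN from
 * domain_relinquish_resources() is turned into a hypercall continuation,
 * so a large domain is torn down across several preemptible invocations.
 * The DOMDYING_* switch above is what makes the re-entry idempotent.
 */
case XEN_DOMCTL_destroydomain:
    ret = domain_kill(d);
    if ( ret == -EAGAIN )
        ret = hypercall_create_continuation(
            __HYPERVISOR_domctl, "h", u_domctl);
    break;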
static long __evtchn_close(struct domain *d1, int port1)
{
    struct domain *d2 = NULL;
    struct vcpu   *v;
    struct evtchn *chn1, *chn2;
    int            port2;
    long           rc = 0;

 again:
    spin_lock(&d1->evtchn_lock);

    if ( !port_is_valid(d1, port1) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn1 = evtchn_from_port(d1, port1);

    /* Guest cannot close a Xen-attached event channel. */
    if ( unlikely(chn1->consumer_is_xen) )
    {
        rc = -EINVAL;
        goto out;
    }

    switch ( chn1->state )
    {
    case ECS_FREE:
    case ECS_RESERVED:
        rc = -EINVAL;
        goto out;

    case ECS_UNBOUND:
        break;

    case ECS_PIRQ:
        pirq_guest_unbind(d1, chn1->u.pirq);
        d1->pirq_to_evtchn[chn1->u.pirq] = 0;
        break;

    case ECS_VIRQ:
        for_each_vcpu ( d1, v )
        {
            if ( v->virq_to_evtchn[chn1->u.virq] != port1 )
                continue;
            v->virq_to_evtchn[chn1->u.virq] = 0;
            spin_barrier(&v->virq_lock);
        }
        break;

    case ECS_IPI:
        break;

    case ECS_INTERDOMAIN:
        if ( d2 == NULL )
        {
            d2 = chn1->u.interdomain.remote_dom;

            /* If we unlock d1 then we could lose d2. Must get a reference. */
            if ( unlikely(!get_domain(d2)) )
                BUG();

            if ( d1 < d2 )
            {
                spin_lock(&d2->evtchn_lock);
            }
            else if ( d1 != d2 )
            {
                spin_unlock(&d1->evtchn_lock);
                spin_lock(&d2->evtchn_lock);
                goto again;
            }
        }
        else if ( d2 != chn1->u.interdomain.remote_dom )
        {
            /*
             * We can only get here if the port was closed and re-bound after
             * unlocking d1 but before locking d2 above. We could retry but
             * it is easier to return the same error as if we had seen the
             * port in ECS_CLOSED. It must have passed through that state for
             * us to end up here, so it's a valid error to return.
             */
            rc = -EINVAL;
            goto out;
        }

        port2 = chn1->u.interdomain.remote_port;
        BUG_ON(!port_is_valid(d2, port2));

        chn2 = evtchn_from_port(d2, port2);
        BUG_ON(chn2->state != ECS_INTERDOMAIN);
        BUG_ON(chn2->u.interdomain.remote_dom != d1);

        chn2->state = ECS_UNBOUND;
        chn2->u.unbound.remote_domid = d1->domain_id;
        break;

    default:
        BUG();
    }

    /* Clear pending event to avoid unexpected behavior on re-bind. */
    clear_bit(port1, shared_info_addr(d1, evtchn_pending));

    /* Reset binding to vcpu0 when the channel is freed. */
    chn1->state          = ECS_FREE;
    chn1->notify_vcpu_id = 0;

    xsm_evtchn_close_post(chn1);

 out:
    if ( d2 != NULL )
    {
        if ( d1 != d2 )
            spin_unlock(&d2->evtchn_lock);
        put_domain(d2);
    }

    spin_unlock(&d1->evtchn_lock);

    return rc;
}
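/*
 * Thin wrapper sketch (assumption): the guest-visible EVTCHNOP_close
 * path just closes a port belonging to the calling domain; all of the
 * cross-domain locking lives in __evtchn_close() above.
 */
static long evtchn_close(evtchn_close_t *close)
{
    return __evtchn_close(current->domain, close->port);
}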
void
mc_memerr_dhandler(struct mca_binfo *binfo,
                   enum mce_result *result,
                   struct cpu_user_regs *regs)
{
    struct mcinfo_bank *bank = binfo->mib;
    struct mcinfo_global *global = binfo->mig;
    struct domain *d;
    unsigned long mfn, gfn;
    uint32_t status;
    int vmce_vcpuid;

    if (!mc_check_addr(bank->mc_status, bank->mc_misc, MC_ADDR_PHYSICAL)) {
        dprintk(XENLOG_WARNING,
                "No physical address provided for memory error\n");
        return;
    }

    mfn = bank->mc_addr >> PAGE_SHIFT;
    if (offline_page(mfn, 1, &status))
    {
        dprintk(XENLOG_WARNING,
                "Failed to offline page %lx for MCE error\n", mfn);
        return;
    }
    mci_action_add_pageoffline(binfo->bank, binfo->mi, mfn, status);

    /* This is a free page. */
    if (status & PG_OFFLINE_OFFLINED)
        *result = MCER_RECOVERED;
    else if (status & PG_OFFLINE_AGAIN)
        *result = MCER_CONTINUE;
    else if (status & PG_OFFLINE_PENDING) {
        /* This page has an owner. */
        if (status & PG_OFFLINE_OWNED) {
            bank->mc_domid = status >> PG_OFFLINE_OWNER_SHIFT;
            mce_printk(MCE_QUIET, "MCE: This error page is owned"
                       " by DOM %d\n", bank->mc_domid);
            /*
             * XXX: Cannot handle shared pages yet
             * (this should identify all domains and gfn mapping to
             * the mfn in question)
             */
            BUG_ON( bank->mc_domid == DOMID_COW );
            if ( bank->mc_domid != DOMID_XEN ) {
                d = get_domain_by_id(bank->mc_domid);
                ASSERT(d);
                gfn = get_gpfn_from_mfn((bank->mc_addr) >> PAGE_SHIFT);

                if ( !is_vmce_ready(bank, d) ) {
                    printk("DOM%d not ready for vMCE\n", d->domain_id);
                    goto vmce_failed;
                }

                if ( unmmap_broken_page(d, _mfn(mfn), gfn) ) {
                    printk("Unmap broken memory %lx for DOM%d failed\n",
                           mfn, d->domain_id);
                    goto vmce_failed;
                }

                bank->mc_addr = gfn << PAGE_SHIFT |
                                (bank->mc_addr & (PAGE_SIZE - 1));
                if ( fill_vmsr_data(bank, d, global->mc_gstatus) == -1 ) {
                    mce_printk(MCE_QUIET, "Fill vMCE# data for DOM%d "
                               "failed\n", bank->mc_domid);
                    goto vmce_failed;
                }

                if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
                    vmce_vcpuid = VMCE_INJECT_BROADCAST;
                else
                    vmce_vcpuid = global->mc_vcpuid;

                /* We will inject the vMCE into the DomU. */
                if ( inject_vmce(d, vmce_vcpuid) < 0 ) {
                    mce_printk(MCE_QUIET, "inject vMCE to DOM%d"
                               " failed\n", d->domain_id);
                    goto vmce_failed;
                }

                /*
                 * The impacted domain goes on with its own recovery job
                 * if it has its own MCA handler. As far as Xen is
                 * concerned, the error is contained and Xen's recovery
                 * job is finished.
                 */
                *result = MCER_RECOVERED;
                put_domain(d);

                return;
 vmce_failed:
                put_domain(d);
                domain_crash(d);
            }
        }
    }
}