/* Complete domain destroy after RCU readers are not holding old references. */
static void complete_domain_destroy(struct rcu_head *head)
{
    struct domain *d = container_of(head, struct domain, rcu);
    struct vcpu *v;
    int i;

    /* Tear the vcpus down in reverse creation order. */
    for ( i = d->max_vcpus - 1; i >= 0; i-- )
    {
        if ( (v = d->vcpu[i]) == NULL )
            continue;
        tasklet_kill(&v->continue_hypercall_tasklet);
        vcpu_destroy(v);
        sched_destroy_vcpu(v);
        destroy_waitqueue_vcpu(v);
    }

    grant_table_destroy(d);
    arch_domain_destroy(d);
    watchdog_domain_destroy(d);
    rangeset_domain_destroy(d);
    cpupool_rm_domain(d);
    sched_destroy_domain(d);

    /* Free page used by xen oprofile buffer. */
    free_xenoprof_pages(d);

    xfree(d->mem_event);

    /* Second pass: the vcpu structures themselves can now be freed. */
    for ( i = d->max_vcpus - 1; i >= 0; i-- )
        if ( (v = d->vcpu[i]) != NULL )
            free_vcpu_struct(v);

    if ( d->target != NULL )
        put_domain(d->target);

    evtchn_destroy_final(d);

    xfree(d->pirq_mask);
    xfree(d->pirq_to_evtchn);

    xsm_free_security_domain(d);
    free_domain_struct(d);

    send_guest_global_virq(dom0, VIRQ_DOM_EXC);
}
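/*
 * Illustrative caller (not part of this file as given): complete_domain_destroy()
 * is not invoked directly. When the final reference is dropped, domain_destroy()
 * unlinks the domain from domain_list/domain_hash and then defers the actual
 * teardown via call_rcu(), so the callback above only runs once no RCU reader
 * can still hold a stale pointer to the domain. A minimal sketch, assuming the
 * shape of that era's domain_destroy(); the historical code used an explicit
 * compare-and-swap helper rather than atomic_cmpxchg():
 */
#if 0 /* sketch only */
void domain_destroy(struct domain *d)
{
    BUG_ON(!d->is_dying);

    /* Only the caller that moves refcnt from 0 to DOMAIN_DESTROYED proceeds. */
    if ( atomic_cmpxchg(&d->refcnt, 0, DOMAIN_DESTROYED) != 0 )
        return;

    /* ... unlink d from domain_list and domain_hash under the update lock ... */

    /* Schedule RCU asynchronous completion of domain destroy. */
    call_rcu(&d->rcu, complete_domain_destroy);
}
#endif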
struct domain *domain_create(
    domid_t domid, unsigned int domcr_flags, uint32_t ssidref)
{
    struct domain *d, **pd;
    enum { INIT_xsm = 1u<<0, INIT_watchdog = 1u<<1, INIT_rangeset = 1u<<2,
           INIT_evtchn = 1u<<3, INIT_gnttab = 1u<<4, INIT_arch = 1u<<5 };
    int err, init_status = 0;
    int poolid = CPUPOOLID_NONE;

    if ( (d = alloc_domain_struct()) == NULL )
        return ERR_PTR(-ENOMEM);

    d->domain_id = domid;

    lock_profile_register_struct(LOCKPROF_TYPE_PERDOM, d, domid, "Domain");

    if ( (err = xsm_alloc_security_domain(d)) != 0 )
        goto fail;
    init_status |= INIT_xsm;

    watchdog_domain_init(d);
    init_status |= INIT_watchdog;

    atomic_set(&d->refcnt, 1);
    spin_lock_init_prof(d, domain_lock);
    spin_lock_init_prof(d, page_alloc_lock);
    spin_lock_init(&d->hypercall_deadlock_mutex);
    INIT_PAGE_LIST_HEAD(&d->page_list);
    INIT_PAGE_LIST_HEAD(&d->xenpage_list);

    spin_lock_init(&d->node_affinity_lock);
    d->node_affinity = NODE_MASK_ALL;
    d->auto_node_affinity = 1;

    spin_lock_init(&d->shutdown_lock);
    d->shutdown_code = -1;

    err = -ENOMEM;
    if ( !zalloc_cpumask_var(&d->domain_dirty_cpumask) )
        goto fail;

    if ( domcr_flags & DOMCRF_hvm )
        d->is_hvm = 1;

    if ( domid == 0 )
    {
        d->is_pinned = opt_dom0_vcpus_pin;
        d->disable_migrate = 1;
    }

    rangeset_domain_initialise(d);
    init_status |= INIT_rangeset;

    d->iomem_caps = rangeset_new(d, "I/O Memory", RANGESETF_prettyprint_hex);
    d->irq_caps   = rangeset_new(d, "Interrupts", 0);
    if ( (d->iomem_caps == NULL) || (d->irq_caps == NULL) )
        goto fail;

    if ( domcr_flags & DOMCRF_dummy )
        return d;

    if ( !is_idle_domain(d) )
    {
        if ( (err = xsm_domain_create(XSM_HOOK, d, ssidref)) != 0 )
            goto fail;

        d->is_paused_by_controller = 1;
        atomic_inc(&d->pause_count);

        if ( domid )
            d->nr_pirqs = nr_static_irqs + extra_domU_irqs;
        else
            d->nr_pirqs = nr_static_irqs + extra_dom0_irqs;
        if ( d->nr_pirqs > nr_irqs )
            d->nr_pirqs = nr_irqs;

        radix_tree_init(&d->pirq_tree);

        if ( (err = evtchn_init(d)) != 0 )
            goto fail;
        init_status |= INIT_evtchn;

        if ( (err = grant_table_create(d)) != 0 )
            goto fail;
        init_status |= INIT_gnttab;

        poolid = 0;

        err = -ENOMEM;
        d->mem_event = xzalloc(struct mem_event_per_domain);
        if ( !d->mem_event )
            goto fail;
    }

    if ( (err = arch_domain_create(d, domcr_flags)) != 0 )
        goto fail;
    init_status |= INIT_arch;

    if ( (err = cpupool_add_domain(d, poolid)) != 0 )
        goto fail;

    if ( (err = sched_init_domain(d)) != 0 )
        goto fail;

    if ( !is_idle_domain(d) )
    {
        spin_lock(&domlist_update_lock);
        /* NB. domain_list maintained in order of domid. */
        for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_in_list )
            if ( (*pd)->domain_id > d->domain_id )
                break;
        d->next_in_list = *pd;
        d->next_in_hashbucket = domain_hash[DOMAIN_HASH(domid)];
        rcu_assign_pointer(*pd, d);
        rcu_assign_pointer(domain_hash[DOMAIN_HASH(domid)], d);
        spin_unlock(&domlist_update_lock);
    }

    return d;

 fail:
    /* Unwind in reverse order of the INIT_* steps recorded above. */
    d->is_dying = DOMDYING_dead;
    atomic_set(&d->refcnt, DOMAIN_DESTROYED);
    xfree(d->mem_event);
    if ( init_status & INIT_arch )
        arch_domain_destroy(d);
    if ( init_status & INIT_gnttab )
        grant_table_destroy(d);
    if ( init_status & INIT_evtchn )
    {
        evtchn_destroy(d);
        evtchn_destroy_final(d);
        radix_tree_destroy(&d->pirq_tree, free_pirq_struct);
    }
    if ( init_status & INIT_rangeset )
        rangeset_domain_destroy(d);
    if ( init_status & INIT_watchdog )
        watchdog_domain_destroy(d);
    if ( init_status & INIT_xsm )
        xsm_free_security_domain(d);
    free_cpumask_var(d->domain_dirty_cpumask);
    free_domain_struct(d);
    return ERR_PTR(err);
}
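/*
 * Usage sketch (hypothetical call site, not from this file): note that this
 * version reports failure with ERR_PTR() rather than NULL, so callers must
 * test the result with IS_ERR()/PTR_ERR(). The zero flags/ssidref shown for
 * dom0 bring-up are assumptions for illustration:
 */
#if 0 /* sketch only */
    struct domain *dom0 = domain_create(0, 0 /* domcr_flags */, 0 /* ssidref */);

    if ( IS_ERR(dom0) )
        panic("Error creating domain 0: %ld\n", PTR_ERR(dom0));
#endif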
struct domain *domain_create(domid_t dom_id, unsigned int cpu)
{
    struct domain *d, **pd;
    struct vcpu *v;

    if ( (d = alloc_domain()) == NULL )
        return NULL;

    d->domain_id = dom_id;

    atomic_set(&d->refcnt, 1);

    spin_lock_init(&d->big_lock);
    spin_lock_init(&d->page_alloc_lock);
    INIT_LIST_HEAD(&d->page_list);
    INIT_LIST_HEAD(&d->xenpage_list);

    rangeset_domain_initialise(d);

    if ( !is_idle_domain(d) )
    {
        set_bit(_DOMF_ctrl_pause, &d->domain_flags);
        if ( evtchn_init(d) != 0 )
            goto fail1;
        if ( grant_table_create(d) != 0 )
            goto fail2;
    }

    if ( arch_domain_create(d) != 0 )
        goto fail3;

    if ( (v = alloc_vcpu(d, 0, cpu)) == NULL )
        goto fail4;

    d->iomem_caps = rangeset_new(d, "I/O Memory", RANGESETF_prettyprint_hex);
    d->irq_caps   = rangeset_new(d, "Interrupts", 0);
    if ( (d->iomem_caps == NULL) || (d->irq_caps == NULL) )
        goto fail4; /* NB. alloc_vcpu() is undone in free_domain() */

#if 0
    if ( sched_init_domain(d) != 0 )
        goto fail4;
#endif

    if ( !is_idle_domain(d) )
    {
        write_lock(&domlist_lock);
        /* NB. domain_list maintained in order of dom_id. */
        for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_in_list )
            if ( (*pd)->domain_id > d->domain_id )
                break;
        d->next_in_list = *pd;
        *pd = d;
        d->next_in_hashbucket = domain_hash[DOMAIN_HASH(dom_id)];
        domain_hash[DOMAIN_HASH(dom_id)] = d;
        write_unlock(&domlist_lock);
    }

    return d;

 fail4:
    arch_domain_destroy(d);
 fail3:
    if ( !is_idle_domain(d) )
        grant_table_destroy(d);
 fail2:
    if ( !is_idle_domain(d) )
        evtchn_destroy(d);
 fail1:
    rangeset_domain_destroy(d);
    free_domain(d);
    return NULL;
}
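/*
 * Usage sketch for this older two-argument variant (assumed call site,
 * modelled on the idle-domain bring-up of the same era): failure is
 * reported with NULL here, not ERR_PTR(), and the second argument names
 * the CPU on which vcpu0 is created:
 */
#if 0 /* sketch only */
    struct domain *idle_domain = domain_create(IDLE_DOMAIN_ID, 0 /* boot CPU */);

    if ( idle_domain == NULL )
        BUG();
#endif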