/*
 * One-time boot-CPU initialisation: bring up interrupts and the
 * scheduler, create the idle domain and its vcpu0, install debug-key
 * handlers, then start timers/RCU/serial and run the initcalls.
 * Call order is significant: the idle vcpu must exist before
 * set_current(), and serial_init_postirq() requires init_IRQ().
 */
static void __init start_of_day(void)
{
    init_IRQ();
    scheduler_init();

    /* create idle domain */
    idle_domain = domain_create(IDLE_DOMAIN_ID, 0, 0);
    if ((idle_domain == NULL) || (alloc_vcpu(idle_domain, 0, 0) == NULL))
        BUG();
    /* Make the idle vcpu the current context on the boot CPU. */
    set_current(idle_domain->vcpu[0]);
    idle_vcpu[0] = current;

    initialize_keytable();
    /* Register another key that will allow for the Hardware Probe
     * to be contacted; this works with RiscWatch probes and should
     * work with Chronos and FSPs */
    register_keyhandler('^', key_hw_probe_attn, "Trap to Hardware Probe");
    /* allow the dumping of the devtree */
    register_keyhandler('D', key_ofdump, "Dump OF Devtree");
    timer_init();
    rcu_init();
    serial_init_postirq();
    do_initcalls();
}
/*
 * Allocate dom0's vcpu-pointer array and create its boot vcpu (vcpu0).
 * If opt_dom0_max_vcpus was not set on the command line, default to the
 * number of CPUs in cpupool0; the count is clamped to MAX_VIRT_CPUS.
 * Returns the new vcpu0, or NULL if the array allocation fails.
 */
struct vcpu *__init alloc_dom0_vcpu0(void)
{
    if ( opt_dom0_max_vcpus == 0 )
        opt_dom0_max_vcpus = num_cpupool_cpus(cpupool0);
    if ( opt_dom0_max_vcpus > MAX_VIRT_CPUS )
        opt_dom0_max_vcpus = MAX_VIRT_CPUS;

    /*
     * xzalloc_array() zero-fills the array, replacing the previous
     * xmalloc_array()+memset() pair (matches the other variant of this
     * function in this file).
     */
    dom0->vcpu = xzalloc_array(struct vcpu *, opt_dom0_max_vcpus);
    if ( !dom0->vcpu )
        return NULL;
    dom0->max_vcpus = opt_dom0_max_vcpus;

#ifdef PERF_MON
    return alloc_vcpu(dom0, 0, 0, 0);
#else
    return alloc_vcpu(dom0, 0, 0);
#endif
}
/*
 * Set up dom0's vcpu-pointer array and allocate its first vcpu.
 * A zero opt_dom0_max_vcpus means "one per online CPU"; the result is
 * capped at MAX_VIRT_CPUS.  Returns vcpu0, or NULL on allocation failure.
 */
struct vcpu *__init alloc_dom0_vcpu0(void)
{
    unsigned int nr = opt_dom0_max_vcpus;

    if ( nr == 0 )
        nr = num_online_cpus();
    if ( nr > MAX_VIRT_CPUS )
        nr = MAX_VIRT_CPUS;
    /* Publish the clamped value back to the global option. */
    opt_dom0_max_vcpus = nr;

    dom0->vcpu = xmalloc_array(struct vcpu *, nr);
    if ( dom0->vcpu == NULL )
        return NULL;
    memset(dom0->vcpu, 0, nr * sizeof(*dom0->vcpu));
    dom0->max_vcpus = nr;

    return alloc_vcpu(dom0, 0, 0);
}
/*
 * Create vcpu 'vcpu_id' for domain 'd', placing it on the CPU that
 * follows 'prev_cpu' in dom0_cpus.  On success, the vcpu's soft
 * affinity is restricted to dom0_cpus, and its hard affinity too
 * unless the domain is pinned or relaxed dom0 affinity was requested.
 * Returns the new vcpu, or NULL if allocation failed.
 */
struct vcpu *__init dom0_setup_vcpu(struct domain *d, unsigned int vcpu_id, unsigned int prev_cpu)
{
    unsigned int target = cpumask_cycle(prev_cpu, &dom0_cpus);
    struct vcpu *v = alloc_vcpu(d, vcpu_id, target);

    if ( v == NULL )
        return NULL;

    if ( !d->is_pinned && !dom0_affinity_relaxed )
        cpumask_copy(v->cpu_hard_affinity, &dom0_cpus);
    cpumask_copy(v->cpu_soft_affinity, &dom0_cpus);

    return v;
}
/*
 * Decide how many vcpus dom0 gets and allocate vcpu0.
 * The count starts from the number of CPUs in cpupool0, is raised to at
 * least opt_dom0_max_vcpus_min, lowered to at most opt_dom0_max_vcpus_max,
 * and never exceeds MAX_VIRT_CPUS.  Returns vcpu0, or NULL on failure.
 */
struct vcpu *__init alloc_dom0_vcpu0(void)
{
    unsigned int count = num_cpupool_cpus(cpupool0);

    if ( count < opt_dom0_max_vcpus_min )
        count = opt_dom0_max_vcpus_min;
    if ( count > opt_dom0_max_vcpus_max )
        count = opt_dom0_max_vcpus_max;
    if ( count > MAX_VIRT_CPUS )
        count = MAX_VIRT_CPUS;

    dom0->vcpu = xzalloc_array(struct vcpu *, count);
    if ( dom0->vcpu == NULL )
        return NULL;
    dom0->max_vcpus = count;

    return alloc_vcpu(dom0, 0, 0);
}
/* This is the first C code that secondary processors invoke.
 * Brings a secondary CPU fully online: per-CPU initialisation,
 * timebase synchronisation, interrupt-controller setup, idle-vcpu
 * creation, then the idle loop.  Never returns.
 * (r4 is received from the entry stub but is unused here.) */
void secondary_cpu_init(int cpuid, unsigned long r4)
{
    struct vcpu *vcpu;

    cpu_initialize(cpuid);
    smp_generic_take_timebase();

    /* If we are online, we must be able to ACK IPIs. */
    mpic_setup_this_cpu();
    cpu_set(cpuid, cpu_online_map);
    /* The idle domain was created by the boot CPU; add our idle vcpu. */
    vcpu = alloc_vcpu(idle_domain, cpuid, cpuid);
    BUG_ON(vcpu == NULL);

    set_current(idle_domain->vcpu[cpuid]);
    idle_vcpu[cpuid] = current;
    startup_cpu_idle_loop();
    panic("should never get here\n");
}
/*
 * Create a new domain with id 'dom_id' and its vcpu0 on CPU 'cpu'.
 * For non-idle domains this also sets up event channels and the grant
 * table, and links the domain into the global list and hash.
 * Returns the new domain, or NULL on any failure (everything already
 * set up is torn down via the failN labels, in reverse order).
 */
struct domain *domain_create(domid_t dom_id, unsigned int cpu)
{
    struct domain *d, **pd;
    struct vcpu *v;

    if ( (d = alloc_domain()) == NULL )
        return NULL;

    d->domain_id = dom_id;
    /* Initial reference held by the creator. */
    atomic_set(&d->refcnt, 1);
    spin_lock_init(&d->big_lock);
    spin_lock_init(&d->page_alloc_lock);
    INIT_LIST_HEAD(&d->page_list);
    INIT_LIST_HEAD(&d->xenpage_list);

    rangeset_domain_initialise(d);

    if ( !is_idle_domain(d) )
    {
        /* New domains start paused until the control tools unpause them. */
        set_bit(_DOMF_ctrl_pause, &d->domain_flags);
        if ( evtchn_init(d) != 0 )
            goto fail1;
        if ( grant_table_create(d) != 0 )
            goto fail2;
    }

    if ( arch_domain_create(d) != 0 )
        goto fail3;

    if ( (v = alloc_vcpu(d, 0, cpu)) == NULL )
        goto fail4;

    d->iomem_caps = rangeset_new(d, "I/O Memory", RANGESETF_prettyprint_hex);
    d->irq_caps = rangeset_new(d, "Interrupts", 0);
    if ( (d->iomem_caps == NULL) || (d->irq_caps == NULL) )
        goto fail4; /* NB. alloc_vcpu() is undone in free_domain() */

#if 0
    if ( sched_init_domain(d) != 0 )
        goto fail4;
#endif

    if ( !is_idle_domain(d) )
    {
        write_lock(&domlist_lock);
        pd = &domain_list; /* NB. domain_list maintained in order of dom_id. */
        /* Find the insertion point that keeps the list sorted by id. */
        for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_in_list )
            if ( (*pd)->domain_id > d->domain_id )
                break;
        d->next_in_list = *pd;
        *pd = d;
        /* Also insert at the head of the id-hash bucket for fast lookup. */
        d->next_in_hashbucket = domain_hash[DOMAIN_HASH(dom_id)];
        domain_hash[DOMAIN_HASH(dom_id)] = d;
        write_unlock(&domlist_lock);
    }

    return d;

    /* Teardown in strict reverse order of construction; each label
     * falls through to undo everything set up before it. */
 fail4:
    arch_domain_destroy(d);
 fail3:
    if ( !is_idle_domain(d) )
        grant_table_destroy(d);
 fail2:
    if ( !is_idle_domain(d) )
        evtchn_destroy(d);
 fail1:
    rangeset_domain_destroy(d);
    free_domain(d);
    return NULL;
}
int construct_guest_dom(struct domain *d, unsigned long guest_size, unsigned long image_start, unsigned long image_size, unsigned long initrd_start, unsigned long initrd_size, char *cmdline) { char *p = NULL; int i; int rc; unsigned long nr_pages; unsigned long nr_pt_pages; unsigned long map_track; unsigned long phys_offset; struct page_info *page = NULL; struct start_info *si = NULL; struct domain_setup_info dsi; struct vcpu *v = NULL; uint32_t domain_features_supported[XENFEAT_NR_SUBMAPS] = { 0 }; uint32_t domain_features_required[XENFEAT_NR_SUBMAPS] = { 0 }; BUG_ON(d == NULL); BUG_ON(d->domain_id <= 0); BUG_ON(d->vcpu[0] == NULL); v = d->vcpu[0]; printk("Image Start = 0x%x\n", image_start); /* Guest partition should be aligned to 1MB boundary */ ASSERT((guest_size & 0xFFFFF) == 0); BUG_ON(test_bit(_VCPUF_initialised, &v->vcpu_flags)); write_ptbase(idle_domain->vcpu[0]); memset(&dsi, 0, sizeof(struct domain_setup_info)); dsi.image_addr = image_start; dsi.image_len = image_size; printk("*** LOADING DOMAIN : %d ***\n", (int)d->domain_id); d->max_pages = ~0U; rc = parseelfimage(&dsi); if (rc != 0) { local_irq_enable(); return rc; } if (dsi.xen_section_string == NULL) { printk("Not a Xen-ELF image: '__xen_guest' section not found.\n"); local_irq_enable(); return -EINVAL; } if ((p = strstr(dsi.xen_section_string, "FEATURES=")) != NULL) { parse_features(p + strlen("FEATURES="), domain_features_supported, domain_features_required); printk("Guest kernel supports features = { %08x }.\n", domain_features_supported[0]); printk("Guest kernel requires features = { %08x }.\n", domain_features_required[0]); if (domain_features_required[0]) { printk("Guest kernel requires an unsupported hypervisor feature.\n"); local_irq_enable(); return -EINVAL; } } page = (struct page_info *) pages_u_alloc(d, get_order_from_bytes(guest_size), ~ALLOC_DOM_DMA); if (page == NULL) { printk("Not enough RAM for domain %d allocation.\n", d->domain_id); return -ENOMEM; } dsi.p_start = 
page_to_phys(page); dsi.p_end = dsi.p_start + guest_size; printk("Guest physical: 0x%x-0x%x\n", dsi.p_start, dsi.p_end); dsi.v_start &= (~(0xFFFFF)); nr_pt_pages = build_guest_tables(v, &dsi); write_ptbase(current); rc = inspect_guest_tables(v); if(!rc) { panic("Wrong guest table found\n"); } nr_pages = guest_size >> PAGE_SHIFT; if (d->tot_pages < nr_pages) printk(" (%lu pages to be allocated)", nr_pages - d->tot_pages); for (i = 0; i < MAX_VIRT_CPUS; i++) d->shared_info->vcpu_info[i].evtchn_upcall_mask = 1; for (i = 1; i < num_online_cpus(); i++) (void)alloc_vcpu(d, i, i); write_ptbase(v); phys_offset = v->arch.guest_pstart - v->arch.guest_vstart; dsi.image_addr -= phys_offset; /* Copy the OS image and free temporary buffer. */ (void)loadelfimage(&dsi); map_track = round_pgup((unsigned long)(v->arch.guest_vtable) + (PAGE_SIZE * nr_pt_pages)); si = (start_info_t *)map_track; memset(si, 0, PAGE_SIZE); si->nr_pages = nr_pages; #if 0 si->shared_info = virt_to_phys(d->shared_info); #endif si->shared_info = d->shared_info; si->flags = 0; si->pt_base = (unsigned long)v->arch.guest_vtable; si->nr_pt_frames = nr_pt_pages; si->mfn_list = NULL; si->min_mfn = dsi.p_start >> PAGE_SHIFT; map_track += PAGE_SIZE; if (initrd_size != 0) { si->mod_start = map_track; si->mod_len = initrd_size; printk("Initrd len 0x%lx, start at 0x%lx\n", si->mod_len, si->mod_start); memcpy((void *)map_track, (const void *)(initrd_start - phys_offset), initrd_size); map_track = round_pgup(map_track + initrd_size); } memset(map_track, 0, (PAGE_SIZE * 2)); si->store_mfn = (map_track + phys_offset) >> PAGE_SHIFT; si->store_evtchn = d->store_port; map_track += PAGE_SIZE; si->console_mfn = (map_track + phys_offset) >> PAGE_SHIFT; si->console_evtchn = d->console_port; map_track += PAGE_SIZE; d->console_mfn = si->console_mfn; d->store_mfn = si->store_mfn; memset(si->cmd_line, 0, sizeof(si->cmd_line)); if (cmdline != NULL) strncpy((char *)si->cmd_line, cmdline, sizeof(si->cmd_line)-1); #if 0 /* setup shared 
info table which is specified each domain */ rc = setup_shared_info_mapping(d, NULL); if (rc != 0) { return rc; } #endif write_ptbase(current); //init_domain_time(d); set_bit(_VCPUF_initialised, &v->vcpu_flags); new_thread(v, dsi.v_kernentry, map_track + PAGE_SIZE, (unsigned long)si); i = 0; BUG_ON(i != 0); return 0; }