static int __init modify_identity_mmio(struct domain *d, unsigned long pfn,
                                       unsigned long nr_pages, const bool map)
{
    int rc;

    for ( ; ; )
    {
        rc = (map ? map_mmio_regions : unmap_mmio_regions)
             (d, _gfn(pfn), nr_pages, _mfn(pfn));
        if ( rc == 0 )
            break;
        if ( rc < 0 )
        {
            printk(XENLOG_WARNING
                   "Failed to identity %smap [%#lx,%#lx) for d%d: %d\n",
                   map ? "" : "un", pfn, pfn + nr_pages, d->domain_id, rc);
            break;
        }
        nr_pages -= rc;
        pfn += rc;
        process_pending_softirqs();
    }

    return rc;
}
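/*
 * Illustrative only: a minimal caller sketch for modify_identity_mmio(),
 * mirroring how pvh_setup_p2m() later in this section identity-maps the
 * low 1MB. The surrounding wrapper function is hypothetical; the call and
 * its arguments come straight from that code.
 */
static int __init example_map_low_1mb(struct domain *d)
{
    /* Identity-map [0, 1MB) into the domain's p2m. */
    int rc = modify_identity_mmio(d, 0, PFN_DOWN(MB(1)), true);

    if ( rc )
        printk("Failed to identity map low 1MB: %d\n", rc);

    return rc;
}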
/* Populate an HVM memory range using the biggest possible order. */
static int __init pvh_populate_memory_range(struct domain *d,
                                            unsigned long start,
                                            unsigned long nr_pages)
{
    unsigned int order, i = 0;
    struct page_info *page;
    int rc;
#define MAP_MAX_ITER 64

    order = MAX_ORDER;
    while ( nr_pages != 0 )
    {
        unsigned int range_order = get_order_from_pages(nr_pages + 1);

        order = min(range_order ? range_order - 1 : 0, order);
        page = alloc_domheap_pages(d, order, dom0_memflags);
        if ( page == NULL )
        {
            if ( order == 0 && dom0_memflags )
            {
                /* Try again without any dom0_memflags. */
                dom0_memflags = 0;
                order = MAX_ORDER;
                continue;
            }
            if ( order == 0 )
            {
                printk("Unable to allocate memory with order 0!\n");
                return -ENOMEM;
            }
            order--;
            continue;
        }

        rc = guest_physmap_add_page(d, _gfn(start), _mfn(page_to_mfn(page)),
                                    order);
        if ( rc != 0 )
        {
            printk("Failed to populate memory: [%#lx,%#lx): %d\n",
                   start, start + (1UL << order), rc);
            return -ENOMEM;
        }
        start += 1UL << order;
        nr_pages -= 1UL << order;
        if ( (++i % MAP_MAX_ITER) == 0 )
            process_pending_softirqs();
    }

    return 0;
#undef MAP_MAX_ITER
}
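/*
 * Illustrative only: a standalone sketch of the chunking strategy above.
 * Given a page count, carve off the largest power-of-two chunk that still
 * fits, then repeat on the remainder. EX_MAX_ORDER and the helper are
 * redefined here so the example compiles on its own; Xen's version also
 * never retries an order above the last one that failed to allocate,
 * which is omitted since nothing can fail in this demo.
 */
#include <stdio.h>

#define EX_MAX_ORDER 18

/* Smallest order such that (1UL << order) >= nr, like get_order_from_pages(). */
static unsigned int ex_get_order_from_pages(unsigned long nr)
{
    unsigned int order = 0;

    while ( (1UL << order) < nr )
        order++;
    return order;
}

int main(void)
{
    unsigned long nr_pages = 0x12345; /* arbitrary range to cover */

    while ( nr_pages != 0 )
    {
        /* Largest order that does not overshoot the remaining range. */
        unsigned int range_order = ex_get_order_from_pages(nr_pages + 1);
        unsigned int order = range_order ? range_order - 1 : 0;

        if ( order > EX_MAX_ORDER )
            order = EX_MAX_ORDER;
        printf("chunk: order %u (%lu pages), %lu pages left\n",
               order, 1UL << order, nr_pages - (1UL << order));
        nr_pages -= 1UL << order;
    }
    return 0;
}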
/* Wait for a remote CPU to die */
void __cpu_die(unsigned int cpu)
{
    unsigned int i = 0;

    while ( !cpu_is_dead )
    {
        mdelay(100);
        cpu_relax();
        process_pending_softirqs();
        if ( (++i % 10) == 0 )
            printk(KERN_ERR "CPU %u still not dead...\n", cpu);
        smp_mb();
    }

    cpu_is_dead = 0;
    smp_mb();
}
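/*
 * Illustrative only: a hedged sketch of the dying CPU's side of the
 * handshake that __cpu_die() above polls on. In Xen/ARM the equivalent
 * logic lives in stop_cpu(); the exact barrier and parking details are an
 * approximation here, not a verbatim copy.
 */
void example_stop_cpu(void)
{
    local_irq_disable();
    cpu_is_dead = 1;
    /* Make the store visible to the waiter before parking forever. */
    smp_mb();
    while ( 1 )
        wfi();
}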
/* Bring up a remote CPU */
int __cpu_up(unsigned int cpu)
{
    int rc;

    printk("Bringing up CPU%d\n", cpu);

    rc = init_secondary_pagetables(cpu);
    if ( rc < 0 )
        return rc;

    console_start_sync(); /* Secondary may use early_printk */

    /* Tell the remote CPU which stack to boot on. */
    init_data.stack = idle_vcpu[cpu]->arch.stack;

    /* Tell the remote CPU what its logical CPU ID is. */
    init_data.cpuid = cpu;

    /* Open the gate for this CPU */
    smp_up_cpu = cpu_logical_map(cpu);
    flush_xen_dcache(smp_up_cpu);

    rc = arch_cpu_up(cpu);

    console_end_sync();

    if ( rc < 0 )
    {
        printk("Failed to bring up CPU%d\n", cpu);
        return rc;
    }

    while ( !cpu_online(cpu) )
    {
        cpu_relax();
        process_pending_softirqs();
    }

    return 0;
}
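/*
 * Illustrative only: a C rendering of the "gate" the secondary CPU spins
 * on while __cpu_up() above opens it. The real gate is assembly in
 * head.S and uses wfe/sev; the function and parameter names here are
 * hypothetical.
 */
void example_secondary_gate(unsigned long my_id)
{
    /* Park until the boot CPU publishes our identifier in smp_up_cpu. */
    while ( smp_up_cpu != my_id )
        wfe(); /* woken by the boot CPU's sev() */
    /* ... continue booting: pick up init_data.stack, init_data.cpuid ... */
}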
/* Bring up a remote CPU */
int __cpu_up(unsigned int cpu)
{
    /* Tell the remote CPU which stack to boot on. */
    init_stack = idle_vcpu[cpu]->arch.stack;

    /*
     * Unblock the CPU. It should be waiting in the loop in head.S
     * for an event to arrive when smp_up_cpu matches its cpuid.
     */
    smp_up_cpu = cpu;

    /*
     * Make sure that the change to smp_up_cpu is visible to secondary
     * CPUs with the D-cache off.
     */
    flush_xen_dcache(smp_up_cpu);
    isb();
    sev();

    while ( !cpu_online(cpu) )
    {
        cpu_relax();
        process_pending_softirqs();
    }

    return 0;
}
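/*
 * Illustrative only: the publish/wake pattern used above, generalized.
 * When the consumer may still be running with its D-cache off, a plain
 * store is not enough: the line must be cleaned past the cache before
 * the wake-up event is sent. example_publish() is hypothetical; the
 * primitives are the ones used by __cpu_up() above.
 */
void example_publish(volatile unsigned long *slot, unsigned long value)
{
    *slot = value;           /* 1. write the datum */
    flush_xen_dcache(*slot); /* 2. push it out where an uncached reader sees it */
    isb();                   /* 3. ensure completion before... */
    sev();                   /* 4. ...waking the waiter */
}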
int __init construct_dom0(struct domain *d, const module_t *image,
                          unsigned long image_headroom, module_t *initrd,
                          void *(*bootstrap_map)(const module_t *),
                          char *cmdline)
{
    /* Sanity! */
    BUG_ON(d->domain_id != 0);
    BUG_ON(d->vcpu[0] == NULL);
    BUG_ON(d->vcpu[0]->is_initialised);

    process_pending_softirqs();

#ifdef CONFIG_SHADOW_PAGING
    if ( opt_dom0_shadow && !dom0_pvh )
    {
        opt_dom0_shadow = false;
        printk(XENLOG_WARNING "Shadow Dom0 requires PVH. Option ignored.\n");
    }
#endif

    return (is_hvm_domain(d) ? dom0_construct_pvh : dom0_construct_pv)
           (d, image, image_headroom, initrd, bootstrap_map, cmdline);
}
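/*
 * Illustrative only: the conditional-dispatch idiom used twice in this
 * section (the return statement above and the map/unmap selection in
 * modify_identity_mmio()). Selecting between two functions of identical
 * type and calling through the result avoids duplicating a long argument
 * list. All names below are hypothetical.
 */
static int example_op_a(int x) { return x + 1; }
static int example_op_b(int x) { return x - 1; }

static int example_dispatch(bool use_a, int arg)
{
    /* Both candidates must share one signature for the ternary to type-check. */
    return (use_a ? example_op_a : example_op_b)(arg);
}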
static void acpi_processor_idle(void)
{
    struct acpi_processor_power *power = processor_powers[smp_processor_id()];
    struct acpi_processor_cx *cx = NULL;
    int next_state;
    uint64_t t1, t2 = 0;
    u32 exp = 0, pred = 0;
    u32 irq_traced[4] = { 0 };

    if ( max_cstate > 0 && power && !sched_has_urgent_vcpu() &&
         (next_state = cpuidle_current_governor->select(power)) > 0 )
    {
        cx = &power->states[next_state];
        if ( power->flags.bm_check && acpi_idle_bm_check() &&
             cx->type == ACPI_STATE_C3 )
            cx = power->safe_state;
        if ( cx->idx > max_cstate )
            cx = &power->states[max_cstate];
        menu_get_trace_data(&exp, &pred);
    }
    if ( !cx )
    {
        if ( pm_idle_save )
            pm_idle_save();
        else
            safe_halt();
        return;
    }

    cpufreq_dbs_timer_suspend();

    sched_tick_suspend();
    /* sched_tick_suspend() can raise TIMER_SOFTIRQ. Process it now. */
    process_pending_softirqs();

    /*
     * Interrupts must be disabled during bus mastering calculations and
     * for C2/C3 transitions.
     */
    local_irq_disable();

    if ( !cpu_is_haltable(smp_processor_id()) )
    {
        local_irq_enable();
        sched_tick_resume();
        cpufreq_dbs_timer_resume();
        return;
    }

    if ( (cx->type == ACPI_STATE_C3) && errata_c6_eoi_workaround() )
        cx = power->safe_state;

    power->last_state = cx;

    /*
     * Sleep:
     * ------
     * Invoke the current Cx state to put the processor to sleep.
     */
    switch ( cx->type )
    {
    case ACPI_STATE_C1:
    case ACPI_STATE_C2:
        if ( cx->type == ACPI_STATE_C1 || local_apic_timer_c2_ok )
        {
            /* Get start time (ticks) */
            t1 = get_tick();
            /* Trace cpu idle entry */
            TRACE_4D(TRC_PM_IDLE_ENTRY, cx->idx, t1, exp, pred);
            /* Invoke C2 */
            acpi_idle_do_entry(cx);
            /* Get end time (ticks) */
            t2 = get_tick();
            trace_exit_reason(irq_traced);
            /* Trace cpu idle exit */
            TRACE_6D(TRC_PM_IDLE_EXIT, cx->idx, t2,
                     irq_traced[0], irq_traced[1],
                     irq_traced[2], irq_traced[3]);
            /* Update statistics */
            acpi_update_idle_stats(power, cx, ticks_elapsed(t1, t2));
            /* Re-enable interrupts */
            local_irq_enable();
            break;
        }
        /* Fall through: C2 without a reliable local APIC timer is entered
         * via the C3 path below. */

    case ACPI_STATE_C3:
        /*
         * Before invoking C3, be aware that the TSC/APIC timer may be
         * stopped by H/W. Without careful handling of TSC/APIC-stop
         * issues, deep C states can't work correctly.
         */
        /* preparing APIC stop */
        lapic_timer_off();

        /* Get start time (ticks) */
        t1 = get_tick();
        /* Trace cpu idle entry */
        TRACE_4D(TRC_PM_IDLE_ENTRY, cx->idx, t1, exp, pred);

        /*
         * disable bus master
         * bm_check implies we need ARB_DIS
         * !bm_check implies we need cache flush
         * bm_control implies whether we can do ARB_DIS
         *
         * That leaves a case where bm_check is set and bm_control is
         * not set. In that case we cannot do much, we enter C3
         * without doing anything.
         */
        if ( cx->type != ACPI_STATE_C3 )
            /* nothing to be done here */;
        else if ( power->flags.bm_check && power->flags.bm_control )
        {
            spin_lock(&c3_cpu_status.lock);
            if ( ++c3_cpu_status.count == num_online_cpus() )
            {
                /*
                 * All CPUs are trying to go to C3
                 * Disable bus master arbitration
                 */
                acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
            }
            spin_unlock(&c3_cpu_status.lock);
        }
        else if ( !power->flags.bm_check )
        {
            /* SMP with no shared cache... Invalidate cache */
            ACPI_FLUSH_CPU_CACHE();
        }

        /* Invoke C3 */
        acpi_idle_do_entry(cx);

        if ( (cx->type == ACPI_STATE_C3) &&
             power->flags.bm_check && power->flags.bm_control )
        {
            /* Enable bus master arbitration */
            spin_lock(&c3_cpu_status.lock);
            acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
            c3_cpu_status.count--;
            spin_unlock(&c3_cpu_status.lock);
        }

        /* Get end time (ticks) */
        t2 = get_tick();

        /* recovering TSC */
        cstate_restore_tsc();
        trace_exit_reason(irq_traced);
        /* Trace cpu idle exit */
        TRACE_6D(TRC_PM_IDLE_EXIT, cx->idx, t2,
                 irq_traced[0], irq_traced[1],
                 irq_traced[2], irq_traced[3]);

        /* Update statistics */
        acpi_update_idle_stats(power, cx, ticks_elapsed(t1, t2));
        /* Re-enable interrupts */
        local_irq_enable();
        /* recovering APIC */
        lapic_timer_on();
        break;

    default:
        /* Now in C0 */
        power->last_state = &power->states[0];
        local_irq_enable();
        sched_tick_resume();
        cpufreq_dbs_timer_resume();
        return;
    }

    /* Now in C0 */
    power->last_state = &power->states[0];

    sched_tick_resume();
    cpufreq_dbs_timer_resume();

    if ( cpuidle_current_governor->reflect )
        cpuidle_current_governor->reflect(power);
}
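/*
 * Illustrative only: the shared state behind the ARB_DIS handshake above,
 * reduced to its essentials. Bus-master arbitration may only be disabled
 * while every online CPU sits in C3, so the last CPU in sets ARB_DIS and
 * the first CPU out clears it. The struct layout is an assumption derived
 * from the usage in acpi_processor_idle(), not a verbatim copy of Xen's
 * definition, and the two helpers are hypothetical.
 */
static struct {
    spinlock_t lock;
    unsigned int count; /* CPUs currently in (or entering) C3 */
} c3_cpu_status;

static void example_c3_enter(void)
{
    spin_lock(&c3_cpu_status.lock);
    if ( ++c3_cpu_status.count == num_online_cpus() )
        acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1); /* last one in */
    spin_unlock(&c3_cpu_status.lock);
}

static void example_c3_exit(void)
{
    spin_lock(&c3_cpu_status.lock);
    acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0); /* first one out */
    c3_cpu_status.count--;
    spin_unlock(&c3_cpu_status.lock);
}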
static int __init pvh_setup_p2m(struct domain *d)
{
    struct vcpu *v = d->vcpu[0];
    unsigned long nr_pages;
    unsigned int i;
    int rc;
    bool preempted;
#define MB1_PAGES PFN_DOWN(MB(1))

    nr_pages = dom0_compute_nr_pages(d, NULL, 0);

    pvh_setup_e820(d, nr_pages);
    do {
        preempted = false;
        paging_set_allocation(d, dom0_paging_pages(d, nr_pages),
                              &preempted);
        process_pending_softirqs();
    } while ( preempted );

    /*
     * Memory below 1MB is identity mapped.
     * NB: this only makes sense when booted from legacy BIOS.
     */
    rc = modify_identity_mmio(d, 0, MB1_PAGES, true);
    if ( rc )
    {
        printk("Failed to identity map low 1MB: %d\n", rc);
        return rc;
    }

    /* Populate memory map. */
    for ( i = 0; i < d->arch.nr_e820; i++ )
    {
        unsigned long addr, size;

        if ( d->arch.e820[i].type != E820_RAM )
            continue;

        addr = PFN_DOWN(d->arch.e820[i].addr);
        size = PFN_DOWN(d->arch.e820[i].size);

        if ( addr >= MB1_PAGES )
            rc = pvh_populate_memory_range(d, addr, size);
        else
        {
            ASSERT(addr + size < MB1_PAGES);
            pvh_steal_low_ram(d, addr, size);
        }

        if ( rc )
            return rc;
    }

    if ( cpu_has_vmx && paging_mode_hap(d) && !vmx_unrestricted_guest(v) )
    {
        /*
         * Since Dom0 cannot be migrated, we will only setup the
         * unrestricted guest helpers if they are needed by the current
         * hardware we are running on.
         */
        rc = pvh_setup_vmx_realmode_helpers(d);
        if ( rc )
            return rc;
    }

    return 0;
#undef MB1_PAGES
}
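/*
 * Illustrative only: the preemptible-retry idiom used above for
 * paging_set_allocation(), factored into a reusable shape. A long-running
 * operation reports partial progress through a "preempted" flag; the
 * caller drains softirqs between attempts so timers and other deferred
 * work keep running during a lengthy Dom0 build. example_long_op() is a
 * hypothetical stand-in.
 */
/* Hypothetical long-running operation that may bail out part-way. */
static void example_long_op(struct domain *d, unsigned long target,
                            bool *preempted);

static void example_retry_preemptible(struct domain *d, unsigned long target)
{
    bool preempted;

    do {
        preempted = false;
        example_long_op(d, target, &preempted); /* may stop early */
        process_pending_softirqs();             /* keep the system alive */
    } while ( preempted );
}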
/* Bring up a remote CPU */
int __cpu_up(unsigned int cpu)
{
    int rc;
    s_time_t deadline;

    printk("Bringing up CPU%d\n", cpu);

    rc = init_secondary_pagetables(cpu);
    if ( rc < 0 )
        return rc;

    console_start_sync(); /* Secondary may use early_printk */

    /* Tell the remote CPU which stack to boot on. */
    init_data.stack = idle_vcpu[cpu]->arch.stack;

    /* Tell the remote CPU what its logical CPU ID is. */
    init_data.cpuid = cpu;

    /* Open the gate for this CPU */
    smp_up_cpu = cpu_logical_map(cpu);
    clean_dcache(smp_up_cpu);

    rc = arch_cpu_up(cpu);

    console_end_sync();

    if ( rc < 0 )
    {
        printk("Failed to bring up CPU%d\n", cpu);
        return rc;
    }

    deadline = NOW() + MILLISECS(1000);

    while ( !cpu_online(cpu) && NOW() < deadline )
    {
        cpu_relax();
        process_pending_softirqs();
    }

    /*
     * Nuke start of day info before checking one last time if the CPU
     * actually came online. If it is not online it may still be
     * trying to come up and may show up later unexpectedly.
     *
     * This doesn't completely avoid the possibility of the supposedly
     * failed CPU trying to progress with another CPU's stack settings
     * etc, but better than nothing, hopefully.
     */
    init_data.stack = NULL;
    init_data.cpuid = ~0;
    smp_up_cpu = MPIDR_INVALID;
    clean_dcache(smp_up_cpu);

    if ( !cpu_online(cpu) )
    {
        printk("CPU%d never came online\n", cpu);
        return -EIO;
    }

    return 0;
}
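/*
 * Illustrative only: the bounded-wait pattern used above, factored out.
 * Instead of spinning forever on a condition that may never become true
 * (compare the earlier, unbounded variants of __cpu_up()), compute an
 * absolute deadline once and poll until the condition holds or the
 * deadline passes, yielding to softirqs on each iteration.
 * example_wait_for() and its predicate argument are hypothetical.
 */
static bool example_wait_for(bool (*cond)(unsigned int), unsigned int cpu,
                             unsigned int timeout_ms)
{
    s_time_t deadline = NOW() + MILLISECS(timeout_ms);

    while ( !cond(cpu) && NOW() < deadline )
    {
        cpu_relax();                /* be polite to a sibling thread */
        process_pending_softirqs(); /* don't starve timers while waiting */
    }

    return cond(cpu);               /* final check after the timeout */
}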