/*
 * PV: prepare the boot CPU, trim cpu_possible_map down to max_cpus and
 * fork idle threads for the remaining CPUs.
 */
static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned cpu;

	xen_init_lock_cpu(0);

	smp_store_cpu_info(0);
	cpu_data(0).x86_max_cores = 1;
	set_cpu_sibling_map(0);

	if (xen_smp_intr_init(0))
		BUG();

	xen_cpu_initialized_map = cpumask_of_cpu(0);

	/* Restrict the possible_map according to max_cpus. */
	while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
		for (cpu = NR_CPUS - 1; !cpu_possible(cpu); cpu--)
			continue;
		cpu_clear(cpu, cpu_possible_map);
	}

	for_each_possible_cpu(cpu) {
		struct task_struct *idle;

		if (cpu == 0)
			continue;

		idle = fork_idle(cpu);
		if (IS_ERR(idle))
			panic("failed fork for CPU %d", cpu);

		cpu_set(cpu, cpu_present_map);
	}
}
/* HVM: let the native code do the work, then set up the Xen IPIs and locks. */
static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
{
	native_smp_prepare_cpus(max_cpus);
	WARN_ON(xen_smp_intr_init(0));
	xen_init_lock_cpu(0);
}
/* HVM: CPU-hotplug "prepare" step, run before each vCPU is brought up. */
static int xen_cpu_up_prepare_hvm(unsigned int cpu)
{
	int rc;

	/*
	 * This can happen if CPU was offlined earlier and
	 * offlining timed out in common_cpu_die().
	 */
	if (cpu_report_state(cpu) == CPU_DEAD_FROZEN) {
		xen_smp_intr_free(cpu);
		xen_uninit_lock_cpu(cpu);
	}

	if (cpu_acpi_id(cpu) != U32_MAX)
		per_cpu(xen_vcpu_id, cpu) = cpu_acpi_id(cpu);
	else
		per_cpu(xen_vcpu_id, cpu) = cpu;
	xen_vcpu_setup(cpu);

	if (xen_have_vector_callback && xen_feature(XENFEAT_hvm_safe_pvclock))
		xen_setup_timer(cpu);

	rc = xen_smp_intr_init(cpu);
	if (rc) {
		WARN(1, "xen_smp_intr_init() for CPU %d failed: %d\n",
		     cpu, rc);
		return rc;
	}

	return 0;
}
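/*
 * Hedged sketch, not part of the snippet above: a prepare callback like
 * xen_cpu_up_prepare_hvm() is normally wired into the CPU hotplug state
 * machine so it runs before each CPU comes online. The state name string
 * and the teardown callback xen_cpu_dead_hvm() are assumptions here,
 * shown only to illustrate the registration pattern.
 */
static int __init xen_hvm_cpuhp_setup(void)
{
	return cpuhp_setup_state_nocalls(CPUHP_XEN_PREPARE,
					 "x86/xen/hvm_guest:prepare",
					 xen_cpu_up_prepare_hvm,
					 xen_cpu_dead_hvm);
}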
static int __cpuinit xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int rc;

	rc = native_cpu_up(cpu, tidle);
	WARN_ON(xen_smp_intr_init(cpu));
	return rc;
}
static int __cpuinit xen_hvm_cpu_up(unsigned int cpu)
{
	int rc;

	rc = native_cpu_up(cpu);
	WARN_ON(xen_smp_intr_init(cpu));
	return rc;
}
/* FreeBSD: initialise the Xen SMP interrupts on every CPU in the system. */
static void
xen_smp_intr_init_cpus(void *unused)
{
	int i;

	for (i = 0; i < mp_ncpus; i++)
		xen_smp_intr_init(i);
}
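/*
 * Hedged sketch, assuming FreeBSD: a (void *) callback of this shape is
 * typically scheduled with SYSINIT(9) once the APs are running; the
 * subsystem and order values below are illustrative assumptions, not
 * taken from the snippet above.
 */
SYSINIT(xen_smp_intr, SI_SUB_SMP, SI_ORDER_FIRST, xen_smp_intr_init_cpus, NULL);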
static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
{
	native_smp_prepare_cpus(max_cpus);
	WARN_ON(xen_smp_intr_init(0));
	/* FIXME: Disable until final solution is found (lp#838026)
	xen_init_lock_cpu(0);
	xen_init_spinlocks();
	*/
}
static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
{
	native_smp_prepare_cpus(max_cpus);
	WARN_ON(xen_smp_intr_init(0));

	if (!xen_have_vector_callback)
		return;
	xen_init_lock_cpu(0);
	xen_init_spinlocks();
}
/*
 * PV: prepare the boot CPU, allocate the topology cpumasks and restrict
 * cpu_possible_map to max_cpus; nosmp and noapic are rejected as
 * incompatible with Xen.
 */
static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned cpu;
	unsigned int i;

	if (skip_ioapic_setup) {
		char *m = (max_cpus == 0) ?
			"The nosmp parameter is incompatible with Xen; "
			"use Xen dom0_max_vcpus=1 parameter" :
			"The noapic parameter is incompatible with Xen";

		xen_raw_printk(m);
		panic(m);
	}

	xen_init_lock_cpu(0);

	smp_store_cpu_info(0);
	cpu_data(0).x86_max_cores = 1;

	for_each_possible_cpu(i) {
		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
	}
	set_cpu_sibling_map(0);

	if (xen_smp_intr_init(0))
		BUG();

	if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
		panic("could not allocate xen_cpu_initialized_map\n");

	cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));

	/* Restrict the possible_map according to max_cpus. */
	while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
		for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
			continue;
		set_cpu_possible(cpu, false);
	}

	for_each_possible_cpu(cpu) {
		struct task_struct *idle;

		if (cpu == 0)
			continue;

		idle = fork_idle(cpu);
		if (IS_ERR(idle))
			panic("failed fork for CPU %d", cpu);

		set_cpu_present(cpu, true);
	}
}
/* PV: bring up a secondary vCPU and spin until it reports CPU_ONLINE. */
static int __cpuinit xen_cpu_up(unsigned int cpu)
{
	struct task_struct *idle = idle_task(cpu);
	int rc;

#ifdef CONFIG_X86_64
	/* Allocate node local memory for AP pdas */
	WARN_ON(cpu == 0);
	if (cpu > 0) {
		rc = get_local_pda(cpu);
		if (rc)
			return rc;
	}
#endif

#ifdef CONFIG_X86_32
	init_gdt(cpu);
	per_cpu(current_task, cpu) = idle;
	irq_ctx_init(cpu);
#else
	cpu_pda(cpu)->pcurrent = idle;
	clear_tsk_thread_flag(idle, TIF_FORK);
#endif
	xen_setup_timer(cpu);
	xen_init_lock_cpu(cpu);

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

	/* make sure interrupts start blocked */
	per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

	rc = cpu_initialize_context(cpu, idle);
	if (rc)
		return rc;

	if (num_online_cpus() == 1)
		alternatives_smp_switch(1);

	rc = xen_smp_intr_init(cpu);
	if (rc)
		return rc;

	rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
	BUG_ON(rc);

	while (per_cpu(cpu_state, cpu) != CPU_ONLINE) {
		HYPERVISOR_sched_op(SCHEDOP_yield, 0);
		barrier();
	}

	return 0;
}
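/*
 * Hedged sketch, an assumption rather than part of the snippets above: the
 * PV prepare/up hooks are conventionally published through a struct smp_ops
 * instance installed at early boot. Only the two fields defined nearby are
 * shown, and xen_smp_init() is illustrative.
 */
static const struct smp_ops xen_smp_ops __initconst = {
	.smp_prepare_cpus	= xen_smp_prepare_cpus,
	.cpu_up			= xen_cpu_up,
};

void __init xen_smp_init(void)
{
	smp_ops = xen_smp_ops;
}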
static int __cpuinit xen_hvm_cpu_up(unsigned int cpu)
{
	int rc;

	/*
	 * xen_smp_intr_init() needs to run before native_cpu_up()
	 * so that IPI vectors are set up on the booting CPU before
	 * it is marked online in native_cpu_up().
	 */
	rc = xen_smp_intr_init(cpu);
	WARN_ON(rc);
	if (!rc)
		rc = native_cpu_up(cpu);

	return rc;
}
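/*
 * Hedged sketch, also an assumption: on the HVM side the native smp_ops
 * are kept and only the Xen-specific entry points are patched in, e.g.
 * from a hypothetical xen_hvm_smp_init() called during guest setup.
 */
void __init xen_hvm_smp_init(void)
{
	smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
	smp_ops.cpu_up = xen_hvm_cpu_up;
}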
/* PV: bring up a secondary vCPU (variant that tracks per-CPU kernel stacks). */
static int __cpuinit xen_cpu_up(unsigned int cpu)
{
	struct task_struct *idle = idle_task(cpu);
	int rc;

	per_cpu(current_task, cpu) = idle;
#ifdef CONFIG_X86_32
	irq_ctx_init(cpu);
#else
	clear_tsk_thread_flag(idle, TIF_FORK);
	per_cpu(kernel_stack, cpu) =
		(unsigned long)task_stack_page(idle) -
		KERNEL_STACK_OFFSET + THREAD_SIZE;
	per_cpu(kernel_stack8k, cpu) =
		(unsigned long)task_stack_page(idle) -
		KERNEL_STACK_OFFSET + THREAD_SIZE - 8192;
#endif
	xen_setup_runstate_info(cpu);
	xen_setup_timer(cpu);
	xen_init_lock_cpu(cpu);

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

	/* make sure interrupts start blocked */
	per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

	rc = cpu_initialize_context(cpu, idle);
	if (rc)
		return rc;

	if (num_online_cpus() == 1)
		alternatives_smp_switch(1);

	rc = xen_smp_intr_init(cpu);
	if (rc)
		return rc;

	rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
	BUG_ON(rc);

	while (per_cpu(cpu_state, cpu) != CPU_ONLINE) {
		HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
		barrier();
	}

	return 0;
}
/* PV: like the variant above, but with llc_shared_map kept in cpu_data. */
static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned cpu;
	unsigned int i;

	xen_init_lock_cpu(0);

	smp_store_cpu_info(0);
	cpu_data(0).x86_max_cores = 1;

	for_each_possible_cpu(i) {
		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
	}
	set_cpu_sibling_map(0);

	if (xen_smp_intr_init(0))
		BUG();

	if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
		panic("could not allocate xen_cpu_initialized_map\n");

	cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));

	/* Restrict the possible_map according to max_cpus. */
	while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
		for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
			continue;
		set_cpu_possible(cpu, false);
	}

	for_each_possible_cpu(cpu) {
		struct task_struct *idle;

		if (cpu == 0)
			continue;

		idle = fork_idle(cpu);
		if (IS_ERR(idle))
			panic("failed fork for CPU %d", cpu);

		set_cpu_present(cpu, true);
	}
}
void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned cpu;

	for_each_possible_cpu(cpu) {
		cpus_clear(per_cpu(cpu_sibling_map, cpu));
		/*
		 * cpu_core_map will be zeroed when the per
		 * cpu area is allocated.
		 *
		 * cpus_clear(per_cpu(cpu_core_map, cpu));
		 */
	}

	smp_store_cpu_info(0);
	set_cpu_sibling_map(0);

	if (xen_smp_intr_init(0))
		BUG();

	cpu_initialized_map = cpumask_of_cpu(0);

	/* Restrict the possible_map according to max_cpus. */
	while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
		for (cpu = NR_CPUS - 1; !cpu_isset(cpu, cpu_possible_map); cpu--)
			continue;
		cpu_clear(cpu, cpu_possible_map);
	}

	for_each_possible_cpu(cpu) {
		struct task_struct *idle;

		if (cpu == 0)
			continue;

		idle = fork_idle(cpu);
		if (IS_ERR(idle))
			panic("failed fork for CPU %d", cpu);

		cpu_set(cpu, cpu_present_map);
	}

	/* init_xenbus_allowed_cpumask(); */
}
static int __cpuinit xen_cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int rc;

	per_cpu(current_task, cpu) = idle;
	per_cpu(current_tinfo, cpu) = &idle->tinfo;
#ifdef CONFIG_X86_32
	irq_ctx_init(cpu);
#else
	clear_tsk_thread_flag(idle, TIF_FORK);
	per_cpu(kernel_stack, cpu) =
		(unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
#endif
	xen_setup_runstate_info(cpu);
	xen_setup_timer(cpu);
	xen_init_lock_cpu(cpu);

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

	/* make sure interrupts start blocked */
	per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

	rc = cpu_initialize_context(cpu, idle);
	if (rc)
		return rc;

	if (num_online_cpus() == 1)
		/* Just in case we booted with a single CPU. */
		alternatives_enable_smp();

	rc = xen_smp_intr_init(cpu);
	if (rc)
		return rc;

	rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
	BUG_ON(rc);

	while (per_cpu(cpu_state, cpu) != CPU_ONLINE) {
		HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
		barrier();
	}

	return 0;
}
/* PV: early variant that marks the CPU online itself before VCPUOP_up. */
int __cpuinit xen_cpu_up(unsigned int cpu)
{
	struct task_struct *idle = idle_task(cpu);
	int rc;

#if 0
	rc = cpu_up_check(cpu);
	if (rc)
		return rc;
#endif

	init_gdt(cpu);
	per_cpu(current_task, cpu) = idle;
	irq_ctx_init(cpu);
	xen_setup_timer(cpu);

	/* make sure interrupts start blocked */
	per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

	rc = cpu_initialize_context(cpu, idle);
	if (rc)
		return rc;

	if (num_online_cpus() == 1)
		alternatives_smp_switch(1);

	rc = xen_smp_intr_init(cpu);
	if (rc)
		return rc;

	smp_store_cpu_info(cpu);
	set_cpu_sibling_map(cpu);
	/* This must be done before setting cpu_online_map */
	wmb();

	cpu_set(cpu, cpu_online_map);

	rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
	BUG_ON(rc);

	return 0;
}
void __cpuinit xen_cpu_init(void)
{
	xen_smp_intr_init();
}
void xen_cpu_init(void)
{
	xen_smp_intr_init();
}