Example #1
/*
 * Called by secondaries to update state and initialize CPU registers.
 */
static void __init
smp_cpu_init(int cpunum)
{
	extern int init_per_cpu(int);  /* arch/parisc/kernel/setup.c */
	extern void init_IRQ(void);    /* arch/parisc/kernel/irq.c */

	/* Set modes and enable the floating-point coprocessor. */
	(void) init_per_cpu(cpunum);

	disable_sr_hashing();

	mb();

	/* Well, support the 2.4 Linux scheme as well. */
	if (cpu_test_and_set(cpunum, cpu_online_map))
	{
		extern void machine_halt(void); /* arch/parisc.../process.c */

		printk(KERN_CRIT "CPU#%d already initialized!\n", cpunum);
		machine_halt();
	}

	/* Initialise the idle task for this CPU */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	if (current->mm)
		BUG();
	enter_lazy_tlb(&init_mm, current);

	init_IRQ();   /* make sure no IRQ's are enabled or pending */
}
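
Each of these examples gates one-shot per-CPU setup on cpu_test_and_set(), which atomically sets the CPU's bit in a cpumask and returns the bit's previous value. A minimal sketch of that helper, assuming the pre-2.6.30 cpumask API these snippets are written against (the authoritative definition lives in include/linux/cpumask.h):

/* Sketch of the old cpumask helper used throughout these examples
 * (assumption: mirrors the pre-2.6.30 include/linux/cpumask.h wrapper).
 * Returns the previous bit value, so a second caller for a given CPU
 * sees 1 and can detect double initialization. */
static inline int __cpu_test_and_set(int cpu, cpumask_t *dstp)
{
	return test_and_set_bit(cpu, dstp->bits);
}
#define cpu_test_and_set(cpu, cpumask) __cpu_test_and_set((cpu), &(cpumask))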
Example #2
static __cpuinit int
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
{
	struct vcpu_guest_context *ctxt;
	struct desc_struct *gdt;

	if (cpu_test_and_set(cpu, xen_cpu_initialized_map))
		return 0;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (ctxt == NULL)
		return -ENOMEM;

	gdt = get_cpu_gdt_table(cpu);

	ctxt->flags = VGCF_IN_KERNEL;
	ctxt->user_regs.ds = __KERNEL_DS;
	ctxt->user_regs.es = __KERNEL_DS;
	ctxt->user_regs.ss = __KERNEL_DS;
#ifdef CONFIG_X86_32
	ctxt->user_regs.fs = __KERNEL_PERCPU;
#endif
	ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
	ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */

	memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

	xen_copy_trap_info(ctxt->trap_ctxt);

	ctxt->ldt_ents = 0;

	BUG_ON((unsigned long)gdt & ~PAGE_MASK);
	make_lowmem_page_readonly(gdt);

	ctxt->gdt_frames[0] = virt_to_mfn(gdt);
	ctxt->gdt_ents      = GDT_ENTRIES;

	ctxt->user_regs.cs = __KERNEL_CS;
	ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);

	ctxt->kernel_ss = __KERNEL_DS;
	ctxt->kernel_sp = idle->thread.sp0;

#ifdef CONFIG_X86_32
	ctxt->event_callback_cs     = __KERNEL_CS;
	ctxt->failsafe_callback_cs  = __KERNEL_CS;
#endif
	ctxt->event_callback_eip    = (unsigned long)xen_hypervisor_callback;
	ctxt->failsafe_callback_eip = (unsigned long)xen_failsafe_callback;

	per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
	ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));

	if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
		BUG();

	kfree(ctxt);
	return 0;
}
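
The function is idempotent thanks to the xen_cpu_initialized_map guard, so a caller can invoke it unconditionally before starting the VCPU. A hypothetical caller sketch (bring_up_vcpu is illustrative, not the actual arch/x86/xen/smp.c bring-up path):

/* Hypothetical caller: register the boot context once, then start the
 * VCPU, which begins executing at ctxt->user_regs.eip
 * (cpu_bringup_and_idle above). */
static int bring_up_vcpu(unsigned int cpu, struct task_struct *idle)
{
	int rc;

	rc = cpu_initialize_context(cpu, idle);
	if (rc)
		return rc;

	return HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
}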
Example #3
/* Save the registers in the per-cpu crash note buffer. */
void kexec_crash_save_cpu(void)
{
    int cpu = smp_processor_id();
    Elf_Note *note = per_cpu(crash_notes, cpu);
    ELF_Prstatus *prstatus;
    crash_xen_core_t *xencore;

    if ( cpu_test_and_set(cpu, crash_saved_cpus) )
        return;

    prstatus = (ELF_Prstatus *)ELFNOTE_DESC(note);

    note = ELFNOTE_NEXT(note);
    xencore = (crash_xen_core_t *)ELFNOTE_DESC(note);

    elf_core_save_regs(&prstatus->pr_reg, xencore);
}
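
ELFNOTE_DESC and ELFNOTE_NEXT walk the standard ELF note layout: a header followed by the name and the descriptor, each padded to a 4-byte boundary. A rough equivalent of the traversal, assuming Xen's Elf_Note field names (the actual macros in the Xen headers may differ in detail):

#define NOTE_ALIGN(n) (((n) + 3) & ~3)

/* Sketch: advance past this note's header, name, and descriptor to
 * reach the next note (assumes Xen's namesz/descsz field naming). */
static Elf_Note *note_next(Elf_Note *note)
{
	return (Elf_Note *)((char *)(note + 1) +
			    NOTE_ALIGN(note->namesz) +
			    NOTE_ALIGN(note->descsz));
}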
Example #4
static int idle_proc(void *cpup)
{
	int cpu = (int) cpup, err;

	err = os_pipe(cpu_data[cpu].ipi_pipe, 1, 1);
	if (err < 0)
		panic("CPU#%d failed to create IPI pipe, err = %d", cpu, -err);

	os_set_fd_async(cpu_data[cpu].ipi_pipe[0]);

	wmb();
	if (cpu_test_and_set(cpu, cpu_callin_map)) {
		printk(KERN_ERR "huh, CPU#%d already present??\n", cpu);
		BUG();
	}

	while (!cpu_isset(cpu, smp_commenced_mask))
		cpu_relax();

	notify_cpu_starting(cpu);
	cpu_set(cpu, cpu_online_map);

	default_idle();
	return 0;
}
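
For reference, the boot-CPU side of this callin/commenced handshake sets the commenced bit and then waits for the secondary to come online; a sketch of that counterpart from the same UML kernel era (treat the exact body as an assumption):

int __cpu_up(unsigned int cpu)
{
	/* Let the waiting idle_proc() past its smp_commenced_mask spin,
	 * then wait for it to mark itself online. */
	cpu_set(cpu, smp_commenced_mask);
	while (!cpu_isset(cpu, cpu_online_map))
		mb();
	return 0;
}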
Example #5
/* Set up the single Xen-specific-info crash note. */
crash_xen_info_t *kexec_crash_save_info(void)
{
    int cpu = smp_processor_id();
    crash_xen_info_t info;
    crash_xen_info_t *out = (crash_xen_info_t *)ELFNOTE_DESC(xen_crash_note);

    BUG_ON(!cpu_test_and_set(cpu, crash_saved_cpus));

    memset(&info, 0, sizeof(info));
    info.xen_major_version = xen_major_version();
    info.xen_minor_version = xen_minor_version();
    info.xen_extra_version = __pa(xen_extra_version());
    info.xen_changeset = __pa(xen_changeset());
    info.xen_compiler = __pa(xen_compiler());
    info.xen_compile_date = __pa(xen_compile_date());
    info.xen_compile_time = __pa(xen_compile_time());
    info.tainted = tainted;

    /* Copy from guaranteed-aligned local copy to possibly-unaligned dest. */
    memcpy(out, &info, sizeof(info));

    return out;
}
Example #6
unsigned long ipipe_critical_enter(void (*syncfn)(void))
{
	int cpu __maybe_unused, n __maybe_unused;
	unsigned long flags, loops __maybe_unused;
	cpumask_t allbutself __maybe_unused;

	flags = hard_local_irq_save();

	if (num_online_cpus() == 1)
		return flags;

#ifdef CONFIG_SMP

	cpu = ipipe_processor_id();
	if (!cpu_test_and_set(cpu, __ipipe_cpu_lock_map)) {
		while (test_and_set_bit(0, &__ipipe_critical_lock)) {
			n = 0;
			hard_local_irq_enable();

			do
				cpu_relax();
			while (++n < cpu);

			hard_local_irq_disable();
		}
restart:
		spin_lock(&__ipipe_cpu_barrier);

		__ipipe_cpu_sync = syncfn;

		cpus_clear(__ipipe_cpu_pass_map);
		cpu_set(cpu, __ipipe_cpu_pass_map);

		/*
		 * Send the sync IPI to all processors but the current
		 * one.
		 */
		cpus_andnot(allbutself, cpu_online_map, __ipipe_cpu_pass_map);
		ipipe_send_ipi(IPIPE_CRITICAL_IPI, allbutself);
		loops = IPIPE_CRITICAL_TIMEOUT;

		while (!cpus_equal(__ipipe_cpu_sync_map, allbutself)) {
			if (--loops > 0) {
				cpu_relax();
				continue;
			}
			/*
			 * We ran into a deadlock due to a contended
			 * rwlock. Cancel this round and retry.
			 */
			__ipipe_cpu_sync = NULL;

			spin_unlock(&__ipipe_cpu_barrier);
			/*
			 * Ensure all CPUs consumed the IPI, so that
			 * __ipipe_cpu_sync is not run prematurely.
			 * This usually clears the cause of the
			 * deadlock as well.
			 */
			while (!cpus_equal(cpu_online_map, __ipipe_cpu_pass_map))
				cpu_relax();

			goto restart;
		}
	}

	atomic_inc(&__ipipe_critical_count);

#endif	/* CONFIG_SMP */

	return flags;
}
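
Everything taken here has to be undone by the matching exit path: drop the count, and the last CPU out releases the barrier, its lock-map bit, and the global lock. A sketch of that counterpart, assuming the usual I-pipe naming of ipipe_critical_exit() (details vary between I-pipe versions):

void ipipe_critical_exit(unsigned long flags)
{
	if (num_online_cpus() == 1) {
		hard_local_irq_restore(flags);
		return;
	}

#ifdef CONFIG_SMP
	/* The last CPU out releases the barrier and the global lock
	 * taken in ipipe_critical_enter(). */
	if (atomic_dec_and_test(&__ipipe_critical_count)) {
		spin_unlock(&__ipipe_cpu_barrier);
		while (!cpus_empty(__ipipe_cpu_sync_map))
			cpu_relax();
		cpu_clear(ipipe_processor_id(), __ipipe_cpu_lock_map);
		clear_bit(0, &__ipipe_critical_lock);
	}
#endif	/* CONFIG_SMP */

	hard_local_irq_restore(flags);
}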