Example #1
File: smp.c Project: bsingharora/linux
asmlinkage void start_secondary(void)
{
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm = &init_mm;

	enable_mmu();
	mmgrab(mm);
	mmget(mm);
	current->active_mm = mm;
#ifdef CONFIG_MMU
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();
#endif

	per_cpu_trap_init();

	preempt_disable();

	notify_cpu_starting(cpu);

	local_irq_enable();

	calibrate_delay();

	smp_store_cpu_info(cpu);

	set_cpu_online(cpu, true);
	per_cpu(cpu_state, cpu) = CPU_ONLINE;

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
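Examples #1 and #13 take their references through the mmgrab()/mmget() helpers, while the older examples below bump the counters directly. The helpers are thin wrappers over the same two counters; a minimal sketch of their original form in include/linux/sched/mm.h:

static inline void mmgrab(struct mm_struct *mm)
{
	/* Pin the mm_struct itself: a kernel ("grab") reference. */
	atomic_inc(&mm->mm_count);
}

static inline void mmget(struct mm_struct *mm)
{
	/* Pin the address space: a user ("get") reference. */
	atomic_inc(&mm->mm_users);
}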
Example #2
File: smp.c Project: janrinze/loox7xxport
asmlinkage void __cpuinit start_secondary(void)
{
	unsigned int cpu;
	struct mm_struct *mm = &init_mm;

	atomic_inc(&mm->mm_count);
	atomic_inc(&mm->mm_users);
	current->active_mm = mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(mm, current);

	per_cpu_trap_init();

	preempt_disable();

	local_irq_enable();

	calibrate_delay();

	cpu = smp_processor_id();
	smp_store_cpu_info(cpu);

	cpu_set(cpu, cpu_online_map);

	cpu_idle();
}
Example #3
File: smp.c Project: 0-T-0/ps4-linux
asmlinkage void start_secondary(void)
{
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm = &init_mm;

	enable_mmu();
	atomic_inc(&mm->mm_count);
	atomic_inc(&mm->mm_users);
	current->active_mm = mm;
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

	per_cpu_trap_init();

	preempt_disable();

	notify_cpu_starting(cpu);

	local_irq_enable();

	/* Enable local timers */
	local_timer_setup(cpu);
	calibrate_delay();

	smp_store_cpu_info(cpu);

	set_cpu_online(cpu, true);
	per_cpu(cpu_state, cpu) = CPU_ONLINE;

	cpu_startup_entry(CPUHP_ONLINE);
}
Example #4
/*
 * Called by secondaries to update state and initialize CPU registers.
 */
static void __init
smp_cpu_init(int cpunum)
{
	extern int init_per_cpu(int);  /* arch/parisc/kernel/setup.c */
	extern void init_IRQ(void);    /* arch/parisc/kernel/irq.c */

	/* Set modes and Enable floating point coprocessor */
	(void) init_per_cpu(cpunum);

	disable_sr_hashing();

	mb();

	/* Well, support 2.4 linux scheme as well. */
	if (cpu_test_and_set(cpunum, cpu_online_map))
	{
		extern void machine_halt(void); /* arch/parisc.../process.c */

		printk(KERN_CRIT "CPU#%d already initialized!\n", cpunum);
		machine_halt();
	}  

	/* Initialise the idle task for this CPU */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	if(current->mm)
		BUG();
	enter_lazy_tlb(&init_mm, current);

	init_IRQ();   /* make sure no IRQs are enabled or pending */
}
Example #5
/*
 * We can use these to temporarily drop into
 * "lazy TLB" mode and back.
 */
struct mm_struct * start_lazy_tlb(void)
{
	struct mm_struct *mm = current->mm;
	current->mm = NULL;
	/* active_mm is still 'mm' */
	atomic_inc(&mm->mm_count);
	enter_lazy_tlb(mm, current, smp_processor_id());
	return mm;
}
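Only the entry half of the pair is shown here. A sketch of the matching exit path, assuming the 2.4-era activate_mm()/mmdrop() semantics that this example's three-argument enter_lazy_tlb() implies:

void end_lazy_tlb(struct mm_struct *mm)
{
	struct mm_struct *active_mm = current->active_mm;

	current->mm = mm;
	if (mm != active_mm) {
		current->active_mm = mm;
		activate_mm(active_mm, mm);
	}
	/* Drop the reference taken in start_lazy_tlb(). */
	mmdrop(active_mm);
}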
Example #6
/*
 * cpu_init - initializes state that is per-CPU.
 */
void cpu_init(void)
{
	struct cpuid *id = &__get_cpu_var(cpu_id);

	get_cpu_id(id);
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);
}
Example #7
/*
 * cpu_init - initializes state that is per-CPU.
 */
void __cpuinit cpu_init(void)
{
	struct cpuid *id = &per_cpu(cpu_id, smp_processor_id());

	get_cpu_id(id);
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);
}
Example #8
/*
 * unuse_mm
 *	Reverses the effect of use_mm, i.e. releases the
 *	specified mm context which was earlier taken on
 *	by the calling kernel thread
 *	(Note: this routine is intended to be called only
 *	from a kernel thread context)
 */
void unuse_mm(struct mm_struct *mm)
{
	struct task_struct *tsk = current;

	task_lock(tsk);
	sync_mm_rss(tsk, mm);
	tsk->mm = NULL;
	/* active_mm is still 'mm' */
	enter_lazy_tlb(mm, tsk);
	task_unlock(tsk);
}
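The use_mm() counterpart named in the comment is not part of this listing; for context, a sketch of its long-standing form (mm/mmu_context.c in kernels of this era):

void use_mm(struct mm_struct *mm)
{
	struct mm_struct *active_mm;
	struct task_struct *tsk = current;

	task_lock(tsk);
	active_mm = tsk->active_mm;
	if (active_mm != mm) {
		/* Pin the new mm; the old lazy reference is dropped below. */
		atomic_inc(&mm->mm_count);
		tsk->active_mm = mm;
	}
	tsk->mm = mm;
	switch_mm(active_mm, mm, tsk);
	task_unlock(tsk);

	if (active_mm != mm)
		mmdrop(active_mm);
}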
Example #9
/*
 * cpu_init() initializes state that is per-CPU.
 */
void __cpuinit cpu_init(void)
{
	/*
	 * Store processor id in lowcore (used e.g. in timer_interrupt)
	 */
	get_cpu_id(&S390_lowcore.cpu_id);

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);
}
Example #10
/*
 * cpu_init - initializes state that is per-CPU.
 */
void __cpuinit cpu_init(void)
{
	struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
	struct cpuid *id = &__get_cpu_var(cpu_id);

	get_cpu_id(id);
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);
	memset(idle, 0, sizeof(*idle));
}
Example #11
/*
 * We can use these to temporarily drop into
 * "lazy TLB" mode and back.
 */
struct mm_struct * start_lazy_tlb(void)
{
	struct mm_struct *mm = current->mm;
#ifdef CONFIG_PREEMPT
	if (preempt_is_disabled() == 0)
		BUG();
#endif
	current->mm = NULL;
	/* active_mm is still 'mm' */
	atomic_inc(&mm->mm_count);
	enter_lazy_tlb(mm, current, smp_processor_id());
	return mm;
}
Example #12
static inline void
context_switch(struct rq *rq, struct task_struct *prev,
	       struct task_struct *next)
{
	struct mm_struct *mm, *oldmm;

	prepare_task_switch(rq, prev, next);

	mm = next->mm;
	oldmm = prev->active_mm;
	/*
	 * For paravirt, this is coupled with an exit in switch_to to
	 * combine the page table reload and the switch backend into
	 * one hypercall.
	 */
	arch_start_context_switch(prev);

	if (!mm) {
		next->active_mm = oldmm;
		atomic_inc(&oldmm->mm_count);
		enter_lazy_tlb(oldmm, next);
	} else
		switch_mm(oldmm, mm, next);

	if (!prev->mm) {
		prev->active_mm = NULL;
		rq->prev_mm = oldmm;
	}
	/*
	 * Since the runqueue lock will be released by the next
	 * task (which is an invalid locking op but in the case
	 * of the scheduler it's an obvious special-case), so we
	 * do an early lockdep release here:
	 */
#ifndef __ARCH_WANT_UNLOCKED_CTXSW
	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
#endif

	/* Here we just switch the register state and the stack. */
	switch_to(prev, next, prev);

	barrier();
	/*
	 * this_rq must be evaluated again because prev may have moved
	 * CPUs since it called schedule(), thus the 'rq' on its stack
	 * frame will be invalid.
	 */
	finish_task_switch(this_rq(), prev);
}
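The atomic_inc(&oldmm->mm_count) taken on the lazy path above is deliberately not dropped in context_switch(); the mm is parked in rq->prev_mm and released once the switch has completed. A minimal sketch of the relevant part of finish_task_switch(), with the unrelated lock and state bookkeeping elided:

static void finish_task_switch(struct rq *rq, struct task_struct *prev)
{
	struct mm_struct *mm = rq->prev_mm;

	rq->prev_mm = NULL;

	/* ... runqueue/irq bookkeeping elided ... */

	/*
	 * If prev was running lazily on someone else's mm, drop the
	 * reference context_switch() took; this may be the final one,
	 * freeing the mm_struct.
	 */
	if (mm)
		mmdrop(mm);
}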
Example #13
File: smp.c Project: AlexShiLucky/linux
void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	init_mmu();

#ifdef CONFIG_DEBUG_KERNEL
	if (boot_secondary_processors == 0) {
		pr_debug("%s: boot_secondary_processors:%d; Hanging cpu:%d\n",
			__func__, boot_secondary_processors, cpu);
		for (;;)
			__asm__ __volatile__ ("waiti " __stringify(LOCKLEVEL));
	}

	pr_debug("%s: boot_secondary_processors:%d; Booting cpu:%d\n",
		__func__, boot_secondary_processors, cpu);
#endif
	/* Init EXCSAVE1 */

	secondary_trap_init();

	/* All kernel threads share the same mm context. */

	mmget(mm);
	mmgrab(mm);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	enter_lazy_tlb(mm, current);

	preempt_disable();
	trace_hardirqs_off();

	calibrate_delay();

	notify_cpu_starting(cpu);

	secondary_init_irq();
	local_timer_setup(cpu);

	set_cpu_online(cpu, true);

	local_irq_enable();

	complete(&cpu_running);

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
Example #14
/*
 * Turn us into a lazy TLB process if we
 * aren't already..
 */
static inline void __exit_mm(struct task_struct * tsk)
{
	struct mm_struct * mm = tsk->mm;

	mm_release();
	if (mm) {
		atomic_inc(&mm->mm_count);
		BUG_ON(mm != tsk->active_mm);
		/* more a memory barrier than a real lock */
		task_lock(tsk);
		tsk->mm = NULL;
		task_unlock(tsk);
		enter_lazy_tlb(mm, current, smp_processor_id());
		mmput(mm);
	}
}
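Note the asymmetry: __exit_mm() grabs mm_count before clearing tsk->mm, so the mm_struct itself outlives the address-space teardown that mmput() may trigger. The kernel reference is released later via mmdrop(), whose long-standing form is essentially:

static inline void mmdrop(struct mm_struct *mm)
{
	/* Free the mm_struct once the last kernel reference is gone. */
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}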
Example #15
File: setup.c Project: 710leo/LVS
/*
 * cpu_init() initializes state that is per-CPU.
 */
void __cpuinit cpu_init(void)
{
	/*
	 * Store processor id in lowcore (used e.g. in timer_interrupt)
	 */
	get_cpu_id(&S390_lowcore.cpu_id);

	/*
	 * Force FPU initialization:
	 */
	clear_thread_flag(TIF_USEDFPU);
	clear_used_math();

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);
}
Example #16
/*
 * Activate a secondary processor.  Very minimal; don't add anything
 * to this path without knowing what you're doing, since SMP booting
 * is pretty fragile.
 */
static void start_secondary(void)
{
	int cpuid;

	preempt_disable();

	cpuid = smp_processor_id();

	/* Set our thread pointer appropriately. */
	set_my_cpu_offset(__per_cpu_offset[cpuid]);

	/*
	 * In large machines even this will slow us down, since we
	 * will be contending for the printk spinlock.
	 */
	/* printk(KERN_DEBUG "Initializing CPU#%d\n", cpuid); */

	/* Initialize the current asid for our first page table. */
	__this_cpu_write(current_asid, min_asid);

	/* Set up this thread as another owner of the init_mm */
	mmgrab(&init_mm);
	current->active_mm = &init_mm;
	if (current->mm)
		BUG();
	enter_lazy_tlb(&init_mm, current);

	/* Allow hypervisor messages to be received */
	init_messaging();
	local_irq_enable();

	/* Indicate that we're ready to come up. */
	/* Must not do this before we're ready to receive messages */
	if (cpumask_test_and_set_cpu(cpuid, &cpu_started)) {
		pr_warn("CPU#%d already started!\n", cpuid);
		for (;;)
			local_irq_enable();
	}

	smp_nap();
}
Example #17
/*
 * cpu_init() initializes state that is per-CPU.
 */
void __devinit cpu_init (void)
{
	int addr = hard_smp_processor_id();

	/*
	 * Store processor id in lowcore (used e.g. in timer_interrupt)
	 */
	get_cpu_id(&S390_lowcore.cpu_data.cpu_id);
	S390_lowcore.cpu_data.cpu_addr = addr;

	/*
	 * Force FPU initialization:
	 */
	clear_thread_flag(TIF_USEDFPU);
	clear_used_math();

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	if (current->mm)
		BUG();
	enter_lazy_tlb(&init_mm, current);
}
Example #18
/*
 * Called by secondaries to update state and initialize CPU registers.
 */
static void __init
smp_cpu_init(int cpunum)
{
	extern int init_per_cpu(int);  /* arch/parisc/kernel/processor.c */
	extern void init_IRQ(void);    /* arch/parisc/kernel/irq.c */
	extern void start_cpu_itimer(void); /* arch/parisc/kernel/time.c */

	/* Set modes and Enable floating point coprocessor */
	(void) init_per_cpu(cpunum);

	disable_sr_hashing();

	mb();

	/* Well, support 2.4 linux scheme as well. */
	if (cpu_online(cpunum))	{
		extern void machine_halt(void); /* arch/parisc.../process.c */

		printk(KERN_CRIT "CPU#%d already initialized!\n", cpunum);
		machine_halt();
	}

	notify_cpu_starting(cpunum);

	ipi_call_lock();
	set_cpu_online(cpunum, true);
	ipi_call_unlock();

	/* Initialise the idle task for this CPU */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);

	init_IRQ();   /* make sure no IRQs are enabled or pending */
	start_cpu_itimer();
}