Example 1
static void __init
do_boot_cpu (int sapicid)
{
	struct task_struct *idle;
	int timeout, cpu;

	cpu = ++cpucount;
	/*
	 * We can't use kernel_thread() since we must avoid
	 * rescheduling the child.
	 */
	if (fork_by_hand() < 0)
		panic("failed fork for CPU %d", cpu);

	/*
	 * We remove it from the pidhash and the runqueue
	 * once we've got the process:
	 */
	idle = init_task.prev_task;
	if (!idle)
		panic("No idle process for CPU %d", cpu);

	task_set_cpu(idle, cpu);	/* we schedule the first task manually */

	ia64_cpu_to_sapicid[cpu] = sapicid;

	del_from_runqueue(idle);
	unhash_process(idle);
	init_tasks[cpu] = idle;

	Dprintk("Sending wakeup vector %u to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid);

	platform_send_ipi(cpu, ap_wakeup_vector, IA64_IPI_DM_INT, 0);

	/*
	 * Wait 10s total for the AP to start
	 */
	Dprintk("Waiting on callin_map ...");
	for (timeout = 0; timeout < 100000; timeout++) {
		if (test_bit(cpu, &cpu_callin_map))
			break;  /* It has booted */
		udelay(100);
	}
	Dprintk("\n");

	if (test_bit(cpu, &cpu_callin_map)) {
		/* number CPUs logically, starting from 1 (BSP is 0) */
		printk("CPU%d: ", cpu);
		/*print_cpu_info(&cpu_data[cpu]); */
		printk("CPU has booted.\n");
	} else {
		printk(KERN_ERR "Processor 0x%x/0x%x is stuck.\n", cpu, sapicid);
		ia64_cpu_to_sapicid[cpu] = -1;
		cpucount--;
	}
}
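Example 1 depends on a fork_by_hand() helper that is not shown. Examples 6, 7 and 9 below open-code the same idea, which suggests a minimal sketch along these lines (an assumption pieced together from those examples, not the ia64 source; note that Example 5's PA-RISC variant instead returns the new task pointer):

static int __init
fork_by_hand (void)
{
	/*
	 * Sketch: clone the boot task's VM with no usable register
	 * frame.  The child is never rescheduled through the normal
	 * path, so the regs do not matter (see the comment in
	 * do_boot_cpu() above).
	 */
	return do_fork(CLONE_VM | CLONE_PID, 0, 0, 0);
}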
Example 2
task_t * __devinit fork_idle(int cpu)
{
	task_t *task;
	struct pt_regs regs;

	task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL, NULL, 0);
	if (!task)
		return ERR_PTR(-ENOMEM);
	init_idle(task, cpu);
	unhash_process(task);
	return task;
}
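fork_idle() is the 2.6-era consolidation of this by-hand pattern: copy_process() clones the boot thread without waking it, init_idle() binds it to the target CPU, and unhash_process() hides it from the pid hash. A hypothetical caller on an architecture's bring-up path might look like this (sketch; names and error handling are illustrative):

	/* Fragment of a hypothetical boot path; `cpu` is the logical id. */
	task_t *idle = fork_idle(cpu);

	if (IS_ERR(idle))
		panic("failed fork for CPU %d", cpu);
	/* ... hand `idle` to the low-level CPU wake-up code ... */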
Example 3
static void release_task(struct task_struct * p)
{
    if (p != current) {
#ifdef CONFIG_SMP
        /*
         * Wait to make sure the process isn't on the
         * runqueue (active on some other CPU still)
         */
        for (;;) {
            task_lock(p);
            if (!task_has_cpu(p))
                break;
            task_unlock(p);
            do {
                cpu_relax();
                barrier();
            } while (task_has_cpu(p));
        }
        task_unlock(p);
#endif
        atomic_dec(&p->user->processes);
        security_task_free(p);
        free_uid(p->user);
        unhash_process(p);

        release_thread(p);
        current->cmin_flt += p->min_flt + p->cmin_flt;
        current->cmaj_flt += p->maj_flt + p->cmaj_flt;
        current->cnswap += p->nswap + p->cnswap;
        /*
         * Potentially available timeslices are retrieved
         * here - this way the parent does not get penalized
         * for creating too many processes.
         *
         * (this cannot be used to artificially 'generate'
         * timeslices, because any timeslice recovered here
         * was given away by the parent in the first place.)
         */
        current->counter += p->counter;
        if (current->counter >= MAX_COUNTER)
            current->counter = MAX_COUNTER;
        p->pid = 0;
        free_task_struct(p);
    } else {
        printk("task releasing itself\n");
    }
}
Example 4
static void release_task(struct task_struct * p)
{
	if (p == current)
		BUG();
#ifdef CONFIG_SMP
	wait_task_inactive(p);
#endif
	atomic_dec(&p->user->processes);
	free_uid(p->user);
	unhash_process(p);

	release_thread(p);
	current->cmin_flt += p->min_flt + p->cmin_flt;
	current->cmaj_flt += p->maj_flt + p->cmaj_flt;
	current->cnswap += p->nswap + p->cnswap;
	sched_exit(p);
	p->pid = 0;
	free_task_struct(p);
}
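Example 4 is the O(1)-scheduler rewrite of Example 3: the open-coded task_lock()/task_has_cpu() spin is factored into wait_task_inactive(), and the timeslice merging into sched_exit(). A minimal sketch of what such a wait_task_inactive() does (modeled on that era's scheduler, not quoted verbatim):

void wait_task_inactive(task_t * p)
{
	unsigned long flags;
	runqueue_t *rq;

repeat:
	rq = task_rq_lock(p, &flags);
	if (unlikely(p->array)) {
		/* Still queued (possibly preempted): drop the lock,
		 * spin politely, and re-check under the lock. */
		task_rq_unlock(rq, &flags);
		cpu_relax();
		goto repeat;
	}
	task_rq_unlock(rq, &flags);
}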
Example 5
/*
 * Bring one cpu online.
 */
int __init smp_boot_one_cpu(int cpuid, int cpunum)
{
	struct task_struct *idle;
	long timeout;

	/* 
	 * Create an idle task for this CPU.  Note the address we'd give 
	 * to kernel_thread is irrelevant -- it's going to start
	 * where OS_BOOT_RENDEVZ vector in SAL says to start.  But
	 * this gets all the other task-y sort of data structures set
	 * up like we wish.   We need to pull the just created idle task 
	 * off the run queue and stuff it into the init_tasks[] array.  
	 * Sheesh . . .
	 */

	idle = fork_by_hand();
	if (IS_ERR(idle))
		panic("SMP: fork failed for CPU:%d", cpuid);

	wake_up_forked_process(idle);
	init_idle(idle, cpunum);
	unhash_process(idle);
	idle->thread_info->cpu = cpunum;

	/* Let _start know what logical CPU we're booting
	** (offset into init_tasks[],cpu_data[])
	*/
	cpu_now_booting = cpunum;

	/* 
	** boot strap code needs to know the task address since
	** it also contains the process stack.
	*/
	smp_init_current_idle_task = idle;
	mb();

	/*
	** This gets PDC to release the CPU from a very tight loop.
	** See MEM_RENDEZ comments in head.S.
	*/
	__raw_writel(IRQ_OFFSET(TIMER_IRQ), cpu_data[cpunum].hpa);
	mb();

	/* 
	 * OK, wait a bit for that CPU to finish staggering about. 
	 * Slave will set a bit when it reaches smp_cpu_init().
	 * Once the "monarch CPU" sees the bit change, it can move on.
	 */
	for (timeout = 0; timeout < 10000; timeout++) {
		if (cpu_online(cpunum)) {
			/* Which implies Slave has started up */
			cpu_now_booting = 0;
			smp_init_current_idle_task = NULL;
			goto alive;
		}
		udelay(100);
		barrier();
	}

	put_task_struct(idle);
	idle = NULL;

	printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid);
	return -1;

alive:
	/* Remember the Slave data */
#if (kDEBUG>=100)
	printk(KERN_DEBUG "SMP: CPU:%d (num %d) came alive after %ld _us\n",
		cpuid,  cpunum, timeout * 100);
#endif /* kDEBUG */
#ifdef ENTRY_SYS_CPUS
	cpu_data[cpunum].state = STATE_RUNNING;
#endif
	return 0;
}
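The polling loop at the end waits for the slave side of a handshake; the comment says the slave sets a bit when it reaches smp_cpu_init(). A sketch of that side (illustrative, not the actual PA-RISC source):

void __init smp_cpu_init(int cpunum)
{
	/* ... per-CPU initialization ... */

	/* Assumption: 2.4-style unsigned-long online bitmap, as in
	 * Example 6's `cpu_online_map = 1UL`.  Setting our bit is what
	 * releases the monarch from its cpu_online() polling loop. */
	set_bit(cpunum, &cpu_online_map);
	mb();	/* publish the bit before entering the idle loop */
}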
Example 6
void __init smp_boot_cpus(void)
{
	extern struct task_struct *current_set[NR_CPUS];
	int i, cpu_nr;
	struct task_struct *p;

	printk("Entering SMP Mode...\n");
	smp_num_cpus = 1;
	smp_store_cpu_info(0);
	cpu_online_map = 1UL;

	/*
	 * assume for now that the first cpu booted is
	 * cpu 0, the master -- Cort
	 */
	cpu_callin_map[0] = 1;
	current->processor = 0;

	for (i = 0; i < NR_CPUS; i++) {
		prof_counter[i] = 1;
		prof_multiplier[i] = 1;
	}

	/*
	 * XXX very rough, assumes 20 bus cycles to read a cache line,
	 * timebase increments every 4 bus cycles, 32kB L1 data cache.
	 */
	cacheflush_time = 5 * 1024;

	smp_ops = ppc_md.smp_ops;
	if (smp_ops == NULL) {
		printk("SMP not supported on this machine.\n");
		return;
	}

#ifndef CONFIG_750_SMP
	/* check for 750's, they just don't work with linux SMP.
	 * If you actually have 750 SMP hardware and want to try to get
	 * it to work, send me a patch to make it work and
	 * I'll make CONFIG_750_SMP a config option.  -- Troy ([email protected])
	 */
	if ( PVR_VER(mfspr(PVR)) == 8 ){
		printk("SMP not supported on 750 cpus. %s line %d\n",
				__FILE__, __LINE__);
		return;
	}
#endif


	/* Probe arch for CPUs */
	cpu_nr = smp_ops->probe();

	/* Backup CPU 0 state */
	__save_cpu_setup();
	
	/*
	 * only check for cpus we know exist.  We keep the callin map
	 * with cpus at the bottom -- Cort
	 */
	if (cpu_nr > max_cpus)
		cpu_nr = max_cpus;
	for (i = 1; i < cpu_nr; i++) {
		int c;
		struct pt_regs regs;
		
		/* create a process for the processor */
		/* only regs.msr is actually used, and 0 is OK for it */
		memset(&regs, 0, sizeof(struct pt_regs));
		if (do_fork(CLONE_VM|CLONE_PID, 0, &regs, 0) < 0)
			panic("failed fork for CPU %d", i);
		p = init_task.prev_task;
		if (!p)
			panic("No idle task for CPU %d", i);
		init_idle(p, i);

		unhash_process(p);
		init_tasks[i] = p;

		p->processor = i;
		p->cpus_runnable = 1 << i; /* we schedule the first task manually */
		current_set[i] = p;

		/*
		 * There was a cache flush loop here to flush the cache
		 * to memory for the first 8MB of RAM.  The cache flush
		 * has been pushed into the kick_cpu function for those
		 * platforms that need it.
		 */

		/* wake up cpus */
		smp_ops->kick_cpu(i);
		
		/*
		 * wait to see if the cpu made a callin (is actually up).
		 * use this value that I found through experimentation.
		 * -- Cort
		 */
		for ( c = 10000; c && !cpu_callin_map[i] ; c-- )
			udelay(100);
		
		if ( cpu_callin_map[i] )
		{
			char buf[32];
			sprintf(buf, "found cpu %d", i);
			if (ppc_md.progress) ppc_md.progress(buf, 0x350+i);
			printk("Processor %d found.\n", i);
			smp_num_cpus++;
		} else {
			char buf[32];
			sprintf(buf, "didn't find cpu %d", i);
			if (ppc_md.progress) ppc_md.progress(buf, 0x360+i);
			printk("Processor %d is stuck.\n", i);
		}
	}

	/* Setup CPU 0 last (important) */
	smp_ops->setup_cpu(0);
	
	if (smp_num_cpus < 2)
		smp_tb_synchronized = 1;
}
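The master polls cpu_callin_map[i]; the secondary's side of that handshake is not shown. A sketch of it (illustrative; the real ppc callin path also syncs timebases and other per-CPU state):

/* Sketch of the secondary CPU's callin (hypothetical smp_callin()): */
void __init smp_callin(void)
{
	int cpu = current->processor;

	smp_store_cpu_info(cpu);	/* mirror what the master did for CPU 0 */
	cpu_callin_map[cpu] = 1;	/* ends the master's udelay() poll above */
}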
Example 7
void __init smp_boot_cpus(void)
{
	extern struct task_struct *current_set[NR_CPUS];
	int i, cpu_nr;
	struct task_struct *p;

	printk("Entering SMP Mode...\n");
	smp_num_cpus = 1;
	smp_store_cpu_info(0);
	cpu_online_map = 1UL;

	/*
	 * assume for now that the first cpu booted is
	 * cpu 0, the master -- Cort
	 */
	cpu_callin_map[0] = 1;
	current->processor = 0;

	init_idle();

	for (i = 0; i < NR_CPUS; i++) {
		prof_counter[i] = 1;
		prof_multiplier[i] = 1;
	}

	/*
	 * XXX very rough, assumes 20 bus cycles to read a cache line,
	 * timebase increments every 4 bus cycles, 32kB L1 data cache.
	 */
	cacheflush_time = 5 * 1024;

	smp_ops = ppc_md.smp_ops;
	if (smp_ops == NULL) {
		printk("SMP not supported on this machine.\n");
		return;
	}

	/* Probe arch for CPUs */
	cpu_nr = smp_ops->probe();

	/*
	 * only check for cpus we know exist.  We keep the callin map
	 * with cpus at the bottom -- Cort
	 */
	if (cpu_nr > max_cpus)
		cpu_nr = max_cpus;
	for (i = 1; i < cpu_nr; i++) {
		int c;
		struct pt_regs regs;
		
		/* create a process for the processor */
		/* we don't care about the values in regs since we'll
		   never reschedule the forked task. */
		/* We DO care about one bit in the pt_regs we
		   pass to do_fork.  That is the MSR_FP bit in 
		   regs.msr.  If that bit is on, then do_fork
		   (via copy_thread) will call giveup_fpu.
		   giveup_fpu will get a pointer to our (current's)
		   last register savearea via current->thread.regs 
		   and using that pointer will turn off the MSR_FP,
		   MSR_FE0 and MSR_FE1 bits.  At this point, this 
		   pointer is pointing to some arbitrary point within
		   our stack. */

		memset(&regs, 0, sizeof(struct pt_regs));
		
		if (do_fork(CLONE_VM|CLONE_PID, 0, &regs, 0) < 0)
			panic("failed fork for CPU %d", i);
		p = init_task.prev_task;
		if (!p)
			panic("No idle task for CPU %d", i);
		del_from_runqueue(p);
		unhash_process(p);
		init_tasks[i] = p;

		p->processor = i;
		p->cpus_runnable = 1 << i; /* we schedule the first task manually */
		current_set[i] = p;

		/*
		 * There was a cache flush loop here to flush the cache
		 * to memory for the first 8MB of RAM.  The cache flush
		 * has been pushed into the kick_cpu function for those
		 * platforms that need it.
		 */

		/* wake up cpus */
		smp_ops->kick_cpu(i);
		
		/*
		 * wait to see if the cpu made a callin (is actually up).
		 * use this value that I found through experimentation.
		 * -- Cort
		 */
		for ( c = 1000; c && !cpu_callin_map[i] ; c-- )
			udelay(100);
		
		if ( cpu_callin_map[i] )
		{
			char buf[32];
			sprintf(buf, "found cpu %d", i);
			if (ppc_md.progress) ppc_md.progress(buf, 0x350+i);
			printk("Processor %d found.\n", i);
			smp_num_cpus++;
		} else {
			char buf[32];
			sprintf(buf, "didn't find cpu %d", i);
			if (ppc_md.progress) ppc_md.progress(buf, 0x360+i);
			printk("Processor %d is stuck.\n", i);
		}
	}

	/* Setup CPU 0 last (important) */
	smp_ops->setup_cpu(0);
	
	if (smp_num_cpus < 2)
		smp_tb_synchronized = 1;
}
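The long MSR_FP comment boils down to a single guard inside copy_thread(); zeroing regs keeps that guard from firing. A sketch of the fragment being avoided (modeled on 2.4 ppc, not quoted verbatim):

	/* Inside copy_thread() (sketch): with MSR_FP set in the child's
	 * pt_regs, the parent's FP state is flushed through
	 * current->thread.regs -- which, on this boot path, points at
	 * an arbitrary spot in the stack.  memset(&regs, 0, ...) above
	 * keeps the bit clear so this never runs. */
	if (regs->msr & MSR_FP)
		giveup_fpu(current);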
Example 8
void __init smp_boot_cpus(void)
{
	int i;

	smp_num_cpus = prom_setup_smp();
	init_new_context(current, &init_mm);
	current->processor = 0;
	atomic_set(&cpus_booted, 1);  /* Master CPU is already booted... */
	init_idle();
	for (i = 1; i < smp_num_cpus; i++) {
		struct task_struct *p;
		struct pt_regs regs;
		printk("Starting CPU %d... ", i);

		/* Spawn a new process normally.  Grab a pointer to
		   its task struct so we can mess with it.  Zero regs so
		   copy_thread() sees no stale flag bits (see the MSR_FP
		   comment in Example 7 for the ppc analogue). */
		memset(&regs, 0, sizeof(struct pt_regs));
		if (do_fork(CLONE_VM|CLONE_PID, 0, &regs, 0) < 0)
			panic("failed fork for CPU %d", i);
		p = init_task.prev_task;

		/* Schedule the first task manually */
		p->processor = i;
		p->cpus_runnable = 1 << i; /* we schedule the first task manually */

		/* Attach to the address space of init_task. */
		atomic_inc(&init_mm.mm_count);
		p->active_mm = &init_mm;
		init_tasks[i] = p;

		del_from_runqueue(p);
		unhash_process(p);

		prom_boot_secondary(i,
				    (unsigned long)p + KERNEL_STACK_SIZE - 32,
				    (unsigned long)p);

#if 0

		/* This is copied from the ip-27 code in the mips64 tree */

		struct task_struct *p;

		/*
		 * The following code is purely to make sure
		 * Linux can schedule processes on this slave.
		 */
		kernel_thread(0, NULL, CLONE_PID);
		p = init_task.prev_task;
		sprintf(p->comm, "%s%d", "Idle", i);
		init_tasks[i] = p;
		p->processor = i;
		p->cpus_runnable = 1 << i; /* we schedule the first task manually */
		del_from_runqueue(p);
		unhash_process(p);
		/* Attach to the address space of init_task. */
		atomic_inc(&init_mm.mm_count);
		p->active_mm = &init_mm;
		prom_boot_secondary(i, 
				    (unsigned long)p + KERNEL_STACK_SIZE - 32,
				    (unsigned long)p);
#endif
	}

	/* Wait for everyone to come up */
	while (atomic_read(&cpus_booted) != smp_num_cpus);
}
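The closing while() spins on cpus_booted, which each secondary increments once it is up. A sketch of that side (illustrative names, modeled on MIPS bring-up code of the period):

asmlinkage void __init start_secondary(void)
{
	/* ... low-level per-CPU init ... */

	atomic_inc(&cpus_booted);	/* lets the master's final while() fall through */
	cpu_idle();			/* never returns */
}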
Example 9
void __init smp_boot_cpus(void)
{
	extern struct current_set_struct current_set[];
	extern void __secondary_start_chrp(void);
	int i, cpu_nr;
	struct task_struct *p;
	unsigned long sp;

	printk("Entering SMP Mode...\n");
	PPCDBG(PPCDBG_SMP, "smp_boot_cpus: start.  NR_CPUS = 0x%lx\n", NR_CPUS);

	smp_num_cpus = 1;
	smp_store_cpu_info(0);
	cpu_online_map = 1UL;

	/*
	 * assume for now that the first cpu booted is
	 * cpu 0, the master -- Cort
	 */
	cpu_callin_map[0] = 1;
	current->processor = 0;

	init_idle();

	for (i = 0; i < NR_CPUS; i++) {
		paca[i].prof_counter = 1;
		paca[i].prof_multiplier = 1;
		if (i != 0) {
			/*
			 * Processor 0's segment table is statically
			 * initialized to real address STAB0_PHYS_ADDR.
			 * The other processors' tables are created and
			 * initialized here.
			 */
			paca[i].xStab_data.virt = (unsigned long)&stab_array[PAGE_SIZE * (i-1)];
			memset((void *)paca[i].xStab_data.virt, 0, PAGE_SIZE); 
			paca[i].xStab_data.real = __v2a(paca[i].xStab_data.virt);
			paca[i].default_decr = tb_ticks_per_jiffy / decr_overclock;
		}
	}

	/*
	 * XXX very rough, assumes 20 bus cycles to read a cache line,
	 * timebase increments every 4 bus cycles, 32kB L1 data cache.
	 */
	cacheflush_time = 5 * 1024;

	/* Probe arch for CPUs */
	cpu_nr = ppc_md.smp_probe();

	printk("Probe found %d CPUs\n", cpu_nr);

	/*
	 * only check for cpus we know exist.  We keep the callin map
	 * with cpus at the bottom -- Cort
	 */
	if (cpu_nr > max_cpus)
		cpu_nr = max_cpus;

#ifdef CONFIG_PPC_ISERIES
	smp_space_timers( cpu_nr );
#endif

	printk("Waiting for %d CPUs\n", cpu_nr-1);

	for ( i = 1 ; i < cpu_nr; i++ ) {
		int c;
		struct pt_regs regs;
		
		/* create a process for the processor */
		/* we don't care about the values in regs since we'll
		   never reschedule the forked task. */
		/* We DO care about one bit in the pt_regs we
		   pass to do_fork.  That is the MSR_FP bit in
		   regs.msr.  If that bit is on, then do_fork
		   (via copy_thread) will call giveup_fpu.
		   giveup_fpu will get a pointer to our (current's)
		   last register savearea via current->thread.regs
		   and using that pointer will turn off the MSR_FP,
		   MSR_FE0 and MSR_FE1 bits.  At this point, this
		   pointer is pointing to some arbitrary point within
		   our stack */

		memset(&regs, 0, sizeof(struct pt_regs));

		if (do_fork(CLONE_VM|CLONE_PID, 0, &regs, 0) < 0)
			panic("failed fork for CPU %d", i);
		p = init_task.prev_task;
		if (!p)
			panic("No idle task for CPU %d", i);

		PPCDBG(PPCDBG_SMP,"\tProcessor %d, task = 0x%lx\n", i, p);

		del_from_runqueue(p);
		unhash_process(p);
		init_tasks[i] = p;

		p->processor = i;
		p->cpus_runnable = 1 << i; /* we schedule the first task manually */
		current_set[i].task = p;
		sp = ((unsigned long)p) + sizeof(union task_union)
			- STACK_FRAME_OVERHEAD;
		current_set[i].sp_real = (void *)__v2a(sp);

		/* wake up cpus */
		ppc_md.smp_kick_cpu(i);

		/*
		 * wait to see if the cpu made a callin (is actually up).
		 * use this value that I found through experimentation.
		 * -- Cort
		 */
		for ( c = 5000; c && !cpu_callin_map[i] ; c-- ) {
			udelay(100);
		}
		
		if ( cpu_callin_map[i] )
		{
			printk("Processor %d found.\n", i);
			PPCDBG(PPCDBG_SMP, "\tProcessor %d found.\n", i);
			/* this sync's the decr's -- Cort */
			smp_num_cpus++;
		} else {
			printk("Processor %d is stuck.\n", i);
			PPCDBG(PPCDBG_SMP, "\tProcessor %d is stuck.\n", i);
		}
	}

	/* Setup CPU 0 last (important) */
	ppc_md.smp_setup_cpu(0);
	
	if (smp_num_cpus < 2) {
	        tb_last_stamp = get_tb();
		smp_tb_synchronized = 1;
	}
}
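The sp computation relies on the 2.4 layout in which the task struct and its kernel stack share a single allocation; the initial stack pointer is the top of that block minus the ABI frame overhead. Roughly (sketch of the generic 2.4 definition):

union task_union {
	struct task_struct task;
	unsigned long stack[INIT_TASK_SIZE / sizeof(long)];
};

/* So, for a task p:
 *   sp = (unsigned long)p + sizeof(union task_union) - STACK_FRAME_OVERHEAD
 * points just below the top of p's kernel stack, and __v2a() converts
 * that kernel-virtual address to the real address the secondary needs
 * before its MMU is up. */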