static void __init
do_boot_cpu (int sapicid)
{
	struct task_struct *idle;
	int timeout, cpu;

	cpu = ++cpucount;
	/*
	 * We can't use kernel_thread() since we must not
	 * reschedule the child.
	 */
	if (fork_by_hand() < 0)
		panic("failed fork for CPU %d", cpu);

	/*
	 * We remove it from the pidhash and the runqueue
	 * once we've got the process:
	 */
	idle = init_task.prev_task;
	if (!idle)
		panic("No idle process for CPU %d", cpu);

	task_set_cpu(idle, cpu);	/* we schedule the first task manually */

	ia64_cpu_to_sapicid[cpu] = sapicid;

	del_from_runqueue(idle);
	unhash_process(idle);
	init_tasks[cpu] = idle;

	Dprintk("Sending wakeup vector %u to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid);

	platform_send_ipi(cpu, ap_wakeup_vector, IA64_IPI_DM_INT, 0);

	/*
	 * Wait up to 10 seconds (100000 * 100us) for the AP to start
	 */
	Dprintk("Waiting on callin_map ...");
	for (timeout = 0; timeout < 100000; timeout++) {
		if (test_bit(cpu, &cpu_callin_map))
			break;  /* It has booted */
		udelay(100);
	}
	Dprintk("\n");

	if (test_bit(cpu, &cpu_callin_map)) {
		/* number CPUs logically, starting from 1 (BSP is 0) */
		printk("CPU%d: ", cpu);
		/*print_cpu_info(&cpu_data[cpu]); */
		printk("CPU has booted.\n");
	} else {
		printk(KERN_ERR "Processor 0x%x/0x%x is stuck.\n", cpu, sapicid);
		ia64_cpu_to_sapicid[cpu] = -1;
		cpucount--;
	}
}
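The fork_by_hand() helper called above is not part of this listing; in 2.4-era ports it was typically a thin wrapper around do_fork() (a sketch of that common pattern, not necessarily the exact ia64 version):

/* Sketch (assumption: the ia64 helper followed the pattern shared by
 * other 2.4 ports).  do_fork() with CLONE_VM|CLONE_PID clones the boot
 * CPU's context for the idle task; regs can hold anything, since the
 * child is never actually rescheduled through it. */
static int __init fork_by_hand(void)
{
	struct pt_regs regs;

	return do_fork(CLONE_VM|CLONE_PID, 0, &regs, 0);
}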
void __init smp_boot_cpus(void)
{
	extern struct task_struct *current_set[NR_CPUS];
	int i, cpu_nr;
	struct task_struct *p;

	printk("Entering SMP Mode...\n");
	smp_num_cpus = 1;
	smp_store_cpu_info(0);
	cpu_online_map = 1UL;

	/*
	 * assume for now that the first cpu booted is
	 * cpu 0, the master -- Cort
	 */
	cpu_callin_map[0] = 1;
	current->processor = 0;

	init_idle();

	for (i = 0; i < NR_CPUS; i++) {
		prof_counter[i] = 1;
		prof_multiplier[i] = 1;
	}

	/*
	 * XXX very rough, assumes 20 bus cycles to read a cache line,
	 * timebase increments every 4 bus cycles, 32kB L1 data cache.
	 */
	cacheflush_time = 5 * 1024;
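	/*
	 * Where 5 * 1024 comes from: a 32kB L1 with (assumed) 32-byte
	 * lines holds 1024 lines; at 20 bus cycles per line and one
	 * timebase tick per 4 bus cycles, each line costs 5 ticks, so a
	 * full flush costs roughly 5 * 1024 timebase ticks.
	 */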

	smp_ops = ppc_md.smp_ops;
	if (smp_ops == NULL) {
		printk("SMP not supported on this machine.\n");
		return;
	}

#ifndef CONFIG_750_SMP
	/* Check for 750s; they just don't work with Linux SMP.
	 * If you actually have 750 SMP hardware and want to try to get
	 * it to work, send me a patch to make it work and
	 * I'll make CONFIG_750_SMP a config option.  -- Troy ([email protected])
	 */
	if (PVR_VER(mfspr(PVR)) == 8) {
		printk("SMP not supported on 750 cpus. %s line %d\n",
				__FILE__, __LINE__);
		return;
	}
#endif

	/* Probe arch for CPUs */
	cpu_nr = smp_ops->probe();

	/* Back up CPU 0 state */
	__save_cpu_setup();

	/*
	 * only check for cpus we know exist.  We keep the callin map
	 * with cpus at the bottom -- Cort
	 */
	if (cpu_nr > max_cpus)
		cpu_nr = max_cpus;
	for (i = 1; i < cpu_nr; i++) {
		int c;
		struct pt_regs regs;

		/* create a process for the processor */
		/* only regs.msr is actually used, and 0 is OK for it */
		memset(&regs, 0, sizeof(struct pt_regs));
		if (do_fork(CLONE_VM|CLONE_PID, 0, &regs, 0) < 0)
			panic("failed fork for CPU %d", i);
		p = init_task.prev_task;
		if (!p)
			panic("No idle task for CPU %d", i);
		del_from_runqueue(p);
		unhash_process(p);
		init_tasks[i] = p;

		p->processor = i;
		p->cpus_runnable = 1 << i; /* we schedule the first task manually */
		current_set[i] = p;

		/*
		 * There was a cache flush loop here to flush the cache
		 * to memory for the first 8MB of RAM.  The cache flush
		 * has been pushed into the kick_cpu function for those
		 * platforms that need it.
		 */

		/* wake up cpus */
		smp_ops->kick_cpu(i);

		/*
		 * wait to see if the cpu made a callin (is actually up).
		 * use this value that I found through experimentation.
		 * -- Cort
		 */
		for (c = 10000; c && !cpu_callin_map[i]; c--)
			udelay(100);

		if (cpu_callin_map[i]) {
			char buf[32];
			sprintf(buf, "found cpu %d", i);
			if (ppc_md.progress) ppc_md.progress(buf, 0x350+i);
			printk("Processor %d found.\n", i);
			smp_num_cpus++;
		} else {
			char buf[32];
			sprintf(buf, "didn't find cpu %d", i);
			if (ppc_md.progress) ppc_md.progress(buf, 0x360+i);
			printk("Processor %d is stuck.\n", i);
		}
	}

	/* Setup CPU 0 last (important) */
	smp_ops->setup_cpu(0);

	if (smp_num_cpus < 2)
		smp_tb_synchronized = 1;
}
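The other half of the cpu_callin_map handshake runs on the secondary CPU. A sketch of its shape (assumption: condensed from the 2.4 ppc port's smp_callin(), not quoted from this tree) -- note that each secondary runs setup_cpu() for itself here, which is why smp_boot_cpus() runs setup_cpu(0) for the boot CPU last:

/* Sketch of the secondary's callin path (condensed; assumption as
 * noted above).  Setting cpu_callin_map[cpu] releases the udelay()
 * polling loop in smp_boot_cpus(). */
void __init smp_callin(void)
{
	int cpu = current->processor;

	smp_store_cpu_info(cpu);	/* mirror what the boot CPU did for CPU 0 */
	smp_ops->setup_cpu(cpu);	/* platform per-CPU setup */
	cpu_callin_map[cpu] = 1;	/* tell smp_boot_cpus() we are alive */
}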
Example #3
void __init smp_boot_cpus(void)
{
	extern struct task_struct *current_set[NR_CPUS];
	int i, cpu_nr;
	struct task_struct *p;

	printk("Entering SMP Mode...\n");
	smp_num_cpus = 1;
	smp_store_cpu_info(0);
	cpu_online_map = 1UL;

	/*
	 * assume for now that the first cpu booted is
	 * cpu 0, the master -- Cort
	 */
	cpu_callin_map[0] = 1;
	current->processor = 0;

	init_idle();

	for (i = 0; i < NR_CPUS; i++) {
		prof_counter[i] = 1;
		prof_multiplier[i] = 1;
	}

	/*
	 * XXX very rough, assumes 20 bus cycles to read a cache line,
	 * timebase increments every 4 bus cycles, 32kB L1 data cache.
	 */
	cacheflush_time = 5 * 1024;

	smp_ops = ppc_md.smp_ops;
	if (smp_ops == NULL) {
		printk("SMP not supported on this machine.\n");
		return;
	}

	/* Probe arch for CPUs */
	cpu_nr = smp_ops->probe();

	/*
	 * only check for cpus we know exist.  We keep the callin map
	 * with cpus at the bottom -- Cort
	 */
	if (cpu_nr > max_cpus)
		cpu_nr = max_cpus;
	for (i = 1; i < cpu_nr; i++) {
		int c;
		struct pt_regs regs;
		
		/* create a process for the processor */
		/* we don't care about the values in regs since we'll
		   never reschedule the forked task. */
		/* We DO care about one bit in the pt_regs we
		   pass to do_fork.  That is the MSR_FP bit in 
		   regs.msr.  If that bit is on, then do_fork
		   (via copy_thread) will call giveup_fpu.
		   giveup_fpu will get a pointer to our (current's)
		   last register savearea via current->thread.regs 
		   and using that pointer will turn off the MSR_FP,
		   MSR_FE0 and MSR_FE1 bits.  At this point, this 
		   pointer is pointing to some arbitrary point within
		   our stack. */

		memset(&regs, 0, sizeof(struct pt_regs));
		
		if (do_fork(CLONE_VM|CLONE_PID, 0, &regs, 0) < 0)
			panic("failed fork for CPU %d", i);
		p = init_task.prev_task;
		if (!p)
			panic("No idle task for CPU %d", i);
		del_from_runqueue(p);
		unhash_process(p);
		init_tasks[i] = p;

		p->processor = i;
		p->cpus_runnable = 1 << i; /* we schedule the first task manually */
		current_set[i] = p;

		/*
		 * There was a cache flush loop here to flush the cache
		 * to memory for the first 8MB of RAM.  The cache flush
		 * has been pushed into the kick_cpu function for those
		 * platforms that need it.
		 */

		/* wake up cpus */
		smp_ops->kick_cpu(i);
		
		/*
		 * wait to see if the cpu made a callin (is actually up).
		 * use this value that I found through experimentation.
		 * -- Cort
		 */
		for (c = 1000; c && !cpu_callin_map[i]; c--)
			udelay(100);

		if (cpu_callin_map[i]) {
			char buf[32];
			sprintf(buf, "found cpu %d", i);
			if (ppc_md.progress) ppc_md.progress(buf, 0x350+i);
			printk("Processor %d found.\n", i);
			smp_num_cpus++;
		} else {
			char buf[32];
			sprintf(buf, "didn't find cpu %d", i);
			if (ppc_md.progress) ppc_md.progress(buf, 0x360+i);
			printk("Processor %d is stuck.\n", i);
		}
	}

	/* Setup CPU 0 last (important) */
	smp_ops->setup_cpu(0);
	
	if (smp_num_cpus < 2)
		smp_tb_synchronized = 1;
}
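The long MSR_FP comment above condenses to the following check (a paraphrase of the hazard exactly as the comment describes it; maybe_giveup_fpu() is an illustrative name, not a real kernel function):

/* Illustrative sketch: with regs zeroed by the memset() above, the
 * MSR_FP test is false and giveup_fpu() is never called on the bogus
 * current->thread.regs savearea pointer the comment warns about. */
static inline void maybe_giveup_fpu(struct pt_regs *regs)
{
	if (regs->msr & MSR_FP)		/* forked regs claim live FP state */
		giveup_fpu(current);	/* would clear MSR_FP, MSR_FE0, MSR_FE1 */
}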
Example #4
void __init smp_boot_cpus(void)
{
	int i;

	smp_num_cpus = prom_setup_smp();
	init_new_context(current, &init_mm);
	current->processor = 0;
	atomic_set(&cpus_booted, 1);  /* Master CPU is already booted... */
	init_idle();
	for (i = 1; i < smp_num_cpus; i++) {
		struct task_struct *p;
		struct pt_regs regs;
		printk("Starting CPU %d... ", i);

		/* Spawn a new process normally.  Grab a pointer to
		   its task struct so we can mess with it. */
		memset(&regs, 0, sizeof(regs));	/* never used to reschedule */
		if (do_fork(CLONE_VM|CLONE_PID, 0, &regs, 0) < 0)
			panic("failed fork for CPU %d", i);
		p = init_task.prev_task;
		if (!p)
			panic("No idle task for CPU %d", i);

		/* Schedule the first task manually */
		p->processor = i;
		p->cpus_runnable = 1 << i; /* we schedule the first task manually */

		/* Attach to the address space of init_task. */
		atomic_inc(&init_mm.mm_count);
		p->active_mm = &init_mm;
		init_tasks[i] = p;

		del_from_runqueue(p);
		unhash_process(p);

		prom_boot_secondary(i,
				    (unsigned long)p + KERNEL_STACK_SIZE - 32,
				    (unsigned long)p);
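		/* The second argument is the secondary's initial stack
		 * pointer: the top of the idle task's kernel stack, less a
		 * 32-byte pad; the third is presumably used as its thread
		 * (gp) pointer, since it is the task struct itself. */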

#if 0

		/* This is copied from the ip-27 code in the mips64 tree */

		struct task_struct *p;

		/*
		 * The following code is purely to make sure
		 * Linux can schedule processes on this slave.
		 */
		kernel_thread(0, NULL, CLONE_PID);
		p = init_task.prev_task;
		sprintf(p->comm, "%s%d", "Idle", i);
		init_tasks[i] = p;
		p->processor = i;
		p->cpus_runnable = 1 << i; /* we schedule the first task manually */
		del_from_runqueue(p);
		unhash_process(p);
		/* Attach to the address space of init_task. */
		atomic_inc(&init_mm.mm_count);
		p->active_mm = &init_mm;
		prom_boot_secondary(i, 
				    (unsigned long)p + KERNEL_STACK_SIZE - 32,
				    (unsigned long)p);
#endif
	}

	/* Wait for everyone to come up */
	while (atomic_read(&cpus_booted) != smp_num_cpus);
}
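The closing busy-wait is released by each secondary bumping cpus_booted. Schematically, the secondary entry path ends like this (a sketch using the names above; the real routine does per-CPU trap, TLB and timer setup first):

/* Sketch (assumption: condensed secondary entry path).  Incrementing
 * cpus_booted releases the wait loop at the end of smp_boot_cpus(). */
asmlinkage void __init start_secondary(void)
{
	/* (per-CPU trap, TLB and timer setup elided in this sketch) */
	atomic_inc(&cpus_booted);	/* announce this CPU is up */
	cpu_idle();			/* become the idle loop; never returns */
}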
Example #5
void __init smp_boot_cpus(void)
{
	extern struct current_set_struct current_set[];
	extern void __secondary_start_chrp(void);
	int i, cpu_nr;
	struct task_struct *p;
	unsigned long sp;

	printk("Entering SMP Mode...\n");
	PPCDBG(PPCDBG_SMP, "smp_boot_cpus: start.  NR_CPUS = 0x%lx\n", NR_CPUS);

	smp_num_cpus = 1;
	smp_store_cpu_info(0);
	cpu_online_map = 1UL;

	/*
	 * assume for now that the first cpu booted is
	 * cpu 0, the master -- Cort
	 */
	cpu_callin_map[0] = 1;
	current->processor = 0;

	init_idle();

	for (i = 0; i < NR_CPUS; i++) {
		paca[i].prof_counter = 1;
		paca[i].prof_multiplier = 1;
		if (i != 0) {
			/*
			 * Processor 0's segment table is statically
			 * initialized to real address STAB0_PHYS_ADDR.  The
			 * other processors' tables are created and
			 * initialized here.
			 */
			paca[i].xStab_data.virt = (unsigned long)&stab_array[PAGE_SIZE * (i-1)];
			memset((void *)paca[i].xStab_data.virt, 0, PAGE_SIZE); 
			paca[i].xStab_data.real = __v2a(paca[i].xStab_data.virt);
			paca[i].default_decr = tb_ticks_per_jiffy / decr_overclock;
		}
	}

	/*
	 * XXX very rough, assumes 20 bus cycles to read a cache line,
	 * timebase increments every 4 bus cycles, 32kB L1 data cache.
	 */
	cacheflush_time = 5 * 1024;

	/* Probe arch for CPUs */
	cpu_nr = ppc_md.smp_probe();

	printk("Probe found %d CPUs\n", cpu_nr);

	/*
	 * only check for cpus we know exist.  We keep the callin map
	 * with cpus at the bottom -- Cort
	 */
	if (cpu_nr > max_cpus)
		cpu_nr = max_cpus;

#ifdef CONFIG_PPC_ISERIES
	smp_space_timers( cpu_nr );
#endif

	printk("Waiting for %d CPUs\n", cpu_nr-1);

	for (i = 1; i < cpu_nr; i++) {
		int c;
		struct pt_regs regs;
		
		/* create a process for the processor */
		/* we don't care about the values in regs since we'll
		   never reschedule the forked task. */
		/* We DO care about one bit in the pt_regs we
		   pass to do_fork.  That is the MSR_FP bit in
		   regs.msr.  If that bit is on, then do_fork
		   (via copy_thread) will call giveup_fpu.
		   giveup_fpu will get a pointer to our (current's)
		   last register savearea via current->thread.regs
		   and using that pointer will turn off the MSR_FP,
		   MSR_FE0 and MSR_FE1 bits.  At this point, this
		   pointer is pointing to some arbitrary point within
		   our stack */

		memset(&regs, 0, sizeof(struct pt_regs));

		if (do_fork(CLONE_VM|CLONE_PID, 0, &regs, 0) < 0)
			panic("failed fork for CPU %d", i);
		p = init_task.prev_task;
		if (!p)
			panic("No idle task for CPU %d", i);

		PPCDBG(PPCDBG_SMP, "\tProcessor %d, task = 0x%lx\n", i, p);

		del_from_runqueue(p);
		unhash_process(p);
		init_tasks[i] = p;

		p->processor = i;
		p->cpus_runnable = 1 << i; /* we schedule the first task manually */
		current_set[i].task = p;
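		/* Compute the idle task's initial stack pointer: the
		 * task_struct and kernel stack share one task_union, so the
		 * usable stack top is p + sizeof(union task_union), less an
		 * initial stack frame; __v2a converts it to a real address,
		 * presumably because the secondary starts in real mode. */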
		sp = ((unsigned long)p) + sizeof(union task_union)
			- STACK_FRAME_OVERHEAD;
		current_set[i].sp_real = (void *)__v2a(sp);

		/* wake up cpus */
		ppc_md.smp_kick_cpu(i);

		/*
		 * wait to see if the cpu made a callin (is actually up).
		 * use this value that I found through experimentation.
		 * -- Cort
		 */
		for (c = 5000; c && !cpu_callin_map[i]; c--)
			udelay(100);

		if (cpu_callin_map[i]) {
			printk("Processor %d found.\n", i);
			PPCDBG(PPCDBG_SMP, "\tProcessor %d found.\n", i);
			/* this syncs the decrementers -- Cort */
			smp_num_cpus++;
		} else {
			printk("Processor %d is stuck.\n", i);
			PPCDBG(PPCDBG_SMP, "\tProcessor %d is stuck.\n", i);
		}
	}

	/* Setup CPU 0 last (important) */
	ppc_md.smp_setup_cpu(0);
	
	if (smp_num_cpus < 2) {
	        tb_last_stamp = get_tb();
		smp_tb_synchronized = 1;
	}
}
Example #6
void schedule(void)
{
    register __ptask prev;
    register __ptask next;
    struct timer_list timer;
    jiff_t timeout = 0UL;

    prev = current;

    if (prev->t_kstackm != KSTACK_MAGIC)
        panic("Process %d exceeded kernel stack limit! magic %x\n",
            prev->pid, prev->t_kstackm);

    if (intr_count > 0) {
        /* Taking a timer IRQ during another IRQ or while in kernel
         * space is quite legal. We just don't switch then. */
        printk("Aiee: scheduling in interrupt %d - %d\n",
            intr_count, prev->pid);
        goto no_sched;
    }

    /* We have to let a task exit! */
    if (prev->state == TASK_EXITING)
        goto no_sched;

    clr_irq();
    if (prev->state == TASK_INTERRUPTIBLE) {
        if (prev->signal || (prev->timeout && (prev->timeout <= jiffies))) {
            prev->timeout = 0UL;
            prev->state = TASK_RUNNING;
        }
        else {
            timeout = prev->timeout;
        }
    }
    /* Choose a task to run next */
    next = prev->next_run;
    if (prev->state != TASK_RUNNING)
        del_from_runqueue(prev);
    if (next == &init_task)
        next = next->next_run;

    set_irq();

    if (next != prev) {

        if (timeout) {
            init_timer(&timer);
            timer.tl_expires = timeout;
            timer.tl_data = (int) prev;
            timer.tl_function = process_timeout;
            add_timer(&timer);
        }

#ifdef CONFIG_SWAP
        if (do_swapper_run(next) == -1) {
            printk("Can't make process %d runnable\n", next->pid);
            panic("schedule: swap-in failed");
        }
#endif

        previous = prev;
        current = next;

        tswitch();  /* Won't return for a new task */

        if (timeout) {
            del_timer(&timer);
        }
    }

  no_sched:
    ;
}
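The timer armed in schedule() points at process_timeout(), which is not shown above; given that tl_data carries the task pointer cast to int, it presumably looks like this (a sketch under that assumption):

/* Sketch of process_timeout() (inferred from the timer setup in
 * schedule() above): recover the sleeping task from the timer's data
 * word and make it runnable again. */
static void process_timeout(int data)
{
    register __ptask p = (__ptask) data;

    p->timeout = 0UL;
    wake_up_process(p);     /* back to TASK_RUNNING and onto the runqueue */
}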