Example #1
/*
 * Bring one cpu online.
 */
int smp_boot_one_cpu(int cpuid, struct task_struct *idle)
{
	const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid);
	long timeout;

	task_thread_info(idle)->cpu = cpuid;

	/* Let _start know what logical CPU we're booting
	** (offset into init_tasks[],cpu_data[])
	*/
	cpu_now_booting = cpuid;

	/* 
	** boot strap code needs to know the task address since
	** it also contains the process stack.
	*/
	smp_init_current_idle_task = idle;
	mb();

	printk(KERN_INFO "Releasing cpu %d now, hpa=%lx\n", cpuid, p->hpa);

	/*
	** This gets PDC to release the CPU from a very tight loop.
	**
	** From the PA-RISC 2.0 Firmware Architecture Reference Specification:
	** "The MEM_RENDEZ vector specifies the location of OS_RENDEZ which 
	** is executed after receiving the rendezvous signal (an interrupt to 
	** EIR{0}). MEM_RENDEZ is valid only when it is nonzero and the 
	** contents of memory are valid."
	*/
	gsc_writel(TIMER_IRQ - CPU_IRQ_BASE, p->hpa);
	mb();

	/* 
	 * OK, wait a bit for that CPU to finish staggering about. 
	 * Slave will set a bit when it reaches smp_cpu_init().
	 * Once the "monarch CPU" sees the bit change, it can move on.
	 */
	for (timeout = 0; timeout < 10000; timeout++) {
		if (cpu_online(cpuid)) {
			/* Which implies Slave has started up */
			cpu_now_booting = 0;
			smp_init_current_idle_task = NULL;
			goto alive;
		}
		udelay(100);
		barrier();
	}
	printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid);
	return -1;

alive:
	/* Remember the Slave data */
	smp_debug(100, KERN_DEBUG "SMP: CPU:%d came alive after %ld us\n",
		cpuid, timeout * 100);
	return 0;
}
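
What the monarch polls for above is set on the other side: once firmware releases it, the slave runs through smp_cpu_init() and marks itself in the online map. Below is a minimal userspace sketch of that monarch/slave handshake, with a C11 atomic word and a pthread standing in for the online map and the booting CPU (names here are illustrative, not kernel APIs).

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_ulong online_mask;	/* stands in for cpu_online_mask */

static void *slave(void *arg)
{
	int cpuid = (int)(long)arg;

	usleep(3000);	/* early-boot "staggering about" */
	atomic_fetch_or(&online_mask, 1UL << cpuid);	/* what smp_cpu_init() does */
	return NULL;
}

int main(void)
{
	pthread_t t;
	int cpuid = 1;
	long timeout;

	pthread_create(&t, NULL, slave, (void *)(long)cpuid);

	/* Monarch side: poll for up to 10000 * 100us, about one second. */
	for (timeout = 0; timeout < 10000; timeout++) {
		if (atomic_load(&online_mask) & (1UL << cpuid)) {
			printf("CPU:%d came alive after %ld us\n",
			       cpuid, timeout * 100);
			pthread_join(t, NULL);
			return 0;
		}
		usleep(100);
	}
	printf("CPU:%d is stuck\n", cpuid);
	return 1;
}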
Example #2
/*
 * ipi_send()
 *	Send an Interprocessor Interrupt.
 */
static void ipi_send(int cpu, enum ipi_message_type op)
{
	struct cpuinfo_ubicom32 *p = &per_cpu(cpu_data, cpu);
	spinlock_t *lock = &per_cpu(ipi_lock, cpu);
	unsigned long flags;

	/*
	 * We protect the setting of the ipi_pending field and ensure
	 * that the ipi delivery mechanism and interrupt are atomically
	 * handled.
	 */
	spin_lock_irqsave(lock, flags);
	p->ipi_pending |= 1 << op;
	spin_unlock_irqrestore(lock, flags);

	spin_lock_irqsave(&smp_ipi_lock, flags);
	smp_needs_ipi |= (1 << p->tid);
	ubicom32_set_interrupt(smp_ipi_irq);
	spin_unlock_irqrestore(&smp_ipi_lock, flags);
	smp_debug(100, KERN_INFO "cpu[%d]: send: %d\n", cpu, op);
}
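
Two locks are taken in sequence here: the per-cpu ipi_lock protects that CPU's ipi_pending word (what the target must do), while the global smp_ipi_lock protects the shared smp_needs_ipi mask and the interrupt assertion (who needs poking; p->tid is the hardware thread backing the logical CPU). Below is a rough userspace analog of the same double bookkeeping, with mutexes in place of the spinlocks and the tid mapping dropped for brevity (all names illustrative).

#include <pthread.h>
#include <stdio.h>

#define NR_CPUS 4

enum ipi_message_type { IPI_NOP, IPI_RESCHEDULE, IPI_CALL_FUNC };

static unsigned long ipi_pending[NR_CPUS];	/* per-cpu op bits */
static pthread_mutex_t ipi_lock[NR_CPUS] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

static unsigned long smp_needs_ipi;		/* which cpus need the shared IRQ */
static pthread_mutex_t smp_ipi_lock = PTHREAD_MUTEX_INITIALIZER;

static void raise_hw_interrupt(void)
{
	/* In the kernel this is ubicom32_set_interrupt(smp_ipi_irq). */
}

static void ipi_send(int cpu, enum ipi_message_type op)
{
	/* First record *what* the target must do... */
	pthread_mutex_lock(&ipi_lock[cpu]);
	ipi_pending[cpu] |= 1UL << op;
	pthread_mutex_unlock(&ipi_lock[cpu]);

	/* ...then record *who* needs the shared interrupt, and assert it. */
	pthread_mutex_lock(&smp_ipi_lock);
	smp_needs_ipi |= 1UL << cpu;
	raise_hw_interrupt();
	pthread_mutex_unlock(&smp_ipi_lock);
}

int main(void)
{
	ipi_send(2, IPI_RESCHEDULE);
	printf("pending[2]=%#lx needs_ipi=%#lx\n", ipi_pending[2], smp_needs_ipi);
	return 0;
}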
Example #3
/*
 * ipi_interrupt()
 *	Handle an Interprocessor Interrupt.
 */
static irqreturn_t ipi_interrupt(int irq, void *dev_id)
{
	int cpuid = smp_processor_id();
	struct cpuinfo_ubicom32 *p = &per_cpu(cpu_data, cpuid);
	unsigned long ops;

	/*
	 * Count this now; we may make a call that never returns.
	 */
	p->ipi_count++;

	/*
	 * We are about to process all ops.  If another cpu has stated
	 * that we need an IPI, we will have already processed it.  By
	 * clearing our smp_needs_ipi, and processing all ops,
	 * we reduce the number of IPI interrupts.  However, this introduces
	 * the possibility that smp_needs_ipi will be clear and the soft irq
	 * will have gone off; so we need to make the get_affinity() path
	 * tolerant of spurious interrupts.
	 */
	spin_lock(&smp_ipi_lock);
	smp_needs_ipi &= ~(1 << p->tid);
	spin_unlock(&smp_ipi_lock);

	for (;;) {
		/*
		 * Read the set of IPI commands we should handle.
		 */
		spinlock_t *lock = &per_cpu(ipi_lock, cpuid);
		spin_lock(lock);
		ops = p->ipi_pending;
		p->ipi_pending = 0;
		spin_unlock(lock);

		/*
		 * If we have no IPI commands to execute, break out.
		 */
		if (!ops) {
			break;
		}

		/*
		 * Execute the set of commands in the ops word, one command
		 * at a time in no particular order.  Strip off each command
		 * as we execute it.
		 */
		while (ops) {
			unsigned long which = ffz(~ops);
			ops &= ~(1 << which);

			BUG_ON(!irqs_disabled());
			switch (which) {
			case IPI_NOP:
				smp_debug(100, KERN_INFO "cpu[%d]: "
					  "IPI_NOP\n", cpuid);
				break;

			case IPI_RESCHEDULE:
				/*
				 * Reschedule callback.  Everything to be
				 * done is done by the interrupt return path.
				 */
				smp_debug(200, KERN_INFO "cpu[%d]: "
					  "IPI_RESCHEDULE\n", cpuid);
				break;

			case IPI_CALL_FUNC:
				smp_debug(100, KERN_INFO "cpu[%d]: "
					  "IPI_CALL_FUNC\n", cpuid);
				generic_smp_call_function_interrupt();
				break;

			case IPI_CALL_FUNC_SINGLE:
				smp_debug(100, KERN_INFO "cpu[%d]: "
					  "IPI_CALL_FUNC_SINGLE\n", cpuid);
				generic_smp_call_function_single_interrupt();
				break;

			case IPI_CPU_STOP:
				smp_debug(100, KERN_INFO "cpu[%d]: "
					  "IPI_CPU_STOP\n", cpuid);
				smp_halt_processor();
				break;

#if !defined(CONFIG_LOCAL_TIMERS)
			case IPI_CPU_TIMER:
				smp_debug(100, KERN_INFO "cpu[%d]: "
					  "IPI_CPU_TIMER\n", cpuid);
#if defined(CONFIG_GENERIC_CLOCKEVENTS)
				local_timer_interrupt();
#else
				update_process_times(user_mode(get_irq_regs()));
				profile_tick(CPU_PROFILING);
#endif
				break;
#endif

			default:
				printk(KERN_CRIT "cpu[%d]: "
					  "Unknown IPI: %lu\n", cpuid, which);

				return IRQ_NONE;
			}
		}
	}
	return IRQ_HANDLED;
}
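
ffz() returns the index of the first zero bit, so ffz(~ops) picks out the lowest set bit in ops; on GCC or Clang the same extraction can be written with __builtin_ctzl(). A small standalone demonstration of the drain-and-dispatch idiom used above (illustrative, not kernel code):

#include <stdio.h>

int main(void)
{
	/* Pretend two IPI ops were posted: bits 1 and 3. */
	unsigned long ops = (1UL << 1) | (1UL << 3);

	while (ops) {
		/* ffz(~ops) == index of lowest set bit == ctz(ops). */
		unsigned long which = __builtin_ctzl(ops);

		ops &= ~(1UL << which);	/* strip the command as we run it */
		printf("dispatch op %lu\n", which);
	}
	return 0;
}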
Example #4
/*
 * Bring one cpu online.
 */
int smp_boot_one_cpu(int cpuid)
{
	const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid);
	struct task_struct *idle;
	long timeout;

	/* 
	 * Create an idle task for this CPU.  Note the address we give
	 * to kernel_thread is irrelevant -- it's going to start
	 * where OS_BOOT_RENDEVZ vector in SAL says to start.  But
	 * this gets all the other task-y sort of data structures set
	 * up like we wish.   We need to pull the just created idle task 
	 * off the run queue and stuff it into the init_tasks[] array.  
	 * Sheesh . . .
	 */

	idle = fork_idle(cpuid);
	if (IS_ERR(idle))
		panic("SMP: fork failed for CPU:%d", cpuid);

	task_thread_info(idle)->cpu = cpuid;

	/* Let _start know what logical CPU we're booting
	** (offset into init_tasks[],cpu_data[])
	*/
	cpu_now_booting = cpuid;

	/* 
	** boot strap code needs to know the task address since
	** it also contains the process stack.
	*/
	smp_init_current_idle_task = idle;
	mb();

	printk(KERN_INFO "Releasing cpu %d now, hpa=%lx\n", cpuid, p->hpa);

	/*
	** This gets PDC to release the CPU from a very tight loop.
	**
	** From the PA-RISC 2.0 Firmware Architecture Reference Specification:
	** "The MEM_RENDEZ vector specifies the location of OS_RENDEZ which 
	** is executed after receiving the rendezvous signal (an interrupt to 
	** EIR{0}). MEM_RENDEZ is valid only when it is nonzero and the 
	** contents of memory are valid."
	*/
	gsc_writel(TIMER_IRQ - CPU_IRQ_BASE, p->hpa);
	mb();

	/* 
	 * OK, wait a bit for that CPU to finish staggering about. 
	 * Slave will set a bit when it reaches smp_cpu_init().
	 * Once the "monarch CPU" sees the bit change, it can move on.
	 */
	for (timeout = 0; timeout < 10000; timeout++) {
		if (cpu_online(cpuid)) {
			/* Which implies Slave has started up */
			cpu_now_booting = 0;
			smp_init_current_idle_task = NULL;
			goto alive;
		}
		udelay(100);
		barrier();
	}

	put_task_struct(idle);
	idle = NULL;

	printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid);
	return -1;

alive:
	/* Remember the Slave data */
	smp_debug(100, KERN_DEBUG "SMP: CPU:%d came alive after %ld us\n",
		cpuid, timeout * 100);
	return 0;
}
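
The poll budget in this version is the same as in Example #1: 10000 iterations of 100 µs, about one second before the monarch declares the slave stuck and drops its reference to the just-forked idle task. The open-coded loop generalizes to a small helper; here is a sketch for a hosted environment (poll_until and flag_is_set are made-up names, not kernel APIs):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Poll cond(arg) every delay_us microseconds, at most 'iterations' times. */
static bool poll_until(bool (*cond)(void *), void *arg,
		       long iterations, unsigned int delay_us)
{
	long timeout;

	for (timeout = 0; timeout < iterations; timeout++) {
		if (cond(arg))
			return true;
		usleep(delay_us);
	}
	return false;
}

static bool flag_is_set(void *arg)
{
	return *(volatile int *)arg != 0;
}

int main(void)
{
	int flag = 1;	/* already "online", so this succeeds immediately */

	/* Same budget as the loop above: 10000 x 100us ~= 1 second. */
	printf("%s\n", poll_until(flag_is_set, &flag, 10000, 100)
		       ? "came alive" : "stuck");
	return 0;
}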
Example #5
irqreturn_t __irq_entry
ipi_interrupt(int irq, void *dev_id) 
{
	int this_cpu = smp_processor_id();
	struct cpuinfo_parisc *p = &per_cpu(cpu_data, this_cpu);
	unsigned long ops;
	unsigned long flags;

	/* Count this now; we may make a call that never returns. */
	p->ipi_count++;

	mb();	/* Order interrupt and bit testing. */

	for (;;) {
		spinlock_t *lock = &per_cpu(ipi_lock, this_cpu);
		spin_lock_irqsave(lock, flags);
		ops = p->pending_ipi;
		p->pending_ipi = 0;
		spin_unlock_irqrestore(lock, flags);

		mb(); /* Order bit clearing and data access. */

		if (!ops)
			break;

		while (ops) {
			unsigned long which = ffz(~ops);

			ops &= ~(1 << which);

			switch (which) {
			case IPI_NOP:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_NOP\n", this_cpu);
				break;
				
			case IPI_RESCHEDULE:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_RESCHEDULE\n", this_cpu);
				scheduler_ipi();
				break;

			case IPI_CALL_FUNC:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC\n", this_cpu);
				generic_smp_call_function_interrupt();
				break;

			case IPI_CALL_FUNC_SINGLE:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC_SINGLE\n", this_cpu);
				generic_smp_call_function_single_interrupt();
				break;

			case IPI_CPU_START:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_START\n", this_cpu);
				break;

			case IPI_CPU_STOP:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_STOP\n", this_cpu);
				halt_processor();
				break;

			case IPI_CPU_TEST:
				smp_debug(100, KERN_DEBUG "CPU%d is alive!\n", this_cpu);
				break;

			default:
				printk(KERN_CRIT "Unknown IPI num on CPU%d: %lu\n",
					this_cpu, which);
				return IRQ_NONE;
			} /* Switch */
			/* let in any pending interrupts */
			local_irq_enable();
			local_irq_disable();
		} /* while (ops) */
	}
	return IRQ_HANDLED;
}
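
Note the local_irq_enable()/local_irq_disable() pair between ops: the handler briefly opens an interrupt window so that other pending interrupts are not starved while a long batch of cross-calls runs. The same release-then-reacquire idiom appears in userspace whenever a batch is processed under a lock and waiters should get a chance between items; a minimal mutex analogy follows (illustrative names, not a kernel pattern verbatim):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void process_batch(const int *items, int n)
{
	int i;

	pthread_mutex_lock(&lock);
	for (i = 0; i < n; i++) {
		printf("processing item %d\n", items[i]);

		/*
		 * Let waiters in between items, just as the IPI handler
		 * opens an interrupt window between ops.
		 */
		pthread_mutex_unlock(&lock);
		pthread_mutex_lock(&lock);
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	int items[] = { 1, 2, 3 };

	process_batch(items, 3);
	return 0;
}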
Example #6
irqreturn_t
ipi_interrupt(int irq, void *dev_id) 
{
	int this_cpu = smp_processor_id();
	struct cpuinfo_parisc *p = &cpu_data[this_cpu];
	unsigned long ops;
	unsigned long flags;

	/* Count this now; we may make a call that never returns. */
	p->ipi_count++;

	mb();	/* Order interrupt and bit testing. */

	for (;;) {
		spinlock_t *lock = &per_cpu(ipi_lock, this_cpu);
		spin_lock_irqsave(lock, flags);
		ops = p->pending_ipi;
		p->pending_ipi = 0;
		spin_unlock_irqrestore(lock, flags);

		mb(); /* Order bit clearing and data access. */

		if (!ops)
			break;

		while (ops) {
			unsigned long which = ffz(~ops);

			ops &= ~(1 << which);

			switch (which) {
			case IPI_NOP:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_NOP\n", this_cpu);
				break;
				
			case IPI_RESCHEDULE:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_RESCHEDULE\n", this_cpu);
				/*
				 * Reschedule callback.  Everything to be
				 * done is done by the interrupt return path.
				 */
				break;

			case IPI_CALL_FUNC:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC\n", this_cpu);
				{
					volatile struct smp_call_struct *data;
					void (*func)(void *info);
					void *info;
					int wait;

					data = smp_call_function_data;
					func = data->func;
					info = data->info;
					wait = data->wait;

					mb();
					atomic_dec((atomic_t *)&data->unstarted_count);

					/* At this point, *data can't
					 * be relied upon.
					 */

					(*func)(info);

					/* Notify the sending CPU that the
					 * task is done.
					 */
					mb();
					if (wait)
						atomic_dec((atomic_t *)&data->unfinished_count);
				}
				break;

			case IPI_CPU_START:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_START\n", this_cpu);
				break;

			case IPI_CPU_STOP:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_STOP\n", this_cpu);
				halt_processor();
				break;

			case IPI_CPU_TEST:
				smp_debug(100, KERN_DEBUG "CPU%d is alive!\n", this_cpu);
				break;

			default:
				printk(KERN_CRIT "Unknown IPI num on CPU%d: %lu\n",
					this_cpu, which);
				return IRQ_NONE;
			} /* Switch */
			/* let in any pending interrupts */
			local_irq_enable();
			local_irq_disable();
		} /* while (ops) */
	}
	return IRQ_HANDLED;
}
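
This oldest handler predates the generic smp_call_function() plumbing: the sender publishes a shared smp_call_struct, each receiver decrements unstarted_count once it has copied the fields (after which the sender may reuse the struct), and, if wait was requested, decrements unfinished_count when func() returns. Below is a compressed userspace model of that two-counter rendezvous, with C11 atomics and threads standing in for CPUs (all names illustrative; this is a sketch of the protocol, not the original implementation).

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct smp_call_struct {
	void (*func)(void *info);
	void *info;
	int wait;
	atomic_int unstarted_count;	/* receivers yet to copy the fields */
	atomic_int unfinished_count;	/* receivers yet to finish func()  */
};

static struct smp_call_struct *smp_call_function_data;

static void say_hello(void *info)
{
	printf("hello from a receiver, info=%s\n", (const char *)info);
}

static void *receiver(void *unused)
{
	struct smp_call_struct *data = smp_call_function_data;
	void (*func)(void *) = data->func;
	void *info = data->info;
	int wait = data->wait;

	(void)unused;

	/* Fields copied; the sender may now reuse *data. */
	atomic_fetch_sub(&data->unstarted_count, 1);

	func(info);

	/* Tell the sender the work is done on this "cpu". */
	if (wait)
		atomic_fetch_sub(&data->unfinished_count, 1);
	return NULL;
}

int main(void)
{
	enum { NCPUS = 3 };
	struct smp_call_struct data = {
		.func = say_hello, .info = "cross-call", .wait = 1,
	};
	pthread_t t[NCPUS];
	int i;

	atomic_store(&data.unstarted_count, NCPUS);
	atomic_store(&data.unfinished_count, NCPUS);
	smp_call_function_data = &data;

	for (i = 0; i < NCPUS; i++)	/* stands in for sending the IPIs */
		pthread_create(&t[i], NULL, receiver, NULL);

	/* Sender side: wait until everyone has started, then finished. */
	while (atomic_load(&data.unstarted_count) > 0)
		;
	while (atomic_load(&data.unfinished_count) > 0)
		;

	for (i = 0; i < NCPUS; i++)
		pthread_join(t[i], NULL);
	printf("cross-call complete on %d cpus\n", NCPUS);
	return 0;
}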