Example #1
0
void __cpu_die(unsigned int cpu)
{
	int tries;
	int cpu_status;
	unsigned int pcpu = get_hard_smp_processor_id(cpu);

	for (tries = 0; tries < 25; tries++) {
		cpu_status = query_cpu_stopped(pcpu);
		if (cpu_status == 0 || cpu_status == -1)
			break;
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(HZ/5);
	}
	if (cpu_status != 0) {
		printk("Querying DEAD? cpu %i (%i) shows %i\n",
		       cpu, pcpu, cpu_status);
	}

	/* Isolation and deallocation are definitely done by
	 * drslot_chrp_cpu.  If they were not, they would be
	 * done here: change the isolate state to Isolate and
	 * change the allocation-state to Unusable.
	 */
	paca[cpu].cpu_start = 0;
}
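
The loop above polls query_cpu_stopped() until the hardware thread reports the RTAS "stopped" state, or reports that it cannot be queried at all. A minimal sketch of how such a helper is commonly implemented on pSeries follows, assuming the "query-cpu-stopped-state" RTAS call and the standard rtas_token()/rtas_call() interfaces; the return conventions shown (0 = stopped, -1 = service unavailable) are an assumption for illustration, not taken from the examples.

/* Sketch only: one plausible query_cpu_stopped(), assuming <asm/rtas.h>
 * and the RTAS "query-cpu-stopped-state" call.  Return conventions
 * (0 = thread stopped, -1 = service unavailable, other = RTAS status)
 * are an assumption for illustration.
 */
static int query_cpu_stopped(unsigned int pcpu)
{
	int cpu_status, status, qcss_tok;

	qcss_tok = rtas_token("query-cpu-stopped-state");
	if (qcss_tok == RTAS_UNKNOWN_SERVICE)
		return -1;	/* callers treat -1 as "cannot query" */

	/* One input (the hw cpu id), two outputs (status, cpu_status). */
	status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu);
	if (status != 0) {
		printk("RTAS query-cpu-stopped-state failed: %i\n", status);
		return status;
	}

	return cpu_status;	/* 0 once the thread is in the stopped state */
}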
Example #2
0
/* Search all cpu device nodes for an offline logical cpu.  If a
 * device node has an "ibm,my-drc-index" property (meaning this is an
 * LPAR), paranoid-check whether we own the cpu.  For each "thread"
 * of a cpu, if it is offline and has the same hw index as before,
 * grab that one in preference.
 */
static unsigned int find_physical_cpu_to_start(unsigned int old_hwindex)
{
	struct device_node *np = NULL;
	unsigned int best = -1U;

	while ((np = of_find_node_by_type(np, "cpu"))) {
		int nr_threads, len;
		u32 *index = (u32 *)get_property(np, "ibm,my-drc-index", NULL);
		u32 *tid = (u32 *)
			get_property(np, "ibm,ppc-interrupt-server#s", &len);

		if (!tid)
			tid = (u32 *)get_property(np, "reg", &len);

		if (!tid)
			continue;

		/* If there is a drc-index, make sure that we own
		 * the cpu: sensor 9003 is dr-entity-sense, and a
		 * state of 1 means the cpu belongs to this partition.
		 */
		if (index) {
			int state;
			int rc = rtas_get_sensor(9003, *index, &state);
			if (rc != 0 || state != 1)
				continue;
		}

		nr_threads = len / sizeof(u32);

		while (nr_threads--) {
			if (0 == query_cpu_stopped(tid[nr_threads])) {
				best = tid[nr_threads];
				if (best == old_hwindex)
					goto out;
			}
		}
	}
out:
	of_node_put(np);
	return best;
}
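
find_physical_cpu_to_start() only picks a stopped hardware thread; the caller still has to bind that thread to the logical cpu being brought online. The sketch below is a hypothetical caller for illustration: example_cpu_enable() is an assumed name, and the use of set_hard_smp_processor_id() here is an assumption, not taken from the examples.

/* Hypothetical caller (illustration only): rebind a logical cpu to a
 * stopped physical thread before kicking it.  example_cpu_enable() is
 * an assumed name; using set_hard_smp_processor_id() this way is an
 * assumption, not code from the examples above.
 */
static int example_cpu_enable(unsigned int cpu)
{
	unsigned int pcpu;

	/* Prefer the hw index this cpu had before it went offline. */
	pcpu = find_physical_cpu_to_start(get_hard_smp_processor_id(cpu));
	if (pcpu == -1U)
		return -ENODEV;		/* no stopped thread to claim */

	set_hard_smp_processor_id(cpu, pcpu);
	return 0;
}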
Example #3
0
/* This is called very early */
void __init smp_init_pSeries(void)
{
	int ret, i;

	DBG(" -> smp_init_pSeries()\n");

	if (naca->interrupt_controller == IC_OPEN_PIC)
		smp_ops = &pSeries_mpic_smp_ops;
	else
		smp_ops = &pSeries_xics_smp_ops;

	/* Start secondary threads on SMT systems; primary threads
	 * are already in the running state.
	 */
	for_each_present_cpu(i) {
		if (query_cpu_stopped(get_hard_smp_processor_id(i)) == 0) {
			printk("%16.16x : starting thread\n", i);
			DBG("%16.16x : starting thread\n", i);
			rtas_call(rtas_token("start-cpu"), 3, 1, &ret,
				  get_hard_smp_processor_id(i),
				  __pa((u32)*((unsigned long *)
					      pseries_secondary_smp_init)),
				  i);
		}
	}

	if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR)
		vpa_init(boot_cpuid);

	/* Non-LPAR systems additionally need the take/give timebase hand-off */
	if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) {
		smp_ops->give_timebase = pSeries_give_timebase;
		smp_ops->take_timebase = pSeries_take_timebase;
	}

	DBG(" <- smp_init_pSeries()\n");
}
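
smp_init_pSeries() only decides which ops table smp_ops points at and then patches the timebase hooks when the "freeze-time-base" RTAS service exists. The rough shape of that table is sketched below; the field list is an illustrative assumption and varies between kernel versions.

/* Rough sketch of the table smp_ops points at; the exact fields are
 * an assumption for illustration and differ across kernel versions.
 */
struct smp_ops_t {
	void	(*message_pass)(int target, int msg);
	int	(*probe)(void);
	void	(*kick_cpu)(int nr);
	void	(*setup_cpu)(int nr);
	void	(*take_timebase)(void);	/* patched above when non-LPAR */
	void	(*give_timebase)(void);
	void	(*cpu_die)(unsigned int nr);
};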
Example #4
0
static void pSeries_cpu_die(unsigned int cpu)
{
	int tries;
	int cpu_status;
	unsigned int pcpu = get_hard_smp_processor_id(cpu);

	for (tries = 0; tries < 25; tries++) {
		cpu_status = query_cpu_stopped(pcpu);
		if (cpu_status == 0 || cpu_status == -1)
			break;
		msleep(200);
	}
	if (cpu_status != 0) {
		printk("Querying DEAD? cpu %i (%i) shows %i\n",
		       cpu, pcpu, cpu_status);
	}

	/* Isolation and deallocation are definitely done by
	 * drslot_chrp_cpu.  If they were not, they would be
	 * done here: change the isolate state to Isolate and
	 * change the allocation-state to Unusable.
	 */
	paca[cpu].cpu_start = 0;
}