Example #1
/*
 * Returns 1 if the specified cpu should be brought up during boot.
 * Used to inhibit booting threads if they've been disabled or
 * limited on the command line.
 */
int smp_generic_cpu_bootable(unsigned int nr)
{
	/* Special case - we inhibit secondary thread startup
	 * during boot if the user requests it.
	 */
	if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) {
		if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
			return 0;
		if (smt_enabled_at_boot
		    && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
			return 0;
	}

	return 1;
}
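A note on where this hook plugs in: on powerpc, platform SMP code registers a cpu_bootable callback through struct smp_ops_t, and the generic bringup path consults it before onlining each CPU. The snippet below is only an illustrative sketch of that wiring; apart from smp_generic_cpu_bootable and the .cpu_bootable field, the struct name and the omitted fields are placeholders.

static struct smp_ops_t example_smp_ops = {
	/* ... other platform callbacks elided ... */
	.cpu_bootable	= smp_generic_cpu_bootable,
};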
Example #2
File: smp.c  Project: 168519/linux
/* This is called very early */
static void __init smp_init_pseries(void)
{
	int i;

	pr_debug(" -> smp_init_pSeries()\n");

	alloc_bootmem_cpumask_var(&of_spin_mask);

	/*
	 * Mark threads which are still spinning in hold loops
	 *
	 * We know prom_init will not have started them if RTAS supports
	 * query-cpu-stopped-state.
	 */
	if (rtas_token("query-cpu-stopped-state") == RTAS_UNKNOWN_SERVICE) {
		if (cpu_has_feature(CPU_FTR_SMT)) {
			for_each_present_cpu(i) {
				if (cpu_thread_in_core(i) == 0)
					cpumask_set_cpu(i, of_spin_mask);
			}
		} else
			cpumask_copy(of_spin_mask, cpu_present_mask);

		cpumask_clear_cpu(boot_cpuid, of_spin_mask);
	}

	/* (remainder of the function elided in this snippet) */
}
Example #3
static int __init pnv_init_idle_states(void)
{

	supported_cpuidle_states = 0;

	if (cpuidle_disable != IDLE_NO_OVERRIDE)
		goto out;

	pnv_probe_idle_states();

	if (!(supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED_ER1)) {
		patch_instruction(
			(unsigned int *)pnv_fastsleep_workaround_at_entry,
			PPC_INST_NOP);
		patch_instruction(
			(unsigned int *)pnv_fastsleep_workaround_at_exit,
			PPC_INST_NOP);
	} else {
		/*
		 * OPAL_PM_SLEEP_ENABLED_ER1 is set. It indicates that
		 * workaround is needed to use fastsleep. Provide sysfs
		 * control to choose how this workaround has to be applied.
		 */
		device_create_file(cpu_subsys.dev_root,
				&dev_attr_fastsleep_workaround_applyonce);
	}

	pnv_alloc_idle_core_states();

	/*
	 * For each CPU, record its PACA address in each of its
	 * sibling thread's PACA at the slot corresponding to this
	 * CPU's index in the core.
	 */
	if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
		int cpu;

		pr_info("powernv: idle: Saving PACA pointers of all CPUs in their thread sibling PACA\n");
		for_each_present_cpu(cpu) {
			int base_cpu = cpu_first_thread_sibling(cpu);
			int idx = cpu_thread_in_core(cpu);
			int i;

			for (i = 0; i < threads_per_core; i++) {
				int j = base_cpu + i;

				paca_ptrs[j]->thread_sibling_pacas[idx] =
					paca_ptrs[cpu];
			}
		}
	}

	if (supported_cpuidle_states & OPAL_PM_NAP_ENABLED)
		ppc_md.power_save = power7_idle;

out:
	return 0;
}
Example #4
void spc_context_init(void)
{
    int i;
    int linux_cpu = 0; // Linux cpu that will handle spc interrupts

    for (i = 0; i < fusedos_config->nr_spcs; i++) {
        memset((void*)(&(spc_context[i].regs)), 0, sizeof(regs_t));

        // Taken from bgq_cause_ipi() in bic.c
        spc_context[i].bic_int_send = (void*)(&_puea->interrupt_send);
        spc_context[i].bic_value = cpu_thread_in_core(linux_cpu) + 1;
        spc_context[i].bic_value |= BGQ_BIC_C2C_INTTYPE_EXTERNAL << (63 - 60);
        spc_context[i].bic_value |= 0x0000000000200000ULL >> cpu_core_index_of_thread(linux_cpu);

        spc_context[i].ipi_wakeup = 0;

        spc_context[i].id = i;
        spc_context[i].start = 0;
        spc_context[i].command = 0;

        spc_context[i].mem_bot = __pa(spc_memory) + (uint64_t)(i) * (uint64_t)(SPC_MEMORY_SIZE);
        //printk("FUSEDOS spc_context_init: spc_context[%d].mem_bot %016llx\n", i, spc_context[i].mem_bot);

        memset((void*)(spc_context[i].tlb_entry), 0, sizeof(tlb_entry_t) * MAX_TLB_ENTRIES);
        spc_context[i].tlb_entry_count = 0;

        memset(spc_context[i].spcm_stack, 0, SPCM_STACK_SIZE);

        spc_context[i].spcm_sp = 0;
        spc_context[i].spcm_toc = 0;

        spc_context[i].ex_code = 0;

        spc_context[i].spcm_func.funcaddr = 0;
        spc_context[i].spcm_func.r2 = 0;

        spc_context[i].ipi_message.fcn = 0;
        spc_context[i].ipi_message.parm1 = 0;
        spc_context[i].ipi_message.parm2 = 0;

        spc_context[i].text_pstart = 0;
        spc_context[i].text_pend = 0;
        spc_context[i].data_pstart = 0;
        spc_context[i].data_pend = 0;
        spc_context[i].heap_pstart = 0;
        spc_context[i].heap_pend = 0;

        spc_context[i].scratch0 = 0;
        spc_context[i].scratch1 = 0;
        spc_context[i].scratch2 = 0;
        spc_context[i].scratch3 = 0;
        memset(spc_context[i].scratch, 0, SCRATCH_SIZE);
    }
}
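For concreteness, the bic_value composed above can be read off for the default linux_cpu = 0. The lines below are derived purely from the expressions in the code, with BGQ_BIC_C2C_INTTYPE_EXTERNAL left symbolic:

	/* linux_cpu == 0, so cpu_thread_in_core(0) == 0 and
	 * cpu_core_index_of_thread(0) == 0:
	 */
	bic_value  = 0 + 1;                             /* thread index + 1 */
	bic_value |= BGQ_BIC_C2C_INTTYPE_EXTERNAL << 3; /* (63 - 60) == 3 */
	bic_value |= 0x0000000000200000ULL >> 0;        /* core-select bit for core 0 */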
Example #5
static int smp_cell_cpu_bootable(unsigned int nr)
{
	/* Special case - we inhibit secondary thread startup
	 * during boot if the user requests it.  Odd-numbered
	 * cpus are assumed to be secondary threads.
	 */
	if (system_state < SYSTEM_RUNNING &&
	    cpu_has_feature(CPU_FTR_SMT) &&
	    !smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
		return 0;

	return 1;
}
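The "odd-numbered cpus are assumed to be secondary threads" remark follows from how powerpc derives the thread index from the logical cpu id. In mainline asm/cputhreads.h the helper has historically been essentially the following (shown here only to explain the comment):

static inline int cpu_thread_in_core(int cpu)
{
	return cpu & (threads_per_core - 1);
}

With threads_per_core == 2, every odd cpu id yields a non-zero thread index, so it is treated as a secondary thread and held back while !smt_enabled_at_boot.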
Example #6
/* This is called very early */
static void __init smp_init_pseries(void)
{
	int i;

	pr_debug(" -> smp_init_pSeries()\n");

	/* Mark threads which are still spinning in hold loops. */
	if (cpu_has_feature(CPU_FTR_SMT)) {
		for_each_present_cpu(i) { 
			if (cpu_thread_in_core(i) == 0)
				cpu_set(i, of_spin_map);
		}
	} else {
		/* Non-SMT: every present cpu is still spinning. */
		of_spin_map = cpu_present_map;
	}

	/* (remainder of the function elided in this snippet) */
}
Example #7
int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	u64 now;

	if (signal_pending(current)) {
		run->exit_reason = KVM_EXIT_INTR;
		return -EINTR;
	}

	flush_fp_to_thread(current);
	flush_altivec_to_thread(current);
	flush_vsx_to_thread(current);
	preempt_disable();

	/*
	 * Make sure we are running on thread 0, and that
	 * secondary threads are offline.
	 * XXX we should also block attempts to bring any
	 * secondary threads online.
	 */
	if (threads_per_core > 1) {
		int cpu = smp_processor_id();
		int thr = cpu_thread_in_core(cpu);

		if (thr)
			goto out;
		while (++thr < threads_per_core)
			if (cpu_online(cpu + thr))
				goto out;
	}

	kvm_guest_enter();

	__kvmppc_vcore_entry(NULL, vcpu);

	kvm_guest_exit();

	preempt_enable();
	kvm_resched(vcpu);

	now = get_tb();
	/* cancel pending dec exception if dec is positive */
	if (now < vcpu->arch.dec_expires && kvmppc_core_pending_dec(vcpu))
		kvmppc_core_dequeue_dec(vcpu);

	return kvmppc_handle_exit(run, vcpu, current);

 out:
	preempt_enable();
	return -EBUSY;
}
Example #8
File: smp.c  Project: 1800alex/linux
/* This is called very early */
void __init smp_init_cell(void)
{
	int i;

	DBG(" -> smp_init_cell()\n");

	smp_ops = &bpa_iic_smp_ops;

	/* Mark threads which are still spinning in hold loops. */
	if (cpu_has_feature(CPU_FTR_SMT)) {
		for_each_present_cpu(i) {
			if (cpu_thread_in_core(i) == 0)
				cpumask_set_cpu(i, &of_spin_map);
		}
	} else
		/* Non-SMT: every present cpu is still spinning. */
		cpumask_copy(&of_spin_map, cpu_present_mask);

	/* (remainder of the function elided in this snippet) */
}
Example #9
static int pnv_save_sprs_for_deep_states(void)
{
	int cpu;
	int rc;

	/*
	 * hid0, hid1, hid4, hid5, hmeer and lpcr values are symmetric across
	 * all cpus at boot. Get these reg values of current cpu and use the
	 * same across all cpus.
	 */
	uint64_t lpcr_val = mfspr(SPRN_LPCR);
	uint64_t hid0_val = mfspr(SPRN_HID0);
	uint64_t hid1_val = mfspr(SPRN_HID1);
	uint64_t hid4_val = mfspr(SPRN_HID4);
	uint64_t hid5_val = mfspr(SPRN_HID5);
	uint64_t hmeer_val = mfspr(SPRN_HMEER);
	uint64_t msr_val = MSR_IDLE;
	uint64_t psscr_val = pnv_deepest_stop_psscr_val;

	for_each_present_cpu(cpu) {
		uint64_t pir = get_hard_smp_processor_id(cpu);
		uint64_t hsprg0_val = (uint64_t)paca_ptrs[cpu];

		rc = opal_slw_set_reg(pir, SPRN_HSPRG0, hsprg0_val);
		if (rc != 0)
			return rc;

		rc = opal_slw_set_reg(pir, SPRN_LPCR, lpcr_val);
		if (rc != 0)
			return rc;

		if (cpu_has_feature(CPU_FTR_ARCH_300)) {
			rc = opal_slw_set_reg(pir, P9_STOP_SPR_MSR, msr_val);
			if (rc)
				return rc;

			rc = opal_slw_set_reg(pir,
					      P9_STOP_SPR_PSSCR, psscr_val);

			if (rc)
				return rc;
		}

		/* HIDs are per core registers */
		if (cpu_thread_in_core(cpu) == 0) {

			rc = opal_slw_set_reg(pir, SPRN_HMEER, hmeer_val);
			if (rc != 0)
				return rc;

			rc = opal_slw_set_reg(pir, SPRN_HID0, hid0_val);
			if (rc != 0)
				return rc;

			/* Only P8 needs to set the extra HID registers */
			if (!cpu_has_feature(CPU_FTR_ARCH_300)) {

				rc = opal_slw_set_reg(pir, SPRN_HID1, hid1_val);
				if (rc != 0)
					return rc;

				rc = opal_slw_set_reg(pir, SPRN_HID4, hid4_val);
				if (rc != 0)
					return rc;

				rc = opal_slw_set_reg(pir, SPRN_HID5, hid5_val);
				if (rc != 0)
					return rc;
			}
		}
	}

	return 0;
}
Example #10
int pnv_save_sprs_for_winkle(void)
{
	int cpu;
	int rc;

	/*
	 * hid0, hid1, hid4, hid5, hmeer and lpcr values are symmetric across
	 * all cpus at boot. Get these reg values from the current cpu and use
	 * the same across all cpus.
	 */
	uint64_t lpcr_val = mfspr(SPRN_LPCR);
	uint64_t hid0_val = mfspr(SPRN_HID0);
	uint64_t hid1_val = mfspr(SPRN_HID1);
	uint64_t hid4_val = mfspr(SPRN_HID4);
	uint64_t hid5_val = mfspr(SPRN_HID5);
	uint64_t hmeer_val = mfspr(SPRN_HMEER);

	for_each_possible_cpu(cpu) {
		uint64_t pir = get_hard_smp_processor_id(cpu);
		uint64_t hsprg0_val = (uint64_t)&paca[cpu];

		/*
		 * HSPRG0 is used to store the cpu's pointer to its paca, so the
		 * last 3 bits are guaranteed to be 0. Program the SLW to restore
		 * HSPRG0 with the 63rd bit set, so that when a thread wakes up
		 * at 0x100 we can use this bit to distinguish between fastsleep
		 * and deep winkle.
		 */
		hsprg0_val |= 1;

		rc = opal_slw_set_reg(pir, SPRN_HSPRG0, hsprg0_val);
		if (rc != 0)
			return rc;

		rc = opal_slw_set_reg(pir, SPRN_LPCR, lpcr_val);
		if (rc != 0)
			return rc;

		/* HIDs are per core registers */
		if (cpu_thread_in_core(cpu) == 0) {

			rc = opal_slw_set_reg(pir, SPRN_HMEER, hmeer_val);
			if (rc != 0)
				return rc;

			rc = opal_slw_set_reg(pir, SPRN_HID0, hid0_val);
			if (rc != 0)
				return rc;

			rc = opal_slw_set_reg(pir, SPRN_HID1, hid1_val);
			if (rc != 0)
				return rc;

			rc = opal_slw_set_reg(pir, SPRN_HID4, hid4_val);
			if (rc != 0)
				return rc;

			rc = opal_slw_set_reg(pir, SPRN_HID5, hid5_val);
			if (rc != 0)
				return rc;
		}
	}

	return 0;
}
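The HSPRG0 tagging commented above pays off on the wakeup side: a PACA pointer is at least 8-byte aligned, so its low bits are free, and finding bit 63 (the least significant bit) set after waking at 0x100 tells the thread it went through deep winkle rather than fastsleep. The real check lives in the powerpc idle/exception assembly; the C-style sketch below only illustrates the idea, and everything in it other than SPRN_HSPRG0, mfspr and mtspr is hypothetical:

	u64 hsprg0 = mfspr(SPRN_HSPRG0);

	if (hsprg0 & 1) {
		/* Restored by the SLW: this thread is waking from deep winkle.
		 * Clear the tag to recover the real PACA pointer, then do the
		 * full per-thread state restore.
		 */
		mtspr(SPRN_HSPRG0, hsprg0 & ~1UL);
	} else {
		/* Fastsleep wakeup: HSPRG0 still holds the plain PACA pointer
		 * and no extra restore is needed here.
		 */
	}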