Example #1
/* Early cpumask setup - runs on TP0 */
static void brcmstb_smp_setup(void)
{
	__cpu_number_map[0] = 0;
	__cpu_logical_map[0] = 0;
	set_cpu_possible(0, 1);
	set_cpu_present(0, 1);

	__cpu_number_map[1] = 1;
	__cpu_logical_map[1] = 1;
	set_cpu_possible(1, 1);
	set_cpu_present(1, 1);

#if defined(CONFIG_BMIPS4380)
	/* NBK and weak order flags */
	set_c0_brcm_config_0(0x30000);

	/*
	 * MIPS interrupts 0,1 (SW INT 0,1) cross over to the other TP
	 * MIPS interrupt 2 (HW INT 0) is the TP0 L1 controller output
	 * MIPS interrupt 3 (HW INT 1) is the TP1 L1 controller output
	 */
	change_c0_brcm_cmt_intr(0xf8018000, (0x02 << 27) | (0x03 << 15));
#elif defined(CONFIG_BMIPS5000)
	/* enable raceless SW interrupts */
	set_c0_brcm_config(0x03 << 22);

	/* clear any pending SW interrupts */
	write_c0_brcm_action(0x2000 | (0 << 9) | (0 << 8));	/* TP0, IPI 0 */
	write_c0_brcm_action(0x2000 | (0 << 9) | (1 << 8));	/* TP0, IPI 1 */
	write_c0_brcm_action(0x2000 | (1 << 9) | (0 << 8));	/* TP1, IPI 0 */
	write_c0_brcm_action(0x2000 | (1 << 9) | (1 << 8));	/* TP1, IPI 1 */
#endif
}
Example #2
static void __init qcom_smp_prepare_cpus(unsigned int max_cpus)
{
	int cpu, map;
	unsigned int flags = 0;
	static const int cold_boot_flags[] = {
		0,
		SCM_FLAG_COLDBOOT_CPU1,
		SCM_FLAG_COLDBOOT_CPU2,
		SCM_FLAG_COLDBOOT_CPU3,
	};

	for_each_present_cpu(cpu) {
		map = cpu_logical_map(cpu);
		if (WARN_ON(map >= ARRAY_SIZE(cold_boot_flags))) {
			set_cpu_present(cpu, false);
			continue;
		}
		flags |= cold_boot_flags[map];
	}

	if (scm_set_boot_addr(virt_to_phys(secondary_startup), flags)) {
		for_each_present_cpu(cpu) {
			if (cpu == smp_processor_id())
				continue;
			set_cpu_present(cpu, false);
		}
		pr_warn("Failed to set CPU boot address, disabling SMP\n");
	}
}
Example #3
static void __init octeon_smp_setup(void)
{
	const int coreid = cvmx_get_core_num();
	int cpus;
	int id;
	struct cvmx_sysinfo *sysinfo = cvmx_sysinfo_get();

#ifdef CONFIG_HOTPLUG_CPU
	int core_mask = octeon_get_boot_coremask();
	unsigned int num_cores = cvmx_octeon_num_cores();
#endif

	/* The present CPUs are initially just the boot cpu (CPU 0). */
	for (id = 0; id < NR_CPUS; id++) {
		set_cpu_possible(id, id == 0);
		set_cpu_present(id, id == 0);
	}

	__cpu_number_map[coreid] = 0;
	__cpu_logical_map[0] = coreid;

	/* The present CPUs get the lowest CPU numbers. */
	cpus = 1;
	for (id = 0; id < NR_CPUS; id++) {
		if ((id != coreid) && cvmx_coremask_is_core_set(&sysinfo->core_mask, id)) {
			set_cpu_possible(cpus, true);
			set_cpu_present(cpus, true);
			__cpu_number_map[id] = cpus;
			__cpu_logical_map[cpus] = id;
			cpus++;
		}
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * The possible CPUs are all those present on the chip. We
	 * will assign CPU numbers for possible cores as well. Cores
	 * are always consecutively numbered from 0.
	 */
	for (id = 0; setup_max_cpus && octeon_bootloader_entry_addr &&
		     id < num_cores && id < NR_CPUS; id++) {
		if (!(core_mask & (1 << id))) {
			set_cpu_possible(cpus, true);
			__cpu_number_map[id] = cpus;
			__cpu_logical_map[cpus] = id;
			cpus++;
		}
	}
#endif

	octeon_smp_hotplug_setup();
}
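Example #4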
static void j2_prepare_cpus(unsigned int max_cpus)
{
	struct device_node *np;
	unsigned i, max = 1;

	np = of_find_compatible_node(NULL, NULL, "jcore,ipi-controller");
	if (!np)
		goto out;

	j2_ipi_irq = irq_of_parse_and_map(np, 0);
	j2_ipi_trigger = of_iomap(np, 0);
	if (!j2_ipi_irq || !j2_ipi_trigger)
		goto out;

	np = of_find_compatible_node(NULL, NULL, "jcore,cpuid-mmio");
	if (!np)
		goto out;

	sh2_cpuid_addr = of_iomap(np, 0);
	if (!sh2_cpuid_addr)
		goto out;

	if (request_irq(j2_ipi_irq, j2_ipi_interrupt_handler, IRQF_PERCPU,
			"ipi", (void *)j2_ipi_interrupt_handler) != 0)
		goto out;

	max = max_cpus;
out:
	/* Disable any cpus past max_cpus, or all secondaries if we didn't
	 * get the necessary resources to support SMP. */
	for (i = max; i < NR_CPUS; i++) {
		set_cpu_possible(i, false);
		set_cpu_present(i, false);
	}
}
Example #5
/*
 * Update the present map for a cpu node which is going away, and set
 * the hard id in the paca(s) to -1 to be consistent with boot time
 * convention for non-present cpus.
 */
static void pseries_remove_processor(struct device_node *np)
{
	unsigned int cpu;
	int len, nthreads, i;
	const __be32 *intserv;
	u32 thread;

	intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return;

	nthreads = len / sizeof(u32);

	cpu_maps_update_begin();
	for (i = 0; i < nthreads; i++) {
		thread = be32_to_cpu(intserv[i]);
		for_each_present_cpu(cpu) {
			if (get_hard_smp_processor_id(cpu) != thread)
				continue;
			BUG_ON(cpu_online(cpu));
			set_cpu_present(cpu, false);
			set_hard_smp_processor_id(cpu, -1);
			update_numa_cpu_lookup_table(cpu, -1);
			break;
		}
		if (cpu >= nr_cpu_ids)
			printk(KERN_WARNING "Could not find cpu to remove "
			       "with physical id 0x%x\n", thread);
	}
	cpu_maps_update_done();
}
Example #6
static void __init xen_filter_cpu_maps(void)
{
	int i, rc;
	unsigned int subtract = 0;

	if (!xen_initial_domain())
		return;

	num_processors = 0;
	disabled_cpus = 0;
	for (i = 0; i < nr_cpu_ids; i++) {
		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
		if (rc >= 0) {
			num_processors++;
			set_cpu_possible(i, true);
		} else {
			set_cpu_possible(i, false);
			set_cpu_present(i, false);
			subtract++;
		}
	}
#ifdef CONFIG_HOTPLUG_CPU
	/* This is akin to using 'nr_cpus' on the Linux command line.
	 * Which is OK as when we use 'dom0_max_vcpus=X' we can only
	 * have up to X, while nr_cpu_ids is greater than X. This
	 * normally is not a problem, except when CPU hotplugging
	 * is involved and then there might be more than X CPUs
	 * in the guest - which will not work as there is no
	 * hypercall to expand the max number of VCPUs an already
	 * running guest has. So cap it up to X. */
	if (subtract)
		nr_cpu_ids = nr_cpu_ids - subtract;
#endif

}
Example #7
static void __init brcm_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores = get_core_count();
	unsigned int cpu = smp_processor_id();
	int i;

	/* sanity check */
	if (ncores == 0) {
		printk(KERN_ERR
		       "MPCORE: strange CPU count of 0? Default to 1\n");

		ncores = 1;
	}

	if (ncores > NR_CPUS) {
		printk(KERN_WARNING
		       "MPCORE: no. of cores (%d) greater than configured "
		       "maximum of %d - clipping\n",
		       ncores, NR_CPUS);
		ncores = NR_CPUS;
	}
	printk(KERN_INFO "%d cores found\n", ncores);
	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;

	/*
	 * Initialise the present map, which describes the set of CPUs
	 * actually populated at the present time.
	 */
	for (i = 0; i < max_cpus; i++)
		set_cpu_present(i, true);

	/*
	 * Initialise the SCU if there are more than one CPU and let
	 * them know where to start. Note that, on modern versions of
	 * MILO, the "poke" doesn't actually do anything until each
	 * individual core is sent a soft interrupt to get it out of
	 * WFI
	 */
	if (max_cpus > 1) {
		/* nobody is to be released from the pen yet */
		pen_release = -1;

		/*
		 * Enable the local timer or broadcast device for the
		 * boot CPU, but only if we have more than one CPU.
		 */
//		percpu_timer_setup();

		scu_enable(scu_base_addr());

		/* Wakeup other cores in an SoC-specific manner */
		plat_wake_secondary_cpu(max_cpus, platform_secondary_startup);
	}
}
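Example #8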
static int setup_cpu_watcher(struct notifier_block *notifier,
			      unsigned long event, void *data)
{
	int cpu;
	static struct xenbus_watch cpu_watch = {
		.node = "cpu",
		.callback = handle_vcpu_hotplug_event};

	(void)register_xenbus_watch(&cpu_watch);

	for_each_possible_cpu(cpu) {
		if (vcpu_online(cpu) == 0) {
			(void)cpu_down(cpu);
			set_cpu_present(cpu, false);
		}
	}

	return NOTIFY_DONE;
}

static int __init setup_vcpu_hotplug_event(void)
{
	static struct notifier_block xsn_cpu = {
		.notifier_call = setup_cpu_watcher };

	if (!xen_pv_domain())
		return -ENODEV;

	register_xenstore_notifier(&xsn_cpu);

	return 0;
}

arch_initcall(setup_vcpu_hotplug_event);
Example #9
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores = scu_get_core_count(scu_base);
	unsigned int cpu = smp_processor_id();
	int i;

	smp_store_cpu_info(cpu);

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;

	/*
	 * Initialise the present map, which describes the set of CPUs
	 * actually populated at the present time.
	 */
	for (i = 0; i < max_cpus; i++)
		set_cpu_present(i, true);

	/*
	 * Initialise the SCU if there are more than one CPU and let
	 * them know where to start. Note that, on modern versions of
	 * MILO, the "poke" doesn't actually do anything until each
	 * individual core is sent a soft interrupt to get it out of
	 * WFI
	 */
	if (max_cpus > 1) {
		percpu_timer_setup();
		scu_enable(scu_base);
	}
}
Example #10
void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
	int					i;
	u32					bstadd;
	u32					bstsize;
	u32					*phead_address;

	if (get_ambarella_bstmem_info(&bstadd, &bstsize) != AMB_BST_MAGIC) {
		pr_err("Can't find SMP BST!\n");
		return;
	}
	phead_address = get_ambarella_bstmem_head();
	if (phead_address == (u32 *)AMB_BST_INVALID) {
		pr_err("Can't find SMP BST Head!\n");
		return;
	}

	for (i = 0; i < max_cpus; i++)
		set_cpu_present(i, true);
	scu_enable(scu_base);
	for (i = 1; i < max_cpus; i++) {
		phead_address[PROCESSOR_START_0 + i] = BSYM(
			virt_to_phys(ambarella_secondary_startup));
		phead_address[PROCESSOR_STATUS_0 + i] = AMB_BST_START_COUNTER;
	}
	ambcache_flush_range((void *)(phead_address),
		AMBARELLA_BST_HEAD_CACHE_SIZE);
	smp_max_cpus = max_cpus;
}
Example #11
void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
	int i;

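	/* Note: 'zero' is assumed to be a file-scope pointer defined elsewhere in this file */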
	/*
	 * Remap the first three addresses at zero which are used
	 * for 32bit long jump for SMP. Look at zynq_cpu1_start()
	 */
#if defined(CONFIG_PHYS_OFFSET) && (CONFIG_PHYS_OFFSET != 0)
	zero = ioremap(0, 12);
	if (!zero) {
		printk(KERN_WARNING
			"!!!! BOOTUP jump vectors can't be used !!!!\n");
		while (1)
			;
	}
#else
	/* The first three addresses at zero are already mapped */
	zero = (u8 *)CONFIG_PAGE_OFFSET;
#endif

	/*
	 * Initialise the present map, which describes the set of CPUs
	 * actually populated at the present time.
	 */
	for (i = 0; i < max_cpus; i++)
		set_cpu_present(i, true);

	scu_enable(SCU_PERIPH_BASE);
}
Example #12
void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
	int i;
	printk(KERN_ERR "%s %d\n", __func__, __LINE__);
	/*
	 * Initialise the present map, which describes the set of CPUs
	 * actually populated at the present time.
	 */
	for (i = 0; i < max_cpus; i++)
		set_cpu_present(i, true);

	scu_enable(scu_base);
	/*
	 * Enable gic interrupts on the secondary CPU so the interrupt that wakes
	 * it from WFI can be received
	 */
	writel(1, __io_address(OX820_GIC_CPUN_BASE_ADDR(1)));

	/* Write the address that we want the cpu to start at. */
	writel(virt_to_phys(ox820_secondary_startup), HOLDINGPEN_LOCATION);
	smp_wmb();
	writel(1, HOLDINGPEN_CPU);
	smp_wmb();

}
Example #13
static void __init msm_platform_smp_prepare_cpus(unsigned int max_cpus)
{
	int cpu, map;
	unsigned int flags = 0;

	if (scm_is_mc_boot_available())
		return msm_platform_smp_prepare_cpus_mc(max_cpus);

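	/* OR together the SCM cold-boot flag for each present CPU */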
	for_each_present_cpu(cpu) {
		map = cpu_logical_map(cpu);
		if (map >= ARRAY_SIZE(cold_boot_flags)) {
			set_cpu_present(cpu, false);
			__WARN();
			continue;
		}
		flags |= cold_boot_flags[map];
	}

#ifdef CONFIG_HTC_DEBUG_FOOTPRINT
	init_cpu_debug_counter_for_cold_boot();
#endif

	if (scm_set_boot_addr(virt_to_phys(msm_secondary_startup), flags))
		pr_warn("Failed to set CPU boot address\n");
}
Example #14
File: smp.c Project: 24hours/linux
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned i;

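	/* This platform simply assumes the first max_cpus CPU ids are populated */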
	for (i = 0; i < max_cpus; ++i)
		set_cpu_present(i, true);
}
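Example #15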
static void __init cps_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores, c, cca;
	bool cca_unsuitable;

	mips_mt_set_cpuoptions();

	/* Detect whether the CCA is unsuited to multi-core SMP */
	cca = read_c0_config() & CONF_CM_CMASK;
	switch (cca) {
	case 0x4: /* CWBE */
	case 0x5: /* CWB */
		/* The CCA is coherent, multi-core is fine */
		cca_unsuitable = false;
		break;

	default:
		/* CCA is not coherent, multi-core is not usable */
		cca_unsuitable = true;
	}

	/* Warn the user if the CCA prevents multi-core */
	ncores = mips_cm_numcores();
	if (cca_unsuitable && ncores > 1) {
		pr_warn("Using only one core due to unsuitable CCA 0x%x\n",
			cca);

		for_each_present_cpu(c) {
			if (cpu_data[c].core)
				set_cpu_present(c, false);
		}
	}
}
Example #16
static void __init axxia_smp_prepare_cpus(unsigned int max_cpus)
{
	int cpu_count = 0;
	int cpu;

	/*
	 * Initialise the present map, which describes the set of CPUs actually
	 * populated at the present time.
	 */
	for_each_possible_cpu(cpu) {
		struct device_node *np;
		u32 release_phys;

		np = of_get_cpu_node(cpu, NULL);
		if (!np)
			continue;
		if (of_property_read_u32(np, "cpu-release-addr", &release_phys))
			continue;

		if (cpu_count < max_cpus) {
			set_cpu_present(cpu, true);
			cpu_count++;
		}

		if (release_phys != 0)
			write_release_addr(release_phys);
	}
}
Example #17
static void __init xen_filter_cpu_maps(void)
{
	int i, rc;
	unsigned int subtract = 0;

	if (!xen_initial_domain())
		return;

	num_processors = 0;
	disabled_cpus = 0;
	for (i = 0; i < nr_cpu_ids; i++) {
		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
		if (rc >= 0) {
			num_processors++;
			set_cpu_possible(i, true);
		} else {
			set_cpu_possible(i, false);
			set_cpu_present(i, false);
			subtract++;
		}
	}
#ifdef CONFIG_HOTPLUG_CPU
	if (subtract)
		nr_cpu_ids = nr_cpu_ids - subtract;
#endif

}
Example #18
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned i;

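	/* On this platform every possible CPU is treated as present */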
	for_each_possible_cpu(i)
		set_cpu_present(i, true);
}
Example #19
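/* Take a hot-removed CPU out of service: unregister its device and clear it from the present mask */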
static void disable_hotplug_cpu(int cpu)
{
	if (cpu_present(cpu))
		arch_unregister_cpu(cpu);

	set_cpu_present(cpu, false);
}
Example #20
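/* Bring a hotplugged vCPU back: register its device and mark it present */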
static void enable_hotplug_cpu(int cpu)
{
	if (!cpu_present(cpu))
		xen_arch_register_cpu(cpu);

	set_cpu_present(cpu, true);
}
Example #21
void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores = get_core_count();
	unsigned long addr;
	int i;

	edb_trace(1);
	edb_putstr("smp_prepare_cpus\n");

	/* sanity check */
	if (ncores == 0) {
		printk(KERN_ERR
			"hisik3: strange CM count of 0? Default to 1\n");

		ncores = 1;
	}

	if (ncores > NR_CPUS) {
		printk(KERN_WARNING
		       "hisik3: no. of cores (%d) greater than configured "
		       "maximum of %d - clipping\n",
		       ncores, NR_CPUS);
		ncores = NR_CPUS;
	}

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores) {
		WARN(1, "hisik3: max_cpus should NOT exceed the number of cores\n");
		max_cpus = ncores;
	}

	/*
	 * Initialise the present map, which describes the set of CPUs
	 * actually populated at the present time.
	 */
	for (i = 0; i < max_cpus; i++)
		set_cpu_present(i, true);

	scu_enable(scu_base_addr());

	addr = (unsigned long) IO_ADDRESS(MEMORY_AXI_SECOND_CPU_BOOT_ADDR);

	printk("poke_milo addr 0x%lx at 0x%x\n", addr, virt_to_phys(k3v2_secondary_startup));

	/*
	 * Write the address of secondary startup into the system-wide flags
	 * register. The BootMonitor waits for this register to become
	 * non-zero.
	 */
	writel(BSYM(virt_to_phys(k3v2_secondary_startup)), addr);

	wmb();
	flush_cache_all();

	edb_putstr("smp_prepare_cpus out\n");
}
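Example #22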
static void __init cps_smp_setup(void)
{
	unsigned int ncores, nvpes, core_vpes;
	unsigned long core_entry;
	int c, v;

	/* Detect & record VPE topology */
	ncores = mips_cm_numcores();
	pr_info("%s topology ", cpu_has_mips_r6 ? "VP" : "VPE");
	for (c = nvpes = 0; c < ncores; c++) {
		core_vpes = core_vpe_count(c);
		pr_cont("%c%u", c ? ',' : '{', core_vpes);

		/* Use the number of VPEs in core 0 for smp_num_siblings */
		if (!c)
			smp_num_siblings = core_vpes;

		for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
			cpu_data[nvpes + v].core = c;
#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_CPU_MIPSR6)
			cpu_data[nvpes + v].vpe_id = v;
#endif
		}

		nvpes += core_vpes;
	}
	pr_cont("} total %u\n", nvpes);

	/* Indicate present CPUs (CPU being synonymous with VPE) */
	for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) {
		set_cpu_possible(v, true);
		set_cpu_present(v, true);
		__cpu_number_map[v] = v;
		__cpu_logical_map[v] = v;
	}

	/* Set a coherent default CCA (CWB) */
	change_c0_config(CONF_CM_CMASK, 0x5);

	/* Core 0 is powered up (we're running on it) */
	bitmap_set(core_power, 0, 1);

	/* Initialise core 0 */
	mips_cps_core_init();

	/* Make core 0 coherent with everything */
	write_gcr_cl_coherence(0xff);

	if (mips_cm_revision() >= CM_REV_CM3) {
		core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
		write_gcr_bev_base(core_entry);
	}

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpumask_set_cpu(0, &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
}
Example #23
static void __init boot_cpu_init(void)
{
	int cpu = smp_processor_id();
	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
	set_cpu_online(cpu, true);
	set_cpu_present(cpu, true);
	set_cpu_possible(cpu, true);
}
Example #24
static void ct_ca9x4_smp_enable(unsigned int max_cpus)
{
	int i;
	for (i = 0; i < max_cpus; i++)
		set_cpu_present(i, true);

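	/* Enable the A9 MPCore Snoop Control Unit so the secondary cores can join coherency */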
	scu_enable(MMIO_P2V(A9_MPCORE_SCU));
}
Example #25
/* Called very early during startup to mark boot cpu as online */
void __init smp_prepare_boot_cpu(void)
{
	int cpu = smp_processor_id();
	set_cpu_online(cpu, 1);
	set_cpu_present(cpu, 1);
	__get_cpu_var(cpu_state) = CPU_ONLINE;

	init_messaging();
}
Example #26
/* Called very early during startup to mark boot cpu as online */
void __init smp_prepare_boot_cpu(void)
{
	int cpu = smp_processor_id();
	set_cpu_online(cpu, 1);
	set_cpu_present(cpu, 1);
	__this_cpu_write(cpu_state, CPU_ONLINE);

	init_messaging();
}
Example #27
static void __init boot_cpu_init(void)
{
	int cpu = smp_processor_id();
	/* The boot CPU is always available */
	set_cpu_online(cpu, true);
	set_cpu_active(cpu, true);
	set_cpu_present(cpu, true);
	set_cpu_possible(cpu, true);
}
Example #28
void __init smp_prepare_boot_cpu(void)
{
	int bootstrap_processor = per_cpu(cpu_data, 0).cpuid;

	/* Setup BSP mappings */
	printk(KERN_INFO "SMP: bootstrap CPU ID is %d\n", bootstrap_processor);

	set_cpu_online(bootstrap_processor, true);
	set_cpu_present(bootstrap_processor, true);
}
Example #29
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores = get_core_count();
	unsigned int cpu = smp_processor_id();
	int i;

	/* sanity check */
	if (ncores == 0) {
		printk(KERN_ERR
		       "Realview: strange CM count of 0? Default to 1\n");

		ncores = 1;
	}

	if (ncores > NR_CPUS) {
		printk(KERN_WARNING
		       "Realview: no. of cores (%d) greater than configured "
		       "maximum of %d - clipping\n",
		       ncores, NR_CPUS);
		ncores = NR_CPUS;
	}

	smp_store_cpu_info(cpu);

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;

	/*
	 * Initialise the present map, which describes the set of CPUs
	 * actually populated at the present time.
	 */
	for (i = 0; i < max_cpus; i++)
		set_cpu_present(i, true);

	/*
	 * Initialise the SCU if there are more than one CPU and let
	 * them know where to start. Note that, on modern versions of
	 * MILO, the "poke" doesn't actually do anything until each
	 * individual core is sent a soft interrupt to get it out of
	 * WFI
	 */
	if (max_cpus > 1) {
		/*
		 * Enable the local timer or broadcast device for the
		 * boot CPU, but only if we have more than one CPU.
		 */
		percpu_timer_setup();

		scu_enable(scu_base_addr());
		poke_milo();
	}
}
Example #30
static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned cpu;
	unsigned int i;

	if (skip_ioapic_setup) {
		char *m = (max_cpus == 0) ?
			  "The nosmp parameter is incompatible with Xen; "
			  "use Xen dom0_max_vcpus=1 parameter" :
			  "The noapic parameter is incompatible with Xen";

		xen_raw_printk(m);
		panic(m);
	}
	xen_init_lock_cpu(0);

	smp_store_cpu_info(0);
	cpu_data(0).x86_max_cores = 1;

	for_each_possible_cpu(i) {
		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
	}
	set_cpu_sibling_map(0);

	if (xen_smp_intr_init(0))
		BUG();

	if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
		panic("could not allocate xen_cpu_initialized_map\n");

	cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));

	/* Restrict the possible_map according to max_cpus. */
	while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
		for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
			continue;
		set_cpu_possible(cpu, false);
	}

	for_each_possible_cpu(cpu) {
		struct task_struct *idle;

		if (cpu == 0)
			continue;

		idle = fork_idle(cpu);
		if (IS_ERR(idle))
			panic("failed fork for CPU %d", cpu);

		set_cpu_present(cpu, true);
	}
}