Example #1
File: softirq.c  Project: h4ck3rm1k3/linux
static __init int spawn_ksoftirqd(void)
{
	cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
				  takeover_tasklets);
	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

	return 0;
}
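All of these examples register CPU-hotplug callbacks with cpuhp_setup_state_nocalls(), which installs the state without invoking the callbacks on CPUs that are already online. For orientation, here is a minimal sketch of the registration/teardown pattern as a standalone module; the "example/demo:online" name, the callbacks, and hp_state are hypothetical, not taken from any of the projects below.

#include <linux/cpuhotplug.h>
#include <linux/module.h>

/* Hypothetical per-CPU setup callback: runs when a CPU comes online. */
static int example_cpu_online(unsigned int cpu)
{
	return 0;
}

/* Hypothetical per-CPU teardown callback: runs when a CPU goes down. */
static int example_cpu_dead(unsigned int cpu)
{
	return 0;
}

static int hp_state;	/* dynamic state id returned for CPUHP_AP_ONLINE_DYN */

static int __init example_init(void)
{
	int ret;

	/* With CPUHP_AP_ONLINE_DYN, a successful return is the state id (>= 0). */
	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					"example/demo:online",
					example_cpu_online, example_cpu_dead);
	if (ret < 0)
		return ret;
	hp_state = ret;
	return 0;
}

static void __exit example_exit(void)
{
	cpuhp_remove_state_nocalls(hp_state);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");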
Example #2
File: enlighten.c  Project: asmalldev/linux
int xen_cpuhp_setup(int (*cpu_up_prepare_cb)(unsigned int),
		    int (*cpu_dead_cb)(unsigned int))
{
	int rc;

	rc = cpuhp_setup_state_nocalls(CPUHP_XEN_PREPARE,
				       "x86/xen/hvm_guest:prepare",
				       cpu_up_prepare_cb, cpu_dead_cb);
	if (rc >= 0) {
		rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					       "x86/xen/hvm_guest:online",
					       xen_cpu_up_online, NULL);
		if (rc < 0)
			cpuhp_remove_state_nocalls(CPUHP_XEN_PREPARE);
	}

	return rc >= 0 ? 0 : rc;
}
Example #3
static int __init loongson3_init(void)
{
	on_each_cpu(reset_counters, NULL, 1);
	cpuhp_setup_state_nocalls(CPUHP_AP_MIPS_OP_LOONGSON3_STARTING,
				  "mips/oprofile/loongson3:starting",
				  loongson3_starting_cpu, loongson3_dying_cpu);
	save_perf_irq = perf_irq;
	perf_irq = loongson3_perfcount_handler;

	return 0;
}
Example #4
static __init int blk_softirq_init(void)
{
	int i;

	for_each_possible_cpu(i)
		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));

	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
	cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD,
				  "block/softirq:dead", NULL,
				  blk_softirq_cpu_dead);
	return 0;
}
Example #5
static int __init bman_portal_driver_register(struct platform_driver *drv)
{
	int ret;

	ret = platform_driver_register(drv);
	if (ret < 0)
		return ret;

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					"soc/qbman_portal:online",
					bman_online_cpu, bman_offline_cpu);
	if (ret < 0) {
		pr_err("bman: failed to register hotplug callbacks.\n");
		platform_driver_unregister(drv);
		return ret;
	}
	return 0;
}
Example #6
static int register_shx3_cpu_notifier(void)
{
	cpuhp_setup_state_nocalls(CPUHP_SH_SH3X_PREPARE, "sh/shx3:prepare",
				  shx3_cpu_prepare, NULL);
	return 0;
}
Example #7
static int etm_probe(struct amba_device *adev, const struct amba_id *id)
{
	int ret;
	void __iomem *base;
	struct device *dev = &adev->dev;
	struct coresight_platform_data *pdata = NULL;
	struct etm_drvdata *drvdata;
	struct resource *res = &adev->res;
	struct coresight_desc *desc;
	struct device_node *np = adev->dev.of_node;

	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	if (np) {
		pdata = of_get_coresight_platform_data(dev, np);
		if (IS_ERR(pdata))
			return PTR_ERR(pdata);

		adev->dev.platform_data = pdata;
		drvdata->use_cp14 = of_property_read_bool(np, "arm,cp14");
	}

	drvdata->dev = &adev->dev;
	dev_set_drvdata(dev, drvdata);

	/* Validity for the resource is already checked by the AMBA core */
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	drvdata->base = base;

	spin_lock_init(&drvdata->spinlock);

	drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
	if (!IS_ERR(drvdata->atclk)) {
		ret = clk_prepare_enable(drvdata->atclk);
		if (ret)
			return ret;
	}

	drvdata->cpu = pdata ? pdata->cpu : 0;

	get_online_cpus();
	etmdrvdata[drvdata->cpu] = drvdata;

	if (smp_call_function_single(drvdata->cpu,
				     etm_init_arch_data, drvdata, 1))
		dev_err(dev, "ETM arch init failed\n");

	if (!etm_count++) {
		cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING,
					  "AP_ARM_CORESIGHT_STARTING",
					  etm_starting_cpu, etm_dying_cpu);
		ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
						"AP_ARM_CORESIGHT_ONLINE",
						etm_online_cpu, NULL);
		if (ret < 0)
			goto err_arch_supported;
		hp_online = ret;
	}
	put_online_cpus();

	if (!etm_arch_supported(drvdata->arch)) {
		ret = -EINVAL;
		goto err_arch_supported;
	}

	etm_init_trace_id(drvdata);
	etm_set_default(&drvdata->config);

	desc->type = CORESIGHT_DEV_TYPE_SOURCE;
	desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
	desc->ops = &etm_cs_ops;
	desc->pdata = pdata;
	desc->dev = dev;
	desc->groups = coresight_etm_groups;
	drvdata->csdev = coresight_register(desc);
	if (IS_ERR(drvdata->csdev)) {
		ret = PTR_ERR(drvdata->csdev);
		goto err_arch_supported;
	}

	ret = etm_perf_symlink(drvdata->csdev, true);
	if (ret) {
		coresight_unregister(drvdata->csdev);
		goto err_arch_supported;
	}

	pm_runtime_put(&adev->dev);
	dev_info(dev, "%s initialized\n", (char *)id->data);
	if (boot_enable) {
		coresight_enable(drvdata->csdev);
		drvdata->boot_enable = true;
	}

	return 0;

err_arch_supported:
	if (--etm_count == 0) {
		cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING);
		if (hp_online)
			cpuhp_remove_state_nocalls(hp_online);
	}
	return ret;
}
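Example #7 registers its CoreSight hotplug states only for the first ETM instance and removes them again when the last instance is torn down; that is what the etm_count reference count and the saved hp_online state id are for. A reduced sketch of that pattern, with hypothetical names:

#include <linux/cpuhotplug.h>

/* Hypothetical per-CPU online callback. */
static int example_online_cpu(unsigned int cpu)
{
	return 0;
}

static int instance_count;	/* number of live device instances */
static int hp_online;		/* saved dynamic state id, 0 if unregistered */

/* Called from each probe; only the first caller installs the state. */
static int example_hp_get(void)
{
	int ret;

	if (instance_count++)
		return 0;

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					"example/device:online",
					example_online_cpu, NULL);
	if (ret < 0) {
		instance_count--;
		return ret;
	}
	hp_online = ret;
	return 0;
}

/* Called from each remove/error path; the last caller removes the state. */
static void example_hp_put(void)
{
	if (!--instance_count && hp_online) {
		cpuhp_remove_state_nocalls(hp_online);
		hp_online = 0;
	}
}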
Example #8
/*
 * Initialize the context management stuff.
 */
void __init mmu_context_init(void)
{
	/* Mark init_mm as being active on all possible CPUs since
	 * we'll get called with prev == init_mm the first time
	 * we schedule on a given CPU
	 */
	init_mm.context.active = NR_CPUS;

	/*
	 *   The MPC8xx has only 16 contexts.  We rotate through them on each
	 * task switch.  A better way would be to keep track of tasks that
	 * own contexts, and implement an LRU usage.  That way very active
	 * tasks don't always have to pay the TLB reload overhead.  The
	 * kernel pages are mapped shared, so the kernel can run on behalf
	 * of any task that makes a kernel entry.  Shared does not mean they
	 * are not protected, just that the ASID comparison is not performed.
	 *      -- Dan
	 *
	 * The IBM4xx has 256 contexts, so we can just rotate through these
	 * as a way of "switching" contexts.  If the TID of the TLB is zero,
	 * the PID/TID comparison is disabled, so we can use a TID of zero
	 * to represent all kernel pages as shared among all contexts.
	 * 	-- Dan
	 *
	 * The IBM 47x core supports 16-bit PIDs, thus 65535 contexts. We
	 * should normally never have to steal though the facility is
	 * present if needed.
	 *      -- BenH
	 */
	if (mmu_has_feature(MMU_FTR_TYPE_8xx)) {
		first_context = 0;
		last_context = 15;
		no_selective_tlbil = true;
	} else if (mmu_has_feature(MMU_FTR_TYPE_47x)) {
		first_context = 1;
		last_context = 65535;
		no_selective_tlbil = false;
	} else {
		first_context = 1;
		last_context = 255;
		no_selective_tlbil = false;
	}

#ifdef DEBUG_CLAMP_LAST_CONTEXT
	last_context = DEBUG_CLAMP_LAST_CONTEXT;
#endif
	/*
	 * Allocate the maps used by context management
	 */
	context_map = memblock_virt_alloc(CTX_MAP_SIZE, 0);
	context_mm = memblock_virt_alloc(sizeof(void *) * (last_context + 1), 0);
#ifndef CONFIG_SMP
	stale_map[0] = memblock_virt_alloc(CTX_MAP_SIZE, 0);
#else
	stale_map[boot_cpuid] = memblock_virt_alloc(CTX_MAP_SIZE, 0);

	cpuhp_setup_state_nocalls(CPUHP_POWERPC_MMU_CTX_PREPARE,
				  "powerpc/mmu/ctx:prepare",
				  mmu_ctx_cpu_prepare, mmu_ctx_cpu_dead);
#endif

	printk(KERN_INFO
	       "MMU: Allocated %zu bytes of context maps for %d contexts\n",
	       2 * CTX_MAP_SIZE + (sizeof(void *) * (last_context + 1)),
	       last_context - first_context + 1);

	/*
	 * Some processors have too few contexts to reserve one for
	 * init_mm, and require using context 0 for a normal task.
	 * Other processors reserve the use of context zero for the kernel.
	 * This code assumes first_context < 32.
	 */
	context_map[0] = (1 << first_context) - 1;
	next_context = first_context;
	nr_free_contexts = last_context - first_context + 1;
}
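The long comment in this example describes rotating through a small pool of hardware context IDs (16 on the MPC8xx, 256 on the IBM4xx, 65535 on the 47x) and reusing old ones when the pool wraps around. As a toy illustration of the rotation idea only (this is not the powerpc allocator):

#include <stdio.h>

#define FIRST_CONTEXT	1
#define LAST_CONTEXT	255	/* e.g. the IBM4xx case above */

static unsigned int next_context = FIRST_CONTEXT;

/*
 * Hand out IDs round-robin; wrapping past LAST_CONTEXT reuses ("steals")
 * IDs that may still belong to older tasks, which must then reload.
 */
static unsigned int alloc_context(void)
{
	unsigned int id = next_context;

	if (++next_context > LAST_CONTEXT)
		next_context = FIRST_CONTEXT;
	return id;
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		printf("context %u\n", alloc_context());
	return 0;
}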
Example #9
static int register_cavium_notifier(void)
{
	return cpuhp_setup_state_nocalls(CPUHP_MIPS_SOC_PREPARE,
					 "mips/cavium:prepare",
					 octeon_update_boot_vector, NULL);
}
Example #10
static inline void fpsimd_hotplug_init(void)
{
	cpuhp_setup_state_nocalls(CPUHP_ARM64_FPSIMD_DEAD, "arm64/fpsimd:dead",
				  NULL, fpsimd_cpu_dead);
}
Example #11
static int __init b15_rac_init(void)
{
	struct device_node *dn, *cpu_dn;
	int ret = 0, cpu;
	u32 reg, en_mask = 0;

	dn = of_find_compatible_node(NULL, NULL, "brcm,brcmstb-cpu-biu-ctrl");
	if (!dn)
		return -ENODEV;

	if (WARN(num_possible_cpus() > 4, "RAC only supports 4 CPUs\n"))
		goto out;

	b15_rac_base = of_iomap(dn, 0);
	if (!b15_rac_base) {
		pr_err("failed to remap BIU control base\n");
		ret = -ENOMEM;
		goto out;
	}

	cpu_dn = of_get_cpu_node(0, NULL);
	if (!cpu_dn) {
		ret = -ENODEV;
		goto out;
	}

	if (of_device_is_compatible(cpu_dn, "brcm,brahma-b15"))
		rac_flush_offset = B15_RAC_FLUSH_REG;
	else if (of_device_is_compatible(cpu_dn, "brcm,brahma-b53"))
		rac_flush_offset = B53_RAC_FLUSH_REG;
	else {
		pr_err("Unsupported CPU\n");
		of_node_put(cpu_dn);
		ret = -EINVAL;
		goto out;
	}
	of_node_put(cpu_dn);

	ret = register_reboot_notifier(&b15_rac_reboot_nb);
	if (ret) {
		pr_err("failed to register reboot notifier\n");
		iounmap(b15_rac_base);
		goto out;
	}

	if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
		ret = cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CACHE_B15_RAC_DEAD,
					"arm/cache-b15-rac:dead",
					NULL, b15_rac_dead_cpu);
		if (ret)
			goto out_unmap;

		ret = cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CACHE_B15_RAC_DYING,
					"arm/cache-b15-rac:dying",
					NULL, b15_rac_dying_cpu);
		if (ret)
			goto out_cpu_dead;
	}

	if (IS_ENABLED(CONFIG_PM_SLEEP))
		register_syscore_ops(&b15_rac_syscore_ops);

	spin_lock(&rac_lock);
	reg = __raw_readl(b15_rac_base + RAC_CONFIG0_REG);
	for_each_possible_cpu(cpu)
		en_mask |= ((1 << RACPREFDATA_SHIFT) << (cpu * RAC_CPU_SHIFT));
	WARN(reg & en_mask, "Read-ahead cache not previously disabled\n");

	b15_rac_enable();
	set_bit(RAC_ENABLED, &b15_rac_flags);
	spin_unlock(&rac_lock);

	pr_info("Broadcom Brahma-B15 readahead cache at: 0x%p\n",
		b15_rac_base + RAC_CONFIG0_REG);

	goto out;

out_cpu_dead:
	cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CACHE_B15_RAC_DYING);
out_unmap:
	unregister_reboot_notifier(&b15_rac_reboot_nb);
	iounmap(b15_rac_base);
out:
	of_node_put(dn);
	return ret;
}
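Example #11 is also a clean illustration of the kernel's goto-unwind error-handling idiom: resources are acquired in order, and each failure jumps to a label that releases everything acquired so far, in reverse order. A stripped-down sketch of the shape, with hypothetical resources:

#include <linux/init.h>

static int acquire_a(void) { return 0; }	/* hypothetical resource A */
static void release_a(void) { }
static int acquire_b(void) { return 0; }	/* hypothetical resource B */
static void release_b(void) { }
static int acquire_c(void) { return 0; }	/* hypothetical resource C */

static int __init demo_init(void)
{
	int ret;

	ret = acquire_a();
	if (ret)
		return ret;		/* nothing to undo yet */

	ret = acquire_b();
	if (ret)
		goto out_release_a;

	ret = acquire_c();
	if (ret)
		goto out_release_b;	/* undo in reverse order */

	return 0;

out_release_b:
	release_b();
out_release_a:
	release_a();
	return ret;
}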