Example #1
static ssize_t __ref store_cc_enabled(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	int ret = 0;
	int val = 0;

	mutex_lock(&core_control_mutex);
	ret = kstrtoint(buf, 10, &val);
	if (ret) {
		pr_err("%s: Invalid input %s\n", KBUILD_MODNAME, buf);
		goto done_store_cc;
	}

	if (core_control_enabled == !!val)
		goto done_store_cc;

	core_control_enabled = !!val;
	if (core_control_enabled) {
		pr_info("%s: Core control enabled\n", KBUILD_MODNAME);
		register_cpu_notifier(&msm_thermal_cpu_notifier);
		update_offline_cores(cpus_offlined);
	} else {
		pr_info("%s: Core control disabled\n", KBUILD_MODNAME);
		unregister_cpu_notifier(&msm_thermal_cpu_notifier);
	}

done_store_cc:
	mutex_unlock(&core_control_mutex);
	return count;
}
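
For orientation, the register_cpu_notifier()/unregister_cpu_notifier() calls in these examples all take a struct notifier_block whose callback is invoked on CPU hotplug events. Below is a minimal sketch of such a callback using the pre-4.10 notifier API; the names example_cpu_callback/example_cpu_notifier are illustrative only and are not the actual msm_thermal_cpu_notifier.

#include <linux/cpu.h>
#include <linux/notifier.h>

static int example_cpu_callback(struct notifier_block *nb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		/* Allocate per-CPU state before the CPU comes up. */
		break;
	case CPU_ONLINE:
		/* CPU is running; start per-CPU work. */
		break;
	case CPU_DOWN_PREPARE:
		/* CPU is about to go away; quiesce work on it. */
		break;
	case CPU_DEAD:
		/* Free per-CPU state. */
		break;
	}
	pr_debug("cpu %u: hotplug event %lu\n", cpu, action);
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_notifier = {
	.notifier_call = example_cpu_callback,
};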
Example #2
static int zram_cpu_init(void)
{
	int ret;
	unsigned int cpu;

	ret = register_cpu_notifier(&zram_cpu_notifier_block);
	if (ret) {
		pr_err("zram: can't register cpu notifier\n");
		/* Nothing to undo yet; propagate the real error code. */
		return ret;
	}

	get_online_cpus();
	for_each_online_cpu(cpu) {
		void *pcpu = (void *)(long)cpu;
		if (zram_cpu_notifier(&zram_cpu_notifier_block,
				      CPU_UP_PREPARE, pcpu) != NOTIFY_OK)
			goto cleanup;
	}
	put_online_cpus();
	return ret;

cleanup:
	zram_comp_cpus_down();
	put_online_cpus();
	unregister_cpu_notifier(&zram_cpu_notifier_block);
	return -ENOMEM;
}
Example #3
int __devinit msm_thermal_init(struct msm_thermal_data *pdata)
{
	int ret = 0;

	BUG_ON(!pdata);
	tsens_get_max_sensor_num(&max_tsens_num);
	memcpy(&msm_thermal_info, pdata, sizeof(struct msm_thermal_data));

	if (create_sensor_id_map())
		return -EINVAL;
	if (check_sensor_id(msm_thermal_info.sensor_id))
		return -EINVAL;

	enabled = 1;
	INIT_DELAYED_WORK(&check_temp_work, check_temp);
	schedule_delayed_work(&check_temp_work, 0);

	if (num_possible_cpus() > 1) {
		mutex_lock(&core_control_mutex);
		core_control_enabled = 1;
		register_cpu_notifier(&msm_thermal_cpu_notifier);
		update_offline_cores(cpus_offlined);
		mutex_unlock(&core_control_mutex);
	}

	return ret;
}
Example #4
int kvm_init_srcu(void)
{
	struct task_struct *p;
	int cpu;
	int err;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = kthread_create(kvm_rcu_sync_thread, (void *)(long)cpu,
				   "kvmsrcusync/%d", cpu);
		if (IS_ERR(p))
			goto error_out;

		kthread_bind(p, cpu);
		sched_setscheduler(p, SCHED_FIFO, &sync_thread_param);
		per_cpu(sync_thread, cpu) = p;
		wake_up_process(p);
	}
#ifdef CONFIG_HOTPLUG_CPU
	register_cpu_notifier(&cpu_nfb);
#endif /* CONFIG_HOTPLUG_CPU */
	put_online_cpus();

	return 0;

error_out:
	put_online_cpus();
	printk(KERN_ERR "kvm: kvmsrcusync for %d failed\n", cpu);
	err = PTR_ERR(p);
	kvm_exit_srcu();
	return err;
}
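
A note on the #ifdef CONFIG_HOTPLUG_CPU guard above: kernels of this era also provide hotcpu wrappers in <linux/cpu.h> that expand to register_cpu_notifier()/unregister_cpu_notifier() when hotplug is enabled and to no-ops otherwise, so the same effect can be had without preprocessor conditionals. A sketch, reusing the cpu_nfb notifier block from Example #4:

#include <linux/cpu.h>

/* Same registration without an explicit #ifdef: this is a no-op on
 * kernels built without CONFIG_HOTPLUG_CPU. */
static int example_register_nfb(void)
{
	return register_hotcpu_notifier(&cpu_nfb);
}

/* Matching teardown, e.g. on module exit or error unwind. */
static void example_unregister_nfb(void)
{
	unregister_hotcpu_notifier(&cpu_nfb);
}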
Example #5
static int __init topology_init(void)
{
	int cpu;

	register_nodes();
	register_cpu_notifier(&sysfs_cpu_nb);

	for_each_possible_cpu(cpu) {
		struct cpu *c = &per_cpu(cpu_devices, cpu);

		/*
		 * For now, we just see if the system supports making
		 * the RTAS calls for CPU hotplug.  But, there may be a
		 * more comprehensive way to do this for an individual
		 * CPU.  For instance, the boot cpu might never be valid
		 * for hotplugging.
		 */
		if (ppc_md.cpu_die)
			c->hotpluggable = 1;

		if (cpu_online(cpu) || c->hotpluggable) {
			register_cpu(c, cpu);

			device_create_file(&c->dev, &dev_attr_physical_id);
		}

		if (cpu_online(cpu))
			register_cpu_online(cpu);
	}
#ifdef CONFIG_PPC64
	sysfs_create_dscr_default();
#endif /* CONFIG_PPC64 */

	return 0;
}
Example #6
/*
 * One-time initialisation.
 */
static int __init arch_hw_breakpoint_init(void)
{
	core_num_brps = get_num_brps();
	core_num_wrps = get_num_wrps();

	pr_info("found %d breakpoint and %d watchpoint registers.\n",
		core_num_brps, core_num_wrps);

	/*
	 * Reset the breakpoint resources. We assume that a halting
	 * debugger will leave the world in a nice state for us.
	 */
	smp_call_function(reset_ctrl_regs, NULL, 1);
	reset_ctrl_regs(NULL);

	/* Register debug fault handlers. */
	hook_debug_fault_code(DBG_ESR_EVT_HWBP, breakpoint_handler, SIGTRAP,
			      TRAP_HWBKPT, "hw-breakpoint handler");
	hook_debug_fault_code(DBG_ESR_EVT_HWWP, watchpoint_handler, SIGTRAP,
			      TRAP_HWBKPT, "hw-watchpoint handler");

	/* Register hotplug notifier. */
	register_cpu_notifier(&hw_breakpoint_reset_nb);

	return 0;
}
Example #7
static __init int irq_work_init_cpu_notifier(void)
{
	cpu_notify.notifier_call = irq_work_cpu_notify;
	cpu_notify.priority = 0;
	register_cpu_notifier(&cpu_notify);
	return 0;
}
Example #8
static ssize_t __ref store_cc_enabled(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	int ret = 0;
	int val = 0;

	ret = kstrtoint(buf, 10, &val);
	if (ret) {
		pr_err("%s: Invalid input %s\n", KBUILD_MODNAME, buf);
		goto done_store_cc;
	}

	if (core_control_enabled == !!val)
		goto done_store_cc;

	core_control_enabled = !!val;
	if (core_control_enabled) {
		pr_info("%s: Core control enabled\n", KBUILD_MODNAME);
		register_cpu_notifier(&msm_thermal_cpu_notifier);
		if (hotplug_task)
			complete(&hotplug_notify_complete);
		else
			pr_err("%s: Hotplug task is not initialized\n",
					KBUILD_MODNAME);
	} else {
		pr_info("%s: Core control disabled\n", KBUILD_MODNAME);
		unregister_cpu_notifier(&msm_thermal_cpu_notifier);
	}

done_store_cc:
	return count;
}
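
Examples #1 and #8 show only the store side of the sysfs attribute. Below is a hedged sketch of the read side and the attribute wiring such a handler is normally paired with; show_cc_enabled, the attribute name cc_enabled, and the 0644 mode are assumptions for illustration, not taken from the driver.

static ssize_t show_cc_enabled(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	/* Report the current core-control state as "0" or "1". */
	return snprintf(buf, PAGE_SIZE, "%d\n", core_control_enabled);
}

/* Bind show/store into one attribute; exposed with something like
 * sysfs_create_file(kobj, &cc_enabled_attr.attr) during probe. */
static struct kobj_attribute cc_enabled_attr =
	__ATTR(cc_enabled, 0644, show_cc_enabled, store_cc_enabled);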
Example #9
int __init metag_generic_timer_init(void)
{
	/*
	 * On Meta 2 SoCs, the actual frequency of the timer is based on the
	 * Meta core clock speed divided by an integer, so it is only
	 * approximately 1MHz. Calculating the real frequency here drastically
	 * reduces clock skew on these SoCs.
	 */
#ifdef CONFIG_METAG_META21
	hwtimer_freq = get_coreclock() / (metag_in32(EXPAND_TIMER_DIV) + 1);
#endif
	pr_info("Timer frequency: %u Hz\n", hwtimer_freq);

	clocksource_register_hz(&clocksource_metag, hwtimer_freq);

	setup_irq(tbisig_map(TBID_SIGNUM_TRT), &metag_timer_irq);

	/* Configure timer on boot CPU */
	arch_timer_setup(smp_processor_id());

	/* Hook cpu boot to configure other CPU's timers */
	register_cpu_notifier(&arch_timer_cpu_nb);

	return 0;
}
Example #10
static int __init pseries_processor_idle_init(void)
{
	int retval;

	retval = pseries_idle_probe();
	if (retval)
		return retval;

	pseries_cpuidle_driver_init();
	retval = cpuidle_register_driver(&pseries_idle_driver);
	if (retval) {
		printk(KERN_DEBUG "Registration of pseries driver failed.\n");
		return retval;
	}

	retval = pseries_idle_devices_init();
	if (retval) {
		pseries_idle_devices_uninit();
		cpuidle_unregister_driver(&pseries_idle_driver);
		return retval;
	}

	register_cpu_notifier(&setup_hotplug_notifier);
	printk(KERN_DEBUG "pseries_idle_driver registered\n");

	return 0;
}
Example #11
static int __cpuinit setup_cpu_watcher(struct notifier_block *notifier,
				       unsigned long event, void *data)
{
	unsigned int i;

	static struct xenbus_watch __cpuinitdata cpu_watch = {
		.node = "cpu",
		.callback = handle_vcpu_hotplug_event,
		.flags = XBWF_new_thread };
	(void)register_xenbus_watch(&cpu_watch);

	if (!is_initial_xendomain()) {
		for_each_possible_cpu(i)
			vcpu_hotplug(i);
		printk(KERN_INFO "Brought up %ld CPUs\n",
		       (long)num_online_cpus());
	}

	return NOTIFY_DONE;
}

static int __init setup_vcpu_hotplug_event(void)
{
	static struct notifier_block hotplug_cpu = {
		.notifier_call = smpboot_cpu_notify };
	static struct notifier_block __cpuinitdata xsn_cpu = {
		.notifier_call = setup_cpu_watcher };

	if (!is_running_on_xen())
		return -ENODEV;

	register_cpu_notifier(&hotplug_cpu);
	register_xenstore_notifier(&xsn_cpu);

	return 0;
}

arch_initcall(setup_vcpu_hotplug_event);

int __ref smp_suspend(void)
{
	unsigned int cpu;
	int err;

	for_each_online_cpu(cpu) {
		if (cpu == 0)
			continue;
		err = cpu_down(cpu);
		if (err) {
			printk(KERN_CRIT "Failed to take all CPUs "
			       "down: %d.\n", err);
			for_each_possible_cpu(cpu)
				vcpu_hotplug(cpu);
			return err;
		}
	}

	return 0;
}
Example #12
__init int spawn_ksoftirqd(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
	register_cpu_notifier(&cpu_nfb);
	return 0;
}
Example #13
File: tasklet.c Project: fdario/xen
void __init tasklet_subsys_init(void)
{
    void *hcpu = (void *)(long)smp_processor_id();
    cpu_callback(&cpu_nfb, CPU_UP_PREPARE, hcpu);
    register_cpu_notifier(&cpu_nfb);
    open_softirq(TASKLET_SOFTIRQ, tasklet_softirq_action);
    tasklets_initialised = 1;
}
Example #14
static int __init test_init(void)
{
	//enable_clock(12, "Vfifo");
	
	register_cpu_notifier(&cpu_nfb);
	start_kicker();
	return 0;
}
Example #15
static __init int spawn_ksoftirqd(void)
{
	register_cpu_notifier(&cpu_nfb);

	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

	return 0;
}
Example #16
void __init hrtimers_init(void)
{
	hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
			  (void *)(long)smp_processor_id());
	register_cpu_notifier(&hrtimers_nb);
#ifdef CONFIG_HIGH_RES_TIMERS
	open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
#endif
}
Example #17
int kvm_timer_hyp_init(void)
{
	struct device_node *np;
	unsigned int ppi;
	int err;

	timecounter = arch_timer_get_timecounter();
	if (!timecounter)
		return -ENODEV;

	np = of_find_matching_node(NULL, arch_timer_of_match);
	if (!np) {
		kvm_err("kvm_arch_timer: can't find DT node\n");
		return -ENODEV;
	}

	ppi = irq_of_parse_and_map(np, 2);
	if (!ppi) {
		kvm_err("kvm_arch_timer: no virtual timer interrupt\n");
		err = -EINVAL;
		goto out;
	}

	err = request_percpu_irq(ppi, kvm_arch_timer_handler,
				 "kvm guest timer", kvm_get_running_vcpus());
	if (err) {
		kvm_err("kvm_arch_timer: can't request interrupt %d (%d)\n",
			ppi, err);
		goto out;
	}

	timer_irq.irq = ppi;

	err = register_cpu_notifier(&kvm_timer_cpu_nb);
	if (err) {
		kvm_err("Cannot register timer CPU notifier\n");
		goto out_free;
	}

	wqueue = create_singlethread_workqueue("kvm_arch_timer");
	if (!wqueue) {
		err = -ENOMEM;
		goto out_free;
	}

	kvm_info("%s IRQ%d\n", np->name, ppi);
	on_each_cpu(kvm_timer_init_interrupt, NULL, 1);

	goto out;
out_free:
	free_percpu_irq(ppi, kvm_get_running_vcpus());
out:
	of_node_put(np);
	return err;
}
Example #18
/*
 * Called early on to tune the page writeback dirty limits.
 *
 * We used to scale dirty pages according to how total memory
 * related to pages that could be allocated for buffers (by
 * comparing nr_free_buffer_pages() to vm_total_pages.
 *
 * However, that was when we used "dirty_ratio" to scale with
 * all memory, and we don't do that any more. "dirty_ratio"
 * is now applied to total non-HIGHPAGE memory (by subtracting
 * totalhigh_pages from vm_total_pages), and as such we can't
 * get into the old insane situation any more where we had
 * large amounts of dirty pages compared to a small amount of
 * non-HIGHMEM memory.
 *
 * But we might still want to scale the dirty_ratio by how
 * much memory the box has..
 */
void __init page_writeback_init(void)
{
	int shift;

	writeback_set_ratelimit();
	register_cpu_notifier(&ratelimit_nb);

	shift = calc_period_shift();
	prop_descriptor_init(&vm_completions, shift);
	prop_descriptor_init(&vm_dirties, shift);
}
Example #19
static int nmi_setup(void)
{
	int err = 0;
	int cpu;

	if (!allocate_msrs())
		return -ENOMEM;

	/* We need to serialize save and setup for HT because the subsets
	 * of MSRs are distinct for the save and setup operations.
	 */

	/* Assume saved/restored counters are the same on all CPUs */
	err = model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
	if (err)
		goto fail;

	for_each_possible_cpu(cpu) {
		if (!cpu)
			continue;

		memcpy(per_cpu(cpu_msrs, cpu).counters,
		       per_cpu(cpu_msrs, 0).counters,
		       sizeof(struct op_msr) * model->num_counters);

		memcpy(per_cpu(cpu_msrs, cpu).controls,
		       per_cpu(cpu_msrs, 0).controls,
		       sizeof(struct op_msr) * model->num_controls);

		mux_clone(cpu);
	}

	nmi_enabled = 0;
	ctr_running = 0;
	/* make variables visible to the nmi handler: */
	smp_mb();
	err = register_nmi_handler(NMI_LOCAL, profile_exceptions_notify,
					0, "oprofile");
	if (err)
		goto fail;

	get_online_cpus();
	register_cpu_notifier(&oprofile_cpu_nb);
	nmi_enabled = 1;
	/* make nmi_enabled visible to the nmi handler: */
	smp_mb();
	on_each_cpu(nmi_cpu_setup, NULL, 1);
	put_online_cpus();

	return 0;
fail:
	free_msrs();
	return err;
}
Example #20
static int __init pftracer_init(void)
{
	int i, err;

#if defined(ETR_DRAM)
	/* DRAM */
	void *buff;
	dma_addr_t dma_handle;

	buff = dma_alloc_coherent(NULL, ETR_BUFF_SIZE, &dma_handle, GFP_KERNEL);
	if (!buff) {
		return -ENOMEM;
	}
	etb_driver_data.etr_virt = (u32)buff;
	etb_driver_data.etr_phys = dma_handle;
	etb_driver_data.etr_len = ETR_BUFF_SIZE;
	etb_driver_data.use_etr = 1;
	etb_driver_data.etb_regs = IOMEM(DEBUGTOP_BASE + 0x13000);
#elif defined(ETR_SRAM)
	/* SRAM */
	etb_driver_data.etr_virt = (u32)ETR_SRAM_VIRT_BASE;
	etb_driver_data.etr_phys = (dma_addr_t)ETR_SRAM_PHYS_BASE;
	etb_driver_data.etr_len = ETR_BUFF_SIZE;
	etb_driver_data.use_etr = 1;
	etb_driver_data.etb_regs = IOMEM(DEBUGTOP_BASE + 0x13000);
#else
	/* ETB */
	etb_driver_data.use_etr = 0;
	etb_driver_data.etb_regs = IOMEM(DEBUGTOP_BASE + 0x11000);
#endif

	for (i = 0; i < num_possible_cpus(); i++) {
		per_cpu(trace_pwr_down, i) = 0;
		etm_driver_data[i].pwr_down = &(per_cpu(trace_pwr_down, i));
	}

	for (i = 0; i < num_possible_cpus(); i++) {
		err = platform_device_register(&(etm_device[i]));
		if (err) {
			pr_err("Failed to register etm_device %d\n", i);
			return err;
		}
	}

	err = platform_device_register(&etb_device);
	if (err) {
		pr_err("Failed to register etb_device\n");
		return err;
	}

	register_cpu_notifier(&pftracer_notifier);

	return 0;
}
Example #21
static int __init dummy_timer_register(void)
{
	int err = register_cpu_notifier(&dummy_timer_cpu_nb);
	if (err)
		return err;

	/* We won't get a call on the boot CPU, so register immediately */
	if (num_possible_cpus() > 1)
		dummy_timer_setup();

	return 0;
}
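
The comment in Example #21 points at a general property of this API: register_cpu_notifier() does not replay events for CPUs that are already online, which is why other examples either invoke their callback by hand (Examples #12, #13, #29) or walk the online CPUs under get_online_cpus() (Examples #2, #4, #19). Both leave a small window against concurrent hotplug; later kernels in this series added cpu_notifier_register_begin()/cpu_notifier_register_done() to close it. Below is a sketch of that shape, assuming the same dummy_timer_cpu_nb and dummy_timer_setup() as above.

static int __init dummy_timer_register_racefree(void)
{
	int err;

	/* Holds off CPU hotplug while we initialise the boot CPU. */
	cpu_notifier_register_begin();

	/* The boot CPU never gets a notifier call, so set it up here. */
	if (num_possible_cpus() > 1)
		dummy_timer_setup();

	/* __register_cpu_notifier() is the variant to call while the
	 * registration lock taken by _begin() is already held. */
	err = __register_cpu_notifier(&dummy_timer_cpu_nb);

	cpu_notifier_register_done();
	return err;
}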
Example #22
DECLHIDDEN(int) rtR0MpNotificationNativeInit(void)
{
    int rc;

# ifdef CPU_DOWN_FAILED
    RTCpuSetEmpty(&g_MpPendingOfflineSet);
# endif

    rc = register_cpu_notifier(&g_NotifierBlock);
    AssertMsgReturn(!rc, ("%d\n", rc), RTErrConvertFromErrno(rc));
    return VINF_SUCCESS;
}
Example #23
enum MHI_STATUS mhi_reg_notifiers(struct mhi_device_ctxt *mhi_dev_ctxt)
{
	u32 ret_val;
	if (NULL == mhi_dev_ctxt)
		return MHI_STATUS_ERROR;
	mhi_dev_ctxt->mhi_cpu_notifier.notifier_call = mhi_cpu_notifier_cb;
	ret_val = register_cpu_notifier(&mhi_dev_ctxt->mhi_cpu_notifier);
	if (ret_val)
		return MHI_STATUS_ERROR;
	else
		return MHI_STATUS_SUCCESS;
}
Example #24
void tzdev_init_migration(void)
{
    cpumask_setall(&tzdev_cpu_mask[CLUSTER_BIG]);
    cpumask_clear(&tzdev_cpu_mask[CLUSTER_LITTLE]);

    if (strlen(CONFIG_HMP_FAST_CPU_MASK))
        cpulist_parse(CONFIG_HMP_FAST_CPU_MASK, &tzdev_cpu_mask[CLUSTER_BIG]);
    else
        pr_notice("All CPUs are equal, core migration will do nothing.\n");
    cpumask_andnot(&tzdev_cpu_mask[CLUSTER_LITTLE], cpu_present_mask,
                   &tzdev_cpu_mask[CLUSTER_BIG]);
    register_cpu_notifier(&tzdev_cpu_notifier);
}
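
Example #24 derives the LITTLE-cluster mask by subtracting the parsed big-cluster list from cpu_present_mask. A small illustration of what cpulist_parse() and cpumask_andnot() do there, assuming a hypothetical CONFIG_HMP_FAST_CPU_MASK of "4-7" on an 8-CPU system:

/* Illustration only: with a fast-CPU list of "4-7" on an 8-CPU
 * system, the result is big = CPUs 4-7 and little = CPUs 0-3. */
static struct cpumask example_big_mask, example_little_mask;

static void example_split_clusters(void)
{
	cpumask_setall(&example_big_mask);
	if (cpulist_parse("4-7", &example_big_mask))
		pr_notice("bad cpulist, keeping all CPUs in the big mask\n");

	/* Present CPUs not in the big mask form the LITTLE set. */
	cpumask_andnot(&example_little_mask, cpu_present_mask,
		       &example_big_mask);
}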
Example #25
/*
 * Called early on to tune the page writeback dirty limits.
 *
 * We used to scale dirty pages according to how total memory
 * related to pages that could be allocated for buffers (by
 * comparing nr_free_buffer_pages() to vm_total_pages.
 *
 * However, that was when we used "dirty_ratio" to scale with
 * all memory, and we don't do that any more. "dirty_ratio"
 * is now applied to total non-HIGHPAGE memory (by subtracting
 * totalhigh_pages from vm_total_pages), and as such we can't
 * get into the old insane situation any more where we had
 * large amounts of dirty pages compared to a small amount of
 * non-HIGHMEM memory.
 *
 * But we might still want to scale the dirty_ratio by how
 * much memory the box has..
 */
void __init page_writeback_init(void)
{
	int shift;

	mod_timer(&wb_timer,
		  jiffies + msecs_to_jiffies(dirty_writeback_interval * 10));
	writeback_set_ratelimit();
	register_cpu_notifier(&ratelimit_nb);

	shift = calc_period_shift();
	prop_descriptor_init(&vm_completions, shift);
	prop_descriptor_init(&vm_dirties, shift);
}
Example #26
static int gic_clockevent_init(void)
{
	if (!cpu_has_counter || !gic_frequency)
		return -ENXIO;

	setup_percpu_irq(gic_timer_irq, &gic_compare_irqaction);

	register_cpu_notifier(&gic_cpu_nb);

	gic_clockevent_cpu_init(this_cpu_ptr(&gic_clockevent_device));

	return 0;
}
Example #28
static int __init register_pmu_driver(void)
{
	int err;

	err = register_cpu_notifier(&cpu_pmu_hotplug_notifier);
	if (err)
		return err;

	err = platform_driver_register(&cpu_pmu_driver);
	if (err)
		unregister_cpu_notifier(&cpu_pmu_hotplug_notifier);

	return err;
}
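
Example #28 already unwinds the notifier when the platform driver fails to register. The module exit path is not shown in the source; a hypothetical sketch would simply mirror the registration in reverse order:

static void __exit unregister_pmu_driver(void)
{
	/* Tear down in the reverse order of registration. */
	platform_driver_unregister(&cpu_pmu_driver);
	unregister_cpu_notifier(&cpu_pmu_hotplug_notifier);
}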
Example #29
int __init watchdog_setup(void)
{
    unsigned int cpu;

    /*
     * Activate periodic heartbeats. We cannot do this earlier during 
     * setup because the timer infrastructure is not available.
     */
    for_each_online_cpu ( cpu )
        cpu_nmi_callback(&cpu_nmi_nfb, CPU_UP_PREPARE, (void *)(long)cpu);
    register_cpu_notifier(&cpu_nmi_nfb);

    watchdog_enable();
    return 0;
}
Example #30
/*
 * Called early on to tune the page writeback dirty limits.
 *
 * We used to scale dirty pages according to how total memory
 * related to pages that could be allocated for buffers (by
 * comparing nr_free_buffer_pages() to vm_total_pages.
 *
 * However, that was when we used "dirty_ratio" to scale with
 * all memory, and we don't do that any more. "dirty_ratio"
 * is now applied to total non-HIGHPAGE memory (by subtracting
 * totalhigh_pages from vm_total_pages), and as such we can't
 * get into the old insane situation any more where we had
 * large amounts of dirty pages compared to a small amount of
 * non-HIGHMEM memory.
 *
 * But we might still want to scale the dirty_ratio by how
 * much memory the box has..
 */
void __init page_writeback_init(void)
{
	int shift;

#ifdef CONFIG_DYNAMIC_PAGE_WRITEBACK
	/* Register the dirty page writeback management during suspend/resume */
	register_early_suspend(&dirty_writeback_suspend);
#endif

	writeback_set_ratelimit();
	register_cpu_notifier(&ratelimit_nb);

	shift = calc_period_shift();
	prop_descriptor_init(&vm_completions, shift);
	prop_descriptor_init(&vm_dirties, shift);
}