Example 1
static void nmi_shutdown(void)
{
	nmi_enabled = 0;
	on_each_cpu(nmi_cpu_shutdown, NULL, 0, 1);
	unregister_die_notifier(&profile_exceptions_nb);
	free_msrs();
}
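Example 1 unregisters profile_exceptions_nb without ever showing it. A minimal sketch of what that die notifier could look like in this era of the driver, assuming model->check_ctrs is the hook that decodes overflowed counters (a reconstruction, not the verbatim source):

static int profile_exceptions_notify(struct notifier_block *self,
				     unsigned long val, void *data)
{
	struct die_args *args = data;	/* sketch: assumes a die_args payload */
	int ret = NOTIFY_DONE;
	int cpu = smp_processor_id();

	switch (val) {
	case DIE_NMI:
		/* let the CPU model decode the overflowed counters */
		if (model->check_ctrs(args->regs, &cpu_msrs[cpu]))
			ret = NOTIFY_STOP;
		break;
	default:
		break;
	}
	return ret;
}

static struct notifier_block profile_exceptions_nb = {
	.notifier_call = profile_exceptions_notify,
	.next = NULL,
	.priority = 2
};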
Example 2
static int allocate_msrs(void)
{
	int success = 1;
	size_t controls_size = sizeof(struct op_msr) * model->num_controls;
	size_t counters_size = sizeof(struct op_msr) * model->num_counters;

	int i;
	for_each_online_cpu (i) {
		cpu_msrs[i].counters = xmalloc_bytes(counters_size);
		if (!cpu_msrs[i].counters) {
			success = 0;
			break;
		}
		cpu_msrs[i].controls = xmalloc_bytes(controls_size);
		if (!cpu_msrs[i].controls) {
			success = 0;
			break;
		}
	}

	if (!success)
		free_msrs();

	return success;
}
Example 3
static int allocate_msrs(void)
{
	int success = 1;
	size_t controls_size = sizeof(struct op_msr) * model->num_controls;
	size_t counters_size = sizeof(struct op_msr) * model->num_counters;

	int i;
	for (i = 0; i < NR_CPUS; ++i) {
		if (!cpu_online(i))
			continue;

		cpu_msrs[i].counters = kmalloc(counters_size, GFP_KERNEL);
		if (!cpu_msrs[i].counters) {
			success = 0;
			break;
		}
		cpu_msrs[i].controls = kmalloc(controls_size, GFP_KERNEL);
		if (!cpu_msrs[i].controls) {
			success = 0;
			break;
		}
	}

	if (!success)
		free_msrs();

	return success;
}
Example 4
static int allocate_msrs(void)
{
	int success = 1;
	size_t controls_size = sizeof(struct op_msr) * model->num_controls;
	size_t counters_size = sizeof(struct op_msr) * model->num_counters;

	int i;
	for_each_possible_cpu(i) {
		per_cpu(cpu_msrs, i).counters = kmalloc(counters_size,
								GFP_KERNEL);
		if (!per_cpu(cpu_msrs, i).counters) {
			success = 0;
			break;
		}
		per_cpu(cpu_msrs, i).controls = kmalloc(controls_size,
								GFP_KERNEL);
		if (!per_cpu(cpu_msrs, i).controls) {
			success = 0;
			break;
		}
	}

	if (!success)
		free_msrs();

	return success;
}
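Every variant above bails out through free_msrs(), which is not shown anywhere in this collection. A minimal sketch matching the per-CPU layout of Example 4; since kfree(NULL) is a no-op, it is safe to call after a partially completed allocation loop:

static void free_msrs(void)
{
	int i;

	for_each_possible_cpu(i) {
		/* kfree(NULL) is safe, so half-initialized CPUs are fine */
		kfree(per_cpu(cpu_msrs, i).counters);
		per_cpu(cpu_msrs, i).counters = NULL;
		kfree(per_cpu(cpu_msrs, i).controls);
		per_cpu(cpu_msrs, i).controls = NULL;
	}
}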
Example 5
static int allocate_msrs(void)
{
	size_t controls_size = sizeof(struct op_msr) * model->num_controls;
	size_t counters_size = sizeof(struct op_msr) * model->num_counters;

	int i;
	for_each_possible_cpu(i) {
		per_cpu(cpu_msrs, i).counters = kzalloc(counters_size,
							GFP_KERNEL);
		if (!per_cpu(cpu_msrs, i).counters)
			goto fail;
		per_cpu(cpu_msrs, i).controls = kzalloc(controls_size,
							GFP_KERNEL);
		if (!per_cpu(cpu_msrs, i).controls)
			goto fail;
	}

	if (!nmi_setup_mux())
		goto fail;

	return 1;

fail:
	free_msrs();
	return 0;
}
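Example 5 additionally gates success on nmi_setup_mux(). A hedged sketch of such a multiplexing setup, assuming the model exposes a num_virt_counters field and struct op_msrs carries a multiplex array (both are assumptions about members not shown in this document); it keeps the 1-on-success convention the caller expects:

static int nmi_setup_mux(void)
{
	/* num_virt_counters is an assumed model field */
	size_t multiplex_size =
		sizeof(struct op_msr) * model->num_virt_counters;
	int i;

	if (!model->num_virt_counters)
		return 1;	/* no multiplexing on this model: nothing to do */

	for_each_possible_cpu(i) {
		per_cpu(cpu_msrs, i).multiplex =	/* assumed member */
			kzalloc(multiplex_size, GFP_KERNEL);
		if (!per_cpu(cpu_msrs, i).multiplex)
			return 0;	/* caller jumps to fail: and runs free_msrs() */
	}
	return 1;
}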
Example 6
static int nmi_setup(void)
{
	int err = 0;
	int cpu;

	if (!allocate_msrs())
		return -ENOMEM;

	if ((err = register_die_notifier(&profile_exceptions_nb))) {
		free_msrs();
		return err;
	}

	/* We need to serialize save and setup for HT because the subset
	 * of msrs are distinct for save and setup operations
	 */

	/* Assume saved/restored counters are the same on all CPUs */
	model->fill_in_addresses(&cpu_msrs[0]);
	for_each_possible_cpu (cpu) {
		if (cpu != 0) {
			memcpy(cpu_msrs[cpu].counters, cpu_msrs[0].counters,
				sizeof(struct op_msr) * model->num_counters);

			memcpy(cpu_msrs[cpu].controls, cpu_msrs[0].controls,
				sizeof(struct op_msr) * model->num_controls);
		}
	}
	on_each_cpu(nmi_save_registers, NULL, 0, 1);
	on_each_cpu(nmi_cpu_setup, NULL, 0, 1);
	nmi_enabled = 1;
	return 0;
}
Example 7
static int nmi_setup(void)
{
	int err = 0;
	int cpu;

	if (!allocate_msrs())
		return -ENOMEM;

	/* We need to serialize save and setup for HT because the subset
	 * of msrs are distinct for save and setup operations
	 */

	/* Assume saved/restored counters are the same on all CPUs */
	err = model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
	if (err)
		goto fail;

	for_each_possible_cpu(cpu) {
		if (!cpu)
			continue;

		memcpy(per_cpu(cpu_msrs, cpu).counters,
		       per_cpu(cpu_msrs, 0).counters,
		       sizeof(struct op_msr) * model->num_counters);

		memcpy(per_cpu(cpu_msrs, cpu).controls,
		       per_cpu(cpu_msrs, 0).controls,
		       sizeof(struct op_msr) * model->num_controls);

		mux_clone(cpu);
	}

	nmi_enabled = 0;
	ctr_running = 0;
	/* make variables visible to the nmi handler: */
	smp_mb();
	err = register_nmi_handler(NMI_LOCAL, profile_exceptions_notify,
					0, "oprofile");
	if (err)
		goto fail;

	cpu_notifier_register_begin();

	/* Use get/put_online_cpus() to protect 'nmi_enabled' */
	get_online_cpus();
	nmi_enabled = 1;
	/* make nmi_enabled visible to the nmi handler: */
	smp_mb();
	on_each_cpu(nmi_cpu_setup, NULL, 1);
	__register_cpu_notifier(&oprofile_cpu_nb);
	put_online_cpus();

	cpu_notifier_register_done();

	return 0;
fail:
	free_msrs();
	return err;
}
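Example 7 registers oprofile_cpu_nb but never defines it. Under the old CPU-notifier API, a plausible sketch looks like the following; nmi_cpu_up and nmi_cpu_down are assumed per-CPU helpers that are not shown in this document:

static int oprofile_cpu_notifier(struct notifier_block *b,
				 unsigned long action, void *data)
{
	int cpu = (unsigned long)data;

	switch (action) {
	case CPU_DOWN_FAILED:
	case CPU_ONLINE:
		/* re-enable counters on a CPU that came (back) up */
		smp_call_function_single(cpu, nmi_cpu_up, NULL, 0);
		break;
	case CPU_DOWN_PREPARE:
		/* quiesce counters before the CPU goes away */
		smp_call_function_single(cpu, nmi_cpu_down, NULL, 1);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block oprofile_cpu_nb = {
	.notifier_call = oprofile_cpu_notifier
};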
Example 8
static void nmi_shutdown(void)
{
	nmi_enabled = 0;
	on_each_cpu(nmi_cpu_shutdown, NULL, 0, 1);
	unset_nmi_callback();
	release_lapic_nmi();
	free_msrs();
}
Example 9
static void nmi_shutdown(void)
{
	struct op_msrs *msrs = &__get_cpu_var(cpu_msrs);
	nmi_enabled = 0;
	on_each_cpu(nmi_cpu_shutdown, NULL, 0, 1);
	unregister_die_notifier(&profile_exceptions_nb);
	model->shutdown(msrs);
	free_msrs();
}
Example 10
static int nmi_setup(void)
{
	int err = 0;
	int cpu;

	if (!allocate_msrs())
		return -ENOMEM;

	/* We need to serialize save and setup for HT because the subset
	 * of msrs are distinct for save and setup operations
	 */

	/* Assume saved/restored counters are the same on all CPUs */
	err = model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
	if (err)
		goto fail;

	for_each_possible_cpu(cpu) {
		if (!cpu)
			continue;

		memcpy(per_cpu(cpu_msrs, cpu).counters,
		       per_cpu(cpu_msrs, 0).counters,
		       sizeof(struct op_msr) * model->num_counters);

		memcpy(per_cpu(cpu_msrs, cpu).controls,
		       per_cpu(cpu_msrs, 0).controls,
		       sizeof(struct op_msr) * model->num_controls);

		mux_clone(cpu);
	}

	nmi_enabled = 0;
	ctr_running = 0;
	/* make variables visible to the nmi handler: */
	smp_mb();
	err = register_nmi_handler(NMI_LOCAL, profile_exceptions_notify,
					0, "oprofile");
	if (err)
		goto fail;

	nmi_enabled = 1;
	/* make nmi_enabled visible to the nmi handler: */
	smp_mb();
	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/oprofile:online",
				nmi_cpu_online, nmi_cpu_down_prep);
	if (err < 0)
		goto fail_nmi;
	cpuhp_nmi_online = err;
	return 0;
fail_nmi:
	unregister_nmi_handler(NMI_LOCAL, "oprofile");
fail:
	free_msrs();
	return err;
}
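Example 10 hands nmi_cpu_online and nmi_cpu_down_prep to cpuhp_setup_state without defining them. A hedged sketch, assuming nmi_cpu_start/nmi_cpu_stop exist alongside the nmi_cpu_setup/nmi_cpu_shutdown helpers the earlier examples drive via on_each_cpu; callbacks of an AP hotplug state run on the target CPU, so the per-CPU helpers are called directly:

static int nmi_cpu_online(unsigned int cpu)
{
	local_irq_disable();
	if (nmi_enabled)
		nmi_cpu_setup(NULL);
	if (ctr_running)
		nmi_cpu_start(NULL);	/* assumed helper */
	local_irq_enable();
	return 0;
}

static int nmi_cpu_down_prep(unsigned int cpu)
{
	local_irq_disable();
	if (ctr_running)
		nmi_cpu_stop(NULL);	/* assumed helper */
	if (nmi_enabled)
		nmi_cpu_shutdown(NULL);
	local_irq_enable();
	return 0;
}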
Example 11
static int nmi_setup(void)
{
	int err = 0;
	int cpu;

	if (!allocate_msrs())
		return -ENOMEM;

	/* We need to serialize save and setup for HT because the subset
	 * of msrs are distinct for save and setup operations
	 */

	/* Assume saved/restored counters are the same on all CPUs */
	err = model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
	if (err)
		goto fail;

	for_each_possible_cpu(cpu) {
		if (!cpu)
			continue;

		memcpy(per_cpu(cpu_msrs, cpu).counters,
		       per_cpu(cpu_msrs, 0).counters,
		       sizeof(struct op_msr) * model->num_counters);

		memcpy(per_cpu(cpu_msrs, cpu).controls,
		       per_cpu(cpu_msrs, 0).controls,
		       sizeof(struct op_msr) * model->num_controls);

		mux_clone(cpu);
	}

	nmi_enabled = 0;
	ctr_running = 0;
	/* make variables visible to the nmi handler: */
	smp_mb();
	err = register_nmi_handler(NMI_LOCAL, profile_exceptions_notify,
					0, "oprofile");
	if (err)
		goto fail;

	get_online_cpus();
	register_cpu_notifier(&oprofile_cpu_nb);
	nmi_enabled = 1;
	/* make nmi_enabled visible to the nmi handler: */
	smp_mb();
	on_each_cpu(nmi_cpu_setup, NULL, 1);
	put_online_cpus();

	return 0;
fail:
	free_msrs();
	return err;
}
Example 12
static void nmi_shutdown(void)
{
	struct op_msrs *msrs;

	cpuhp_remove_state(cpuhp_nmi_online);
	nmi_enabled = 0;
	ctr_running = 0;

	/* make variables visible to the nmi handler: */
	smp_mb();
	unregister_nmi_handler(NMI_LOCAL, "oprofile");
	msrs = &get_cpu_var(cpu_msrs);
	model->shutdown(msrs);
	free_msrs();
	put_cpu_var(cpu_msrs);
}
Example 13
static void nmi_shutdown(void)
{
	struct op_msrs *msrs;

	get_online_cpus();
	unregister_cpu_notifier(&oprofile_cpu_nb);
	on_each_cpu(nmi_cpu_shutdown, NULL, 1);
	nmi_enabled = 0;
	ctr_running = 0;
	put_online_cpus();
	/* make variables visible to the nmi handler: */
	smp_mb();
	unregister_nmi_handler(NMI_LOCAL, "oprofile");
	msrs = &get_cpu_var(cpu_msrs);
	model->shutdown(msrs);
	free_msrs();
	put_cpu_var(cpu_msrs);
}
Example 14
static int nmi_setup(void)
{
	int err = 0;
	int cpu;

	if (!allocate_msrs())
		err = -ENOMEM;
	else if (!nmi_setup_mux())
		err = -ENOMEM;
	else
		err = register_die_notifier(&profile_exceptions_nb);

	if (err) {
		free_msrs();
		nmi_shutdown_mux();
		return err;
	}

	/* We need to serialize save and setup for HT because the subset
	 * of msrs are distinct for save and setup operations
	 */

	/* Assume saved/restored counters are the same on all CPUs */
	model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
	for_each_possible_cpu(cpu) {
		if (!cpu)
			continue;

		memcpy(per_cpu(cpu_msrs, cpu).counters,
		       per_cpu(cpu_msrs, 0).counters,
		       sizeof(struct op_msr) * model->num_counters);

		memcpy(per_cpu(cpu_msrs, cpu).controls,
		       per_cpu(cpu_msrs, 0).controls,
		       sizeof(struct op_msr) * model->num_controls);

		mux_clone(cpu);
	}
	on_each_cpu(nmi_cpu_setup, NULL, 1);
	nmi_enabled = 1;
	return 0;
}
Example 15
int nmi_reserve_counters(void)
{
	if (!allocate_msrs())
		return -ENOMEM;

	/* We walk a thin line between law and rape here.
	 * We need to be careful to install our NMI handler
	 * without actually triggering any NMIs as this will
	 * break the core code horrifically.
	 */
	if (reserve_lapic_nmi() < 0) {
		free_msrs();
		return -EBUSY;
	}
	/* We need to serialize save and setup for HT because the subset
	 * of msrs are distinct for save and setup operations
	 */
	on_each_cpu(nmi_save_registers, NULL, 1);
	return 0;
}
Example 16
static int nmi_setup(void)
{
	int err = 0;

	if (!allocate_msrs())
		return -ENOMEM;

	if ((err = register_die_notifier(&profile_exceptions_nb))) {
		free_msrs();
		return err;
	}

	/* We need to serialize save and setup for HT because the subset
	 * of msrs are distinct for save and setup operations
	 */
	on_each_cpu(nmi_save_registers, NULL, 0, 1);
	on_each_cpu(nmi_cpu_setup, NULL, 0, 1);
	nmi_enabled = 1;
	return 0;
}
Example 17
void nmi_release_counters(void)
{
	on_each_cpu(nmi_cpu_shutdown, NULL, 1);
	release_lapic_nmi();
	free_msrs();
}