Example no. 1
static int do_ltt_statedump(void)
{
	int cpu;

	printk(KERN_DEBUG "do_ltt_statedump\n");
	ltt_enumerate_process_states();
	ltt_enumerate_file_descriptors();
	list_modules();
	ltt_enumerate_vm_maps();
	list_interrupts();
	ltt_enumerate_network_ip_interface();
	
	/* Fire off a work queue on each CPU. Their sole purpose in life
	 * is to guarantee that each CPU has been in a state where it was in
	 * syscall mode (i.e. not in a trap, an IRQ or a soft IRQ) */
	lock_cpu_hotplug();
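	/* The work items decrement kernel_threads_to_run and wake this thread when they run */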
	atomic_set(&kernel_threads_to_run, num_online_cpus());
	__set_current_state(TASK_UNINTERRUPTIBLE);
	for_each_online_cpu(cpu) {
		INIT_WORK(&cpu_work[cpu], ltt_statedump_work_func, current);
		schedule_delayed_work_on(cpu, &cpu_work[cpu], 0);
	}
	unlock_cpu_hotplug();
	/* Wait for all threads to run */
	schedule();
	BUG_ON(atomic_read(&kernel_threads_to_run) != 0);
	/* Our work is done */
	printk(KERN_DEBUG "trace_statedump_statedump_end\n");
	trace_statedump_statedump_end();
	return 0;
}
Example no. 2
/*
 * Update the present map for a cpu node which is going away, and set
 * the hard id in the paca(s) to -1 to be consistent with boot time
 * convention for non-present cpus.
 */
static void pSeries_remove_processor(struct device_node *np)
{
	unsigned int cpu;
	int len, nthreads, i;
	u32 *intserv;

	intserv = (u32 *)get_property(np, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return;

	nthreads = len / sizeof(u32);

	lock_cpu_hotplug();
	for (i = 0; i < nthreads; i++) {
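		/* Find the logical CPU whose hardware id matches this interrupt server entry */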
		for_each_present_cpu(cpu) {
			if (get_hard_smp_processor_id(cpu) != intserv[i])
				continue;
			BUG_ON(cpu_online(cpu));
			cpu_clear(cpu, cpu_present_map);
			set_hard_smp_processor_id(cpu, -1);
			break;
		}
		if (cpu == NR_CPUS)
			printk(KERN_WARNING "Could not find cpu to remove "
			       "with physical id 0x%x\n", intserv[i]);
	}
	unlock_cpu_hotplug();
}
Example no. 3
int ltt_statedump_thread(void *data)
{
	struct semaphore work_sema4;
	int cpu;

	printk(KERN_DEBUG "ltt_statedump_thread\n");
	ltt_enumerate_process_states();
	ltt_enumerate_file_descriptors();
	ltt_enumerate_modules();
	ltt_enumerate_vm_maps();
	ltt_enumerate_interrupts();
	ltt_enumerate_network_ip_interface();
	
	/* Fire off a work queue on each CPU. Their sole purpose in life
	 * is to guarantee that each CPU has been in a state where it was in
	 * syscall mode (i.e. not in a trap, an IRQ or a soft IRQ) */
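	/* Count of 1 - N means down() below blocks until all N work items have signalled with up() */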
	sema_init(&work_sema4, 1 - num_online_cpus());
	lock_cpu_hotplug();
	for_each_online_cpu(cpu) {
		INIT_WORK(&cpu_work[cpu], ltt_statedump_work_func, &work_sema4);
		schedule_delayed_work_on(cpu, &cpu_work[cpu], 0);
	}
	unlock_cpu_hotplug();
	/* Wait for all work queues to have completed */
	down(&work_sema4);
	/* Our work is done */
	printk(KERN_DEBUG "trace_statedump_statedump_end\n");
	trace_statedump_statedump_end();
	do_exit(0);
	return 0;
}
Example no. 4
static int
__init cpufreq_stats_init(void)
{
	int ret;
	unsigned int cpu;

	spin_lock_init(&cpufreq_stats_lock);
	if ((ret = cpufreq_register_notifier(&notifier_policy_block,
				CPUFREQ_POLICY_NOTIFIER)))
		return ret;

	if ((ret = cpufreq_register_notifier(&notifier_trans_block,
				CPUFREQ_TRANSITION_NOTIFIER))) {
		cpufreq_unregister_notifier(&notifier_policy_block,
				CPUFREQ_POLICY_NOTIFIER);
		return ret;
	}

	register_cpu_notifier(&cpufreq_stat_cpu_notifier);
	lock_cpu_hotplug();
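	/* Replay CPU_ONLINE for the CPUs that were already up when the notifier was registered */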
	for_each_online_cpu(cpu) {
		cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier, CPU_ONLINE,
			(void *)(long)cpu);
	}
	unlock_cpu_hotplug();
	return 0;
}
Example no. 5
static int do_ltt_statedump(struct ltt_probe_private_data *call_data)
{
	int cpu;

	printk(KERN_DEBUG "LTT state dump thread start\n");
	ltt_enumerate_process_states(call_data);
	ltt_enumerate_file_descriptors(call_data);
	list_modules(call_data);
	ltt_enumerate_vm_maps(call_data);
	list_interrupts(call_data);
	ltt_enumerate_network_ip_interface(call_data);

	/*
	 * Fire off a work queue on each CPU. Their sole purpose in life
	 * is to guarantee that each CPU has been in a state where it was in
	 * syscall mode (i.e. not in a trap, an IRQ or a soft IRQ).
	 */
	lock_cpu_hotplug();
	atomic_set(&kernel_threads_to_run, num_online_cpus());
	work_wake_task = current;
	__set_current_state(TASK_UNINTERRUPTIBLE);
	for_each_online_cpu(cpu) {
		INIT_DELAYED_WORK(&cpu_work[cpu], ltt_statedump_work_func);
		schedule_delayed_work_on(cpu, &cpu_work[cpu], 0);
	}
	unlock_cpu_hotplug();
	/* Wait for all threads to run */
	schedule();
	BUG_ON(atomic_read(&kernel_threads_to_run) != 0);
	/* Our work is done */
	printk(KERN_DEBUG "LTT state dump end\n");
	__trace_mark(0, list_statedump_end, call_data, MARK_NOARGS);
	return 0;
}
Example no. 6
int mtrr_del_page(int reg, unsigned long base, unsigned long size)
{
	int i, max;
	mtrr_type ltype;
	unsigned long lbase;
	unsigned int lsize;
	int error = -EINVAL;

	if (!mtrr_if)
		return -ENXIO;

	max = num_var_ranges;
	/* No CPU hotplug when we change MTRR entries */
	lock_cpu_hotplug();
	mutex_lock(&mtrr_mutex);
	if (reg < 0) {
		/*  Search for existing MTRR  */
		for (i = 0; i < max; ++i) {
			mtrr_if->get(i, &lbase, &lsize, &ltype);
			if (lbase == base && lsize == size) {
				reg = i;
				break;
			}
		}
		if (reg < 0) {
			printk(KERN_DEBUG "mtrr: no MTRR for %lx000,%lx000 found\n", base,
			       size);
			goto out;
		}
	}
	if (reg >= max) {
		printk(KERN_WARNING "mtrr: register: %d too big\n", reg);
		goto out;
	}
	if (is_cpu(CYRIX) && !use_intel()) {
		if ((reg == 3) && arr3_protected) {
			printk(KERN_WARNING "mtrr: ARR3 cannot be changed\n");
			goto out;
		}
	}
	mtrr_if->get(reg, &lbase, &lsize, &ltype);
	if (lsize < 1) {
		printk(KERN_WARNING "mtrr: MTRR %d not used\n", reg);
		goto out;
	}
	if (usage_table[reg] < 1) {
		printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg);
		goto out;
	}
	if (--usage_table[reg] < 1)
		set_mtrr(reg, 0, 0, 0);
	error = reg;
 out:
	mutex_unlock(&mtrr_mutex);
	unlock_cpu_hotplug();
	return error;
}
Example no. 7
static void do_event_scan_all_cpus(long delay)
{
	int cpu;

	lock_cpu_hotplug();
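	/* Visit each online CPU in turn, binding this task to it so the event scan runs there */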
	cpu = first_cpu(cpu_online_map);
	for (;;) {
		set_cpus_allowed(current, cpumask_of_cpu(cpu));
		do_event_scan(rtas_token("event-scan"));
		set_cpus_allowed(current, CPU_MASK_ALL);

		/* Drop hotplug lock, and sleep for the specified delay */
		unlock_cpu_hotplug();
		msleep_interruptible(delay);
		lock_cpu_hotplug();

		cpu = next_cpu(cpu, cpu_online_map);
		if (cpu == NR_CPUS)
			break;
	}
	unlock_cpu_hotplug();
}
Example no. 8
static void
__exit cpufreq_stats_exit(void)
{
	unsigned int cpu;

	cpufreq_unregister_notifier(&notifier_policy_block,
			CPUFREQ_POLICY_NOTIFIER);
	cpufreq_unregister_notifier(&notifier_trans_block,
			CPUFREQ_TRANSITION_NOTIFIER);
	unregister_cpu_notifier(&cpufreq_stat_cpu_notifier);
	lock_cpu_hotplug();
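	/* Invoke the callback with CPU_DEAD for each online CPU, undoing the init-time CPU_ONLINE calls */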
	for_each_online_cpu(cpu) {
		cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier, CPU_DEAD,
			(void *)(long)cpu);
	}
	unlock_cpu_hotplug();
}
Example no. 9
void flow_cache_flush(void)
{
	struct flow_flush_info info;
	static DECLARE_MUTEX(flow_flush_sem);

	/* Don't want cpus going down or up during this. */
	lock_cpu_hotplug();
	down(&flow_flush_sem);
	atomic_set(&info.cpuleft, num_online_cpus());
	init_completion(&info.completion);

	local_bh_disable();
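	/* Run the flush on the other CPUs via IPI, then run the flush tasklet for this CPU directly */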
	smp_call_function(flow_cache_flush_per_cpu, &info, 1, 0);
	flow_cache_flush_tasklet((unsigned long)&info);
	local_bh_enable();

	wait_for_completion(&info.completion);
	up(&flow_flush_sem);
	unlock_cpu_hotplug();
}
Example no. 10
int mtrr_add_page(unsigned long base, unsigned long size, 
		  unsigned int type, char increment)
{
	int i;
	mtrr_type ltype;
	unsigned long lbase;
	unsigned int lsize;
	int error;

	if (!mtrr_if)
		return -ENXIO;
		
	if ((error = mtrr_if->validate_add_page(base,size,type)))
		return error;

	if (type >= MTRR_NUM_TYPES) {
		printk(KERN_WARNING "mtrr: type: %u invalid\n", type);
		return -EINVAL;
	}

	/*  If the type is WC, check that this processor supports it  */
	if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) {
		printk(KERN_WARNING
		       "mtrr: your processor doesn't support write-combining\n");
		return -ENOSYS;
	}

	if (base & size_or_mask || size & size_or_mask) {
		printk(KERN_WARNING "mtrr: base or size exceeds the MTRR width\n");
		return -EINVAL;
	}

	error = -EINVAL;

	/* No CPU hotplug when we change MTRR entries */
	lock_cpu_hotplug();
	/*  Search for existing MTRR  */
	mutex_lock(&mtrr_mutex);
	for (i = 0; i < num_var_ranges; ++i) {
		mtrr_if->get(i, &lbase, &lsize, &ltype);
		if (base >= lbase + lsize)
			continue;
		if ((base < lbase) && (base + size <= lbase))
			continue;
		/*  At this point we know there is some kind of overlap/enclosure  */
		if ((base < lbase) || (base + size > lbase + lsize)) {
			printk(KERN_WARNING
			       "mtrr: 0x%lx000,0x%lx000 overlaps existing"
			       " 0x%lx000,0x%x000\n", base, size, lbase,
			       lsize);
			goto out;
		}
		/*  New region is enclosed by an existing region  */
		if (ltype != type) {
			if (type == MTRR_TYPE_UNCACHABLE)
				continue;
			printk (KERN_WARNING "mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
			     base, size, mtrr_attrib_to_str(ltype),
			     mtrr_attrib_to_str(type));
			goto out;
		}
		if (increment)
			++usage_table[i];
		error = i;
		goto out;
	}
	/*  Search for an empty MTRR  */
	i = mtrr_if->get_free_region(base, size);
	if (i >= 0) {
		set_mtrr(i, base, size, type);
		usage_table[i] = 1;
	} else
		printk(KERN_INFO "mtrr: no more MTRRs available\n");
	error = i;
 out:
	mutex_unlock(&mtrr_mutex);
	unlock_cpu_hotplug();
	return error;
}
Example no. 11
static int rtasd(void *unused)
{
	unsigned int err_type;
	int cpu = 0;
	int event_scan = rtas_token("event-scan");
	cpumask_t all = CPU_MASK_ALL;
	int rc;

	daemonize("rtasd");

	if (event_scan == RTAS_UNKNOWN_SERVICE || get_eventscan_parms() == -1)
		goto error;

	rtas_log_buf = vmalloc(rtas_error_log_buffer_max*LOG_NUMBER);
	if (!rtas_log_buf) {
		printk(KERN_ERR "rtasd: no memory\n");
		goto error;
	}

	/* We can use rtas_log_buf now */
	no_more_logging = 0;

	printk(KERN_ERR "RTAS daemon started\n");

	DEBUG("will sleep for %d jiffies\n", (HZ*60/rtas_event_scan_rate) / 2);

	/* See if we have any error stored in NVRAM */
	memset(logdata, 0, rtas_error_log_max);

	rc = nvram_read_error_log(logdata, rtas_error_log_max, &err_type);
	if (!rc) {
		if (err_type != ERR_FLAG_ALREADY_LOGGED) {
			pSeries_log_error(logdata, err_type | ERR_FLAG_BOOT, 0);
		}
	}

	/* First pass. */
	lock_cpu_hotplug();
	for_each_online_cpu(cpu) {
		DEBUG("scheduling on %d\n", cpu);
		set_cpus_allowed(current, cpumask_of_cpu(cpu));
		DEBUG("watchdog scheduled on cpu %d\n", smp_processor_id());

		do_event_scan(event_scan);
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}
	unlock_cpu_hotplug();

	if (surveillance_timeout != -1) {
		DEBUG("enabling surveillance\n");
		enable_surveillance(surveillance_timeout);
		DEBUG("surveillance enabled\n");
	}

	lock_cpu_hotplug();
	cpu = first_cpu_const(mk_cpumask_const(cpu_online_map));
	for (;;) {
		set_cpus_allowed(current, cpumask_of_cpu(cpu));
		do_event_scan(event_scan);
		set_cpus_allowed(current, all);

		/* Drop hotplug lock, and sleep for a bit (at least
		 * one second since some machines have problems if we
		 * call event-scan too quickly). */
		unlock_cpu_hotplug();
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout((HZ*60/rtas_event_scan_rate) / 2);
		lock_cpu_hotplug();

		cpu = next_cpu_const(cpu, mk_cpumask_const(cpu_online_map));
		if (cpu == NR_CPUS)
			cpu = first_cpu_const(mk_cpumask_const(cpu_online_map));
	}

error:
	/* Should delete proc entries */
	return -EINVAL;
}