Example #1
0
/*
 * Render /proc/loadavg. For a task running inside a non-root cpuacct
 * group and a non-initial pid namespace, report the container-local
 * load averages and runnable count; otherwise report the global ones.
 */
static int loadavg_proc_show(struct seq_file *m, void *v)
{
	unsigned long avnrun[3], nr_runnable = 0;
	struct cpumask cpus_allowed;
	int i;

	rcu_read_lock();
	if (task_in_nonroot_cpuacct(current) &&
		in_noninit_pid_ns(current->nsproxy->pid_ns)) {

		/* Container view: load averages tracked on the task's cpuacct. */
		get_avenrun_from_tsk(current, avnrun, FIXED_1/200, 0);

		/*
		 * Default to all possible CPUs, then narrow to the cpuset's
		 * allowed mask when the task belongs to a cpuset.
		 */
		cpumask_copy(&cpus_allowed, cpu_possible_mask);
		if (task_subsys_state(current, cpuset_subsys_id)) {
			memset(&cpus_allowed, 0, sizeof(cpus_allowed));
			get_tsk_cpu_allowed(current, &cpus_allowed);
		}

		/* Sum runnable tasks of this cpuacct over the allowed CPUs. */
		for_each_cpu_and(i, cpu_possible_mask, &cpus_allowed)
			nr_runnable += task_ca_running(current, i);

	} else {
		get_avenrun(avnrun, FIXED_1/200, 0);
		nr_runnable = nr_running();
	}
	rcu_read_unlock();

	/*
	 * BUG FIX: previously printed global nr_running() here with "%ld",
	 * discarding the nr_runnable value computed above, so the container
	 * branch never affected the "running/threads" field. Print the
	 * selected nr_runnable with "%lu" (it is unsigned long).
	 */
	seq_printf(m, "%lu.%02lu %lu.%02lu %lu.%02lu %lu/%d %d\n",
		LOAD_INT(avnrun[0]), LOAD_FRAC(avnrun[0]),
		LOAD_INT(avnrun[1]), LOAD_FRAC(avnrun[1]),
		LOAD_INT(avnrun[2]), LOAD_FRAC(avnrun[2]),
		nr_runnable, nr_threads,
		task_active_pid_ns(current)->last_pid);
	return 0;
}
Example #2
0
/*
 * Send the given IPI vector to every online CPU in @mask.
 * Offline CPUs present in @mask are silently skipped.
 */
static void xen_send_IPI_mask(const struct cpumask *mask,
			      enum ipi_vector vector)
{
	unsigned target;

	for_each_cpu_and(target, mask, cpu_online_mask)
		xen_send_IPI_one(target, vector);
}
Example #3
0
/*
 * Sysfs store handler for an RX queue's rps_cpus attribute.
 * Parses @buf as a CPU bitmap, builds a new rps_map restricted to
 * online CPUs, and publishes it via RCU, retiring the old map.
 * Returns @len on success or a negative errno.
 */
static ssize_t store_rps_map(struct netdev_rx_queue *queue,
			     const char *buf, size_t len)
{
	struct rps_map *old_map, *map;
	cpumask_var_t mask;
	int err, cpu, i;
	/* Serializes concurrent writers across all queues. */
	static DEFINE_MUTEX(rps_map_mutex);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	/* Parse the user-supplied hex bitmap into the cpumask. */
	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	/*
	 * Sized for every CPU in the parsed mask (an upper bound on how
	 * many entries get filled below); at least L1_CACHE_BYTES to keep
	 * the map cacheline-sized.
	 */
	map = kzalloc(max_t(unsigned int,
			    RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
		      GFP_KERNEL);
	if (!map) {
		free_cpumask_var(mask);
		return -ENOMEM;
	}

	/* Record only CPUs that are both requested and currently online. */
	i = 0;
	for_each_cpu_and(cpu, mask, cpu_online_mask)
		map->cpus[i++] = cpu;

	if (i) {
		map->len = i;
	} else {
		/* No usable CPUs: publish NULL to disable RPS on this queue. */
		kfree(map);
		map = NULL;
	}

	mutex_lock(&rps_map_mutex);
	old_map = rcu_dereference_protected(queue->rps_map,
					    mutex_is_locked(&rps_map_mutex));
	rcu_assign_pointer(queue->rps_map, map);

	/* Keep the rps_needed static key's refcount matching live maps. */
	if (map)
		static_key_slow_inc(&rps_needed);
	if (old_map)
		static_key_slow_dec(&rps_needed);

	mutex_unlock(&rps_map_mutex);

	/* Free the old map only after a grace period; readers may hold it. */
	if (old_map)
		kfree_rcu(old_map, rcu);

	free_cpumask_var(mask);
	return len;
}
Example #4
0
/*
 * Deliver @vector to every online CPU in @cpumask with local
 * interrupts disabled. Warns if the mask names any offline CPU,
 * but still delivers to the online subset.
 */
void xen_send_IPI_mask(const struct cpumask *cpumask, int vector)
{
	unsigned long flags;
	unsigned int target;

	local_irq_save(flags);

	/* Callers are expected to pass only online CPUs. */
	WARN_ON(!cpumask_subset(cpumask, cpu_online_mask));

	for_each_cpu_and(target, cpumask, cpu_online_mask)
		notify_remote_via_ipi(vector, target);

	local_irq_restore(flags);
}
Example #5
0
/*
 * Tear down the vector assignment for @irq: clear the per-cpu
 * vector_irq entries on every online CPU in the irq's domain, mark
 * the irq unassigned/unused, and release the domain's CPUs from the
 * vector's allocation table.
 */
static void __clear_irq_vector(int irq)
{
	int vector, cpu;
	cpumask_t domain;
	struct irq_cfg *cfg = &irq_cfg[irq];

	BUG_ON((unsigned)irq >= NR_IRQS);
	BUG_ON(cfg->vector == IRQ_VECTOR_UNASSIGNED);
	/* Snapshot before cfg is reset; domain is needed again at the end. */
	vector = cfg->vector;
	domain = cfg->domain;
	/* Unhook the vector from every online CPU that had it mapped. */
	for_each_cpu_and(cpu, &cfg->domain, cpu_online_mask)
		per_cpu(vector_irq, cpu)[vector] = -1;
	cfg->vector = IRQ_VECTOR_UNASSIGNED;
	cfg->domain = CPU_MASK_NONE;
	irq_status[irq] = IRQ_UNUSED;
	/* Release the snapshotted domain CPUs from this vector's table. */
	cpumask_andnot(&vector_table[vector], &vector_table[vector], &domain);
}