Example #1
static ssize_t default_affinity_write(struct file *file,
                                      const char __user *buffer, size_t count, loff_t *ppos)
{
    cpumask_t new_value;
    int err;

    err = cpumask_parse_user(buffer, count, new_value);
    if (err)
        return err;

    if (!is_affinity_mask_valid(new_value))
        return -EINVAL;

    /*
     * Do not allow disabling IRQs completely - it's too easy a
     * way to make the system accidentally unusable :-) At least
     * one online CPU still has to be targeted.
     */
    if (!cpus_intersects(new_value, cpu_online_map))
        return -EINVAL;

    irq_default_affinity = new_value;

    return count;
}
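
Example #1 rejects any new default affinity that does not target at least one online CPU; the test reduces to "the bitwise AND of the two masks is non-empty". A minimal userspace sketch of that semantics, with a plain unsigned long standing in for cpumask_t (illustration only, not kernel code):

#include <stdio.h>

/* Userspace stand-in for cpus_intersects(): true if the two masks share a bit. */
static int masks_intersect(unsigned long a, unsigned long b)
{
	return (a & b) != 0;
}

int main(void)
{
	unsigned long online    = 0x3;	/* CPUs 0 and 1 are online */
	unsigned long requested = 0x4;	/* user asks for CPU 2 only */

	/* Mirrors the -EINVAL check in default_affinity_write() above. */
	if (!masks_intersect(requested, online))
		printf("rejected: no online CPU targeted\n");
	else
		printf("accepted\n");
	return 0;
}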
Example #2
/*
 * Generic version of the affinity autoselector.
 */
int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
{
	cpumask_t mask;

	if (!irq_can_set_affinity(irq))
		return 0;

	cpus_and(mask, cpu_online_map, irq_default_affinity);

	/*
	 * Preserve a userspace affinity setup, but make sure that
	 * one of the targets is online.
	 */
	if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
		if (cpus_intersects(desc->affinity, cpu_online_map))
			mask = desc->affinity;
		else
			desc->status &= ~IRQ_AFFINITY_SET;
	}

	desc->affinity = mask;
	desc->chip->set_affinity(irq, mask);

	return 0;
}
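
The autoselector keeps a previously configured affinity only while it still overlaps the online CPUs; otherwise the IRQ_AFFINITY_SET flag is dropped and the mask falls back to the intersection of cpu_online_map and irq_default_affinity. A minimal userspace model of that selection, using hypothetical names and single-word bitmasks:

#include <stdio.h>

/*
 * Userspace model of do_irq_select_affinity()'s mask selection:
 * prefer the previously configured affinity if it still overlaps the
 * online CPUs, otherwise fall back to (online & default).
 */
static unsigned long pick_affinity(int affinity_was_set,
				   unsigned long configured,
				   unsigned long online,
				   unsigned long def)
{
	unsigned long mask = online & def;

	if (affinity_was_set && (configured & online))
		mask = configured;
	return mask;
}

int main(void)
{
	/* CPU 3 went offline: the configured mask 0x8 no longer applies. */
	printf("0x%lx\n", pick_affinity(1, 0x8, 0x7, 0xf));	/* -> 0x7 */
	/* Configured mask still overlaps the online CPUs: keep it. */
	printf("0x%lx\n", pick_affinity(1, 0x2, 0x7, 0xf));	/* -> 0x2 */
	return 0;
}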
Example #3
static ssize_t irq_affinity_proc_write(struct file *file,
                                       const char __user *buffer, size_t count, loff_t *pos)
{
    unsigned int irq = (int)(long)PDE(file->f_path.dentry->d_inode)->data;
    cpumask_t new_value;
    int err;

    if (!irq_to_desc(irq)->chip->set_affinity || no_irq_affinity ||
            irq_balancing_disabled(irq))
        return -EIO;

    err = cpumask_parse_user(buffer, count, new_value);
    if (err)
        return err;

    if (!is_affinity_mask_valid(new_value))
        return -EINVAL;

    /*
     * Do not allow disabling IRQs completely - it's too easy a
     * way to make the system accidentally unusable :-) At least
     * one online CPU still has to be targeted.
     */
    if (!cpus_intersects(new_value, cpu_online_map))
        /* Special case for empty set - allow the architecture
           code to set default SMP affinity. */
        return irq_select_affinity(irq) ? -EINVAL : count;

    irq_set_affinity(irq, new_value);

    return count;
}
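
Unlike Example #1, a mask with no online CPU is not rejected outright here: irq_select_affinity() first gets a chance to install an architecture default. The mask itself arrives as a hex string through /proc and is decoded by cpumask_parse_user(). A rough userspace approximation of that parse-and-check step (strtoul instead of the kernel bitmap parser, single-word masks only):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *written = "f0";	/* what a user might echo into smp_affinity */
	unsigned long online = 0x0f;	/* CPUs 0-3 online */
	unsigned long new_value;

	/* Crude stand-in for cpumask_parse_user(): hex string -> bitmask. */
	new_value = strtoul(written, NULL, 16);

	if (!(new_value & online))
		printf("no online CPU in 0x%lx: fall back to a default affinity\n",
		       new_value);
	else
		printf("applying affinity 0x%lx\n", new_value);
	return 0;
}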
Example #4
/* Called from set_cpus_allowed().
 * PRE: current holds task_lock(owner)
 * PRE: owner->thread.perfctr == perfctr
 */
void __vperfctr_set_cpus_allowed(struct task_struct *owner,
				 struct vperfctr *perfctr,
				 cpumask_t new_mask)
{
	if (cpus_intersects(new_mask, perfctr_cpus_forbidden_mask)) {
		atomic_set(&perfctr->bad_cpus_allowed, 1);
		if (printk_ratelimit())
			printk(KERN_WARNING "perfctr: process %d (comm %s) issued unsafe"
				" set_cpus_allowed() on process %d (comm %s)\n",
			    	current->pid, current->comm, owner->pid, owner->comm);
	} else
		atomic_set(&perfctr->bad_cpus_allowed, 0);
}
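
This perfctr hook records in an atomic flag whether a set_cpus_allowed() request overlaps CPUs on which performance counters are forbidden, and warns with rate limiting. The sketch below reproduces just the overlap test in userspace (hypothetical names, C11 atomics, plain bitmasks; not the perfctr API):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int bad_cpus_allowed;

/* Flag the request when the new CPU set touches the forbidden set. */
static void check_cpus_allowed(unsigned long new_mask, unsigned long forbidden)
{
	if (new_mask & forbidden) {
		atomic_store(&bad_cpus_allowed, 1);
		fprintf(stderr, "warning: mask 0x%lx touches forbidden CPUs 0x%lx\n",
			new_mask, forbidden);
	} else {
		atomic_store(&bad_cpus_allowed, 0);
	}
}

int main(void)
{
	check_cpus_allowed(0x6, 0x4);	/* overlaps: flag set */
	check_cpus_allowed(0x3, 0x4);	/* disjoint: flag cleared */
	return 0;
}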
Example #5
static void find_best_object(struct topo_obj *d, void *data)
{
	struct obj_placement *best = (struct obj_placement *)data;
	uint64_t newload;
	cpumask_t subset;

	/*
	 * Don't consider the unspecified numa node here
	 */
	if (numa_avail && (d->obj_type == OBJ_TYPE_NODE) && (d->number == -1))
		return;

	/*
	 * also don't consider any node that doesn't have at least one cpu in
	 * the unbanned list
	 */
	if ((d->obj_type == OBJ_TYPE_NODE) &&
	    (!cpus_intersects(d->mask, unbanned_cpus)))
		return;

	/*
	 * If the hint policy is subset, then we only want
	 * to consider objects that are within the irq's hint, but
	 * only if that irq has in fact published a hint
	 */
	if (best->info->hint_policy == HINT_POLICY_SUBSET) {
		if (!cpus_empty(best->info->affinity_hint)) {
			cpus_and(subset, best->info->affinity_hint, d->mask);
			if (cpus_empty(subset))
				return;
		}
	}

	if (d->powersave_mode)
		return;

	newload = d->load;
	if (newload < best->best_cost) {
		best->best = d;
		best->best_cost = newload;
		best->least_irqs = NULL;
	}

	if (newload == best->best_cost) {
		if (g_list_length(d->interrupts) < g_list_length(best->best->interrupts))
			best->least_irqs = d;
	}
}
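
find_best_object() skips any candidate whose CPU mask fails to overlap the relevant set - nodes outside the unbanned CPUs and, under HINT_POLICY_SUBSET, objects outside the IRQ's published hint - and tracks the lowest load among the rest. A condensed userspace sketch of that filter-then-select pattern (simplified types, not the irqbalance data structures):

#include <stdio.h>

struct obj {
	unsigned long mask;	/* CPUs covered by this object */
	unsigned long load;
};

/* Pick the least-loaded object whose mask overlaps both filters. */
static const struct obj *find_best(const struct obj *objs, int n,
				   unsigned long unbanned,
				   unsigned long hint)
{
	const struct obj *best = NULL;

	for (int i = 0; i < n; i++) {
		if (!(objs[i].mask & unbanned))
			continue;	/* no usable CPU here */
		if (hint && !(objs[i].mask & hint))
			continue;	/* outside the published hint */
		if (!best || objs[i].load < best->load)
			best = &objs[i];
	}
	return best;
}

int main(void)
{
	struct obj objs[] = { { 0x3, 10 }, { 0xc, 5 }, { 0x30, 1 } };
	const struct obj *b = find_best(objs, 3, 0x0f /* unbanned */, 0x4 /* hint */);

	printf("best mask 0x%lx load %lu\n", b ? b->mask : 0, b ? b->load : 0);
	return 0;
}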
Example #6
void domain_update_node_affinity(struct domain *d)
{
    cpumask_t cpumask = CPU_MASK_NONE;
    nodemask_t nodemask = NODE_MASK_NONE;
    struct vcpu *v;
    unsigned int node;

    spin_lock(&d->node_affinity_lock);

    for_each_vcpu ( d, v )
        cpus_or(cpumask, cpumask, v->cpu_affinity);

    for_each_online_node ( node )
        if ( cpus_intersects(node_to_cpumask(node), cpumask) )
            node_set(node, nodemask);

    d->node_affinity = nodemask;
    spin_unlock(&d->node_affinity_lock);
}
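
Xen computes a domain's node affinity by OR-ing all vCPU affinities into a single CPU mask and then marking every node whose CPUs intersect it. A small userspace sketch of the same two-pass computation, with fixed arrays in place of Xen's vCPU list and node iterators (illustrative values only):

#include <stdio.h>

int main(void)
{
	/* Per-vCPU CPU affinities and per-node CPU maps (illustrative values). */
	unsigned long vcpu_affinity[] = { 0x1, 0x2, 0x8 };
	unsigned long node_to_cpumask[] = { 0x0f, 0xf0 };	/* node 0: CPUs 0-3, node 1: CPUs 4-7 */
	unsigned long cpumask = 0, nodemask = 0;

	/* OR all vCPU affinities together ... */
	for (int v = 0; v < 3; v++)
		cpumask |= vcpu_affinity[v];

	/* ... then mark every node whose CPUs intersect that mask. */
	for (int node = 0; node < 2; node++)
		if (node_to_cpumask[node] & cpumask)
			nodemask |= 1UL << node;

	printf("cpumask 0x%lx -> nodemask 0x%lx\n", cpumask, nodemask);	/* 0xb -> 0x1 */
	return 0;
}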
Example #7
/*
 * mipsmt_sys_sched_setaffinity - set the cpu affinity of a process
 */
asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
				      unsigned long __user *user_mask_ptr)
{
	cpumask_t new_mask;
	cpumask_t effective_mask;
	int retval;
	struct task_struct *p;
	struct thread_info *ti;
	uid_t euid;

	if (len < sizeof(new_mask))
		return -EINVAL;

	if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
		return -EFAULT;

	get_online_cpus();
	read_lock(&tasklist_lock);

	p = find_process_by_pid(pid);
	if (!p) {
		read_unlock(&tasklist_lock);
		put_online_cpus();
		return -ESRCH;
	}

	/*
	 * It is not safe to call set_cpus_allowed with the
	 * tasklist_lock held.  We will bump the task_struct's
	 * usage count and drop tasklist_lock before invoking
	 * set_cpus_allowed.
	 */
	get_task_struct(p);

	euid = current_euid();
	retval = -EPERM;
	if (euid != p->cred->euid && euid != p->cred->uid &&
	    !capable(CAP_SYS_NICE)) {
		read_unlock(&tasklist_lock);
		goto out_unlock;
	}

	retval = security_task_setscheduler(p, 0, NULL);
	if (retval)
		goto out_unlock;

	/* Record new user-specified CPU set for future reference */
	p->thread.user_cpus_allowed = new_mask;

	/* Unlock the task list */
	read_unlock(&tasklist_lock);

	/* Compute new global allowed CPU set if necessary */
	ti = task_thread_info(p);
	if (test_ti_thread_flag(ti, TIF_FPUBOUND) &&
	    cpus_intersects(new_mask, mt_fpu_cpumask)) {
		cpus_and(effective_mask, new_mask, mt_fpu_cpumask);
		retval = set_cpus_allowed_ptr(p, &effective_mask);
	} else {
		clear_ti_thread_flag(ti, TIF_FPUBOUND);
		retval = set_cpus_allowed_ptr(p, &new_mask);
	}

out_unlock:
	put_task_struct(p);
	put_online_cpus();
	return retval;
}
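
The MIPS MT variant keeps FPU-bound tasks on FPU-capable CPUs: when TIF_FPUBOUND is set and the requested mask overlaps mt_fpu_cpumask, the effective mask is their intersection; otherwise the flag is cleared and the raw request is used. A userspace sketch of just that mask computation (assumed names, plain bitmasks):

#include <stdio.h>

/* Restrict an FPU-bound task to FPU-capable CPUs when possible. */
static unsigned long effective_affinity(int fpubound, unsigned long requested,
					unsigned long fpu_cpus)
{
	if (fpubound && (requested & fpu_cpus))
		return requested & fpu_cpus;	/* keep the FPU restriction */
	return requested;			/* TIF_FPUBOUND would be cleared here */
}

int main(void)
{
	unsigned long fpu_cpus = 0x3;	/* only CPUs 0-1 have an FPU */

	printf("0x%lx\n", effective_affinity(1, 0x7, fpu_cpus));	/* -> 0x3 */
	printf("0x%lx\n", effective_affinity(1, 0x4, fpu_cpus));	/* -> 0x4 */
	return 0;
}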
Example #8
/*
 * mipsmt_sys_sched_setaffinity - set the cpu affinity of a process
 */
asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
				      unsigned long __user *user_mask_ptr)
{
	cpumask_var_t cpus_allowed, new_mask, effective_mask;
	struct thread_info *ti;
	struct task_struct *p;
	int retval;

	if (len < sizeof(new_mask))
		return -EINVAL;

	if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
		return -EFAULT;

	get_online_cpus();
	rcu_read_lock();

	p = find_process_by_pid(pid);
	if (!p) {
		rcu_read_unlock();
		put_online_cpus();
		return -ESRCH;
	}

	/* Prevent p going away */
	get_task_struct(p);
	rcu_read_unlock();

	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_put_task;
	}
	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_free_cpus_allowed;
	}
	if (!alloc_cpumask_var(&effective_mask, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_free_new_mask;
	}
	retval = -EPERM;
	if (!check_same_owner(p) && !capable(CAP_SYS_NICE))
		goto out_unlock;

	retval = security_task_setscheduler(p);
	if (retval)
		goto out_unlock;

	/* Record new user-specified CPU set for future reference */
	cpumask_copy(&p->thread.user_cpus_allowed, new_mask);

 again:
	/* Compute new global allowed CPU set if necessary */
	ti = task_thread_info(p);
	if (test_ti_thread_flag(ti, TIF_FPUBOUND) &&
	    cpus_intersects(*new_mask, mt_fpu_cpumask)) {
		cpus_and(*effective_mask, *new_mask, mt_fpu_cpumask);
		retval = set_cpus_allowed_ptr(p, effective_mask);
	} else {
		cpumask_copy(effective_mask, new_mask);
		clear_ti_thread_flag(ti, TIF_FPUBOUND);
		retval = set_cpus_allowed_ptr(p, new_mask);
	}

	if (!retval) {
		cpuset_cpus_allowed(p, cpus_allowed);
		if (!cpumask_subset(effective_mask, cpus_allowed)) {
			/*
			 * We must have raced with a concurrent cpuset
			 * update. Just reset the cpus_allowed to the
			 * cpuset's cpus_allowed
			 */
			cpumask_copy(new_mask, cpus_allowed);
			goto again;
		}
	}
out_unlock:
	free_cpumask_var(effective_mask);
out_free_new_mask:
	free_cpumask_var(new_mask);
out_free_cpus_allowed:
	free_cpumask_var(cpus_allowed);
out_put_task:
	put_task_struct(p);
	put_online_cpus();
	return retval;
}
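
The later version switches to cpumask_var_t with goto-based cleanup and adds a retry loop: after applying the mask it re-reads the cpuset's allowed CPUs and, if the effective mask is no longer a subset (a concurrent cpuset update), restarts with the cpuset's mask. A compact userspace model of that subset check and retry (single-word masks, hypothetical helper name):

#include <stdio.h>

/* True if every bit of a is also set in b, i.e. a is a subset of b. */
static int mask_subset(unsigned long a, unsigned long b)
{
	return (a & ~b) == 0;
}

int main(void)
{
	unsigned long new_mask = 0x6;		/* requested CPUs 1-2 */
	unsigned long cpus_allowed = 0x3;	/* cpuset shrank to CPUs 0-1 meanwhile */

	/* Mirrors the "goto again" path: clamp to the cpuset and retry. */
	while (!mask_subset(new_mask, cpus_allowed)) {
		printf("raced with a cpuset update: retrying with 0x%lx\n",
		       cpus_allowed);
		new_mask = cpus_allowed;
	}
	printf("final mask 0x%lx\n", new_mask);
	return 0;
}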
Example #9
static void do_one_cpu(char *path)
{
	struct topo_obj *cpu;
	FILE *file;
	char new_path[PATH_MAX];
	cpumask_t cache_mask, package_mask;
	struct topo_obj *cache;
	struct topo_obj *package;
	DIR *dir;
	struct dirent *entry;
	int nodeid;
	int packageid = 0;
	unsigned int max_cache_index, cache_index, cache_stat;

	/* skip offline cpus */
	snprintf(new_path, PATH_MAX, "%s/online", path);
	file = fopen(new_path, "r");
	if (file) {
		char *line = NULL;
		size_t size = 0;
		if (getline(&line, &size, file) <= 0) {
			/* Read failed: don't leak the file handle or the buffer. */
			fclose(file);
			free(line);
			return;
		}
		fclose(file);
		if (line && line[0]=='0') {
			free(line);
			return;
		}
		free(line);
	}

	cpu = calloc(sizeof(struct topo_obj), 1);
	if (!cpu)
		return;

	cpu->obj_type = OBJ_TYPE_CPU;

	cpu->number = strtoul(&path[27], NULL, 10);

	cpu_set(cpu->number, cpu_possible_map);
	
	cpu_set(cpu->number, cpu->mask);

	/*
	 * Default the cache_domain mask to be equal to the cpu
	 */
	cpus_clear(cache_mask);
	cpu_set(cpu->number, cache_mask);

	/* if the cpu is on the banned list, just don't add it */
	if (cpus_intersects(cpu->mask, banned_cpus)) {
		free(cpu);
		/* even though we don't use the cpu we do need to count it */
		core_count++;
		return;
	}


	/* try to read the package mask; if it doesn't exist assume solitary */
	snprintf(new_path, PATH_MAX, "%s/topology/core_siblings", path);
	file = fopen(new_path, "r");
	cpu_set(cpu->number, package_mask);
	if (file) {
		char *line = NULL;
		size_t size = 0;
		if (getline(&line, &size, file)) 
			cpumask_parse_user(line, strlen(line), package_mask);
		fclose(file);
		free(line);
	}
	/* try to read the package id */
	snprintf(new_path, PATH_MAX, "%s/topology/physical_package_id", path);
	file = fopen(new_path, "r");
	if (file) {
		char *line = NULL;
		size_t size = 0;
		if (getline(&line, &size, file))
			packageid = strtoul(line, NULL, 10);
		fclose(file);
		free(line);
	}

	/* try to read the cache mask; if it doesn't exist assume solitary */
	/* We want the deepest cache level available */
	cpu_set(cpu->number, cache_mask);
	max_cache_index = 0;
	cache_index = 1;
	cache_stat = 0;
	do {
		struct stat sb;
		snprintf(new_path, PATH_MAX, "%s/cache/index%d/shared_cpu_map", path, cache_index);
		cache_stat = stat(new_path, &sb);
		if (!cache_stat) {
			max_cache_index = cache_index;
			if (max_cache_index == deepest_cache)
				break;
			cache_index ++;
		}
	} while(!cache_stat);

	if (max_cache_index > 0) {
		snprintf(new_path, PATH_MAX, "%s/cache/index%d/shared_cpu_map", path, max_cache_index);
		file = fopen(new_path, "r");
		if (file) {
			char *line = NULL;
			size_t size = 0;
			if (getline(&line, &size, file))
				cpumask_parse_user(line, strlen(line), cache_mask);
			fclose(file);
			free(line);
		}
	}

	nodeid=-1;
	if (numa_avail) {
		dir = opendir(path);
		do {
			entry = readdir(dir);
			if (!entry)
				break;
			if (strstr(entry->d_name, "node")) {
				nodeid = strtoul(&entry->d_name[4], NULL, 10);
				break;
			}
		} while (entry);
		closedir(dir);
	}

	/*
	   blank out the banned cpus from the various masks so that interrupts
	   will never be told to go there
	 */
	cpus_and(cache_mask, cache_mask, unbanned_cpus);
	cpus_and(package_mask, package_mask, unbanned_cpus);

	cache = add_cpu_to_cache_domain(cpu, cache_mask);
	package = add_cache_domain_to_package(cache, packageid, package_mask);
	add_package_to_node(package, nodeid);

	cpu->obj_type_list = &cpus;
	cpus = g_list_append(cpus, cpu);
	core_count++;
}
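
do_one_cpu() probes cache/index1, index2, ... until stat() fails or the configured depth is reached, then reads shared_cpu_map for the deepest level found. A standalone sketch of that probing loop (POSIX stat(); the sysfs path and the deepest_cache limit are illustrative assumptions):

#include <limits.h>
#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	const char *path = "/sys/devices/system/cpu/cpu0";
	const unsigned int deepest_cache = 2;	/* stop early, like irqbalance's depth option */
	char new_path[PATH_MAX];
	unsigned int max_cache_index = 0, cache_index = 1;
	struct stat sb;

	/* Walk cache/indexN until it stops existing or the depth limit is hit. */
	while (1) {
		snprintf(new_path, sizeof(new_path),
			 "%s/cache/index%u/shared_cpu_map", path, cache_index);
		if (stat(new_path, &sb) != 0)
			break;
		max_cache_index = cache_index;
		if (max_cache_index == deepest_cache)
			break;
		cache_index++;
	}

	printf("deepest shared cache level found: index%u\n", max_cache_index);
	return 0;
}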