/*
 * Write handler for /proc/irq/default_smp_affinity: parse a
 * user-supplied cpumask and install it as the default affinity used
 * for newly set up IRQs.
 *
 * Returns the number of bytes consumed on success, -ENOMEM if the
 * temporary mask cannot be allocated, the parse error from
 * cpumask_parse_user(), or -EINVAL for an unusable mask.
 */
static ssize_t default_affinity_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *ppos)
{
	cpumask_var_t mask;
	int ret;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	ret = cpumask_parse_user(buffer, count, mask);
	if (ret)
		goto out_free;

	/*
	 * Reject masks the architecture considers invalid, and never
	 * allow disabling IRQs completely - it's a too easy way to make
	 * the system unusable accidentally :-) At least one online CPU
	 * still has to be targeted.
	 */
	if (!is_affinity_mask_valid(mask) ||
	    !cpumask_intersects(mask, cpu_online_mask)) {
		ret = -EINVAL;
		goto out_free;
	}

	cpumask_copy(irq_default_affinity, mask);
	ret = count;

out_free:
	free_cpumask_var(mask);
	return ret;
}
/*
 * Write handler for /proc/irq/default_smp_affinity: parse a cpumask
 * from userspace and make it the default affinity for new IRQs.
 *
 * Returns @count on success, the parse error from
 * cpumask_parse_user(), or -EINVAL for an unusable mask.
 *
 * NOTE(review): the cpumask lives on the kernel stack here; with very
 * large NR_CPUS configurations an allocated cpumask_var_t would be
 * preferable - confirm against the configured cpumask size.
 */
static ssize_t default_affinity_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *ppos)
{
	cpumask_t mask;
	int ret;

	ret = cpumask_parse_user(buffer, count, mask);
	if (ret)
		return ret;

	/*
	 * Reject masks the architecture considers invalid, and do not
	 * allow disabling IRQs completely - it's a too easy way to make
	 * the system unusable accidentally :-) At least one online CPU
	 * still has to be targeted.
	 */
	if (!is_affinity_mask_valid(mask) ||
	    !cpus_intersects(mask, cpu_online_map))
		return -EINVAL;

	irq_default_affinity = mask;

	return count;
}
/*
 * Legacy procfs write_proc handler for /proc/irq/<n>/smp_affinity:
 * parse a cpumask from userspace and apply it to the IRQ encoded in
 * @data.
 *
 * Returns the full byte count on success, -EIO if this IRQ's affinity
 * cannot be changed, -EINVAL for an unusable mask, or the parse error
 * from cpumask_parse_user().
 */
static int irq_affinity_write_proc(struct file *file,
		const char __user *buffer, unsigned long count, void *data)
{
	unsigned int irq = (int)(long)data, full_count = count;
	cpumask_t new_value, tmp;
	/*
	 * Must be signed: this holds negative errno values. It was
	 * previously declared unsigned int, which relied on
	 * implementation-defined unsigned->int conversion on return.
	 */
	int err;

	if (!irq_desc[irq].chip->set_affinity || no_irq_affinity ||
	    irq_balancing_disabled(irq))
		return -EIO;

	err = cpumask_parse_user(buffer, count, new_value);
	if (err)
		return err;

	if (!is_affinity_mask_valid(new_value))
		return -EINVAL;

	/*
	 * Do not allow disabling IRQs completely - it's a too easy
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	cpus_and(tmp, new_value, cpu_online_map);
	if (cpus_empty(tmp))
		/* Special case for empty set - allow the architecture
		   code to set default SMP affinity. */
		return select_smp_affinity(irq) ? -EINVAL : full_count;

	irq_set_affinity(irq, new_value);

	return full_count;
}
/*
 * Write handler for /proc/irq/<n>/smp_affinity: parse a cpumask from
 * userspace and apply it to the IRQ stored in the proc entry's data
 * pointer.
 *
 * Returns @count on success, -EIO when this IRQ's affinity cannot be
 * changed, -EINVAL for a rejected mask, or the parse error from
 * cpumask_parse_user().
 */
static ssize_t irq_affinity_proc_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	unsigned int irq = (int)(long)PDE(file->f_path.dentry->d_inode)->data;
	cpumask_t mask;
	int ret;

	if (!irq_to_desc(irq)->chip->set_affinity || no_irq_affinity ||
	    irq_balancing_disabled(irq))
		return -EIO;

	ret = cpumask_parse_user(buffer, count, mask);
	if (ret)
		return ret;

	if (!is_affinity_mask_valid(mask))
		return -EINVAL;

	/*
	 * Do not allow disabling IRQs completely - it's a too easy
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 *
	 * A mask with no online CPUs is the special case that lets
	 * the architecture code pick a default SMP affinity instead.
	 */
	if (!cpus_intersects(mask, cpu_online_map))
		return irq_select_affinity(irq) ? -EINVAL : count;

	irq_set_affinity(irq, mask);

	return count;
}