static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
			     struct cpumask *new_mask)
{
	if (len < cpumask_size())
		cpumask_clear(new_mask);
	else if (len > cpumask_size())
		len = cpumask_size();

	return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
}
static int compat_get_user_cpu_mask(compat_ulong_t __user *user_mask_ptr,
				    unsigned len, struct cpumask *new_mask)
{
	unsigned long *k;

	if (len < cpumask_size())
		memset(new_mask, 0, cpumask_size());
	else if (len > cpumask_size())
		len = cpumask_size();

	k = cpumask_bits(new_mask);
	return compat_get_bitmap(k, user_mask_ptr, len * 8);
}
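/*
 * Hedged usage sketch: these copy-in helpers are designed to be called
 * from the sched_setaffinity() syscall paths. A minimal caller, modelled
 * on the native syscall, might look like this.
 */
SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
		unsigned long __user *, user_mask_ptr)
{
	cpumask_var_t new_mask;
	int retval;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
		return -ENOMEM;

	/* Truncate or zero-extend the user mask to cpumask_size() bytes. */
	retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
	if (retval == 0)
		retval = sched_setaffinity(pid, new_mask);
	free_cpumask_var(new_mask);
	return retval;
}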
asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len,
					     compat_ulong_t __user *user_mask_ptr)
{
	int ret;
	cpumask_var_t mask;

	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
		return -EINVAL;
	if (len & (sizeof(compat_ulong_t)-1))
		return -EINVAL;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	ret = sched_getaffinity(pid, mask);
	if (ret == 0) {
		size_t retlen = min_t(size_t, len, cpumask_size());

		if (compat_put_bitmap(user_mask_ptr, cpumask_bits(mask), retlen * 8))
			ret = -EFAULT;
		else
			ret = retlen;
	}
	free_cpumask_var(mask);

	return ret;
}
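/*
 * Hedged userspace sketch (not part of the kernel excerpts): on success
 * the raw syscall above returns the number of mask bytes written, while
 * the glibc wrapper used here hides that and returns 0.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	if (sched_getaffinity(0, sizeof(set), &set) != 0) {
		perror("sched_getaffinity");
		return 1;
	}
	printf("CPU 0 allowed: %d\n", CPU_ISSET(0, &set));
	return 0;
}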
/*
 * Take a map of online CPUs and the number of available interrupt vectors
 * and generate an output cpumask suitable for spreading MSI/MSI-X vectors
 * so that they are distributed as well as possible around the CPUs. If
 * more vectors than CPUs are available we'll map one to each CPU,
 * otherwise we map one to the first sibling of each socket.
 *
 * If there are more vectors than CPUs we will still only have one bit
 * set per CPU, but interrupt code will keep on assigning the vectors from
 * the start of the bitmap until we run out of vectors.
 */
struct cpumask *irq_create_affinity_mask(unsigned int *nr_vecs)
{
	struct cpumask *affinity_mask;
	unsigned int max_vecs = *nr_vecs;

	if (max_vecs == 1)
		return NULL;

	affinity_mask = kzalloc(cpumask_size(), GFP_KERNEL);
	if (!affinity_mask) {
		*nr_vecs = 1;
		return NULL;
	}

	if (max_vecs >= num_online_cpus()) {
		cpumask_copy(affinity_mask, cpu_online_mask);
		*nr_vecs = num_online_cpus();
	} else {
		unsigned int vecs = 0, cpu;

		for_each_online_cpu(cpu) {
			if (cpu == get_first_sibling(cpu)) {
				cpumask_set_cpu(cpu, affinity_mask);
				vecs++;
			}

			if (--max_vecs == 0)
				break;
		}
		*nr_vecs = vecs;
	}

	return affinity_mask;
}
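/*
 * Hedged usage sketch: how a caller might consume the mask built by
 * irq_create_affinity_mask(). example_setup_vectors() and its vector
 * request step are hypothetical, not a real kernel API.
 */
static int example_setup_vectors(unsigned int want_vecs)
{
	unsigned int nr_vecs = want_vecs;
	struct cpumask *affinity;

	/* nr_vecs may be reduced to the number of vectors actually spread */
	affinity = irq_create_affinity_mask(&nr_vecs);

	/* ... request nr_vecs MSI/MSI-X vectors, honouring 'affinity' ... */

	kfree(affinity);	/* the caller owns the kzalloc'ed mask */
	return 0;
}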
asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len,
					     compat_ulong_t __user *user_mask_ptr)
{
	int ret;
	cpumask_var_t mask;
	unsigned long *k;
	unsigned int min_length = cpumask_size();

	if (nr_cpu_ids <= BITS_PER_COMPAT_LONG)
		min_length = sizeof(compat_ulong_t);

	if (len < min_length)
		return -EINVAL;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	ret = sched_getaffinity(pid, mask);
	if (ret < 0)
		goto out;

	k = cpumask_bits(mask);
	ret = compat_put_bitmap(user_mask_ptr, k, min_length * 8);
	if (ret == 0)
		ret = min_length;

out:
	free_cpumask_var(mask);
	return ret;
}
/**
 * alloc_cpumask_var_node - allocate a struct cpumask on a given node
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 * @node: memory node from which to allocate
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop returning a constant 1 (in <linux/cpumask.h>).
 * Returns TRUE if memory allocation succeeded, FALSE otherwise.
 *
 * In addition, mask will be NULL if this fails. Note that gcc is
 * usually smart enough to know that mask can never be NULL if
 * CONFIG_CPUMASK_OFFSTACK=n, so it does code elimination in that case
 * too.
 */
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	*mask = kmalloc_node(cpumask_size(), flags, node);

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (!*mask) {
		printk(KERN_ERR "=> alloc_cpumask_var: failed!\n");
		dump_stack();
	}
#endif

	/* FIXME: Bandaid to save us from old primitives which go to NR_CPUS. */
	if (*mask) {
		unsigned char *ptr = (unsigned char *)cpumask_bits(*mask);
		unsigned int tail;

		tail = BITS_TO_LONGS(NR_CPUS - nr_cpumask_bits) * sizeof(long);
		memset(ptr + cpumask_size() - tail, 0, tail);
	}

	return *mask != NULL;
}
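/*
 * Hedged usage sketch: the usual pattern around the allocator above.
 * example_mask_for_node() is hypothetical; the cpumask calls are real.
 */
static int example_mask_for_node(int node)
{
	cpumask_var_t mask;

	if (!alloc_cpumask_var_node(&mask, GFP_KERNEL, node))
		return -ENOMEM;

	/* Restrict to the online CPUs that belong to the given node. */
	cpumask_and(mask, cpu_online_mask, cpumask_of_node(node));
	/* ... use 'mask' ... */

	free_cpumask_var(mask);	/* pairs with alloc_cpumask_var_node() */
	return 0;
}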
static int __init bl_idle_driver_init(struct cpuidle_driver *drv, int part_id)
{
	struct cpumask *cpumask;
	int cpu;

	cpumask = kzalloc(cpumask_size(), GFP_KERNEL);
	if (!cpumask)
		return -ENOMEM;

	for_each_possible_cpu(cpu)
		if (smp_cpuid_part(cpu) == part_id)
			cpumask_set_cpu(cpu, cpumask);

	drv->cpumask = cpumask;

	return 0;
}
static inline int find_next_online_cpu(struct ehca_comp_pool *pool)
{
	int cpu;
	unsigned long flags;

	WARN_ON_ONCE(!in_interrupt());
	if (ehca_debug_level >= 3)
		ehca_dmp(cpu_online_mask, cpumask_size(), "");

	spin_lock_irqsave(&pool->last_cpu_lock, flags);
	cpu = cpumask_next(pool->last_cpu, cpu_online_mask);
	if (cpu >= nr_cpu_ids)
		cpu = cpumask_first(cpu_online_mask);
	pool->last_cpu = cpu;
	spin_unlock_irqrestore(&pool->last_cpu_lock, flags);

	return cpu;
}
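/*
 * Hedged usage sketch: the round-robin helper above is meant to be called
 * from interrupt context to pick a target CPU for deferred completion
 * work. example_queue_comp_task() is hypothetical.
 */
static void example_queue_comp_task(struct ehca_comp_pool *pool)
{
	int cpu = find_next_online_cpu(pool);

	/* ... hand the completion event to the worker bound to 'cpu' ... */
}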
static int __init bl_idle_driver_init(struct cpuidle_driver *drv, int cpu_id)
{
	struct cpuinfo_arm *cpu_info;
	struct cpumask *cpumask;
	unsigned long cpuid;
	int cpu;

	cpumask = kzalloc(cpumask_size(), GFP_KERNEL);
	if (!cpumask)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		cpu_info = &per_cpu(cpu_data, cpu);
		cpuid = is_smp() ? cpu_info->cpuid : read_cpuid_id();

		/* read cpu id part number */
		if ((cpuid & 0xFFF0) == cpu_id)
			cpumask_set_cpu(cpu, cpumask);
	}

	drv->cpumask = cpumask;

	return 0;
}
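/*
 * Hedged usage sketch: both init helpers above transfer ownership of the
 * kzalloc'ed mask to the driver, so a matching exit or error path should
 * free it. example_bl_idle_exit() is hypothetical.
 */
static void example_bl_idle_exit(struct cpuidle_driver *drv)
{
	kfree(drv->cpumask);
	drv->cpumask = NULL;
}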
/**
 * free_bootmem_cpumask_var - frees result of alloc_bootmem_cpumask_var
 * @mask: cpumask to free
 */
void __init free_bootmem_cpumask_var(cpumask_var_t mask)
{
	free_bootmem((unsigned long)mask, cpumask_size());
}
/**
 * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop (in <linux/cpumask.h>).
 * Either returns an allocated (zero-filled) cpumask, or causes the
 * system to panic.
 */
void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
	*mask = alloc_bootmem(cpumask_size());
}
/**
 * free_bootmem_cpumask_var - frees result of alloc_bootmem_cpumask_var
 * @mask: cpumask to free
 */
void __init free_bootmem_cpumask_var(cpumask_var_t mask)
{
	memblock_free_early(__pa(mask), cpumask_size());
}
/**
 * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop (in <linux/cpumask.h>).
 * Either returns an allocated (zero-filled) cpumask, or causes the
 * system to panic.
 */
void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
	*mask = memblock_virt_alloc(cpumask_size(), 0);
}
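/*
 * Hedged usage sketch: the boot-time variants pair up like the runtime
 * ones, but are only valid from __init code. example_early_setup() is
 * hypothetical.
 */
static void __init example_early_setup(void)
{
	cpumask_var_t tmp;

	alloc_bootmem_cpumask_var(&tmp);	/* panics on failure */
	cpumask_copy(tmp, cpu_possible_mask);
	/* ... early bring-up work using 'tmp' ... */
	free_bootmem_cpumask_var(tmp);
}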