bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return alloc_cpumask_var(mask, flags | __GFP_ZERO);
}
static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
static void __init init_irq_default_affinity(void)
{
	alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	cpumask_setall(irq_default_affinity);
}
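/*
 * The three helpers above show the basic cpumask_var_t allocation idiom:
 * with CONFIG_CPUMASK_OFFSTACK the mask lives on the heap and the return
 * value of alloc_cpumask_var()/zalloc_cpumask_var() must be checked, while
 * without it the "allocation" is a no-op on an embedded mask.  Below is a
 * minimal sketch of the allocate/use/free pattern; demo_count_other_cpus()
 * and its use of cpu_online_mask are illustrative assumptions, not part of
 * the code above.
 */
#include <linux/cpumask.h>
#include <linux/gfp.h>

static int demo_count_other_cpus(unsigned int cpu)
{
	cpumask_var_t tmp;
	int n;

	/* Check the result: with CONFIG_CPUMASK_OFFSTACK this can fail. */
	if (!zalloc_cpumask_var(&tmp, GFP_KERNEL))
		return -ENOMEM;

	/* All online CPUs except @cpu. */
	cpumask_andnot(tmp, cpu_online_mask, cpumask_of(cpu));
	n = cpumask_weight(tmp);

	/* Every successful allocation is paired with free_cpumask_var(). */
	free_cpumask_var(tmp);
	return n;
}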
int rtas_ibm_suspend_me(struct rtas_args *args)
{
	long state;
	long rc;
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	struct rtas_suspend_me_data data;
	DECLARE_COMPLETION_ONSTACK(done);
	cpumask_var_t offline_mask;
	int cpuret;

	if (!rtas_service_present("ibm,suspend-me"))
		return -ENOSYS;

	/* Make sure the state is valid */
	rc = plpar_hcall(H_VASI_STATE, retbuf,
			 ((u64)args->args[0] << 32) | args->args[1]);

	state = retbuf[0];

	if (rc) {
		printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned %ld\n", rc);
		return rc;
	} else if (state == H_VASI_ENABLED) {
		args->args[args->nargs] = RTAS_NOT_SUSPENDABLE;
		return 0;
	} else if (state != H_VASI_SUSPENDING) {
		printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned state %ld\n",
		       state);
		args->args[args->nargs] = -1;
		return 0;
	}

	if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY))
		return -ENOMEM;

	atomic_set(&data.working, 0);
	atomic_set(&data.done, 0);
	atomic_set(&data.error, 0);
	data.token = rtas_token("ibm,suspend-me");
	data.complete = &done;

	/* All present CPUs must be online */
	cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask);
	cpuret = rtas_online_cpus_mask(offline_mask);
	if (cpuret) {
		pr_err("%s: Could not bring present CPUs online.\n", __func__);
		atomic_set(&data.error, cpuret);
		goto out;
	}

	stop_topology_update();

	/* Call function on all CPUs.  One of us will make the
	 * rtas call
	 */
	if (on_each_cpu(rtas_percpu_suspend_me, &data, 0))
		atomic_set(&data.error, -EINVAL);

	wait_for_completion(&done);

	if (atomic_read(&data.error) != 0)
		printk(KERN_ERR "Error doing global join\n");

	start_topology_update();

	/* Take down CPUs not online prior to suspend */
	cpuret = rtas_offline_cpus_mask(offline_mask);
	if (cpuret)
		pr_warn("%s: Could not restore CPUs to offline state.\n",
			__func__);

out:
	free_cpumask_var(offline_mask);
	return atomic_read(&data.error);
}
/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: memory node number.
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().
 *
 * If thread is going to be bound on a particular cpu, give its node
 * in @node, to get NUMA affinity for kthread stack, or else give -1.
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data, int node,
					   const char namefmt[],
					   ...)
{
	struct kthread_create_info create;

	create.threadfn = threadfn;
	create.data = data;
	create.node = node;
	init_completion(&create.done);

	spin_lock(&kthread_create_lock);
	list_add_tail(&create.list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	wait_for_completion(&create.done);

	if (!IS_ERR(create.result)) {
		static const struct sched_param param = { .sched_priority = 0 };
		cpumask_var_t in_mask;
		va_list args;

		va_start(args, namefmt);
		vsnprintf(create.result->comm, sizeof(create.result->comm),
			  namefmt, args);
		va_end(args);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(create.result, SCHED_NORMAL, &param);
		if (alloc_cpumask_var(&in_mask, GFP_KERNEL)) {
			/* Allow the new thread to run on any CPU. */
			cpumask_setall(in_mask);
			set_cpus_allowed_ptr(create.result, in_mask);
			free_cpumask_var(in_mask);
		}
	}
	return create.result;
}
EXPORT_SYMBOL(kthread_create_on_node);

static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
{
	/* Must have done schedule() in kthread() before we set_task_cpu */
	if (!wait_task_inactive(p, state)) {
		WARN_ON(1);
		return;
	}
	/* It's safe because the task is inactive. */
	do_set_cpus_allowed(p, cpumask_of(cpu));
	p->flags |= PF_NO_SETAFFINITY;
}

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @k to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}
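/*
 * The kernel-doc above describes the intended lifecycle: create the thread
 * (it starts stopped), optionally bind it, wake it, and later stop it with
 * kthread_stop().  Below is a minimal caller sketch; demo_thread_fn() and
 * demo_start() are illustrative assumptions, not functions from the code
 * above.
 */
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/err.h>

/* Hypothetical worker: runs until kthread_stop() is called. */
static int demo_thread_fn(void *data)
{
	while (!kthread_should_stop())
		msleep(100);
	return 0;
}

static struct task_struct *demo_start(unsigned int cpu)
{
	struct task_struct *tsk;

	/* node == -1: no NUMA preference for the kthread stack. */
	tsk = kthread_create_on_node(demo_thread_fn, NULL, -1, "demo/%u", cpu);
	if (IS_ERR(tsk))
		return tsk;

	/* Bind before the first wake-up, while the thread is still stopped. */
	kthread_bind(tsk, cpu);
	wake_up_process(tsk);
	return tsk;
}
/*
 * The caller is expected to hand the returned task_struct to kthread_stop()
 * once the worker is no longer needed.
 */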
/*
 * mipsmt_sys_sched_setaffinity - set the cpu affinity of a process
 */
asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
					     unsigned long __user *user_mask_ptr)
{
	cpumask_var_t cpus_allowed, new_mask, effective_mask;
	struct thread_info *ti;
	struct task_struct *p;
	int retval;

	if (len < sizeof(new_mask))
		return -EINVAL;

	if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
		return -EFAULT;

	get_online_cpus();
	rcu_read_lock();

	p = find_process_by_pid(pid);
	if (!p) {
		rcu_read_unlock();
		put_online_cpus();
		return -ESRCH;
	}

	/* Prevent p going away */
	get_task_struct(p);
	rcu_read_unlock();

	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_put_task;
	}
	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_free_cpus_allowed;
	}
	if (!alloc_cpumask_var(&effective_mask, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_free_new_mask;
	}

	retval = -EPERM;
	if (!check_same_owner(p) && !capable(CAP_SYS_NICE))
		goto out_unlock;

	retval = security_task_setscheduler(p);
	if (retval)
		goto out_unlock;

	/* Record new user-specified CPU set for future reference */
	cpumask_copy(&p->thread.user_cpus_allowed, new_mask);

again:
	/* Compute new global allowed CPU set if necessary */
	ti = task_thread_info(p);
	if (test_ti_thread_flag(ti, TIF_FPUBOUND) &&
	    cpus_intersects(*new_mask, mt_fpu_cpumask)) {
		cpus_and(*effective_mask, *new_mask, mt_fpu_cpumask);
		retval = set_cpus_allowed_ptr(p, effective_mask);
	} else {
		cpumask_copy(effective_mask, new_mask);
		clear_ti_thread_flag(ti, TIF_FPUBOUND);
		retval = set_cpus_allowed_ptr(p, new_mask);
	}

	if (!retval) {
		cpuset_cpus_allowed(p, cpus_allowed);
		if (!cpumask_subset(effective_mask, cpus_allowed)) {
			/*
			 * We must have raced with a concurrent cpuset
			 * update. Just reset the cpus_allowed to the
			 * cpuset's cpus_allowed
			 */
			cpumask_copy(new_mask, cpus_allowed);
			goto again;
		}
	}
out_unlock:
	free_cpumask_var(effective_mask);
out_free_new_mask:
	free_cpumask_var(new_mask);
out_free_cpus_allowed:
	free_cpumask_var(cpus_allowed);
out_put_task:
	put_task_struct(p);
	put_online_cpus();
	return retval;
}
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int mycpu, err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};
	cpumask_var_t cpumask;

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	/* Move the downtaker off the unplug cpu */
	if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
		return -ENOMEM;
	cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
	set_cpus_allowed_ptr(current, cpumask);
	free_cpumask_var(cpumask);
	migrate_disable();
	mycpu = smp_processor_id();
	if (mycpu == cpu) {
		printk(KERN_ERR "Yuck! Still on unplug CPU!\n");
		migrate_enable();
		return -EBUSY;
	}

	cpu_hotplug_begin();
	err = cpu_unplug_begin(cpu);
	if (err) {
		printk("cpu_unplug_begin(%d) failed\n", cpu);
		goto out_cancel;
	}

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		printk("%s: attempt to take down CPU %u failed\n",
		       __func__, cpu);
		goto out_release;
	}

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!idle_cpu(cpu))
		cpu_relax();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_unplug_done(cpu);
out_cancel:
	migrate_enable();
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}
/* ktap mainthread initialization, main entry for ktap */
ktap_state *kp_newstate(struct ktap_parm *parm, struct dentry *dir, char **argv)
{
	ktap_state *ks;
	pid_t pid;
	int cpu;

	ks = kzalloc(sizeof(ktap_state) + sizeof(ktap_global_state), GFP_KERNEL);
	if (!ks)
		return NULL;

	ks->stack = kp_malloc(ks, KTAP_STACK_SIZE);
	G(ks) = (ktap_global_state *)(ks + 1);
	G(ks)->mainthread = ks;
	G(ks)->seed = 201236; /* todo: make more random in future */
	G(ks)->task = current;
	G(ks)->verbose = parm->verbose; /* for debug use */
	G(ks)->print_timestamp = parm->print_timestamp;
	G(ks)->workload = parm->workload;
	INIT_LIST_HEAD(&(G(ks)->timers));
	INIT_LIST_HEAD(&(G(ks)->probe_events_head));
	G(ks)->exit = 0;

	if (kp_transport_init(ks, dir))
		goto out;

	pid = (pid_t)parm->trace_pid;
	if (pid != -1) {
		struct task_struct *task;

		rcu_read_lock();
		task = pid_task(find_vpid(pid), PIDTYPE_PID);
		if (!task) {
			kp_error(ks, "cannot find pid %d\n", pid);
			rcu_read_unlock();
			goto out;
		}
		G(ks)->trace_task = task;
		get_task_struct(task);
		rcu_read_unlock();
	}

	if (!alloc_cpumask_var(&G(ks)->cpumask, GFP_KERNEL))
		goto out;

	cpumask_copy(G(ks)->cpumask, cpu_online_mask);

	cpu = parm->trace_cpu;
	if (cpu != -1) {
		if (!cpu_online(cpu)) {
			printk(KERN_INFO "ktap: cpu %d is not online\n", cpu);
			goto out;
		}

		cpumask_clear(G(ks)->cpumask);
		cpumask_set_cpu(cpu, G(ks)->cpumask);
	}

	if (cfunction_cache_init(ks))
		goto out;

	kp_tstring_resize(ks, 512); /* set initial string hashtable size */

	ktap_init_state(ks);
	ktap_init_registry(ks);
	ktap_init_arguments(ks, parm->argc, argv);

	/* init library */
	kp_init_baselib(ks);
	kp_init_kdebuglib(ks);
	kp_init_timerlib(ks);
	kp_init_ansilib(ks);

	if (alloc_kp_percpu_data())
		goto out;

	if (kp_probe_init(ks))
		goto out;

	return ks;

out:
	G(ks)->exit = 1;
	kp_final_exit(ks);
	return NULL;
}
static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	unsigned int orig_size, size;
	int ret, i;
	struct ctl_table tmp = {
		.data = &size,
		.maxlen = sizeof(size),
		.mode = table->mode
	};
	struct rps_sock_flow_table *orig_sock_table, *sock_table;
	static DEFINE_MUTEX(sock_flow_mutex);

	mutex_lock(&sock_flow_mutex);

	orig_sock_table = rcu_dereference_protected(rps_sock_flow_table,
					lockdep_is_held(&sock_flow_mutex));
	size = orig_size = orig_sock_table ? orig_sock_table->mask + 1 : 0;

	ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);

	if (write) {
		if (size) {
			if (size > 1<<29) {
				/* Enforce limit to prevent overflow */
				mutex_unlock(&sock_flow_mutex);
				return -EINVAL;
			}
			size = roundup_pow_of_two(size);
			if (size != orig_size) {
				sock_table =
				    vmalloc(RPS_SOCK_FLOW_TABLE_SIZE(size));
				if (!sock_table) {
					mutex_unlock(&sock_flow_mutex);
					return -ENOMEM;
				}
				rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1;
				sock_table->mask = size - 1;
			} else
				sock_table = orig_sock_table;

			for (i = 0; i < size; i++)
				sock_table->ents[i] = RPS_NO_CPU;
		} else
			sock_table = NULL;

		if (sock_table != orig_sock_table) {
			rcu_assign_pointer(rps_sock_flow_table, sock_table);
			if (sock_table) {
				static_key_slow_inc(&rps_needed);
				static_key_slow_inc(&rfs_needed);
			}
			if (orig_sock_table) {
				static_key_slow_dec(&rps_needed);
				static_key_slow_dec(&rfs_needed);
				synchronize_rcu();
				vfree(orig_sock_table);
			}
		}
	}

	mutex_unlock(&sock_flow_mutex);

	return ret;
}
#endif /* CONFIG_RPS */

#ifdef CONFIG_NET_FLOW_LIMIT
static DEFINE_MUTEX(flow_limit_update_mutex);

static int flow_limit_cpu_sysctl(struct ctl_table *table, int write,
				 void __user *buffer, size_t *lenp,
				 loff_t *ppos)
{
	struct sd_flow_limit *cur;
	struct softnet_data *sd;
	cpumask_var_t mask;
	int i, len, ret = 0;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	if (write) {
		ret = cpumask_parse_user(buffer, *lenp, mask);
		if (ret)
			goto done;

		mutex_lock(&flow_limit_update_mutex);
		len = sizeof(*cur) + netdev_flow_limit_table_len;
		for_each_possible_cpu(i) {
			sd = &per_cpu(softnet_data, i);
			cur = rcu_dereference_protected(sd->flow_limit,
				     lockdep_is_held(&flow_limit_update_mutex));
			if (cur && !cpumask_test_cpu(i, mask)) {
				RCU_INIT_POINTER(sd->flow_limit, NULL);
				synchronize_rcu();
				kfree(cur);
			} else if (!cur && cpumask_test_cpu(i, mask)) {
				cur = kzalloc_node(len, GFP_KERNEL,
						   cpu_to_node(i));
				if (!cur) {
					/* not unwinding previous changes */
					ret = -ENOMEM;
					goto write_unlock;
				}
				cur->num_buckets = netdev_flow_limit_table_len;
				rcu_assign_pointer(sd->flow_limit, cur);
			}
		}
write_unlock:
		mutex_unlock(&flow_limit_update_mutex);
	} else {
		char kbuf[128];

		if (*ppos || !*lenp) {
			*lenp = 0;
			goto done;
		}

		cpumask_clear(mask);
		rcu_read_lock();
		for_each_possible_cpu(i) {
			sd = &per_cpu(softnet_data, i);
			if (rcu_dereference(sd->flow_limit))
				cpumask_set_cpu(i, mask);
		}
		rcu_read_unlock();

		len = min(sizeof(kbuf) - 1, *lenp);
		len = scnprintf(kbuf, len, "%*pb", cpumask_pr_args(mask));
		if (!len) {
			*lenp = 0;
			goto done;
		}
		if (len < *lenp)
			kbuf[len++] = '\n';
		if (copy_to_user(buffer, kbuf, len)) {
			ret = -EFAULT;
			goto done;
		}
		*lenp = len;
		*ppos += len;
	}

done:
	free_cpumask_var(mask);
	return ret;
}
int acpi_processor_preregister_performance(
		struct acpi_processor_performance *performance)
{
	int count, count_target;
	int retval = 0;
	unsigned int i, j;
	cpumask_var_t covered_cpus;
	struct acpi_processor *pr;
	struct acpi_psd_package *pdomain;
	struct acpi_processor *match_pr;
	struct acpi_psd_package *match_pdomain;

	if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))
		return -ENOMEM;

	mutex_lock(&performance_mutex);

	/*
	 * Check if another driver has already registered, and abort before
	 * changing pr->performance if it has. Check input data as well.
	 */
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr) {
			/* Look only at processors in ACPI namespace */
			continue;
		}

		if (pr->performance) {
			retval = -EBUSY;
			goto err_out;
		}

		if (!performance || !per_cpu_ptr(performance, i)) {
			retval = -EINVAL;
			goto err_out;
		}
	}

	/* Call _PSD for all CPUs */
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		pr->performance = per_cpu_ptr(performance, i);
		cpumask_set_cpu(i, pr->performance->shared_cpu_map);
		if (acpi_processor_get_psd(pr)) {
			retval = -EINVAL;
			continue;
		}
	}
	if (retval)
		goto err_ret;

	/*
	 * Now that we have _PSD data from all CPUs, lets setup P-state
	 * domain info.
	 */
	cpumask_clear(covered_cpus);
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		if (cpumask_test_cpu(i, covered_cpus))
			continue;

		pdomain = &(pr->performance->domain_info);
		cpumask_set_cpu(i, pr->performance->shared_cpu_map);
		cpumask_set_cpu(i, covered_cpus);
		if (pdomain->num_processors <= 1)
			continue;

		/* Validate the Domain info */
		count_target = pdomain->num_processors;
		count = 1;
		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_HW;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ANY;

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = per_cpu(processors, j);
			if (!match_pr)
				continue;

			match_pdomain = &(match_pr->performance->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/* Here i and j are in the same domain */

			if (match_pdomain->num_processors != count_target) {
				retval = -EINVAL;
				goto err_ret;
			}

			if (pdomain->coord_type != match_pdomain->coord_type) {
				retval = -EINVAL;
				goto err_ret;
			}

			cpumask_set_cpu(j, covered_cpus);
			cpumask_set_cpu(j, pr->performance->shared_cpu_map);
			count++;
		}

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = per_cpu(processors, j);
			if (!match_pr)
				continue;

			match_pdomain = &(match_pr->performance->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			match_pr->performance->shared_type =
					pr->performance->shared_type;
			cpumask_copy(match_pr->performance->shared_cpu_map,
				     pr->performance->shared_cpu_map);
		}
	}

err_ret:
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr || !pr->performance)
			continue;

		/* Assume no coordination on any error parsing domain info */
		if (retval) {
			cpumask_clear(pr->performance->shared_cpu_map);
			cpumask_set_cpu(i, pr->performance->shared_cpu_map);
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		}
		pr->performance = NULL; /* Will be set for real in register */
	}

err_out:
	mutex_unlock(&performance_mutex);
	free_cpumask_var(covered_cpus);
	return retval;
}