void context_tracking_cpu_set(int cpu)
{
	if (!per_cpu(context_tracking.active, cpu)) {
		per_cpu(context_tracking.active, cpu) = true;
		static_key_slow_inc(&context_tracking_enabled);
	}
}
static int tcp_update_limit(struct mem_cgroup *memcg, u64 val)
{
	struct net *net = current->nsproxy->net_ns;
	struct tcp_memcontrol *tcp;
	struct cg_proto *cg_proto;
	u64 old_lim;
	int i;
	int ret;

	cg_proto = tcp_prot.proto_cgroup(memcg);
	if (!cg_proto)
		return -EINVAL;

	if (val > RESOURCE_MAX)
		val = RESOURCE_MAX;

	tcp = tcp_from_cgproto(cg_proto);

	old_lim = res_counter_read_u64(&tcp->tcp_memory_allocated, RES_LIMIT);
	ret = res_counter_set_limit(&tcp->tcp_memory_allocated, val);
	if (ret)
		return ret;

	for (i = 0; i < 3; i++)
		tcp->tcp_prot_mem[i] = min_t(long, val >> PAGE_SHIFT,
					     net->ipv4.sysctl_tcp_mem[i]);

	if (val == RESOURCE_MAX && old_lim != RESOURCE_MAX)
		static_key_slow_dec(&memcg_socket_limit_enabled);
	else if (old_lim == RESOURCE_MAX && val != RESOURCE_MAX)
		static_key_slow_inc(&memcg_socket_limit_enabled);

	return 0;
}
static void __set_sched_clock_stable(void)
{
	if (!sched_clock_stable())
		static_key_slow_inc(&__sched_clock_stable);

	tick_dep_clear(TICK_DEP_BIT_CLOCK_UNSTABLE);
}
static void mmu_audit_enable(void)
{
	if (mmu_audit)
		return;

	static_key_slow_inc(&mmu_audit_key);
	mmu_audit = true;
}
static ssize_t store_rps_map(struct netdev_rx_queue *queue,
			     const char *buf, size_t len)
{
	struct rps_map *old_map, *map;
	cpumask_var_t mask;
	int err, cpu, i;
	static DEFINE_MUTEX(rps_map_mutex);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	map = kzalloc(max_t(unsigned int,
			    RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
		      GFP_KERNEL);
	if (!map) {
		free_cpumask_var(mask);
		return -ENOMEM;
	}

	i = 0;
	for_each_cpu_and(cpu, mask, cpu_online_mask)
		map->cpus[i++] = cpu;

	if (i) {
		map->len = i;
	} else {
		kfree(map);
		map = NULL;
	}

	mutex_lock(&rps_map_mutex);
	old_map = rcu_dereference_protected(queue->rps_map,
					    mutex_is_locked(&rps_map_mutex));
	rcu_assign_pointer(queue->rps_map, map);

	if (map)
		static_key_slow_inc(&rps_needed);
	if (old_map)
		static_key_slow_dec(&rps_needed);

	mutex_unlock(&rps_map_mutex);

	if (old_map)
		kfree_rcu(old_map, rcu);

	free_cpumask_var(mask);
	return len;
}
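/*
 * Hedged sketch (not lifted from the snippets above) of the receive-path
 * consumer that store_rps_map() arms: rps_needed is tested with
 * static_key_false(), so RPS steering costs a single patched-out branch
 * until a CPU map or sock-flow table is actually installed.  The helper
 * name get_rps_cpu() mirrors net/core/dev.c but is used here purely for
 * illustration.
 */
static int rps_steer_sketch(struct sk_buff *skb)
{
	int cpu = -1;

#ifdef CONFIG_RPS
	if (static_key_false(&rps_needed)) {
		struct rps_dev_flow *rflow = NULL;

		/* Only reached once store_rps_map() or rps_sock_flow_sysctl()
		 * has bumped rps_needed. */
		cpu = get_rps_cpu(skb->dev, skb, &rflow);
	}
#endif
	return cpu;
}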
void static_key_enable(struct static_key *key)
{
	int count = static_key_count(key);

	WARN_ON_ONCE(count < 0 || count > 1);

	if (!count)
		static_key_slow_inc(key);
}
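/*
 * Minimal consumer-side sketch, assuming only the generic static_key API
 * used above: this is the fast path that static_key_enable() /
 * static_key_slow_inc() pair with.  my_feature_key and do_feature_work()
 * are illustrative placeholders, not symbols from the snippets above.
 */
#include <linux/jump_label.h>

static struct static_key my_feature_key = STATIC_KEY_INIT_FALSE;

static void do_feature_work(void)
{
	/* Placeholder for the rarely-enabled slow-path work. */
}

static void my_fast_path(void)
{
	/* Patched to a NOP branch until my_feature_key is incremented. */
	if (static_key_false(&my_feature_key))
		do_feature_work();
}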
/*
 * The jump_label init code needs to happen _after_ the jump labels are
 * enabled and before SMP is started; hence we use pre-SMP initcall level
 * init. We cannot do it in xen_init_spinlocks() as that is done before
 * jump labels are activated.
 */
static __init int xen_init_spinlocks_jump(void)
{
	if (!xen_pvspin)
		return 0;

	if (!xen_domain())
		return 0;

	static_key_slow_inc(&paravirt_ticketlocks_enabled);
	return 0;
}
static int tcp_update_limit(struct mem_cgroup *memcg, u64 val)
{
	struct cg_proto *cg_proto;
	int i;
	int ret;

	cg_proto = tcp_prot.proto_cgroup(memcg);
	if (!cg_proto)
		return -EINVAL;

	if (val > RES_COUNTER_MAX)
		val = RES_COUNTER_MAX;

	ret = res_counter_set_limit(&cg_proto->memory_allocated, val);
	if (ret)
		return ret;

	for (i = 0; i < 3; i++)
		cg_proto->sysctl_mem[i] = min_t(long, val >> PAGE_SHIFT,
						sysctl_tcp_mem[i]);

	if (val == RES_COUNTER_MAX)
		clear_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
	else if (val != RES_COUNTER_MAX) {
		/*
		 * The active bit needs to be written after the static_key
		 * update. This is what guarantees that the socket activation
		 * function is the last one to run. See sock_update_memcg() for
		 * details, and note that we don't mark any socket as belonging
		 * to this memcg until that flag is up.
		 *
		 * We need to do this, because static_keys will span multiple
		 * sites, but we can't control their order. If we mark a socket
		 * as accounted, but the accounting functions are not patched in
		 * yet, we'll lose accounting.
		 *
		 * We never race with the readers in sock_update_memcg(),
		 * because when this value changes, the code to process it is
		 * not patched in yet.
		 *
		 * The activated bit is used to guarantee that no two writers
		 * will do the update in the same memcg. Without that, we can't
		 * properly shutdown the static key.
		 */
		if (!test_and_set_bit(MEMCG_SOCK_ACTIVATED, &cg_proto->flags))
			static_key_slow_inc(&memcg_socket_limit_enabled);
		set_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
	}

	return 0;
}
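/*
 * Hedged sketch of the reader side that the ordering comment above relies
 * on, modeled on (but not copied from) memcg_proto_active() and
 * sock_update_memcg(): the ACTIVE bit is only consulted from code that is
 * itself patched in by memcg_socket_limit_enabled, so a socket can never
 * be marked as accounted before the accounting sites are live.
 */
static inline bool memcg_proto_active_sketch(struct cg_proto *cg_proto)
{
	return test_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
}

static void sock_update_memcg_sketch(struct sock *sk,
				     struct cg_proto *cg_proto)
{
	/* Callers sit behind the memcg_socket_limit_enabled static key,
	 * so reaching this point already implies the key is on. */
	if (memcg_proto_active_sketch(cg_proto))
		sk->sk_cgrp = cg_proto;
}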
int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg)
{
	struct nf_hook_entry __rcu **pp;
	struct nf_hook_entry *entry, *p;

	if (reg->pf == NFPROTO_NETDEV) {
#ifndef CONFIG_NETFILTER_INGRESS
		if (reg->hooknum == NF_NETDEV_INGRESS)
			return -EOPNOTSUPP;
#endif
		if (reg->hooknum != NF_NETDEV_INGRESS ||
		    !reg->dev || dev_net(reg->dev) != net)
			return -EINVAL;
	}

	pp = nf_hook_entry_head(net, reg);
	if (!pp)
		return -EINVAL;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	nf_hook_entry_init(entry, reg);

	mutex_lock(&nf_hook_mutex);

	/* Find the spot in the list */
	for (; (p = nf_entry_dereference(*pp)) != NULL; pp = &p->next) {
		if (reg->priority < nf_hook_entry_priority(p))
			break;
	}
	rcu_assign_pointer(entry->next, p);
	rcu_assign_pointer(*pp, entry);

	mutex_unlock(&nf_hook_mutex);
#ifdef CONFIG_NETFILTER_INGRESS
	if (reg->pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS)
		net_inc_ingress_queue();
#endif
#ifdef HAVE_JUMP_LABEL
	static_key_slow_inc(&nf_hooks_needed[reg->pf][reg->hooknum]);
#endif
	return 0;
}
int main(int argc, char *argv[])
{
	int ret = 0;

	jump_label_init();

	if (static_key_false(&key))
		++ret;
	else
		ret += 0;

	static_key_slow_inc(&key);

	if (static_key_false(&key))
		ret += 0;
	else
		++ret;

	if (ret)
		printf("%s\t\tFAIL\n", __FILE__);
	else
		printf("%s\t\tOK\n", __FILE__);

	return 0;
}
static int rps_sock_flow_sysctl(ctl_table *table, int write,
				void __user *buffer, size_t *lenp,
				loff_t *ppos)
{
	unsigned int orig_size, size;
	int ret, i;
	ctl_table tmp = {
		.data = &size,
		.maxlen = sizeof(size),
		.mode = table->mode
	};
	struct rps_sock_flow_table *orig_sock_table, *sock_table;
	static DEFINE_MUTEX(sock_flow_mutex);

	mutex_lock(&sock_flow_mutex);

	orig_sock_table = rcu_dereference_protected(rps_sock_flow_table,
					lockdep_is_held(&sock_flow_mutex));
	size = orig_size = orig_sock_table ? orig_sock_table->mask + 1 : 0;

	ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);

	if (write) {
		if (size) {
			if (size > 1<<30) {
				/* Enforce limit to prevent overflow */
				mutex_unlock(&sock_flow_mutex);
				return -EINVAL;
			}
			size = roundup_pow_of_two(size);
			if (size != orig_size) {
				sock_table =
				    vmalloc(RPS_SOCK_FLOW_TABLE_SIZE(size));
				if (!sock_table) {
					mutex_unlock(&sock_flow_mutex);
					return -ENOMEM;
				}

				sock_table->mask = size - 1;
			} else
				sock_table = orig_sock_table;

			for (i = 0; i < size; i++)
				sock_table->ents[i] = RPS_NO_CPU;
		} else
			sock_table = NULL;

		if (sock_table != orig_sock_table) {
			rcu_assign_pointer(rps_sock_flow_table, sock_table);
			if (sock_table)
				static_key_slow_inc(&rps_needed);
			if (orig_sock_table) {
				static_key_slow_dec(&rps_needed);
				synchronize_rcu();
				vfree(orig_sock_table);
			}
		}
	}

	mutex_unlock(&sock_flow_mutex);

	return ret;
}
#endif /* CONFIG_RPS */

static struct ctl_table net_core_table[] = {
#ifdef CONFIG_NET
	{
		.procname	= "wmem_max",
		.data		= &sysctl_wmem_max,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.procname	= "rmem_max",
static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp,
				loff_t *ppos)
{
	unsigned int orig_size, size;
	int ret, i;
	struct ctl_table tmp = {
		.data = &size,
		.maxlen = sizeof(size),
		.mode = table->mode
	};
	struct rps_sock_flow_table *orig_sock_table, *sock_table;
	static DEFINE_MUTEX(sock_flow_mutex);

	mutex_lock(&sock_flow_mutex);

	orig_sock_table = rcu_dereference_protected(rps_sock_flow_table,
					lockdep_is_held(&sock_flow_mutex));
	size = orig_size = orig_sock_table ? orig_sock_table->mask + 1 : 0;

	ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);

	if (write) {
		if (size) {
			if (size > 1<<29) {
				/* Enforce limit to prevent overflow */
				mutex_unlock(&sock_flow_mutex);
				return -EINVAL;
			}
			size = roundup_pow_of_two(size);
			if (size != orig_size) {
				sock_table =
				    vmalloc(RPS_SOCK_FLOW_TABLE_SIZE(size));
				if (!sock_table) {
					mutex_unlock(&sock_flow_mutex);
					return -ENOMEM;
				}
				rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1;
				sock_table->mask = size - 1;
			} else
				sock_table = orig_sock_table;

			for (i = 0; i < size; i++)
				sock_table->ents[i] = RPS_NO_CPU;
		} else
			sock_table = NULL;

		if (sock_table != orig_sock_table) {
			rcu_assign_pointer(rps_sock_flow_table, sock_table);
			if (sock_table)
				static_key_slow_inc(&rps_needed);
			if (orig_sock_table) {
				static_key_slow_dec(&rps_needed);
				synchronize_rcu();
				vfree(orig_sock_table);
			}
		}
	}

	mutex_unlock(&sock_flow_mutex);

	return ret;
}
#endif /* CONFIG_RPS */

#ifdef CONFIG_NET_FLOW_LIMIT
static DEFINE_MUTEX(flow_limit_update_mutex);

static int flow_limit_cpu_sysctl(struct ctl_table *table, int write,
				 void __user *buffer, size_t *lenp,
				 loff_t *ppos)
{
	struct sd_flow_limit *cur;
	struct softnet_data *sd;
	cpumask_var_t mask;
	int i, len, ret = 0;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	if (write) {
		ret = cpumask_parse_user(buffer, *lenp, mask);
		if (ret)
			goto done;

		mutex_lock(&flow_limit_update_mutex);
		len = sizeof(*cur) + netdev_flow_limit_table_len;
		for_each_possible_cpu(i) {
			sd = &per_cpu(softnet_data, i);
			cur = rcu_dereference_protected(sd->flow_limit,
				     lockdep_is_held(&flow_limit_update_mutex));
			if (cur && !cpumask_test_cpu(i, mask)) {
				RCU_INIT_POINTER(sd->flow_limit, NULL);
				synchronize_rcu();
				kfree(cur);
			} else if (!cur && cpumask_test_cpu(i, mask)) {
				cur = kzalloc_node(len, GFP_KERNEL,
						   cpu_to_node(i));
				if (!cur) {
					/* not unwinding previous changes */
					ret = -ENOMEM;
					goto write_unlock;
				}
				cur->num_buckets = netdev_flow_limit_table_len;
				rcu_assign_pointer(sd->flow_limit, cur);
			}
		}
write_unlock:
		mutex_unlock(&flow_limit_update_mutex);
	} else {
		char kbuf[128];

		if (*ppos || !*lenp) {
			*lenp = 0;
			goto done;
		}

		cpumask_clear(mask);
		rcu_read_lock();
		for_each_possible_cpu(i) {
			sd = &per_cpu(softnet_data, i);
			if (rcu_dereference(sd->flow_limit))
				cpumask_set_cpu(i, mask);
		}
		rcu_read_unlock();

		len = min(sizeof(kbuf) - 1, *lenp);
		len = scnprintf(kbuf, len, "%*pb", cpumask_pr_args(mask));
		if (!len) {
			*lenp = 0;
			goto done;
		}
		if (len < *lenp)
			kbuf[len++] = '\n';
		if (copy_to_user(buffer, kbuf, len)) {
			ret = -EFAULT;
			goto done;
		}
		*lenp = len;
		*ppos += len;
	}

done:
	free_cpumask_var(mask);
	return ret;
}
void opal_tracepoint_regfunc(void)
{
	static_key_slow_inc(&opal_tracepoint_key);
}
void ip_tunnel_need_metadata(void)
{
	static_key_slow_inc(&ip_tunnel_metadata_cnt);
}
static void __set_sched_clock_stable(void)
{
	if (!sched_clock_stable())
		static_key_slow_inc(&__sched_clock_stable);
}
static void sched_feat_enable(int i)
{
	if (!static_key_enabled(&sched_feat_keys[i]))
		static_key_slow_inc(&sched_feat_keys[i]);
}