static void static_key_slow_dec_cpuslocked(struct static_key *key,
                                           unsigned long rate_limit,
                                           struct delayed_work *work)
{
        /*
         * The negative count check is valid even when a negative
         * key->enabled is in use by static_key_slow_inc(); a
         * __static_key_slow_dec() before the first static_key_slow_inc()
         * returns is unbalanced, because all other static_key_slow_inc()
         * instances block while the update is in progress.
         */
        if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
                WARN(atomic_read(&key->enabled) < 0,
                     "jump label: negative count!\n");
                return;
        }

        if (rate_limit) {
                atomic_inc(&key->enabled);
                schedule_delayed_work(work, rate_limit);
        } else {
                jump_label_update(key);
        }
        jump_label_unlock();
}
/*
 * Drop a reference on nr_callchain_events; the last user releases the
 * callchain buffers while holding callchain_mutex.
 */
void put_callchain_buffers(void)
{
        if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
                release_callchain_buffers();
                mutex_unlock(&callchain_mutex);
        }
}
/* Disable the jump label once its enable count drops to zero. */
void jump_label_dec(struct jump_label_key *key)
{
        if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex))
                return;

        jump_label_update(key, JUMP_LABEL_DISABLE);
        jump_label_unlock();
}
static void __static_key_slow_dec(struct static_key *key,
                                  unsigned long rate_limit,
                                  struct delayed_work *work)
{
        if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
                WARN(atomic_read(&key->enabled) < 0,
                     "jump label: negative count!\n");
                return;
        }

        if (rate_limit) {
                /* Defer the update: restore the count and retry via
                 * delayed work after the rate limit expires. */
                atomic_inc(&key->enabled);
                schedule_delayed_work(work, rate_limit);
        } else {
                if (!jump_label_get_branch_default(key))
                        jump_label_update(key, JUMP_LABEL_DISABLE);
                else
                        jump_label_update(key, JUMP_LABEL_ENABLE);
        }
        jump_label_unlock();
}
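All four call sites lean on the same contract: atomic_dec_and_mutex_lock() decrements the counter and returns true with the mutex held only when the count reaches zero, so the zero-count path (teardown, patching) runs exactly once and is serialized against concurrent increments. Below is a minimal userspace sketch of that contract using C11 atomics and pthreads; it is an illustration, not the kernel implementation, and the names dec_and_mutex_lock, refcount, and teardown_lock are hypothetical.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int refcount = 1;
static pthread_mutex_t teardown_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns 1 with *lock held if the decrement brought *cnt to zero,
 * otherwise returns 0 with the lock released. */
static int dec_and_mutex_lock(atomic_int *cnt, pthread_mutex_t *lock)
{
        /* Fast path: decrement lock-free while we cannot possibly hit 0. */
        int old = atomic_load(cnt);
        while (old > 1) {
                if (atomic_compare_exchange_weak(cnt, &old, old - 1))
                        return 0;
                /* CAS failure reloads old; recheck whether we might hit 0. */
        }

        /* Slow path: we may be the last reference, so take the lock
         * before the final decrement to serialize against other users. */
        pthread_mutex_lock(lock);
        if (atomic_fetch_sub(cnt, 1) == 1)
                return 1;       /* count hit zero; caller holds the lock */
        pthread_mutex_unlock(lock);
        return 0;
}

int main(void)
{
        if (dec_and_mutex_lock(&refcount, &teardown_lock)) {
                puts("last reference dropped; tearing down");
                pthread_mutex_unlock(&teardown_lock);
        }
        return 0;
}

The fast path mirrors why the kernel helper is cheap in the common case: as long as the count stays above one, no mutex is touched, and only the final decrement pays for lock acquisition.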