Example #1
static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
			 void *data)
{
	struct module *mod = data;
	int ret = 0;

	switch (val) {
	case MODULE_STATE_COMING:
		jump_label_lock();
		ret = jump_label_add_module(mod);
		if (ret)
			jump_label_del_module(mod);
		jump_label_unlock();
		break;
	case MODULE_STATE_GOING:
		jump_label_lock();
		jump_label_del_module(mod);
		jump_label_unlock();
		break;
	case MODULE_STATE_LIVE:
		jump_label_lock();
		jump_label_invalidate_module_init(mod);
		jump_label_unlock();
		break;
	}

	return notifier_from_errno(ret);
}
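For context, a notifier like the one in Example #1 only does anything once it is hooked into the module notifier chain. The usual registration pattern, written from memory as a sketch rather than copied from this tree (register_module_notifier() and struct notifier_block are the standard kernel interfaces), looks roughly like this:

/* Sketch: hooking the notifier above into the module notifier chain. */
static struct notifier_block jump_label_module_nb = {
	.notifier_call = jump_label_module_notify,
	.priority = 1,	/* run ahead of other module notifier users */
};

static __init int jump_label_init_module(void)
{
	return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);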
Example #2
static __init int jump_label_init(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct jump_label_key *key = NULL;
	struct jump_entry *iter;

	jump_label_lock();
	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		arch_jump_label_text_poke_early(iter->code);
		if (iter->key == (jump_label_t)(unsigned long)key)
			continue;

		key = (struct jump_label_key *)(unsigned long)iter->key;
		atomic_set(&key->enabled, 0);
		key->entries = iter;
#ifdef CONFIG_MODULES
		key->next = NULL;
#endif
	}
	jump_label_unlock();

	return 0;
}
Example #3
void __init jump_label_init(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct static_key *key = NULL;
	struct jump_entry *iter;

	jump_label_lock();
	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		/* rewrite NOPs */
		if (jump_label_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		/*
		 * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
		 */
		*((unsigned long *)&key->entries) += (unsigned long)iter;
#ifdef CONFIG_MODULES
		key->next = NULL;
#endif
	}
	static_key_initialized = true;
	jump_label_unlock();
}
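The odd-looking addition into key->entries above works because, in this kernel generation, the low bit of key->entries doubles as the branch-default flag (the JUMP_LABEL_TRUE_BRANCH mentioned in the comment), and iter is aligned, so adding it fills in the pointer without disturbing that bit. The helpers below are a hypothetical illustration of that packing; the flag value and helper names are assumptions, not code from this file:

/* Illustration only: assumed packing of the branch-default flag. */
#define JUMP_LABEL_TRUE_BRANCH	1UL	/* assumed to live in the low bit */

static inline struct jump_entry *key_entries(struct static_key *key)
{
	/* Mask the flag bit off to recover the real table pointer. */
	return (struct jump_entry *)((unsigned long)key->entries &
				     ~JUMP_LABEL_TRUE_BRANCH);
}

static inline bool key_defaults_true(struct static_key *key)
{
	return (unsigned long)key->entries & JUMP_LABEL_TRUE_BRANCH;
}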
Example #4
void static_key_slow_inc(struct static_key *key)
{
	int v, v1;

	STATIC_KEY_CHECK_USE();

	/*
	 * Careful if we get concurrent static_key_slow_inc() calls;
	 * later calls must wait for the first one to _finish_ the
	 * jump_label_update() process.  At the same time, however,
	 * the jump_label_update() call below wants to see
	 * static_key_enabled(&key) for jumps to be updated properly.
	 *
	 * So give a special meaning to negative key->enabled: it sends
	 * static_key_slow_inc() down the slow path, and it is non-zero
	 * so it counts as "enabled" in jump_label_update().  Note that
	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
	 */
	for (v = atomic_read(&key->enabled); v > 0; v = v1) {
		v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
		if (likely(v1 == v))
			return;
	}

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		atomic_set(&key->enabled, 1);
	} else {
		atomic_inc(&key->enabled);
	}
	jump_label_unlock();
}
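For readers unfamiliar with the call side, static_key_slow_inc() and static_key_slow_dec() are the slow-path toggles behind a patched fast-path branch. A rough consumer sketch using the interface of this era follows; the key name and do_expensive_tracing() are made up for illustration, and static_key_false() plus STATIC_KEY_INIT_FALSE are the usual companion names, though the exact spelling varies across kernel versions:

/* Sketch: a typical consumer of the static key slow-path API. */
static struct static_key tracing_key = STATIC_KEY_INIT_FALSE;

void handle_event(void)
{
	/* Fast path: compiled as a NOP until the key is enabled. */
	if (static_key_false(&tracing_key))
		do_expensive_tracing();		/* hypothetical helper */
}

void tracing_on(void)
{
	static_key_slow_inc(&tracing_key);	/* patch the branch in */
}

void tracing_off(void)
{
	static_key_slow_dec(&tracing_key);	/* patch the branch back out */
}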
Example #5
void __init jump_label_init(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct static_key *key = NULL;
	struct jump_entry *iter;

	jump_label_lock();
	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		iterk = (struct static_key *)(unsigned long)iter->key;
		arch_jump_label_transform_static(iter, jump_label_type(iterk));
		if (iterk == key)
			continue;

		key = iterk;
		*((unsigned long *)&key->entries) += (unsigned long)iter;
#ifdef CONFIG_MODULES
		key->next = NULL;
#endif
	}
	jump_label_unlock();
}
Example #6
static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
			 void *data)
{
	struct module *mod = data;
	int ret = 0;

	cpus_read_lock();
	jump_label_lock();

	switch (val) {
	case MODULE_STATE_COMING:
		ret = jump_label_add_module(mod);
		if (ret) {
			WARN(1, "Failed to allocatote memory: jump_label may not work properly.\n");
			jump_label_del_module(mod);
		}
		break;
	case MODULE_STATE_GOING:
		jump_label_del_module(mod);
		break;
	case MODULE_STATE_LIVE:
		jump_label_invalidate_module_init(mod);
		break;
	}

	jump_label_unlock();
	cpus_read_unlock();

	return notifier_from_errno(ret);
}
Example #7
static void static_key_slow_dec_cpuslocked(struct static_key *key,
					   unsigned long rate_limit,
					   struct delayed_work *work)
{
	/*
	 * The negative count check is valid even when a negative
	 * key->enabled is in use by static_key_slow_inc(); a
	 * __static_key_slow_dec() before the first static_key_slow_inc()
	 * returns is unbalanced, because all other static_key_slow_inc()
	 * instances block while the update is in progress.
	 */
	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
		WARN(atomic_read(&key->enabled) < 0,
		     "jump label: negative count!\n");
		return;
	}

	if (rate_limit) {
		atomic_inc(&key->enabled);
		schedule_delayed_work(work, rate_limit);
	} else {
		jump_label_update(key);
	}
	jump_label_unlock();
}
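The rate_limit and work arguments above serve the deferred flavour of the API, where the actual text patching is pushed into a delayed work item so that rapid enable/disable churn does not patch code on every call. A hedged sketch of how a caller normally drives it (my_deferred_key and the init/put hooks are made up; struct static_key_deferred, jump_label_rate_limit() and static_key_slow_dec_deferred() are the usual names, but treat the exact signatures as assumptions):

/* Sketch: deferred decrement, coalescing expensive code patching. */
static struct static_key_deferred my_deferred_key;

static int __init my_key_setup(void)	/* hypothetical init hook */
{
	/* Batch disables: at most one update per HZ worth of jiffies. */
	jump_label_rate_limit(&my_deferred_key, HZ);
	return 0;
}

static void my_key_put(void)		/* hypothetical call site */
{
	/* The decrement is queued; patching happens after the timeout. */
	static_key_slow_dec_deferred(&my_deferred_key);
}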
Example #8
void jump_label_dec(struct jump_label_key *key)
{
	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex))
		return;

	jump_label_update(key, JUMP_LABEL_DISABLE);
	jump_label_unlock();
}
Example #9
void jump_label_inc(struct jump_label_key *key)
{
	if (atomic_inc_not_zero(&key->enabled))
		return;

	jump_label_lock();
	if (atomic_add_return(1, &key->enabled) == 1)
		jump_label_update(key, JUMP_LABEL_ENABLE);
	jump_label_unlock();
}
Example #10
void static_key_disable_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);

	if (atomic_read(&key->enabled) != 1) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
		return;
	}

	jump_label_lock();
	if (atomic_cmpxchg(&key->enabled, 1, 0))
		jump_label_update(key);
	jump_label_unlock();
}
Example #11
void static_key_slow_inc(struct static_key *key)
{
	if (atomic_inc_not_zero(&key->enabled))
		return;

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		if (!jump_label_get_branch_default(key))
			jump_label_update(key, JUMP_LABEL_ENABLE);
		else
			jump_label_update(key, JUMP_LABEL_DISABLE);
	}
	atomic_inc(&key->enabled);
	jump_label_unlock();
}
Example #12
File: jump_label.c  Project: AK101111/linux
void __init jump_label_init(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct static_key *key = NULL;
	struct jump_entry *iter;

	/*
	 * Since we are initializing the static_key.enabled field with
	 * the 'raw' int values (to avoid pulling in atomic.h) in
	 * jump_label.h, let's make sure that is safe. There are only two
	 * cases to check since we initialize to 0 or 1.
	 */
	BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
	BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);

	if (static_key_initialized)
		return;

	jump_label_lock();
	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		/* rewrite NOPs */
		if (jump_label_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		/*
		 * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
		 */
		*((unsigned long *)&key->entries) += (unsigned long)iter;
#ifdef CONFIG_MODULES
		key->next = NULL;
#endif
	}
	static_key_initialized = true;
	jump_label_unlock();
}
Example #13
void static_key_enable_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);

	if (atomic_read(&key->enabled) > 0) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
		return;
	}

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		/*
		 * See static_key_slow_inc().
		 */
		atomic_set_release(&key->enabled, 1);
	}
	jump_label_unlock();
}
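The _cpuslocked suffix here and in Example #10 means the caller is expected to already hold the CPU hotplug read lock; the unsuffixed entry points are normally thin wrappers that take that lock around the call. A sketch of that wrapper pattern, written from memory and matching the usual kernel convention:

/* Sketch: the non-_cpuslocked wrappers just add the hotplug lock. */
void static_key_enable(struct static_key *key)
{
	cpus_read_lock();
	static_key_enable_cpuslocked(key);
	cpus_read_unlock();
}

void static_key_disable(struct static_key *key)
{
	cpus_read_lock();
	static_key_disable_cpuslocked(key);
	cpus_read_unlock();
}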
Example #14
static void __static_key_slow_dec(struct static_key *key,
		unsigned long rate_limit, struct delayed_work *work)
{
	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
		WARN(atomic_read(&key->enabled) < 0,
		     "jump label: negative count!\n");
		return;
	}

	if (rate_limit) {
		atomic_inc(&key->enabled);
		schedule_delayed_work(work, rate_limit);
	} else {
		if (!jump_label_get_branch_default(key))
			jump_label_update(key, JUMP_LABEL_DISABLE);
		else
			jump_label_update(key, JUMP_LABEL_ENABLE);
	}
	jump_label_unlock();
}
Example #15
void __init jump_label_init(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct static_key *key = NULL;
	struct jump_entry *iter;

	/*
	 * Since we are initializing the static_key.enabled field with
	 * the 'raw' int values (to avoid pulling in atomic.h) in
	 * jump_label.h, let's make sure that is safe. There are only two
	 * jump_label.h, let's make sure that is safe. There are only two
	 * cases to check since we initialize to 0 or 1.
	 */
	BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
	BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);

	if (static_key_initialized)
		return;

	cpus_read_lock();
	jump_label_lock();
	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		/* rewrite NOPs */
		if (jump_label_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		static_key_set_entries(key, iter);
	}
	static_key_initialized = true;
	jump_label_unlock();
	cpus_read_unlock();
}
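All of the jump_label_init() variants above iterate the same linker-built table bounded by __start___jump_table and __stop___jump_table. In this generation the entries they sort and walk are three pointer-sized fields emitted at each asm goto site; newer kernels switch to relative offsets, so treat the exact types below as an assumption rather than a fixed ABI:

/* Sketch of the classic (absolute-address) jump table entry layout. */
typedef u64 jump_label_t;

struct jump_entry {
	jump_label_t code;	/* address of the NOP/JMP to patch */
	jump_label_t target;	/* where to jump when the key is enabled */
	jump_label_t key;	/* address of the owning static_key */
};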