/**
 * @return the key that forms part of a unique id in the rings map
 */
uint64_t ring_allocation_logic::calc_res_key_by_logic()
{
	uint64_t res_key = 0;
	switch (m_res_key.get_ring_alloc_logic()) {
	case RING_LOGIC_PER_INTERFACE:
		res_key = 0;
		if (safe_mce_sys().tcp_ctl_thread > CTL_THREAD_DISABLE) {
			res_key = 1;
		}
		break;
	case RING_LOGIC_PER_IP:
		res_key = m_source.m_ip;
		break;
	case RING_LOGIC_PER_SOCKET:
		res_key = m_source.m_fd;
		break;
	case RING_LOGIC_PER_USER_ID:
		res_key = m_res_key.get_user_id_key();
		break;
	case RING_LOGIC_PER_THREAD:
		res_key = pthread_self();
		break;
	case RING_LOGIC_PER_CORE:
	case RING_LOGIC_PER_CORE_ATTACH_THREADS:
		res_key = sched_getcpu();
		break;
	BULLSEYE_EXCLUDE_BLOCK_START
	default:
		// not supposed to get here
		ral_logdbg("invalid ring logic = %d", m_res_key.get_ring_alloc_logic());
		break;
	BULLSEYE_EXCLUDE_BLOCK_END
	}
	return res_key;
}
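/*
 * Illustrative sketch (not from the original source): how a key produced by
 * calc_res_key_by_logic() might index a rings map. The map and the demo_ring
 * struct are hypothetical stand-ins for the library's actual types.
 */
#include <cstdint>
#include <unordered_map>

struct demo_ring { uint64_t key; };                        // placeholder ring type
static std::unordered_map<uint64_t, demo_ring*> rings_map; // hypothetical rings map

demo_ring* get_or_create_ring(uint64_t res_key)
{
	auto it = rings_map.find(res_key);
	if (it != rings_map.end()) {
		return it->second;                 // sockets sharing a key share a ring
	}
	demo_ring* r = new demo_ring{res_key}; // first user of this key: allocate
	rings_map[res_key] = r;                // (ownership/cleanup omitted here)
	return r;
}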
resource_allocation_key ring_allocation_logic::get_res_key_by_logic()
{
	resource_allocation_key key = DEFAULT_RING_KEY;

	switch (m_ring_allocation_logic) {
	case RING_LOGIC_PER_INTERFACE:
		key = 0;
		break;
	case RING_LOGIC_PER_SOCKET:
		key = m_fd;
		break;
	case RING_LOGIC_PER_THREAD:
		key = pthread_self();
		break;
	case RING_LOGIC_PER_CORE:
	case RING_LOGIC_PER_CORE_ATTACH_THREADS:
		key = sched_getcpu();
		break;
	BULLSEYE_EXCLUDE_BLOCK_START
	default:
		// not supposed to get here
		ral_logdbg("invalid ring logic = %d", m_ring_allocation_logic);
		break;
	BULLSEYE_EXCLUDE_BLOCK_END
	}

	return key;
}
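/*
 * Minimal standalone sketch (assuming Linux/glibc, where g++ exposes
 * sched_getcpu() by default) showing what the per-thread and per-core keys
 * above evaluate to. Note that sched_getcpu() can change whenever the
 * scheduler moves the thread, which is why the per-core logics need the
 * migration check implemented below.
 */
#include <cstdio>
#include <pthread.h>
#include <sched.h>

int main()
{
	// RING_LOGIC_PER_THREAD keys on the calling thread's id.
	printf("per-thread key: %lu\n", (unsigned long)pthread_self());
	// RING_LOGIC_PER_CORE keys on the CPU currently running this thread.
	printf("per-core key:   %d\n", sched_getcpu());
	return 0;
}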
Example #3
/*
 * Return true if ring migration is recommended.
 */
bool ring_allocation_logic::should_migrate_ring()
{
	if (!is_logic_support_migration()) {
		return false;
	}

	if (m_ring_migration_ratio < 0) {
		return false;
	}

	ral_logfuncall("currently accessed from thread=%lu, cpu=%d", pthread_self(), sched_getcpu());

	int count_max = m_ring_migration_ratio;
	if (m_migration_candidate) {
		count_max = CANDIDATE_STABILITY_ROUNDS;
		uint64_t new_id = calc_res_key_by_logic();
		if (m_migration_candidate != new_id) {
			m_migration_candidate = 0;
			m_migration_try_count = 0;
			return false;
		}
	}
	if (m_migration_try_count < count_max) {
		m_migration_try_count++;
		return false;
	} else {
		m_migration_try_count = 0;
	}

	if (!m_migration_candidate) {
		// Save the currently used allocation key; no need to save the
		// profile or the allocation logic.
		uint64_t curr_id = m_res_key.get_user_id_key();
		// calc new key
		uint64_t new_id = calc_res_key_by_logic();
		if (new_id == curr_id || g_n_internal_thread_id == curr_id) {
			return false;
		}
		m_migration_candidate = new_id;
		return false;
	}

	ral_logdbg("migrating from ring of id=%s to ring of id=%lu",
		   m_res_key.to_str(), m_migration_candidate);
	m_migration_candidate = 0;

	return true;
}
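/*
 * Standalone sketch of the two-phase debounce above (hypothetical names, not
 * the library's API): a differing key is first recorded as a candidate, then
 * must survive a stability window before migration is recommended.
 * STABILITY_ROUNDS stands in for CANDIDATE_STABILITY_ROUNDS; the separate
 * m_ring_migration_ratio threshold used in the pre-candidate phase is folded
 * into the same constant to keep the sketch short.
 */
#include <cstdint>
#include <cstdio>

class migration_debounce {
	uint64_t m_candidate = 0;
	int m_try_count = 0;
	static const int STABILITY_ROUNDS = 2;
public:
	bool should_migrate(uint64_t current_key, uint64_t new_key)
	{
		if (m_candidate && m_candidate != new_key) {
			m_candidate = 0;       // key flapped: drop candidate, start over
			m_try_count = 0;
			return false;
		}
		if (m_try_count < STABILITY_ROUNDS) {
			m_try_count++;         // still observing this round
			return false;
		}
		m_try_count = 0;
		if (!m_candidate) {
			if (new_key == current_key) {
				return false;      // nothing changed, nothing to migrate to
			}
			m_candidate = new_key; // phase 1 done: open stability window
			return false;
		}
		m_candidate = 0;               // phase 2 done: recommend migration
		return true;
	}
};

int main()
{
	migration_debounce d;
	for (int round = 0; round < 6; round++) {
		// thread keeps observing key 2 while its ring is keyed 1;
		// migration fires only after the candidate survives the window
		printf("round %d: migrate=%d\n", round, d.should_migrate(1, 2));
	}
	return 0;
}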
/*
 * Return true if ring migration is recommended for this thread.
 */
bool ring_allocation_logic::should_migrate_ring()
{
	if (m_ring_allocation_logic < RING_LOGIC_PER_THREAD) {
		return false;
	}

	if (m_ring_migration_ratio < 0) {
		return false;
	}

	ral_logfuncall("currently accessed from thread=%lu, cpu=%d", pthread_self(), sched_getcpu());

	int count_max = m_ring_migration_ratio;
	if (m_migration_candidate) {
		count_max = CANDIDATE_STABILITY_ROUNDS;
		resource_allocation_key current_id = get_res_key_by_logic();
		if (m_migration_candidate != current_id) {
			m_migration_candidate = 0;
			m_migration_try_count = 0;
			return false;
		}
	}

	if (m_migration_try_count < count_max) {
		m_migration_try_count++;
		return false;
	} else {
		m_migration_try_count = 0;
	}

	if (!m_migration_candidate) {
		resource_allocation_key current_id = get_res_key_by_logic();
		if (m_res_key == current_id || g_n_internal_thread_id == current_id) {
			return false;
		}
		m_migration_candidate = current_id;
		return false;
	}

	ral_logdbg("migrating from ring of id=%lu to ring of id=%lu", m_res_key, m_migration_candidate);
	m_migration_candidate = 0;

	return true;
}
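/*
 * Design note on both variants: migration is deliberately debounced in two
 * phases. A socket must first observe a differing key (which only arms
 * m_migration_candidate), then see that same key remain stable for
 * CANDIDATE_STABILITY_ROUNDS further checks; any flap in between resets both
 * the candidate and the counter. This keeps threads that bounce between CPUs
 * from thrashing ring ownership.
 */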