Example #1
int cpu_manager::reserve_cpu_for_thread(pthread_t tid, int suggested_cpu /* = NO_CPU */)
{
	lock();
	int cpu = g_n_thread_cpu_core;
	if (cpu != NO_CPU) { //already reserved
		unlock();
		return cpu;
	}

	cpu_set_t cpu_set;
	CPU_ZERO(&cpu_set);

	int ret = pthread_getaffinity_np(tid, sizeof(cpu_set_t), &cpu_set);
	if (ret) {
		unlock();
		__log_err("pthread_getaffinity_np failed for tid=%lu, ret=%d (errno=%d %m)", tid, ret, errno);
		return -1;
	}

	int avail_cpus = CPU_COUNT(&cpu_set);
	if (avail_cpus == 0) {
		unlock();
		__log_err("no cpu available for tid=%lu", tid);
		return -1;
	}

	if (avail_cpus == 1) { //already attached
		for (cpu = 0; cpu < MAX_CPU && !CPU_ISSET(cpu, &cpu_set); cpu++) {}
	} else { //need to choose one cpu to attach to
		int min_cpu_count = -1;
		for (int i = 0, j = 0; i < MAX_CPU && j < avail_cpus; i++) {
			if (!CPU_ISSET(i, &cpu_set)) continue;
			j++;
			if (min_cpu_count < 0 || m_cpu_thread_count[i] < min_cpu_count) {
				min_cpu_count = m_cpu_thread_count[i];
				cpu = i;
			}
		}
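		// Prefer the caller's suggested CPU when the affinity mask allows it and it carries
		// at most one more thread than the least-loaded candidate found above.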
		if (suggested_cpu >= 0
			&& CPU_ISSET(suggested_cpu, &cpu_set)
			&& m_cpu_thread_count[suggested_cpu] <= min_cpu_count + 1 ) {
			cpu = suggested_cpu;
		}
		CPU_ZERO(&cpu_set);
		CPU_SET(cpu, &cpu_set);
		__log_dbg("attach tid=%lu running on cpu=%d to cpu=%d", tid, sched_getcpu(), cpu);
		ret = pthread_setaffinity_np(tid, sizeof(cpu_set_t), &cpu_set);
		if (ret) {
			unlock();
			__log_err("pthread_setaffinity_np failed for tid=%lu to cpu=%d, ret=%d (errno=%d %m)", tid, cpu, ret, errno);
			return -1;
		}
	}

	g_n_thread_cpu_core = cpu;
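	// Account for the new thread on its core; the range check below guards m_cpu_thread_count
	// against an out-of-range index (e.g. when the only allowed CPU is at or above MAX_CPU).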
	if (cpu > NO_CPU && cpu < MAX_CPU)
		m_cpu_thread_count[cpu]++;
	unlock();
	return cpu;
}
Example #2
epoll_wait_call::epoll_wait_call(epoll_event *extra_events_buffer, offloaded_mode_t *off_modes_buffer,
                                 int epfd, epoll_event *events, int maxevents, 
                                 int timeout, const sigset_t *sigmask /* = NULL */) :
	io_mux_call(NULL, off_modes_buffer, 0, sigmask),  // TODO: rethink on these arguments
	m_epfd(epfd), m_events(events), m_maxevents(maxevents), m_timeout(timeout),
	m_p_ready_events(extra_events_buffer)
{
	// get epfd_info
	m_epfd_info = fd_collection_get_epfd(epfd);
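	// Validate inputs: an unregistered epfd or a non-positive maxevents is reported
	// via errno and an io_error exception.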
	if (!m_epfd_info || maxevents <= 0) {
		__log_dbg("error, epfd %d not found or maxevents <= 0 (=%d)", epfd, maxevents);
		errno = maxevents <= 0 ? EINVAL : EBADF;
		vma_throw_object(io_mux_call::io_error);
	}

	// create stats
	m_p_stats = &m_epfd_info->stats()->stats;
}
Example #3
void vma_shmem_stats_close()
{
	if (g_sh_mem_info.p_sh_stats && g_sh_mem_info.p_sh_stats != MAP_FAILED) {
		__log_dbg("file '%s' fd %d shared memory at %p with %d max blocks\n", g_sh_mem_info.filename_sh_stats, g_sh_mem_info.fd_sh_stats, g_sh_mem_info.p_sh_stats, safe_mce_sys().stats_fd_num_max);

		BULLSEYE_EXCLUDE_BLOCK_START
		if (munmap(g_sh_mem_info.p_sh_stats, SHMEM_STATS_SIZE(safe_mce_sys().stats_fd_num_max)) != 0) {
			vlog_printf(VLOG_ERROR, "%s: file [%s] fd [%d] error while unmap shared memory at [%p]\n", __func__, g_sh_mem_info.filename_sh_stats, g_sh_mem_info.fd_sh_stats, g_sh_mem_info.p_sh_stats);
		}
		BULLSEYE_EXCLUDE_BLOCK_END

		g_sh_mem_info.p_sh_stats = MAP_FAILED;

		if (g_sh_mem_info.fd_sh_stats)
			close(g_sh_mem_info.fd_sh_stats);

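		// Only the original process removes the stats file; a forked child must not
		// unlink a file its parent may still be using.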
		if(!g_is_forked_child)
			unlink(g_sh_mem_info.filename_sh_stats);
	} else if (g_sh_mem_info.p_sh_stats != MAP_FAILED) {
		// Shared memory was never mapped; the stats block lives in the locally
		// allocated fallback buffer (see vma_shmem_stats_open), so release that instead.
		free(g_sh_mem);
	}

	g_sh_mem_info.fd_sh_stats = -1;
	g_sh_mem_info.p_sh_stats = MAP_FAILED;
	g_sh_mem = NULL;
}
Example #4
void vma_shmem_stats_open(vlog_levels_t** p_p_vma_log_level, uint8_t** p_p_vma_log_details)
{
	void *buf = NULL;
	void *p_shmem = NULL;
	int ret;
	size_t shmem_size = 0;
	mode_t saved_mode;

	g_p_stats_data_reader = new stats_data_reader();

	BULLSEYE_EXCLUDE_BLOCK_START
	if (NULL == g_p_stats_data_reader) {
		vlog_printf(VLOG_ERROR,"%s:%d: Can't allocate g_p_stats_data_reader \n", __func__, __LINE__);
		goto shmem_error;
	}
	BULLSEYE_EXCLUDE_BLOCK_END

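	// Allocate a zeroed image of the stats area; it is written to the shared-memory file
	// below and also serves as a local fallback if shared memory cannot be set up.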
	shmem_size = SHMEM_STATS_SIZE(safe_mce_sys().stats_fd_num_max);
	buf = malloc(shmem_size);
	if (buf == NULL)
		goto shmem_error;
	memset(buf, 0, shmem_size);

	p_shmem = buf;

	if (strlen(safe_mce_sys().stats_shmem_dirname) <= 0)
		goto no_shmem;

	g_sh_mem_info.filename_sh_stats[0] = '\0';
	g_sh_mem_info.p_sh_stats = MAP_FAILED;
	ret = snprintf(g_sh_mem_info.filename_sh_stats, sizeof(g_sh_mem_info.filename_sh_stats), "%s/vmastat.%d", safe_mce_sys().stats_shmem_dirname, getpid());
	if (!((0 < ret) && (ret < (int)sizeof(g_sh_mem_info.filename_sh_stats)))) {
		vlog_printf(VLOG_ERROR, "%s: Could not create file under %s %m\n", __func__, safe_mce_sys().stats_shmem_dirname, errno);
		goto no_shmem;
	}
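	// Temporarily clear the umask so the stats file is created with exactly the requested permissions.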
	saved_mode = umask(0);
	g_sh_mem_info.fd_sh_stats = open(g_sh_mem_info.filename_sh_stats, O_CREAT|O_RDWR,
			S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
	umask(saved_mode);

	BULLSEYE_EXCLUDE_BLOCK_START
	if (g_sh_mem_info.fd_sh_stats < 0) {
		vlog_printf(VLOG_ERROR, "%s: Could not open %s %m\n", __func__, g_sh_mem_info.filename_sh_stats, errno);
		goto no_shmem;
	}
	BULLSEYE_EXCLUDE_BLOCK_END

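	// Write the zeroed image to size the file so the subsequent mmap() has fully backed pages.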
	ret = write(g_sh_mem_info.fd_sh_stats, buf, shmem_size);

	BULLSEYE_EXCLUDE_BLOCK_START
	if (ret < 0) {
		vlog_printf(VLOG_ERROR, "%s: Could not write to %s - %m\n", __func__, g_sh_mem_info.filename_sh_stats, errno);
		goto no_shmem;
	}
	BULLSEYE_EXCLUDE_BLOCK_END

	g_sh_mem_info.p_sh_stats = mmap(0, shmem_size, PROT_WRITE|PROT_READ, MAP_SHARED, g_sh_mem_info.fd_sh_stats, 0);

	BULLSEYE_EXCLUDE_BLOCK_START
	if (g_sh_mem_info.p_sh_stats == MAP_FAILED) {
		vlog_printf(VLOG_ERROR, "%s: MAP_FAILED for %s - %m\n", __func__, g_sh_mem_info.filename_sh_stats);
		goto no_shmem;
	}
	BULLSEYE_EXCLUDE_BLOCK_END

	p_shmem = g_sh_mem_info.p_sh_stats;

	free(buf);
	buf = NULL;

	goto success;

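	// Fallback path: shared memory could not be set up; clean up any partially created file
	// and keep the stats in the local buffer.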
	no_shmem:
	if (g_sh_mem_info.p_sh_stats == MAP_FAILED) {
		if (g_sh_mem_info.fd_sh_stats > 0) {
			close(g_sh_mem_info.fd_sh_stats);
			unlink(g_sh_mem_info.filename_sh_stats);
		}
	}

	g_sh_mem_info.p_sh_stats = 0;

	success:

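	// At this point p_shmem points either at the mmap'ed stats file or at the local fallback buffer.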
	MAP_SH_MEM(g_sh_mem, p_shmem);

	write_version_details_to_shmem(&g_sh_mem->ver_info);
	memcpy(g_sh_mem->stats_protocol_ver, STATS_PROTOCOL_VER, min(sizeof(g_sh_mem->stats_protocol_ver), sizeof(STATS_PROTOCOL_VER)));
	g_sh_mem->max_skt_inst_num = safe_mce_sys().stats_fd_num_max;
	g_sh_mem->reader_counter = 0;
	__log_dbg("file '%s' fd %d shared memory at %p with %d max blocks\n", g_sh_mem_info.filename_sh_stats, g_sh_mem_info.fd_sh_stats, g_sh_mem_info.p_sh_stats, safe_mce_sys().stats_fd_num_max);

	// Update the shmem initial log values
	g_sh_mem->log_level = **p_p_vma_log_level;
	g_sh_mem->log_details_level = **p_p_vma_log_details;

	// Update the shmem with initial fd dump values
	g_sh_mem->fd_dump = STATS_FD_STATISTICS_DISABLED;
	g_sh_mem->fd_dump_log_level = STATS_FD_STATISTICS_LOG_LEVEL_DEFAULT;

	// ReMap internal log level to ShMem area
	*p_p_vma_log_level = &g_sh_mem->log_level;
	*p_p_vma_log_details = &g_sh_mem->log_details_level;

	g_p_stats_data_reader->register_to_timer();

	return;

	shmem_error:

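	// Complete failure: fall back to a statically allocated, process-local stats block
	// so the log-level controls below still have valid storage.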
	BULLSEYE_EXCLUDE_BLOCK_START
	g_sh_mem_info.fd_sh_stats = -1;
	g_sh_mem_info.p_sh_stats = MAP_FAILED;
	g_sh_mem = &g_local_sh_mem;
	g_sh_mem->reset();
	*p_p_vma_log_level = &g_sh_mem->log_level;
	*p_p_vma_log_details = &g_sh_mem->log_details_level;
	BULLSEYE_EXCLUDE_BLOCK_END
}
Example #5
int cpu_manager::reserve_cpu_for_thread(pthread_t tid, int suggested_cpu /* = NO_CPU */)
{
	lock();
	int cpu = g_n_thread_cpu_core;
	if (cpu != NO_CPU) { //already reserved
		unlock();
		return cpu;
	}

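	// Unlike the fixed cpu_set_t variant above, this version sizes the CPU set dynamically
	// with CPU_ALLOC(), so it also handles MAX_CPU values beyond the static CPU_SETSIZE limit.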
	cpu_set_t* cpu_set = CPU_ALLOC(MAX_CPU);
	if (!cpu_set) {
		unlock();
		__log_err("failed to allocate cpu set");
		return -1;
	}
	size_t cpu_set_size = CPU_ALLOC_SIZE(MAX_CPU);
	CPU_ZERO_S(cpu_set_size, cpu_set);
	if (pthread_getaffinity_np(tid, cpu_set_size, cpu_set)) {
		unlock();
		CPU_FREE(cpu_set);
		__log_err("pthread_getaffinity_np failed for tid=%lu (errno=%d %m)", tid, errno);
		return -1;
	}

	int avail_cpus = CPU_COUNT_S(cpu_set_size, cpu_set);
	if (avail_cpus == 0) {
		unlock();
		__log_err("no cpu available for tid=%lu", tid);
		CPU_FREE(cpu_set);
		return -1;
	}

	if (avail_cpus == 1) { //already attached
		for (cpu = 0; cpu < MAX_CPU && !CPU_ISSET_S(cpu, cpu_set_size, cpu_set); cpu++) {}
	} else { //need to choose one cpu to attach to
		int min_cpu_count = -1;
		for (int i = 0; i < MAX_CPU; i++) {
			if (!CPU_ISSET_S(i, cpu_set_size, cpu_set)) continue;
			if (min_cpu_count < 0 || m_cpu_thread_count[i] < min_cpu_count) {
				min_cpu_count = m_cpu_thread_count[i];
				cpu = i;
			}
		}
		if (suggested_cpu >= 0
			&& CPU_ISSET_S(suggested_cpu, cpu_set_size, cpu_set)
			&& m_cpu_thread_count[suggested_cpu] <= min_cpu_count + 1 ) {
			cpu = suggested_cpu;
		}
		CPU_ZERO_S(cpu_set_size, cpu_set);
		CPU_SET_S(cpu, cpu_set_size, cpu_set);
		__log_dbg("attach tid=%lu running on cpu=%d to cpu=%d", tid, sched_getcpu(), cpu);
		if (pthread_setaffinity_np(tid, cpu_set_size, cpu_set)) {
			unlock();
			CPU_FREE(cpu_set);
			__log_err("pthread_setaffinity_np failed for tid=%lu to cpu=%d (errno=%d %m)", tid, cpu, errno);
			return -1;
		}
	}

	CPU_FREE(cpu_set);
	g_n_thread_cpu_core = cpu;
	if (cpu > NO_CPU && cpu < MAX_CPU) // guard the thread-count array against an out-of-range index
		m_cpu_thread_count[cpu]++;
	unlock();
	return cpu;
}