Example #1
0
File: ip_frag.cpp  Project: olgk/libvma
// Constructor: pre-allocates the fixed pools of fragment descriptors and
// hole descriptors used for IPv4 reassembly, and pushes every element onto
// its free list. Throws a VMA exception (after releasing partial resources)
// if allocation fails.
ip_frag_manager::ip_frag_manager() : lock_spin("ip_frag_manager")
{
	frag_dbg("");
	m_frag_counter = 0;
	int i;

	
	frag_dbg("NOTE: ip frag periodic timer is disabled until HW supports ip frag offload");
	// g_p_event_handler_manager->register_timer_event(IP_FRAG_CLEANUP_INT, this, PERIODIC_TIMER, 0);

	frag_dbg("Created new IPFRAG MANAGER instance");
	/* allocate hole list */
	desc_base = new ip_frag_desc_t [IP_FRAG_MAX_DESC];
	BULLSEYE_EXCLUDE_BLOCK_START
	// NOTE(review): plain operator new throws std::bad_alloc on failure and
	// never returns NULL, so this check (and the one below) is dead code.
	// If the cleanup-and-throw_vma_exception path is the intended behavior,
	// the allocations should use new (std::nothrow) — confirm project policy.
	if (!desc_base) {
		frag_dbg("Failed to allocate descriptor");
		free_frag_resources();
		throw_vma_exception("Failed to allocate descriptor");
	}
	hole_base = new ip_frag_hole_desc [IP_FRAG_MAX_HOLES];
	if (!hole_base) {
		frag_dbg("Failed to allocate hole descriptor");
		free_frag_resources();
		throw_vma_exception("Failed to allocate hole descriptor");
	}
	BULLSEYE_EXCLUDE_BLOCK_END
	// Seed the free lists: every descriptor/hole starts out unused.
	for (i = 0; i < IP_FRAG_MAX_DESC; i++) {
		free_frag_desc(&desc_base[i]);
	}
	for (i = 0; i < IP_FRAG_MAX_HOLES; i++) {
		free_hole_desc(&hole_base[i]);
	}
}
Example #2
0
void* event_handler_manager::register_timer_event(int timeout_msec, timer_handler* handler, 
						  timer_req_type_t req_type, void* user_data,
						  timers_group* group /* = NULL */)
{
	evh_logdbg("timer handler '%p' registered %s timer for %d msec (user data: %X)",
		   handler, timer_req_type_str(req_type), timeout_msec, user_data);
	BULLSEYE_EXCLUDE_BLOCK_START
	if (!handler || (req_type < 0 || req_type >= INVALID_TIMER)) {
		evh_logwarn("bad timer type (%d) or handler (%p)", req_type, handler);
		return NULL;
	}
	BULLSEYE_EXCLUDE_BLOCK_END

	// malloc here the timer list node in order to return it to the app
	void* node = malloc(sizeof(struct timer_node_t)); 
	BULLSEYE_EXCLUDE_BLOCK_START
	if (!node) {
		evh_logdbg("malloc failure");
		throw_vma_exception("malloc failure");
	}
	BULLSEYE_EXCLUDE_BLOCK_END
	timer_node_t* timer_node = (timer_node_t*)node;
	memset(timer_node, 0, sizeof(*timer_node));
	reg_action_t reg_action;
	memset(&reg_action, 0, sizeof(reg_action));
	reg_action.type = REGISTER_TIMER;
	reg_action.info.timer.handler = handler;
	reg_action.info.timer.user_data = user_data;
	reg_action.info.timer.group = group;
	reg_action.info.timer.node = node;
	reg_action.info.timer.timeout_msec = timeout_msec;
	reg_action.info.timer.req_type = req_type;
	post_new_reg_action(reg_action);
	return node;
}
Example #3
0
// Constructor: caches the relevant safe_mce_sys() tunables, creates the
// internal epoll instance used to multiplex device/channel events, and arms
// the wakeup mechanism. Throws a VMA exception if epoll_create fails.
event_handler_manager::event_handler_manager() :
		m_reg_action_q_lock("reg_action_q_lock"),
		m_b_sysvar_internal_thread_arm_cq_enabled(safe_mce_sys().internal_thread_arm_cq_enabled),
		m_n_sysvar_vma_time_measure_num_samples(safe_mce_sys().vma_time_measure_num_samples),
		m_n_sysvar_timer_resolution_msec(safe_mce_sys().timer_resolution_msec)
{
	evh_logfunc("");

	m_cq_epfd = 0;

	// Use the original (non-intercepted) OS epoll_create so VMA's own
	// internals never loop back through the offload layer.
	m_epfd = orig_os_api.epoll_create(INITIAL_EVENTS_NUM);
	BULLSEYE_EXCLUDE_BLOCK_START
	if (m_epfd == -1) {
		evh_logdbg("epoll_create failed on ibv device collection (errno=%d %m)", errno);
		free_evh_resources();
		throw_vma_exception("epoll_create failed on ibv device collection");
	}
	BULLSEYE_EXCLUDE_BLOCK_END

	m_b_continue_running = true;
	m_event_handler_tid = 0;

	wakeup_set_epoll_fd(m_epfd);
	going_to_sleep();
	// Removed redundant trailing `return;` — it is a no-op in a constructor.
}
Example #4
0
// Constructor: wraps an application socket fd, creates the internal Rx epoll
// fd used for blocking receives, and registers a per-socket statistics block.
// Throws vma_exception if the internal epoll instance cannot be created.
//
// NOTE(review): the dynamic exception specification `throw (vma_exception)`
// is deprecated since C++11 and removed in C++17; dropping it also requires
// updating the matching declaration in the header — not changed here.
sockinfo::sockinfo(int fd) throw (vma_exception):
		socket_fd_api(fd),
		m_b_closed(false), m_b_blocking(true), m_protocol(PROTO_UNDEFINED),
		m_lock_rcv(MODULE_NAME "::m_lock_rcv"),
		m_lock_snd(MODULE_NAME "::m_lock_snd"),
		m_p_connected_dst_entry(NULL),
		m_so_bindtodevice_ip(0),
		m_p_rx_ring(0),
		m_rx_reuse_buf_pending(false),
		m_rx_reuse_buf_postponed(false),
		m_rx_ring_map_lock(MODULE_NAME "::m_rx_ring_map_lock"),
		m_ring_alloc_logic(fd, this),
		m_n_rx_pkt_ready_list_count(0), m_rx_pkt_ready_offset(0), m_rx_ready_byte_count(0),
		m_rx_num_buffs_reuse(safe_mce_sys().rx_bufs_batch),
		m_rx_callback(NULL),
		m_rx_callback_context(NULL)
{
	// Internal epoll fd for Rx readiness; use the original OS API so VMA's
	// own plumbing is never intercepted by the offload layer.
	m_rx_epfd = orig_os_api.epoll_create(128);
	if (unlikely(m_rx_epfd == -1)) {
	  throw_vma_exception("create internal epoll");
	}
	wakeup_set_epoll_fd(m_rx_epfd);

	m_p_socket_stats = &m_socket_stats; // Save stats as local copy and allow state publisher to copy from this location
	vma_stats_instance_create_socket_block(m_p_socket_stats);
	// Zero the stats block, then publish the identifying fields.
	memset(m_p_socket_stats, 0, sizeof(*m_p_socket_stats));
	m_p_socket_stats->fd = m_fd;
	m_p_socket_stats->inode = fd2inode(m_fd);
	m_p_socket_stats->b_blocking = m_b_blocking;
	m_rx_reuse_buff.n_buff_num = 0;
}
Example #5
0
File: sockinfo.cpp  Project: olgk/libvma
// Constructor (newer variant of the class): wraps an application socket fd,
// caches sysvar tunables, creates the internal Rx epoll fd, registers a
// per-socket statistics block and assigns a flow-tag id derived from the fd.
// Throws vma_exception if the internal epoll instance cannot be created.
sockinfo::sockinfo(int fd):
		socket_fd_api(fd),
		m_b_closed(false),
		m_b_blocking(true),
		m_b_pktinfo(false),
		m_b_rcvtstamp(false),
		m_b_rcvtstampns(false),
		m_n_tsing_flags(0),
		m_protocol(PROTO_UNDEFINED),
		m_lock_rcv(MODULE_NAME "::m_lock_rcv"),
		m_lock_snd(MODULE_NAME "::m_lock_snd"),
		m_p_connected_dst_entry(NULL),
		m_so_bindtodevice_ip(INADDR_ANY),
		m_p_rx_ring(0),
		m_rx_reuse_buf_pending(false),
		m_rx_reuse_buf_postponed(false),
		m_rx_ring_map_lock(MODULE_NAME "::m_rx_ring_map_lock"),
		m_n_rx_pkt_ready_list_count(0), m_rx_pkt_ready_offset(0), m_rx_ready_byte_count(0),
		m_n_sysvar_rx_num_buffs_reuse(safe_mce_sys().rx_bufs_batch),
		m_n_sysvar_rx_poll_num(safe_mce_sys().rx_poll_num),
		m_ring_alloc_log_rx(safe_mce_sys().ring_allocation_logic_rx),
		m_ring_alloc_log_tx(safe_mce_sys().ring_allocation_logic_tx),
		m_pcp(0),
		m_rx_callback(NULL),
		m_rx_callback_context(NULL),
		m_fd_context((void *)((uintptr_t)m_fd)),
		m_flow_tag_id(0),
		m_flow_tag_enabled(false),
		m_n_uc_ttl(safe_mce_sys().sysctl_reader.get_net_ipv4_ttl()),
		m_tcp_flow_is_5t(false),
		m_p_rings_fds(NULL)

{
	// Ring allocation logic needs `this`, so it is assigned in the body
	// rather than the initializer list.
	m_ring_alloc_logic = ring_allocation_logic_rx(get_fd(), m_ring_alloc_log_rx, this);
	// Internal epoll fd for Rx readiness; use the original OS API so VMA's
	// own plumbing is never intercepted by the offload layer.
	m_rx_epfd = orig_os_api.epoll_create(128);
	if (unlikely(m_rx_epfd == -1)) {
	  throw_vma_exception("create internal epoll");
	}
	wakeup_set_epoll_fd(m_rx_epfd);

	m_p_socket_stats = &m_socket_stats; // Save stats as local copy and allow state publisher to copy from this location
	vma_stats_instance_create_socket_block(m_p_socket_stats);
	m_p_socket_stats->reset();
	m_p_socket_stats->fd = m_fd;
	m_p_socket_stats->inode = fd2inode(m_fd);
	m_p_socket_stats->b_blocking = m_b_blocking;
	m_rx_reuse_buff.n_buff_num = 0;
	memset(&m_so_ratelimit, 0, sizeof(vma_rate_limit_t));
	// Flow tag 0 means "disabled", so tags are offset by one from the fd.
	set_flow_tag(m_fd + 1);
#ifdef DEFINED_SOCKETXTREME 
	m_ec.clear();
	m_socketxtreme_completion = NULL;
	m_socketxtreme_last_buff_lst = NULL;
#endif // DEFINED_SOCKETXTREME 
}
Example #6
0
// Attaches a receive QP to this multi-packet CQ manager. The QP must be a
// qp_mgr_mp; otherwise a VMA exception is thrown. Unless the CQ is backed by
// external memory, the full receive work-queue is pre-posted with Rx buffers.
void cq_mgr_mp::add_qp_rx(qp_mgr *qp)
{
	cq_logdbg("qp_mp_mgr=%p", qp);

	// A multi-packet CQ can only drive a multi-packet QP.
	qp_mgr_mp *mp_qp = dynamic_cast<qp_mgr_mp *>(qp);
	if (!mp_qp) {
		cq_logdbg("this qp is not of type qp_mgr_mp %p", qp);
		throw_vma_exception("this qp is not of type qp_mgr_mp");
	}

	set_qp_rq(qp);
	m_qp_rec.qp = qp;

	if (m_external_mem) {
		// Buffers are owned by the application; nothing to pre-post.
		cq_logdbg("this qp uses an external memory %p", qp);
		return;
	}

	// Fill the entire receive work-queue up front.
	if (mp_qp->post_recv(0, mp_qp->get_wq_count()) != 0) {
		cq_logdbg("qp post recv failed");
	} else {
		cq_logdbg("Successfully post_recv qp with %d new Rx buffers",
			  mp_qp->get_wq_count());
	}
}
// Scans the verbs device list and adds an ib_ctx_handler for each offload
// capable IB device not yet present in m_ib_ctx_map.
//
// @param ifa_name  if non-NULL, only devices matching this interface name are
//                  considered (and missing devices are only a debug message);
//                  NULL means full initialization scan (errors are fatal-level).
// @throws vma_exception if the device list cannot be obtained at all
void ib_ctx_handler_collection::update_tbl(const char *ifa_name)
{
	struct ibv_device **dev_list = NULL;
	ib_ctx_handler * p_ib_ctx_handler = NULL;
	int num_devices = 0;
	int i;

	ibchc_logdbg("Checking for offload capable IB devices...");

	dev_list = vma_ibv_get_device_list(&num_devices);

	BULLSEYE_EXCLUDE_BLOCK_START
	if (!dev_list) {
		ibchc_logerr("Failure in vma_ibv_get_device_list() (error=%d %m)", errno);
		ibchc_logerr("Please check rdma configuration");
		throw_vma_exception("No IB capable devices found!");
	}
	if (!num_devices) {
		vlog_levels_t _level = ifa_name ? VLOG_DEBUG : VLOG_ERROR; // Print an error only during initialization.
		vlog_printf(_level, "VMA does not detect IB capable devices\n");
		vlog_printf(_level, "No performance gain is expected in current configuration\n");
	}

	BULLSEYE_EXCLUDE_BLOCK_END

	for (i = 0; i < num_devices; i++) {
		struct ib_ctx_handler::ib_ctx_handler_desc desc = {dev_list[i]};

		/* 2. Skip existing devices (compare by name) */
		if (ifa_name && !check_device_name_ib_name(ifa_name, dev_list[i]->name)) {
			continue;
		}

		if (ib_ctx_handler::is_mlx4(dev_list[i]->name)) {
			// Note: mlx4 does not support this capability.
			if(safe_mce_sys().enable_socketxtreme) {
				ibchc_logdbg("Blocking offload: mlx4 interfaces in socketxtreme mode");
				continue;
			}

			// Check if mlx4 steering creation is supported.
			// Those setting are passed to the VM by the Hypervisor - NO NEED to specify the param on the VM.
			if (safe_mce_sys().hypervisor == mce_sys_var::HYPER_NONE) {
				check_flow_steering_log_num_mgm_entry_size();
			}
		}

		/* 3. Add new ib devices */
		p_ib_ctx_handler = new ib_ctx_handler(&desc);
		// NOTE(review): plain operator new throws on failure and never returns
		// NULL, so this check is dead code; kept for minimal-diff safety.
		if (!p_ib_ctx_handler) {
			ibchc_logerr("failed allocating new ib_ctx_handler (errno=%d %m)", errno);
			continue;
		}
		m_ib_ctx_map[p_ib_ctx_handler->get_ibv_device()] = p_ib_ctx_handler;
	}

	// Fix: map::size() returns size_t — %d expects int and mismatches on
	// LP64; use %zu (supported by glibc printf-style formatting).
	ibchc_logdbg("Check completed. Found %zu offload capable IB devices", m_ib_ctx_map.size());

	if (dev_list) {
		ibv_free_device_list(dev_list);
	}
}