Example No. 1
ssize_t dst_entry_tcp::slow_send(const iovec* p_iov, size_t sz_iov, bool b_blocked /*= true*/, bool is_rexmit /*= false*/, int flags /*= 0*/, socket_fd_api* sock /*= 0*/, tx_call_t call_type /*= 0*/)
{
	ssize_t ret_val = -1;

	NOT_IN_USE(sock);
	NOT_IN_USE(call_type);
	NOT_IN_USE(flags);

	m_slow_path_lock.lock();

	prepare_to_send(true);

	if (m_b_is_offloaded) {
		if (!is_valid()) { // That means that the neigh is not resolved yet
			// There is a copy inside, so we should not update any ref-counts
			ret_val = pass_buff_to_neigh(p_iov, sz_iov);
		}
		else {
			ret_val = fast_send(p_iov, sz_iov, b_blocked, is_rexmit);
		}
	}
	else {
		dst_tcp_logdbg("Dst_entry is not offloaded, bug?");
	}
	m_slow_path_lock.unlock();
	return ret_val;
}
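Every example in this list uses the NOT_IN_USE macro to silence unused-parameter warnings for arguments that a particular override ignores. A minimal sketch of such a macro (the exact libvma definition may differ in name or header location):

// Sketch only: casting to void makes the compiler see a use of the parameter.
#define NOT_IN_USE(a) ((void)(a))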
Example No. 2
bool socket_fd_api::is_readable(uint64_t *p_poll_sn, fd_array_t* p_fd_array)
{
	NOT_IN_USE(p_poll_sn);
	NOT_IN_USE(p_fd_array);
	__log_info_funcall("");
	return false;
}
Example No. 3
ssize_t dst_entry_tcp::slow_send(const iovec* p_iov, size_t sz_iov, bool b_blocked /*= true*/, bool is_rexmit /*= false*/, int flags /*= 0*/, socket_fd_api* sock /*= 0*/, tx_call_t call_type /*= 0*/)
{
	ssize_t ret_val = -1;

	NOT_IN_USE(sock);
	NOT_IN_USE(call_type);
	NOT_IN_USE(flags);

	m_slow_path_lock.lock();

	prepare_to_send(true);

	if (m_b_is_offloaded) {
		if (!is_valid()) { // That means that the neigh is not resolved yet
			if (is_rexmit) {
				// Drop the retransmitted packet and don't save it in the neigh. If we ever want to save it in the neigh, we need to make a copy in save_iovec..()
				m_slow_path_lock.unlock();
				return ret_val;
			}
			ret_val = pass_buff_to_neigh(p_iov, sz_iov);
		}
		else {
			ret_val = fast_send(p_iov, sz_iov, b_blocked, is_rexmit);
		}
	}
	else {
		dst_tcp_logdbg("Dst_entry is not offloaded, bug?");
	}
	m_slow_path_lock.unlock();
	return ret_val;
}
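Note that this variant adds an early return between lock() and unlock() of m_slow_path_lock, so every new exit path must remember to unlock. A scoped guard removes that hazard; a minimal sketch using a standard mutex (libvma's own lock type differs, and slow_send_sketch is a hypothetical simplification):

#include <mutex>
#include <sys/types.h>

static std::mutex slow_path_lock; // hypothetical stand-in for m_slow_path_lock

ssize_t slow_send_sketch(bool offloaded, bool is_rexmit)
{
	std::lock_guard<std::mutex> guard(slow_path_lock); // released on every return path
	if (offloaded && is_rexmit) {
		return -1; // early return: the guard unlocks automatically
	}
	return 0;
}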
Example No. 4
size_t sockinfo::handle_msg_trunc(size_t total_rx, size_t payload_size, int in_flags, int* p_out_flags)
{
	NOT_IN_USE(payload_size);
	NOT_IN_USE(in_flags);
	*p_out_flags &= ~MSG_TRUNC; //don't handle msg_trunc
	return total_rx;
}
Example No. 5
int ring_bond::vma_poll(struct vma_completion_t *vma_completions, unsigned int ncompletions, int flags)
{
	NOT_IN_USE(vma_completions);
	NOT_IN_USE(ncompletions);
	NOT_IN_USE(flags);

	return 0;
}
Example No. 6
uint32_t ib_ctx_time_converter::get_device_convertor_status(struct ibv_context* ctx) {
	uint32_t dev_status = 0;
#ifdef DEFINED_IBV_EXP_CQ_TIMESTAMP
	int rval;

	// Checking if ibv_exp_query_device() is valid
	struct ibv_exp_device_attr device_attr;
	memset(&device_attr, 0, sizeof(device_attr));
	device_attr.comp_mask = IBV_EXP_DEVICE_ATTR_WITH_HCA_CORE_CLOCK;

	if ((rval = ibv_exp_query_device(ctx, &device_attr)) || !device_attr.hca_core_clock) {
		vlog_printf(VLOG_DEBUG, "ib_ctx_time_converter::get_device_convertor_status: Error in querying hca core clock "
				"(ibv_exp_query_device() return value=%d) (ibv context %p) (errno=%d %m)\n", rval, ctx, errno);
	} else {
		dev_status |= IBV_EXP_QUERY_DEVICE_SUPPORTED;
	}

	// Checking if ibv_exp_query_values() is valid
	struct ibv_exp_values queried_values;
	memset(&queried_values, 0, sizeof(queried_values));
	queried_values.comp_mask = IBV_EXP_VALUES_HW_CLOCK;

	if ((rval = ibv_exp_query_values(ctx, IBV_EXP_VALUES_HW_CLOCK, &queried_values)) || !queried_values.hwclock) {
		vlog_printf(VLOG_DEBUG, "ib_ctx_time_converter::get_device_convertor_status: Error in querying hw clock, can't convert"
				" hw time to system time (ibv_exp_query_values() return value=%d) (ibv context %p) (errno=%d %m)\n", rval, ctx, errno);
	} else {
		dev_status |= IBV_EXP_QUERY_VALUES_SUPPORTED;
	}
#else
	NOT_IN_USE(ctx);
#endif
	return dev_status;
}
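The returned bitmask lets a caller test each probed capability independently, e.g. (hypothetical usage, not taken from the library):

// Hypothetical caller; 'ctx' is an ibv_context* obtained elsewhere.
uint32_t status = ib_ctx_time_converter::get_device_convertor_status(ctx);
if (status & IBV_EXP_QUERY_VALUES_SUPPORTED) {
	// hw clock values can be queried, so hw time can be converted to system time
}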
Example No. 7
ssize_t dst_entry_udp::slow_send(const iovec* p_iov, size_t sz_iov, bool b_blocked /*= true*/, bool is_rexmit /*= false*/, int flags /*= 0*/, socket_fd_api* sock /*= 0*/, tx_call_t call_type /*= 0*/)
{
	NOT_IN_USE(is_rexmit);

	ssize_t ret_val = 0;

	dst_udp_logdbg("In slow send");

	prepare_to_send();

	if (m_b_force_os || !m_b_is_offloaded) {
		struct sockaddr_in to_saddr;
		to_saddr.sin_port = m_dst_port;
		to_saddr.sin_addr.s_addr = m_dst_ip.get_in_addr();
		to_saddr.sin_family = AF_INET;
		dst_udp_logdbg("Calling to tx_os");
		ret_val = sock->tx_os(call_type, p_iov, sz_iov, flags, (const struct sockaddr*)&to_saddr, sizeof(struct sockaddr_in));
	}
	else {
		if (!is_valid()) { // That means that the neigh is not resolved yet
			ret_val = pass_buff_to_neigh(p_iov, sz_iov);
		}
		else {
			ret_val = fast_send(p_iov, sz_iov, b_blocked);
		}
	}

	return ret_val;
}
Example No. 8
// The following function is supposed to be called under m_lock
bool dst_entry_udp_mc::resolve_net_dev(bool is_connect)
{
	NOT_IN_USE(is_connect);
	bool ret_val = false;
	cache_entry_subject<ip_address, net_device_val*>* p_ces = NULL;

	if (m_mc_tx_if_ip.get_in_addr() != INADDR_ANY && !m_mc_tx_if_ip.is_mc()) {
		if (m_p_net_dev_entry == NULL && g_p_net_device_table_mgr->register_observer(m_mc_tx_if_ip.get_in_addr(), this, &p_ces)) {
			m_p_net_dev_entry = dynamic_cast<net_device_entry*>(p_ces);
		}
		if (m_p_net_dev_entry) {
			m_p_net_dev_entry->get_val(m_p_net_dev_val);
			if (m_p_net_dev_val) {
				ret_val = alloc_transport_dep_res();
			}
			else {
				dst_udp_mc_logdbg("Valid netdev value not found");
			}
		}
		else {
			m_b_is_offloaded = false;
			dst_udp_mc_logdbg("Netdev is not offloaded fallback to OS");
		}
	}
	else {
		ret_val = dst_entry::resolve_net_dev();
	}
	return ret_val;
}
Example No. 9
void route_table_mgr::notify_cb(event *ev)
{
	NOT_IN_USE(ev); //TODO remove
#if 0
	route_nl_event *route_netlink_ev = dynamic_cast <route_nl_event*>(ev);
	const netlink_route_info *netlink_route_info = route_netlink_ev->get_route_info();
	uint16_t event_type = route_netlink_ev->nl_type;

	if(! is_route_event(event_type))
		return;

	rt_mgr_logdbg("received route event from netlink");
	route_val netlink_route_val;
	create_route_val_from_info(netlink_route_info, netlink_route_val);

	switch(event_type)
	{
	case RTM_DELROUTE:
		del_route_event(netlink_route_val);
		break;
	case RTM_NEWROUTE:
		new_route_event(netlink_route_val);
		break;
	}
#endif
}
Example No. 10
ssize_t dst_entry_tcp::pass_buff_to_neigh(const iovec * p_iov, size_t & sz_iov, uint16_t packet_id)
{
	NOT_IN_USE(packet_id);
	m_header_neigh.init();
	m_header_neigh.configure_tcp_ports(m_dst_port, m_src_port);
	return dst_entry::pass_buff_to_neigh(p_iov, sz_iov);
}
Example No. 11
void net_device_entry::handle_timer_expired(void* user_data)
{
	NOT_IN_USE(user_data);
	auto_unlocker lock(m_lock);
	net_device_val* p_ndv = dynamic_cast<net_device_val*>(m_val);
	if (p_ndv) {
		if (m_bond == net_device_val::ACTIVE_BACKUP) {
			if (p_ndv->update_active_backup_slaves()) {
				// Active slave was changed
				notify_observers();
			}
		} else if (m_bond == net_device_val::LAG_8023ad) {
			if (p_ndv->update_active_slaves()) {
				// Slave state was changed
				g_p_event_handler_manager->unregister_timer_event(this, m_timer_handle);
				m_timer_handle = g_p_event_handler_manager->register_timer_event(SLAVE_CHECK_TIMER_PERIOD_MSEC, this, PERIODIC_TIMER, 0);
				notify_observers();
			} else {
				if (timer_count >= 0) {
					timer_count++;
					if (timer_count == SLAVE_CHECK_FAST_NUM_TIMES) {
						timer_count = -1;
						g_p_event_handler_manager->unregister_timer_event(this, m_timer_handle);
						m_timer_handle = g_p_event_handler_manager->register_timer_event(SLAVE_CHECK_TIMER_PERIOD_MSEC, this, PERIODIC_TIMER, 0);
					}
				}
			}
		}
	}
}
Example No. 12
void pipeinfo::handle_timer_expired(void* user_data)
{
	NOT_IN_USE(user_data);
	pi_logfunc("(m_write_count=%d)", m_write_count);
	m_lock_tx.lock();
	write_lbm_pipe_enhance();
	m_lock_tx.unlock();
}
Example No. 13
void vlogger_timer_handler::handle_timer_expired(void* user_data)
{
	NOT_IN_USE(user_data);
	if (g_p_vlogger_level)
		g_vlogger_level = *g_p_vlogger_level;
	if (g_p_vlogger_details)
		g_vlogger_details = *g_p_vlogger_details;
}
Example No. 14
// Create a rule entry object for the given destination key and fill it with the matching rule value from the rule table.
// Parameters:
//		key		: key object that contains information about the destination.
//		obs		: object that contains an observer for the specific rule entry.
//	Returns the created rule entry object.
rule_entry* rule_table_mgr::create_new_entry(route_rule_table_key key, const observer *obs)
{
	rr_mgr_logdbg("");
	NOT_IN_USE(obs);
	rule_entry* p_ent = new rule_entry(key);
	update_entry(p_ent);
	rr_mgr_logdbg("new entry %p created successfully", p_ent);
	return p_ent;
}
Example No. 15
int vma_lwip::sockaddr2ipaddr(const sockaddr *__to, socklen_t __tolen, ip_addr_t &ip, uint16_t &port)
{
	NOT_IN_USE(__tolen);
	if (get_sa_family(__to) != AF_INET)
		return -1;

	ip.addr = get_sa_ipv4_addr(__to);
	port = htons(get_sa_port(__to));
	return 0;
}
Example No. 16
route_entry* route_table_mgr::create_new_entry(route_rule_table_key key, const observer *obs)
{
	// no need for lock - lock is activated in cache_collection_mgr::register_observer

	rt_mgr_logdbg("");
	NOT_IN_USE(obs);
	route_entry* p_ent = new route_entry(key);
	update_entry(p_ent, true);
	rt_mgr_logdbg("new entry %p created successfully", p_ent);
	return p_ent;
}
Example No. 17
route_entry* route_table_mgr::create_new_entry(ip_address p_ip, const observer *obs)
{
	// no need for lock - lock is activated in cache_collection_mgr::register_observer

	rt_mgr_logdbg("");
	NOT_IN_USE(obs);
	route_entry* p_rte = new route_entry(p_ip);
	update_entry(p_rte, true);
	rt_mgr_logdbg("new entry %p created successfully", p_rte);
	return p_rte;
}
Example No. 18
void net_device_entry::handle_event_ibverbs_cb(void *ev_data, void *ctx)
{
	NOT_IN_USE(ctx);
	struct ibv_async_event *ibv_event = (struct ibv_async_event*)ev_data;
	nde_logdbg("received ibv_event '%s' (%d)", priv_ibv_event_desc_str(ibv_event->event_type), ibv_event->event_type);
	if (ibv_event->event_type == IBV_EVENT_PORT_ERR || ibv_event->event_type == IBV_EVENT_PORT_ACTIVE) {
		timer_count = 0;
		g_p_event_handler_manager->unregister_timer_event(this, m_timer_handle);
		m_timer_handle = g_p_event_handler_manager->register_timer_event(SLAVE_CHECK_FAST_TIMER_PERIOD_MSEC, this, PERIODIC_TIMER, 0);
	}
}
Example No. 19
bool epoll_wait_call::check_all_offloaded_sockets(uint64_t *p_poll_sn)
{
	NOT_IN_USE(p_poll_sn);
	m_n_all_ready_fds = get_current_events();

	if (!m_n_ready_rfds) {
		// check cq for acks
		ring_poll_and_process_element(&m_poll_sn, NULL);
		m_n_all_ready_fds = get_current_events();
	}

	__log_func("m_n_all_ready_fds=%d, m_n_ready_rfds=%d, m_n_ready_wfds=%d", m_n_all_ready_fds, m_n_ready_rfds, m_n_ready_wfds);
	return m_n_all_ready_fds;
}
Example No. 20
void stats_data_reader::handle_timer_expired(void *ctx)
{
	NOT_IN_USE(ctx);

	if (!should_write()) {
		return;
	}

	stats_read_map_t::iterator iter;
	g_lock_skt_stats.lock();
	for (iter = m_data_map.begin(); iter != m_data_map.end(); iter++) {
		memcpy(SHM_DATA_ADDRESS, LOCAL_OBJECT_DATA, COPY_SIZE);
	}
	g_lock_skt_stats.unlock();
}
Example No. 21
void igmp_handler::handle_timer_expired(void* user_data)
{
	NOT_IN_USE(user_data);
	igmp_hdlr_logdbg("Timeout expired");
	m_timer_handle = NULL;

	if (m_ignore_timer) {
		igmp_hdlr_logdbg("Ignoring timeout handling due to captured IGMP report");
		return;
	}
	igmp_hdlr_logdbg("Sending igmp report");

	if (!tx_igmp_report()) {
		igmp_hdlr_logdbg("Send igmp report failed, registering new timer");
		priv_register_timer_event(this, ONE_SHOT_TIMER, (void*)IGMP_TIMER_ID);
	}
}
Example No. 22
void fd_collection::handle_timer_expired(void* user_data)
{
	sock_fd_api_list_t::iterator itr;
	fdcoll_logfunc();

	lock();

	NOT_IN_USE(user_data);

	for (itr = m_pendig_to_remove_lst.begin(); itr != m_pendig_to_remove_lst.end(); ) {
		if((*itr)->is_closable()) {
			fdcoll_logfunc("Closing:%d", (*itr)->get_fd());
			//The socket is ready to be closed, remove it from the list + delete it
			socket_fd_api* p_sock_fd = *itr;
			itr++;
			m_pendig_to_remove_lst.erase(p_sock_fd);

			if (p_sock_fd) {
				p_sock_fd->clean_obj();
				p_sock_fd = NULL;
			}

			// Deactivate the timer since there are no more pending-to-remove sockets to handle
			if (!m_pendig_to_remove_lst.size()) {
				if (m_timer_handle) {
					g_p_event_handler_manager->unregister_timer_event(this, m_timer_handle);
					m_timer_handle = 0;
				}
			}
		}
		else { //The socket is not closable yet
			sockinfo_tcp* si_tcp = dynamic_cast<sockinfo_tcp*>(*itr);

			if (si_tcp) {
				//In case of TCP socket progress the TCP connection
				fdcoll_logfunc("Call to handler timer of TCP socket:%d", (*itr)->get_fd());
				si_tcp->handle_timer_expired(NULL);
			}
			itr++;
		}
	}

	unlock();
}
Example No. 23
void ip_frag_manager::handle_timer_expired(void* user_data)
{
	NOT_IN_USE(user_data);
	ip_frags_list_t::iterator iter, iter_temp;
	ip_frag_desc_t *desc;
	uint64_t delta = 0;

	lock();
	if (m_frag_counter > IP_FRAG_SPACE) {
		delta = m_frag_counter - IP_FRAG_SPACE;
		m_frag_counter -= delta;
	}

	frag_dbg("calling handle_timer_expired, m_frag_counter=%ld, delta=%ld", m_frag_counter, delta);
	PRINT_STATISTICS();

	iter = m_frags.begin();
	while (iter != m_frags.end()) {
		desc = iter->second;
		desc->frag_counter -= delta;
		if (desc->frag_counter < 0 || (desc->ttl <= 0)) {	// discard this packet
			frag_dbg("expiring packet fragments desc=%p (frag_counter=%d, ttl=%d)", desc, desc->frag_counter, desc->ttl);
			destroy_frag_desc(desc);
			free_frag_desc(desc);
			iter_temp = iter++;
			m_frags.erase(iter_temp);
		}
		else {
			// Only surviving descriptors age; the expired descriptor was freed
			// above and must not be touched again
			--desc->ttl;
			iter++;
		}
	}

	owner_desc_map_t temp_buff_map = m_return_descs;
	m_return_descs.clear();

	PRINT_STATISTICS();
	unlock();

	// Must call cq_mgr outside the lock to avoid ABBA deadlock
	return_buffers_to_owners(temp_buff_map);
}
Example No. 24
void stats_data_reader::handle_timer_expired(void *ctx)
{
	NOT_IN_USE(ctx);

	if (!should_write()) {
		return;
	}

	if (g_sh_mem->fd_dump != STATS_FD_STATISTICS_DISABLED) {
		vma_get_api()->dump_fd_stats(g_sh_mem->fd_dump, g_sh_mem->fd_dump_log_level);
		g_sh_mem->fd_dump = STATS_FD_STATISTICS_DISABLED;
		g_sh_mem->fd_dump_log_level = STATS_FD_STATISTICS_LOG_LEVEL_DEFAULT;
	}
	stats_read_map_t::iterator iter;
	m_lock_data_map.lock();
	for (iter = m_data_map.begin(); iter != m_data_map.end(); iter++) {
		memcpy(SHM_DATA_ADDRESS, LOCAL_OBJECT_DATA, COPY_SIZE);
	}
	m_lock_data_map.unlock();
}
Example No. 25
int socket_fd_api::rx_request_notification(uint64_t poll_sn)
{
	NOT_IN_USE(poll_sn);
	__log_info_funcall("");
	return false;
}
Example No. 26
ssize_t dst_entry_tcp::fast_send(const struct iovec* p_iov, const ssize_t sz_iov, bool b_blocked /*= true*/, bool is_rexmit /*= false*/, bool dont_inline /*= false*/)
{
	tx_packet_template_t* p_pkt;
	mem_buf_desc_t *p_mem_buf_desc;
	size_t total_packet_len = 0;
	// The header is aligned for fast copy but we need to maintain this diff in order to get the real header pointer easily
	size_t hdr_alignment_diff = m_header.m_aligned_l2_l3_len - m_header.m_total_hdr_len;

	tcp_iovec* p_tcp_iov = NULL;
	bool no_copy = true;
	if (likely(sz_iov == 1 && !is_rexmit)) {
		p_tcp_iov = (tcp_iovec*)p_iov;
		if (unlikely(!m_p_ring->is_active_member(p_tcp_iov->p_desc->p_desc_owner, m_id))) {
			no_copy = false;
			dst_tcp_logdbg("p_desc=%p wrong desc_owner=%p, this ring=%p. did migration occurred?", p_tcp_iov->p_desc, p_tcp_iov->p_desc->p_desc_owner, m_p_ring);
			//todo can we handle this in migration (by going over all buffers lwip hold) instead for every send?
		}
	} else {
		no_copy = false;
	}

	if (unlikely(is_rexmit))
		m_p_ring->inc_ring_stats(m_id);

	if (likely(no_copy)) {
		p_pkt = (tx_packet_template_t*)((uint8_t*)p_tcp_iov[0].iovec.iov_base - m_header.m_aligned_l2_l3_len);
		total_packet_len = p_tcp_iov[0].iovec.iov_len + m_header.m_total_hdr_len;
		m_header.copy_l2_ip_hdr(p_pkt);
		// We've copied to aligned address, and now we must update p_pkt to point to real
		// L2 header
		//p_pkt = (tx_packet_template_t*)((uint8_t*)p_pkt + hdr_alignment_diff);
		p_pkt->hdr.m_ip_hdr.tot_len = (htons)(p_tcp_iov[0].iovec.iov_len + m_header.m_ip_header_len);

		m_sge[0].addr = (uintptr_t)((uint8_t*)p_pkt + hdr_alignment_diff);
		m_sge[0].length = total_packet_len;

		/* for DEBUG */
		if ((uint8_t*)m_sge[0].addr < p_tcp_iov[0].p_desc->p_buffer || (uint8_t*)p_pkt < p_tcp_iov[0].p_desc->p_buffer) {
			dst_tcp_logerr("p_buffer - addr=%d, m_total_hdr_len=%zd, p_buffer=%p, type=%d, len=%d, tot_len=%d, payload=%p, hdr_alignment_diff=%zd\n",
					(int)(p_tcp_iov[0].p_desc->p_buffer - (uint8_t*)m_sge[0].addr), m_header.m_total_hdr_len,
					p_tcp_iov[0].p_desc->p_buffer, p_tcp_iov[0].p_desc->lwip_pbuf.pbuf.type,
					p_tcp_iov[0].p_desc->lwip_pbuf.pbuf.len, p_tcp_iov[0].p_desc->lwip_pbuf.pbuf.tot_len,
					p_tcp_iov[0].p_desc->lwip_pbuf.pbuf.payload, hdr_alignment_diff);
		}

		if (!dont_inline && (total_packet_len < m_max_inline)) { // inline send
			m_p_send_wqe = &m_inline_send_wqe;

		} else {
			m_p_send_wqe = &m_not_inline_send_wqe;
		}

		m_p_send_wqe->wr_id = (uintptr_t)p_tcp_iov[0].p_desc;

#ifdef VMA_NO_HW_CSUM
		p_pkt->hdr.m_ip_hdr.check = 0; // use 0 at csum calculation time
		p_pkt->hdr.m_ip_hdr.check = compute_ip_checksum((unsigned short*)&p_pkt->hdr.m_ip_hdr, p_pkt->hdr.m_ip_hdr.ihl * 2);
		struct tcphdr* p_tcphdr = (struct tcphdr*)(((uint8_t*)(&(p_pkt->hdr.m_ip_hdr))+sizeof(p_pkt->hdr.m_ip_hdr)));
		p_tcphdr->check = 0;
		p_tcphdr->check = compute_tcp_checksum(&p_pkt->hdr.m_ip_hdr, (const uint16_t *)p_tcphdr);
		dst_tcp_logfine("using SW checksum calculation: p_pkt->hdr.m_ip_hdr.check=%d, p_tcphdr->check=%d", (int)p_pkt->hdr.m_ip_hdr.check, (int)p_tcphdr->check);
#endif
		m_p_ring->send_lwip_buffer(m_id, m_p_send_wqe, b_blocked);
	}
	else { // We don't support inline in this case, since we believe this is a very rare case
		p_mem_buf_desc = get_buffer(b_blocked);
		if (p_mem_buf_desc == NULL) {
			return -1;
		}

		m_header.copy_l2_ip_hdr((tx_packet_template_t*)p_mem_buf_desc->p_buffer);

		// Actually this is not the real packet len; we will subtract the alignment diff at the end of the copy
		total_packet_len = m_header.m_aligned_l2_l3_len;

		for (int i = 0; i < sz_iov; ++i) {
			memcpy(p_mem_buf_desc->p_buffer + total_packet_len, p_iov[i].iov_base, p_iov[i].iov_len);
			total_packet_len += p_iov[i].iov_len;
		}

		m_sge[0].addr = (uintptr_t)(p_mem_buf_desc->p_buffer + hdr_alignment_diff);
		m_sge[0].length = total_packet_len - hdr_alignment_diff;
		// LKey will be updated in ring->send() // m_sge[0].lkey = p_mem_buf_desc->lkey; 

		/* for DEBUG */
		if ((uint8_t*)m_sge[0].addr < p_mem_buf_desc->p_buffer) {
			dst_tcp_logerr("p_buffer - addr=%d, m_total_hdr_len=%zd, p_buffer=%p, type=%d, len=%d, tot_len=%d, payload=%p, hdr_alignment_diff=%zd\n",
					(int)(p_mem_buf_desc->p_buffer - (uint8_t*)m_sge[0].addr), m_header.m_total_hdr_len,
					p_mem_buf_desc->p_buffer, p_mem_buf_desc->lwip_pbuf.pbuf.type,
					p_mem_buf_desc->lwip_pbuf.pbuf.len, p_mem_buf_desc->lwip_pbuf.pbuf.tot_len,
					p_mem_buf_desc->lwip_pbuf.pbuf.payload, hdr_alignment_diff);
		}

		p_pkt = (tx_packet_template_t*)((uint8_t*)p_mem_buf_desc->p_buffer);
		p_pkt->hdr.m_ip_hdr.tot_len = (htons)(m_sge[0].length - m_header.m_transport_header_len);
#ifdef VMA_NO_HW_CSUM
		p_pkt->hdr.m_ip_hdr.check = 0; // use 0 at csum calculation time
		p_pkt->hdr.m_ip_hdr.check = compute_ip_checksum((unsigned short*)&p_pkt->hdr.m_ip_hdr, p_pkt->hdr.m_ip_hdr.ihl * 2);
		struct tcphdr* p_tcphdr = (struct tcphdr*)(((uint8_t*)(&(p_pkt->hdr.m_ip_hdr))+sizeof(p_pkt->hdr.m_ip_hdr)));
		p_tcphdr->check = 0;
		p_tcphdr->check = compute_tcp_checksum(&p_pkt->hdr.m_ip_hdr, (const uint16_t *)p_tcphdr);
		dst_tcp_logfine("using SW checksum calculation: p_pkt->hdr.m_ip_hdr.check=%d, p_tcphdr->check=%d", (int)p_pkt->hdr.m_ip_hdr.check, (int)p_tcphdr->check);
#endif
		m_p_send_wqe = &m_not_inline_send_wqe;
		m_p_send_wqe->wr_id = (uintptr_t)p_mem_buf_desc;
		m_p_ring->send_ring_buffer(m_id, m_p_send_wqe, b_blocked);
	}

#ifndef __COVERITY__
	struct tcphdr* p_tcp_h = (struct tcphdr*)(((uint8_t*)(&(p_pkt->hdr.m_ip_hdr))+sizeof(p_pkt->hdr.m_ip_hdr)));
	NOT_IN_USE(p_tcp_h); /* to suppress warning in case VMA_OPTIMIZE_LOG */
	dst_tcp_logfunc("Tx TCP segment info: src_port=%d, dst_port=%d, flags='%s%s%s%s%s%s' seq=%u, ack=%u, win=%u, payload_sz=%u",
			ntohs(p_tcp_h->source), ntohs(p_tcp_h->dest),
			p_tcp_h->urg?"U":"", p_tcp_h->ack?"A":"", p_tcp_h->psh?"P":"",
			p_tcp_h->rst?"R":"", p_tcp_h->syn?"S":"", p_tcp_h->fin?"F":"",
			ntohl(p_tcp_h->seq), ntohl(p_tcp_h->ack_seq), ntohs(p_tcp_h->window),
			total_packet_len - p_tcp_h->doff*4 - 34);
#endif

	if (unlikely(m_p_tx_mem_buf_desc_list == NULL)) {
		m_p_tx_mem_buf_desc_list = m_p_ring->mem_buf_tx_get(m_id, b_blocked, m_n_sysvar_tx_bufs_batch_tcp);
	}

	return 0;
}
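Under VMA_NO_HW_CSUM both branches above recompute the IP and TCP checksums in software; compute_ip_checksum() receives ihl * 2 because ihl counts 32-bit words while the sum runs over 16-bit words. The underlying RFC 1071 one's-complement sum looks roughly like this (generic sketch, not libvma's implementation):

#include <stdint.h>
#include <stddef.h>

// RFC 1071 Internet checksum over 16-bit words. Sketch only; assumes an even
// byte count, which holds for IP headers since ihl*4 bytes is always even.
static uint16_t internet_checksum(const uint16_t *data, size_t words)
{
	uint32_t sum = 0;
	while (words--)
		sum += *data++;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16); // fold the carries back in
	return (uint16_t)~sum;
}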
Example No. 27
transport_t dst_entry_tcp::get_transport(sockaddr_in to)
{
	NOT_IN_USE(to);
	return TRANS_VMA;
}
Example No. 28
int socket_fd_api::free_packets(struct vma_packet_t *pkts, size_t count)
{
	NOT_IN_USE(pkts);
	NOT_IN_USE(count);
	return -1;
}
Example No. 29
int socket_fd_api::register_callback(vma_recv_callback_t callback, void *context)
{
	NOT_IN_USE(callback);
	NOT_IN_USE(context);
	return -1;
}
Example No. 30
bool socket_fd_api::is_errorable(int *errors)
{
	NOT_IN_USE(errors);
	__log_info_funcall("");
	return false;
}