bool rfs::add_sink(pkt_rcvr_sink* p_sink)
{
    uint32_t i;

    rfs_logfunc("called with sink (%p)", p_sink);

    // Check if the sink is already registered in the sinks list array
    for (i = 0; i < m_n_sinks_list_entries; ++i) {
        if (m_sinks_list[i] == p_sink) {
            rfs_logdbg("sink (%p) already registered!!!", p_sink);
            return true;
        }
    }

    if (m_n_sinks_list_entries == m_n_sinks_list_max_length) {
        // Sinks list array is full - reallocate a new array with double the size
        uint32_t tmp_sinks_list_length = 2 * m_n_sinks_list_max_length;
        pkt_rcvr_sink** tmp_sinks_list = new pkt_rcvr_sink*[tmp_sinks_list_length];
        BULLSEYE_EXCLUDE_BLOCK_START
        if (tmp_sinks_list == NULL) {
            rfs_logpanic("sinks list allocation failed!");
        }
        BULLSEYE_EXCLUDE_BLOCK_END
        memcpy(tmp_sinks_list, m_sinks_list, sizeof(pkt_rcvr_sink*) * m_n_sinks_list_max_length);
        delete[] m_sinks_list;
        m_sinks_list = tmp_sinks_list;
        m_n_sinks_list_max_length = tmp_sinks_list_length;
    }

    // Register the new sink in the next free slot
    m_sinks_list[m_n_sinks_list_entries] = p_sink;
    ++m_n_sinks_list_entries;
    return true;
}
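/*
 * Illustrative sketch (not part of the VMA sources, all names hypothetical):
 * the same grow-by-doubling pattern add_sink() uses above, shown standalone
 * on a plain pointer array.
 */
#include <cstdint>
#include <cstring>

static void grow_ptr_array(void**& arr, uint32_t& max_len)
{
    uint32_t new_len = 2 * max_len;                 // double the capacity
    void** new_arr = new void*[new_len];
    memcpy(new_arr, arr, sizeof(void*) * max_len);  // keep the existing entries
    delete[] arr;                                   // release the old storage
    arr = new_arr;
    max_len = new_len;
}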
rfs_uc::rfs_uc(flow_tuple *flow_spec_5t, ring *p_ring, rfs_rule_filter* rule_filter /*= NULL*/) :
    rfs(flow_spec_5t, p_ring, rule_filter)
{
    BULLSEYE_EXCLUDE_BLOCK_START
    if (m_flow_tuple.is_udp_mc()) {
        rfs_logpanic("rfs: rfs_uc called with MC destination ip");
    }
    BULLSEYE_EXCLUDE_BLOCK_END

    prepare_flow_spec();
}
void rfs_uc::prepare_flow_spec()
{
    transport_type_t type = m_p_ring->get_transport_type();

    ring_resources_map_t::iterator ring_resource_iter = m_p_ring->m_ring_resources_map.begin();
    for (; ring_resource_iter != m_p_ring->m_ring_resources_map.end(); ring_resource_iter++) {
        attach_flow_data_t* p_attach_flow_data = NULL;
        attach_flow_data_ib_ipv4_tcp_udp_t* attach_flow_data_ib = NULL;
        attach_flow_data_eth_ipv4_tcp_udp_t* attach_flow_data_eth = NULL;
        vma_ibv_flow_spec_ipv4* p_ipv4 = NULL;
        vma_ibv_flow_spec_tcp_udp* p_tcp_udp = NULL;

        switch (type) {
        case VMA_TRANSPORT_IB:
            attach_flow_data_ib = new attach_flow_data_ib_ipv4_tcp_udp_t(ring_resource_iter->second.m_p_qp_mgr);
#ifdef DEFINED_IBV_FLOW_SPEC_IB
            ibv_flow_spec_ib_set_by_dst_qpn(&(attach_flow_data_ib->ibv_flow_attr.ib),
                    htonl(((IPoIB_addr*)ring_resource_iter->first.get_l2_addr())->get_qpn()));
#endif
            p_ipv4 = &(attach_flow_data_ib->ibv_flow_attr.ipv4);
            p_tcp_udp = &(attach_flow_data_ib->ibv_flow_attr.tcp_udp);
            p_attach_flow_data = (attach_flow_data_t*)attach_flow_data_ib;
            break;
        case VMA_TRANSPORT_ETH:
            attach_flow_data_eth = new attach_flow_data_eth_ipv4_tcp_udp_t(ring_resource_iter->second.m_p_qp_mgr);
            ibv_flow_spec_eth_set(&(attach_flow_data_eth->ibv_flow_attr.eth),
                    ring_resource_iter->first.get_l2_addr()->get_address(),
                    htons(ring_resource_iter->second.m_p_qp_mgr->get_partiton()));
            p_ipv4 = &(attach_flow_data_eth->ibv_flow_attr.ipv4);
            p_tcp_udp = &(attach_flow_data_eth->ibv_flow_attr.tcp_udp);
            p_attach_flow_data = (attach_flow_data_t*)attach_flow_data_eth;
            break;
        BULLSEYE_EXCLUDE_BLOCK_START
        default:
            rfs_logpanic("Incompatible transport type = %d", type);
            break;
        BULLSEYE_EXCLUDE_BLOCK_END
        }

        ibv_flow_spec_ipv4_set(p_ipv4, m_flow_tuple.get_dst_ip(), m_flow_tuple.get_src_ip());
        ibv_flow_spec_tcp_udp_set(p_tcp_udp, (m_flow_tuple.get_protocol() == PROTO_TCP),
                m_flow_tuple.get_dst_port(), m_flow_tuple.get_src_port());

        m_attach_flow_data_vector.push_back(p_attach_flow_data);
    }
}
rfs::rfs(flow_tuple *flow_spec_5t, ring_simple *p_ring, rfs_rule_filter* rule_filter /*= NULL*/, uint32_t flow_tag_id /*=0*/) :
    m_flow_tuple(rule_filter ? rule_filter->m_flow_tuple : *flow_spec_5t),
    m_p_ring(p_ring),
    m_p_rule_filter(rule_filter),
    m_n_sinks_list_entries(0),
    m_n_sinks_list_max_length(RFS_SINKS_LIST_DEFAULT_LEN),
    m_flow_tag_id(flow_tag_id),
    m_b_tmp_is_attached(false)
{
    m_sinks_list = new pkt_rcvr_sink*[m_n_sinks_list_max_length];
    BULLSEYE_EXCLUDE_BLOCK_START
    if (m_sinks_list == NULL) {
        rfs_logpanic("sinks list allocation failed!");
    }
    BULLSEYE_EXCLUDE_BLOCK_END
    memset(m_sinks_list, 0, sizeof(pkt_rcvr_sink*) * m_n_sinks_list_max_length);
}
rfs::rfs(flow_tuple *flow_spec_5t, ring *p_ring) :
    m_flow_tuple(*flow_spec_5t),
    m_p_ring(p_ring),
    m_n_sinks_list_entries(0),
    m_n_sinks_list_max_length(RFS_SINKS_LIST_DEFAULT_LEN),
    m_b_tmp_is_attached(false)
{
    m_sinks_list = new pkt_rcvr_sink*[m_n_sinks_list_max_length];
    BULLSEYE_EXCLUDE_BLOCK_START
    if (m_sinks_list == NULL) {
        rfs_logpanic("sinks list allocation failed!");
    }
    BULLSEYE_EXCLUDE_BLOCK_END
    memset(m_sinks_list, 0, sizeof(pkt_rcvr_sink*) * m_n_sinks_list_max_length);
}
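/*
 * Illustrative sketch (assumption, not the actual rfs destructor): the
 * allocations made in these constructors and in prepare_flow_spec() imply
 * matching cleanup roughly along these lines.
 */
rfs::~rfs()
{
    while (m_attach_flow_data_vector.size() > 0) {
        delete m_attach_flow_data_vector.back();    // entries allocated in prepare_flow_spec()
        m_attach_flow_data_vector.pop_back();
    }
    delete[] m_sinks_list;                          // array allocated in the constructor
}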
void rfs_uc::prepare_flow_spec()
{
    transport_type_t type = m_p_ring->get_transport_type();

    /*
     * TODO: note that the ring is not locked here.
     * We only touch members that should not change during the ring's lifetime,
     * and the ring will not be deleted because we increased its refcnt.
     * If either of these assumptions changes, we must take the lock.
     */
    ring_resources_map_t::iterator ring_resource_iter = m_p_ring->m_ring_resources_map.begin();
    for (; ring_resource_iter != m_p_ring->m_ring_resources_map.end(); ring_resource_iter++) {
        attach_flow_data_t* p_attach_flow_data = NULL;
        attach_flow_data_ib_ipv4_tcp_udp_t* attach_flow_data_ib = NULL;
        attach_flow_data_eth_ipv4_tcp_udp_t* attach_flow_data_eth = NULL;
        vma_ibv_flow_spec_ipv4* p_ipv4 = NULL;
        vma_ibv_flow_spec_tcp_udp* p_tcp_udp = NULL;

        switch (type) {
        case VMA_TRANSPORT_IB:
            attach_flow_data_ib = new attach_flow_data_ib_ipv4_tcp_udp_t(ring_resource_iter->second.m_p_qp_mgr);
#ifdef DEFINED_IBV_FLOW_SPEC_IB
            ibv_flow_spec_ib_set_by_dst_qpn(&(attach_flow_data_ib->ibv_flow_attr.ib),
                    htonl(((IPoIB_addr*)ring_resource_iter->first.get_l2_addr())->get_qpn()));
#endif
            p_ipv4 = &(attach_flow_data_ib->ibv_flow_attr.ipv4);
            p_tcp_udp = &(attach_flow_data_ib->ibv_flow_attr.tcp_udp);
            p_attach_flow_data = (attach_flow_data_t*)attach_flow_data_ib;
            break;
        case VMA_TRANSPORT_ETH:
            attach_flow_data_eth = new attach_flow_data_eth_ipv4_tcp_udp_t(ring_resource_iter->second.m_p_qp_mgr);
            ibv_flow_spec_eth_set(&(attach_flow_data_eth->ibv_flow_attr.eth),
                    ring_resource_iter->first.get_l2_addr()->get_address(),
                    htons(ring_resource_iter->second.m_p_qp_mgr->get_partiton()));
            p_ipv4 = &(attach_flow_data_eth->ibv_flow_attr.ipv4);
            p_tcp_udp = &(attach_flow_data_eth->ibv_flow_attr.tcp_udp);
            p_attach_flow_data = (attach_flow_data_t*)attach_flow_data_eth;
            break;
        BULLSEYE_EXCLUDE_BLOCK_START
        default:
            rfs_logpanic("Incompatible transport type = %d", type);
            break;
        BULLSEYE_EXCLUDE_BLOCK_END
        }

        ibv_flow_spec_ipv4_set(p_ipv4, m_flow_tuple.get_dst_ip(), m_flow_tuple.get_src_ip());
        ibv_flow_spec_tcp_udp_set(p_tcp_udp, (m_flow_tuple.get_protocol() == PROTO_TCP),
                m_flow_tuple.get_dst_port(), m_flow_tuple.get_src_port());

        if (m_flow_tuple.get_src_port() || m_flow_tuple.get_src_ip()) {
            // Give a 5-tuple rule a higher priority than a 3-tuple rule,
            // to make sure 5-tuple matches win on ConnectX-4
            p_attach_flow_data->ibv_flow_attr.priority = 0;
        }

        m_attach_flow_data_vector.push_back(p_attach_flow_data);
    }
}
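/*
 * Illustrative sketch (assumption, not the VMA attach path): once
 * prepare_flow_spec() has filled m_attach_flow_data_vector, each prepared
 * flow attribute block is eventually handed to the verbs layer.  The real
 * code goes through qp_mgr and the VMA ibverbs wrappers; the helper below
 * only shows the underlying ibv_create_flow() call on a plain
 * struct ibv_flow_attr.
 */
#include <infiniband/verbs.h>

static struct ibv_flow* attach_prepared_rule(struct ibv_qp* qp, struct ibv_flow_attr* attr)
{
    // Returns NULL on failure; the caller keeps the handle so the rule can
    // later be removed with ibv_destroy_flow().
    return ibv_create_flow(qp, attr);
}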