// Lazily create (or find) the per-net_device RX resources for a local IP:
// registers this socket as an observer on the net_device and reserves an RX ring.
// On any failure the function jumps to the 'err' cleanup label (defined past the
// end of this excerpt). NOTE(review): the tail of this function (err label,
// refcnt handling, return of p_nd_resources) is not visible in this chunk.
net_device_resources_t* sockinfo::create_nd_resources(const ip_address ip_local) { net_device_resources_t* p_nd_resources = NULL; // Check if we are already registered to net_device with the local ip as observers
rx_net_device_map_t::iterator rx_nd_iter = m_rx_nd_map.find(ip_local.get_in_addr()); if (rx_nd_iter == m_rx_nd_map.end()) { // Need to register as observer to net_device
// Build a zero-initialized resources entry; fields are filled in step by step below.
net_device_resources_t nd_resources; nd_resources.refcnt = 0; nd_resources.p_nde = NULL; nd_resources.p_ndv = NULL; nd_resources.p_ring = NULL; BULLSEYE_EXCLUDE_BLOCK_START cache_entry_subject<ip_address, net_device_val*>* p_ces = NULL;
// Register as observer on the net_device table for this local IP; p_ces receives the cache entry.
if (!g_p_net_device_table_mgr->register_observer(ip_local, &m_rx_nd_observer, &p_ces)) { si_logdbg("Failed registering as observer for local ip %s", ip_local.to_str().c_str()); goto err; } nd_resources.p_nde = (net_device_entry*)p_ces; if (!nd_resources.p_nde) { si_logerr("Got NULL net_devide_entry for local ip %s", ip_local.to_str().c_str()); goto err; }
// get_val() fails when the interface is not offloaded by VMA.
if (!nd_resources.p_nde->get_val(nd_resources.p_ndv)) { si_logerr("Got net_device_val=NULL (interface is not offloaded) for local ip %s", ip_local.to_str().c_str()); goto err; }
// Drop the RX queue lock while taking the ring-map lock to keep lock ordering consistent
// (presumably rx_q lock must not be held while acquiring m_rx_ring_map_lock — TODO confirm).
unlock_rx_q(); m_rx_ring_map_lock.lock(); resource_allocation_key *key;
// Reuse the current allocation key when rings already exist and the logic supports
// migration; otherwise mint a new key for this local IP.
if (m_rx_ring_map.size() && m_ring_alloc_logic.is_logic_support_migration()) { key = m_ring_alloc_logic.get_key(); } else { key = m_ring_alloc_logic.create_new_key(ip_local.get_in_addr()); } nd_resources.p_ring = nd_resources.p_ndv->reserve_ring(key); m_rx_ring_map_lock.unlock(); lock_rx_q(); if (!nd_resources.p_ring) { si_logdbg("Failed to reserve ring for allocation key %s on ip %s", m_ring_alloc_logic.get_key()->to_str(), ip_local.to_str().c_str()); goto err; } // Add new net_device to rx_map
m_rx_nd_map[ip_local.get_in_addr()] = nd_resources;
// Re-find the freshly inserted entry so the iterator below is valid for both paths.
rx_nd_iter = m_rx_nd_map.find(ip_local.get_in_addr()); if (rx_nd_iter == m_rx_nd_map.end()) { si_logerr("Failed to find rx_nd_iter"); goto err; } BULLSEYE_EXCLUDE_BLOCK_END }
int sockinfo::rx_wait_helper(int &poll_count, bool is_blocking) { int ret; uint64_t poll_sn = 0; epoll_event rx_epfd_events[SI_RX_EPFD_EVENT_MAX]; rx_ring_map_t::iterator rx_ring_iter; // poll for completion si_logfunc(""); poll_count++; for (rx_ring_iter = m_rx_ring_map.begin(); rx_ring_iter != m_rx_ring_map.end(); rx_ring_iter++) { //BULLSEYE_EXCLUDE_BLOCK_START if (unlikely(rx_ring_iter->second->refcnt <= 0)) { si_logerr("Attempted to poll illegal cq"); continue; } //BULLSEYE_EXCLUDE_BLOCK_END ret = rx_ring_iter->first->poll_and_process_element_rx(&poll_sn); if (ret > 0) { si_logfuncall("got %d elements sn=%llu", ret, (unsigned long long)poll_sn); return ret; } } if (poll_count < m_n_sysvar_rx_poll_num || m_n_sysvar_rx_poll_num == -1) { return 0; } // if we polling too much - go to sleep si_logfunc("too many polls without data blocking=%d", is_blocking); if (g_b_exit) return -1; if (!is_blocking) { /* if we are in non blocking mode - return EAGAIN */ errno = EAGAIN; return -1; } for (rx_ring_iter = m_rx_ring_map.begin(); rx_ring_iter != m_rx_ring_map.end(); rx_ring_iter++) { if (rx_ring_iter->second->refcnt <= 0) { continue; } // coverity[check_return] rx_ring_iter->first->request_notification(CQT_RX, poll_sn); } ret = orig_os_api.epoll_wait(m_rx_epfd, rx_epfd_events, SI_RX_EPFD_EVENT_MAX, -1); if (ret < 0) return -1; if (ret == 0) return 0; for (int event_idx = 0; event_idx < ret; ++event_idx) { int cq_channel_fd = rx_epfd_events[event_idx].data.fd; cq_channel_info* p_cq_ch_info = g_p_fd_collection->get_cq_channel_fd(cq_channel_fd); if (p_cq_ch_info) { ring* p_ring = p_cq_ch_info->get_ring(); if (p_ring) { p_ring->wait_for_notification_and_process_element(cq_channel_fd, &poll_sn); } } // TODO: need to handle wakeup } return 0; }
// Attach this socket as a receiver for the given 5-tuple flow: rejects local
// loopback and duplicate attachments, then lazily creates the per-net_device RX
// resources (observer registration + ring reservation) for the flow's local IP.
// NOTE(review): this excerpt ends at the close of the resource-creation branch;
// the actual flow-attach logic and the function's successful return are not
// visible here.
bool sockinfo::attach_receiver(flow_tuple_with_local_if &flow_key) { // This function should be called from within mutex protected context of the sockinfo!!!
si_logdbg("Attaching to %s", flow_key.to_str()); // Protect against local loopback used as local_if & peer_ip
// rdma_cm will accept it but we don't want to offload it
if (flow_key.is_local_loopback()) { si_logdbg("VMA does not offload local loopback IP address"); return false; }
// Idempotence guard: a flow may only be attached once per socket.
if (m_rx_flow_map.find(flow_key) != m_rx_flow_map.end()) { si_logdbg("already attached %s", flow_key.to_str()); return false; } net_device_resources_t* p_nd_resources = NULL; // Check if we are already registered to net_device with the local ip as observers
ip_address ip_local(flow_key.get_local_if()); rx_net_device_map_t::iterator rx_nd_iter = m_rx_nd_map.find(ip_local.get_in_addr()); if (rx_nd_iter == m_rx_nd_map.end()) { // Need to register as observer to net_device
net_device_resources_t nd_resources; nd_resources.refcnt = 0; nd_resources.p_nde = NULL; nd_resources.p_ndv = NULL; nd_resources.p_ring = NULL; BULLSEYE_EXCLUDE_BLOCK_START cache_entry_subject<ip_address, net_device_val*>* p_ces = NULL; if (!g_p_net_device_table_mgr->register_observer(ip_local, &m_rx_nd_observer, &p_ces)) { si_logdbg("Failed registering as observer for local ip %s", ip_local.to_str().c_str()); return false; } nd_resources.p_nde = (net_device_entry*)p_ces;
// NOTE(review): from here on, every error path returns false WITHOUT
// unregistering the observer registered above — the sibling
// create_nd_resources() uses 'goto err' cleanup instead; looks like a
// resource leak — confirm against the cleanup path in the full file.
if (!nd_resources.p_nde) { si_logerr("Got NULL net_devide_entry for local ip %s", ip_local.to_str().c_str()); return false; }
// get_val() fails when the interface is not offloaded by VMA.
if (!nd_resources.p_nde->get_val(nd_resources.p_ndv)) { si_logerr("Got net_device_val=NULL (interface is not offloaded) for local ip %s", ip_local.to_str().c_str()); return false; }
// Drop the RX queue lock while taking the ring-map lock — presumably to keep
// lock ordering consistent with the rest of the RX path; TODO confirm.
unlock_rx_q(); m_rx_ring_map_lock.lock(); resource_allocation_key key = 0;
// Reuse the existing allocation key when rings already exist; otherwise mint
// a new one. NOTE(review): create_nd_resources() treats the key as a pointer
// (resource_allocation_key*) — here it is a value initialized from 0; verify
// the two call sites agree with the current resource_allocation_key typedef.
if (m_rx_ring_map.size()) { key = m_ring_alloc_logic.get_key(); } else { key = m_ring_alloc_logic.create_new_key(); } nd_resources.p_ring = nd_resources.p_ndv->reserve_ring(key); m_rx_ring_map_lock.unlock(); lock_rx_q(); if
// NOTE(review): "%d" below formats m_ring_alloc_logic.get_key() — the sibling
// function prints it via get_key()->to_str() with "%s"; if the key is a
// pointer/object this format specifier is wrong — confirm.
(!nd_resources.p_ring) { si_logdbg("Failed to reserve ring for allocation key %d on lip %s", m_ring_alloc_logic.get_key(), ip_local.to_str().c_str()); return false; } // Add new net_device to rx_map
m_rx_nd_map[ip_local.get_in_addr()] = nd_resources;
// Re-find the freshly inserted entry so the iterator is valid for both paths.
rx_nd_iter = m_rx_nd_map.find(ip_local.get_in_addr()); if (rx_nd_iter == m_rx_nd_map.end()) { si_logerr("Failed to find rx_nd_iter"); return false; } BULLSEYE_EXCLUDE_BLOCK_END }