/*
 * cq_mgr constructor.
 * Creates the underlying verbs CQ on the given device context, resets all
 * bookkeeping counters, resolves the transport-header size from the ring's
 * transport type, and (for an Rx CQ) publishes a stats block.
 * Panics (via cq_logpanic) if CQ creation fails or the transport is unknown.
 */
cq_mgr::cq_mgr(ring* p_ring, ib_ctx_handler* p_ib_ctx_handler, int cq_size,
	       struct ibv_comp_channel* p_comp_event_channel, bool is_rx) :
	m_p_ring(p_ring),
	m_p_ib_ctx_handler(p_ib_ctx_handler),
	m_b_is_rx(is_rx),
	m_comp_event_channel(p_comp_event_channel),
	m_p_next_rx_desc_poll(NULL)
{
	cq_logfunc("");

	// Reset poll/drain bookkeeping.
	m_n_wce_counter = 0;
	m_b_was_drained = false;
	m_b_notification_armed = false;
	m_n_out_of_free_bufs_warning = 0;
	m_n_cq_poll_sn = 0;

	m_cq_id = atomic_fetch_and_inc(&m_n_cq_id_counter); // cq id is nonzero

	m_transport_type = m_p_ring->get_transport_type();

	// Allocate the verbs CQ; 'this' is stored as the cq_context cookie.
	m_p_ibv_cq = ibv_create_cq(m_p_ib_ctx_handler->get_ibv_context(), cq_size,
				   (void*)this, m_comp_event_channel, 0);
	BULLSEYE_EXCLUDE_BLOCK_START
	if (!m_p_ibv_cq) {
		cq_logpanic("ibv_create_cq failed (errno=%d %m)", errno);
	}
	BULLSEYE_EXCLUDE_BLOCK_END

	// use local copy of stats by default (on rx cq get shared memory stats)
	m_p_cq_stat = &m_cq_stat_static;
	memset(m_p_cq_stat, 0, sizeof(*m_p_cq_stat));

	m_buffer_miss_count = 0;
	m_buffer_total_count = 0;
	m_buffer_prev_id = 0;

	// Bytes of transport header that precede the IP packet in each buffer.
	m_sz_transport_header = 0;
	switch (m_transport_type) {
	case VMA_TRANSPORT_IB:
		m_sz_transport_header = GRH_HDR_LEN;
		break;
	case VMA_TRANSPORT_ETH:
		m_sz_transport_header = ETH_HDR_LEN;
		break;
	BULLSEYE_EXCLUDE_BLOCK_START
	default:
		cq_logpanic("Unknown transport type: %d", m_transport_type);
		break;
	BULLSEYE_EXCLUDE_BLOCK_END
	}

	if (m_b_is_rx)
		vma_stats_instance_create_cq_block(m_p_cq_stat);

	cq_logdbg("Created CQ as %s with fd[%d] and of size %d elements (ibv_cq_hndl=%p)",
		  (m_b_is_rx ? "Rx" : "Tx"), get_channel_fd(), cq_size, m_p_ibv_cq);
}
/*
 * cq_mgr destructor.
 * Drains every work completion still pending on the CQ (so no mem_buf_desc_t
 * is leaked), returns all locally queued/pooled Rx buffers to the global
 * buffer pool, destroys the verbs CQ, prints statistics and (for an Rx CQ)
 * unregisters the stats block.
 *
 * Fixes vs. previous version (logging only, no behavioral change):
 *  - std::list::size() returns size_t; passing it to %d is undefined on
 *    LP64 platforms -> use %zu.
 *  - ret_total is uint32_t -> use %u.
 *  - removed a stray extra ')' from the buffer-return log message.
 */
cq_mgr::~cq_mgr()
{
	cq_logdbg("destroying CQ as %s", (m_b_is_rx ? "Rx" : "Tx"));

	int ret = 0;
	uint32_t ret_total = 0;
	uint64_t cq_poll_sn = 0;
	mem_buf_desc_t* buff = NULL;
	struct ibv_wc wce[MCE_MAX_CQ_POLL_BATCH];

	// Drain all outstanding completions; any buffer a completion hands
	// back is parked on m_rx_queue so it can be returned to the pool below.
	while ((ret = poll(wce, MCE_MAX_CQ_POLL_BATCH, &cq_poll_sn)) > 0) {
		for (int i = 0; i < ret; i++) {
			if (m_b_is_rx) {
				buff = process_cq_element_rx(&wce[i]);
			} else {
				buff = process_cq_element_tx(&wce[i]);
			}
			if (buff)
				m_rx_queue.push_back(buff);
		}
		ret_total += ret;
	}
	m_b_was_drained = true;
	if (ret_total > 0) {
		cq_logdbg("Drained %u wce", ret_total);
	}

	if (m_rx_queue.size() + m_rx_pool.size()) {
		cq_logdbg("Returning %zu buffers to global Rx pool (ready queue %zu, free pool %zu)",
			  m_rx_queue.size() + m_rx_pool.size(), m_rx_queue.size(), m_rx_pool.size());

		g_buffer_pool_rx->put_buffers_thread_safe(&m_rx_queue, m_rx_queue.size());
		// Record the post-return queue length (expected to be emptied).
		m_p_cq_stat->n_rx_sw_queue_len = m_rx_queue.size();

		g_buffer_pool_rx->put_buffers_thread_safe(&m_rx_pool, m_rx_pool.size());
		m_p_cq_stat->n_buffer_pool_len = m_rx_pool.size();
	}

	cq_logfunc("destroying ibv_cq");
	IF_VERBS_FAILURE(ibv_destroy_cq(m_p_ibv_cq)) {
		cq_logerr("destroy cq failed (errno=%d %m)", errno);
	} ENDIF_VERBS_FAILURE;

	statistics_print();

	if (m_b_is_rx)
		vma_stats_instance_remove_cq_block(m_p_cq_stat);

	cq_logdbg("done");
}
/*
 * cq_mgr_mlx5 constructor.
 * Delegates all generic CQ setup to the cq_mgr base class and
 * zero/NULL-initializes the mlx5 direct-access members (CQE ring pointers,
 * doorbell, consumer index, hot-path caches).
 */
cq_mgr_mlx5::cq_mgr_mlx5(ring_simple* p_ring, ib_ctx_handler* p_ib_ctx_handler,
			 uint32_t cq_size, struct ibv_comp_channel* p_comp_event_channel,
			 bool is_rx, bool call_configure) :
	cq_mgr(p_ring, p_ib_ctx_handler, cq_size, p_comp_event_channel, is_rx, call_configure),
	m_cq_size(cq_size),
	m_cq_cons_index(0),
	m_cqes(NULL),
	m_cq_dbell(NULL),
	m_rq(NULL),
	m_cqe_log_sz(0),
	m_rx_hot_buffer(NULL),
	m_p_rq_wqe_idx_to_wrid(NULL),
	m_qp(NULL),
	m_mlx5_cq(NULL)
{
	cq_logfunc("");
}
/*
 * cq_mgr_mlx5 destructor.
 * Only drops the non-owning RQ pointer; draining and verbs-CQ teardown are
 * the responsibility of the cq_mgr base-class destructor.
 */
cq_mgr_mlx5::~cq_mgr_mlx5()
{
	cq_logfunc("");
	cq_logdbg("destroying CQ as %s", (m_b_is_rx ? "Rx" : "Tx"));
	m_rq = NULL;
}