uint32_t cq_mgr_mlx5::clean_cq()
{
    uint32_t ret_total = 0;
    uint64_t cq_poll_sn = 0;
    mem_buf_desc_t* buff;

    if (m_b_is_rx) {
        /* The RQ may not have been initialized yet; nothing to drain in that case. */
        if (m_rq) {
            buff_status_e status = BS_OK;
            while ((buff = poll(status))) {
                if (process_cq_element_rx(buff, status)) {
                    m_rx_queue.push_back(buff);
                }
                ++ret_total;
            }
            update_global_sn(cq_poll_sn, ret_total);
        }
    } else { // Tx
        int ret = 0;
        /* coverity[stack_use_local_overflow] */
        vma_ibv_wc wce[MCE_MAX_CQ_POLL_BATCH];
        while ((ret = cq_mgr::poll(wce, MCE_MAX_CQ_POLL_BATCH, &cq_poll_sn)) > 0) {
            for (int i = 0; i < ret; i++) {
                buff = process_cq_element_tx(&wce[i]);
                if (buff) {
                    m_rx_queue.push_back(buff);
                }
            }
            ret_total += ret;
        }
    }

    return ret_total;
}
cq_mgr::~cq_mgr()
{
    cq_logdbg("destroying CQ as %s", (m_b_is_rx ? "Rx" : "Tx"));

    int ret = 0;
    uint32_t ret_total = 0;
    uint64_t cq_poll_sn = 0;
    mem_buf_desc_t* buff = NULL;
    struct ibv_wc wce[MCE_MAX_CQ_POLL_BATCH];

    /* Drain any completions still pending on the CQ before destroying it. */
    while ((ret = poll(wce, MCE_MAX_CQ_POLL_BATCH, &cq_poll_sn)) > 0) {
        for (int i = 0; i < ret; i++) {
            if (m_b_is_rx) {
                buff = process_cq_element_rx(&wce[i]);
            } else {
                buff = process_cq_element_tx(&wce[i]);
            }
            if (buff) {
                m_rx_queue.push_back(buff);
            }
        }
        ret_total += ret;
    }
    m_b_was_drained = true;

    if (ret_total > 0) {
        cq_logdbg("Drained %u wce", ret_total);
    }

    /* Return all held buffers to the global Rx pool; stats are updated after
     * each put, once the local queues have been emptied. */
    if (m_rx_queue.size() + m_rx_pool.size()) {
        cq_logdbg("Returning %zu buffers to global Rx pool (ready queue %zu, free pool %zu)",
                  m_rx_queue.size() + m_rx_pool.size(), m_rx_queue.size(), m_rx_pool.size());

        g_buffer_pool_rx->put_buffers_thread_safe(&m_rx_queue, m_rx_queue.size());
        m_p_cq_stat->n_rx_sw_queue_len = m_rx_queue.size();

        g_buffer_pool_rx->put_buffers_thread_safe(&m_rx_pool, m_rx_pool.size());
        m_p_cq_stat->n_buffer_pool_len = m_rx_pool.size();
    }

    cq_logfunc("destroying ibv_cq");
    IF_VERBS_FAILURE(ibv_destroy_cq(m_p_ibv_cq)) {
        cq_logerr("destroy cq failed (errno=%d %m)", errno);
    } ENDIF_VERBS_FAILURE;

    statistics_print();

    if (m_b_is_rx) {
        vma_stats_instance_remove_cq_block(m_p_cq_stat);
    }

    cq_logdbg("done");
}
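/*
 * Illustrative sketch (not libvma code): the destructor's drain loop above is
 * the generic batch-poll pattern -- poll completions into a fixed-size array,
 * process each returned entry, and repeat until the poll reports 0. The names
 * below (completion, poll_batch, drain_all, BATCH) are hypothetical stand-ins
 * for ibv_wc, cq_mgr::poll() and MCE_MAX_CQ_POLL_BATCH.
 */
#include <vector>

struct completion { unsigned wr_id; };    // stand-in for struct ibv_wc

static std::vector<completion> g_pending; // stand-in for the hardware queue

// Fill up to 'max' entries; return the count written, 0 once drained.
static int poll_batch(completion* wc, int max)
{
    int n = 0;
    while (n < max && !g_pending.empty()) {
        wc[n++] = g_pending.back();
        g_pending.pop_back();
    }
    return n;
}

// Same shape as the destructor's loop: batch, process, accumulate, repeat.
static unsigned drain_all(void (*process)(const completion&))
{
    enum { BATCH = 16 }; // stand-in for MCE_MAX_CQ_POLL_BATCH
    completion wc[BATCH];
    unsigned total = 0;
    int ret;
    while ((ret = poll_batch(wc, BATCH)) > 0) {
        for (int i = 0; i < ret; i++) {
            process(wc[i]);
        }
        total += ret;
    }
    return total;
}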