/**
 * cq_mgr destructor.
 *
 * Drains any work completions still pending on the CQ (so their buffers are
 * recovered), returns all locally queued/pooled Rx buffers to the global Rx
 * buffer pool, destroys the underlying ibv CQ, prints statistics and, for an
 * Rx CQ, unregisters its statistics block.
 *
 * BUGFIX: the original log calls passed size_t values (queue/pool sizes) and
 * a uint32_t counter through "%d". With a printf-style variadic logger that is
 * undefined behavior on LP64 platforms where size_t is 64-bit; the format
 * specifiers are corrected to "%zu" / "%u".
 */
cq_mgr::~cq_mgr()
{
	cq_logdbg("destroying CQ as %s", (m_b_is_rx ? "Rx" : "Tx"));

	int ret = 0;
	uint32_t ret_total = 0;
	uint64_t cq_poll_sn = 0;
	mem_buf_desc_t* buff = NULL;
	struct ibv_wc wce[MCE_MAX_CQ_POLL_BATCH];

	// Poll the CQ until empty so no completed buffer is leaked; processed
	// buffers (Rx or Tx) are collected on m_rx_queue for release below.
	while ((ret = poll(wce, MCE_MAX_CQ_POLL_BATCH, &cq_poll_sn)) > 0) {
		for (int i = 0; i < ret; i++) {
			if (m_b_is_rx) {
				buff = process_cq_element_rx(&wce[i]);
			} else {
				buff = process_cq_element_tx(&wce[i]);
			}
			if (buff)
				m_rx_queue.push_back(buff);
		}
		ret_total += ret;
	}
	m_b_was_drained = true;
	if (ret_total > 0) {
		cq_logdbg("Drained %u wce", ret_total);
	}

	if (m_rx_queue.size() + m_rx_pool.size()) {
		cq_logdbg("Returning %zu buffers to global Rx pool (ready queue %zu, free pool %zu))",
			  m_rx_queue.size() + m_rx_pool.size(), m_rx_queue.size(), m_rx_pool.size());

		g_buffer_pool_rx->put_buffers_thread_safe(&m_rx_queue, m_rx_queue.size());
		// Stats are refreshed after the hand-off, presumably capturing the
		// post-return (emptied) sizes -- TODO confirm against stats reader.
		m_p_cq_stat->n_rx_sw_queue_len = m_rx_queue.size();

		g_buffer_pool_rx->put_buffers_thread_safe(&m_rx_pool, m_rx_pool.size());
		m_p_cq_stat->n_buffer_pool_len = m_rx_pool.size();
	}

	cq_logfunc("destroying ibv_cq");
	IF_VERBS_FAILURE(ibv_destroy_cq(m_p_ibv_cq)) {
		cq_logerr("destroy cq failed (errno=%d %m)", errno);
	} ENDIF_VERBS_FAILURE;

	statistics_print();
	if (m_b_is_rx)
		vma_stats_instance_remove_cq_block(m_p_cq_stat);

	cq_logdbg("done");
}
inline void cq_mgr_mlx5::cqe64_to_mem_buff_desc(struct mlx5_cqe64 *cqe, mem_buf_desc_t* p_rx_wc_buf_desc, enum buff_status_e &status) { struct mlx5_err_cqe *ecqe; ecqe = (struct mlx5_err_cqe *)cqe; switch (MLX5_CQE_OPCODE(cqe->op_own)) { case MLX5_CQE_RESP_WR_IMM: cq_logerr("IBV_WC_RECV_RDMA_WITH_IMM is not supported"); status = BS_CQE_RESP_WR_IMM_NOT_SUPPORTED; break; case MLX5_CQE_RESP_SEND: case MLX5_CQE_RESP_SEND_IMM: case MLX5_CQE_RESP_SEND_INV: { status = BS_OK; p_rx_wc_buf_desc->rx.hw_raw_timestamp = ntohll(cqe->timestamp); p_rx_wc_buf_desc->rx.flow_tag_id = vma_get_flow_tag(cqe); #ifdef DEFINED_MLX5_HW_ETH_WQE_HEADER p_rx_wc_buf_desc->rx.is_sw_csum_need = !(m_b_is_rx_hw_csum_on && (cqe->hds_ip_ext & MLX5_CQE_L4_OK) && (cqe->hds_ip_ext & MLX5_CQE_L3_OK)); #else p_rx_wc_buf_desc->rx.is_sw_csum_need = !m_b_is_rx_hw_csum_on; /* we assume that the checksum is ok */ #endif p_rx_wc_buf_desc->sz_data = ntohl(cqe->byte_cnt); return; } case MLX5_CQE_INVALID: /* No cqe!*/ { cq_logerr("We should no receive a buffer without a cqe\n"); status = BS_CQE_INVALID; break; } case MLX5_CQE_REQ: case MLX5_CQE_SIG_ERR: case MLX5_CQE_REQ_ERR: case MLX5_CQE_RESP_ERR: default: { if (MLX5_CQE_SYNDROME_WR_FLUSH_ERR == ecqe->syndrome) { status = BS_IBV_WC_WR_FLUSH_ERR; } else { status = BS_GENERAL_ERR; } /* IB compliant completion with error syndrome: 0x1: Local_Length_Error 0x2: Local_QP_Operation_Error 0x4: Local_Protection_Error 0x5: Work_Request_Flushed_Error 0x6: Memory_Window_Bind_Error 0x10: Bad_Response_Error 0x11: Local_Access_Error 0x12: Remote_Invalid_Request_Error 0x13: Remote_Access_Error 0x14: Remote_Operation_Error 0x15: Transport_Retry_Counter_Exceeded 0x16: RNR_Retry_Counter_Exceeded 0x22: Aborted_Error other: Reserved */ break; } } }