Example #1
uint32_t cq_mgr_mlx5::clean_cq()
{
	uint32_t ret_total = 0;
	uint64_t cq_poll_sn = 0;
	mem_buf_desc_t* buff;

	if (m_b_is_rx) {
		if (m_rq) {
			buff_status_e status = BS_OK;
			while ((buff = poll(status))) {
				if (process_cq_element_rx(buff, status)) {
					m_rx_queue.push_back(buff);
				}
				++ret_total;
			}
			update_global_sn(cq_poll_sn, ret_total);
		}
	} else { // Tx: drain via the generic cq_mgr::poll() batch path
		int ret = 0;
		/* coverity[stack_use_local_overflow] */
		vma_ibv_wc wce[MCE_MAX_CQ_POLL_BATCH];
		while ((ret = cq_mgr::poll(wce, MCE_MAX_CQ_POLL_BATCH, &cq_poll_sn)) > 0) {
			for (int i = 0; i < ret; i++) {
				buff = process_cq_element_tx(&wce[i]);
				if (buff)
					m_rx_queue.push_back(buff);
			}
			ret_total += ret;
		}
	}

	return ret_total;
}
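The Tx branch above illustrates a common drain pattern: poll completions in fixed-size batches into a local array, process each entry, and keep going until the poll returns zero. Below is a minimal, self-contained sketch of that loop shape, assuming hypothetical stand-in types (Completion, FakeCq, kBatchSize) invented for illustration; it only mirrors the structure of the Tx path, not the library's API.

#include <array>
#include <cstddef>
#include <cstdio>
#include <deque>

// Hypothetical completion record and queue, standing in for vma_ibv_wc / cq_mgr.
struct Completion { unsigned wr_id; };

class FakeCq {
public:
	explicit FakeCq(unsigned pending) {
		for (unsigned i = 0; i < pending; ++i) m_pending.push_back({i});
	}

	// Fill 'out' with up to 'max' completions; return how many were written (0 when empty).
	std::size_t poll(Completion* out, std::size_t max) {
		std::size_t n = 0;
		while (n < max && !m_pending.empty()) {
			out[n++] = m_pending.front();
			m_pending.pop_front();
		}
		return n;
	}

private:
	std::deque<Completion> m_pending;
};

// Drain the queue in fixed-size batches, mirroring the Tx loop in clean_cq().
std::size_t drain(FakeCq& cq) {
	constexpr std::size_t kBatchSize = 16;		// analogue of MCE_MAX_CQ_POLL_BATCH
	std::array<Completion, kBatchSize> batch;
	std::size_t total = 0;
	std::size_t n;
	while ((n = cq.poll(batch.data(), batch.size())) > 0) {
		for (std::size_t i = 0; i < n; ++i) {
			// Per-element processing would happen here (process_cq_element_tx in the example).
			(void)batch[i];
		}
		total += n;
	}
	return total;
}

int main() {
	FakeCq cq(40);
	std::printf("drained %zu completions\n", drain(cq));
	return 0;
}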
Example #2
cq_mgr::~cq_mgr()
{
	cq_logdbg("destroying CQ as %s", (m_b_is_rx?"Rx":"Tx"));

	int ret = 0;
	uint32_t ret_total = 0;
	uint64_t cq_poll_sn = 0;
	mem_buf_desc_t* buff = NULL;
	struct ibv_wc wce[MCE_MAX_CQ_POLL_BATCH];
	while ((ret = poll(wce, MCE_MAX_CQ_POLL_BATCH, &cq_poll_sn)) > 0) {
		for (int i = 0; i < ret; i++) {
			if (m_b_is_rx) {
				buff = process_cq_element_rx(&wce[i]);
			} else {
				buff = process_cq_element_tx(&wce[i]);
			}
			if (buff)
				m_rx_queue.push_back(buff);
		}
		ret_total += ret;
	}
	m_b_was_drained = true;
	if (ret_total > 0) {
		cq_logdbg("Drained %d wce", ret_total);
	}

	if (m_rx_queue.size() + m_rx_pool.size()) {
		cq_logdbg("Returning %d buffers to global Rx pool (ready queue %d, free pool %d))", m_rx_queue.size() + m_rx_pool.size(), m_rx_queue.size(), m_rx_pool.size());

		g_buffer_pool_rx->put_buffers_thread_safe(&m_rx_queue, m_rx_queue.size());
		m_p_cq_stat->n_rx_sw_queue_len = m_rx_queue.size();

		g_buffer_pool_rx->put_buffers_thread_safe(&m_rx_pool, m_rx_pool.size());
		m_p_cq_stat->n_buffer_pool_len = m_rx_pool.size();
	}

	cq_logfunc("destroying ibv_cq");
	IF_VERBS_FAILURE(ibv_destroy_cq(m_p_ibv_cq)) {
		cq_logerr("destroy cq failed (errno=%d %m)", errno);
	} ENDIF_VERBS_FAILURE;

	statistics_print();
	if (m_b_is_rx)
		vma_stats_instance_remove_cq_block(m_p_cq_stat);

	cq_logdbg("done");
}
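The destructor above keeps a strict ordering: drain whatever is still pending on the CQ, hand every locally held buffer back to the global Rx pool, and only then destroy the underlying ibv_cq. The sketch below shows that ordering in isolation, with hypothetical GlobalPool/CqOwner/Buffer types invented for illustration; it is not the VMA buffer-pool API.

#include <cstdio>
#include <deque>
#include <vector>

// Hypothetical stand-ins for the shared buffer pool and the buffer descriptor;
// none of these names come from the library itself.
struct Buffer { int id; };

class GlobalPool {
public:
	// Move every buffer from 'q' into the shared free list.
	void put_buffers(std::deque<Buffer>& q) {
		m_free.insert(m_free.end(), q.begin(), q.end());
		q.clear();
	}
	std::size_t size() const { return m_free.size(); }
private:
	std::vector<Buffer> m_free;
};

class CqOwner {
public:
	explicit CqOwner(GlobalPool& pool) : m_pool(pool) {}

	~CqOwner() {
		// 1) Drain pending completions (the poll()/process loop in ~cq_mgr()).
		// 2) Return everything still held locally before tearing down the CQ,
		//    otherwise the buffers disappear from the shared pool's point of view.
		if (!m_rx_queue.empty() || !m_rx_spare.empty()) {
			std::printf("returning %zu buffers to the global pool\n",
			            m_rx_queue.size() + m_rx_spare.size());
			m_pool.put_buffers(m_rx_queue);
			m_pool.put_buffers(m_rx_spare);
		}
		// 3) Only now release the underlying queue (ibv_destroy_cq in the example).
	}

	void hold(Buffer b) { m_rx_queue.push_back(b); }

private:
	GlobalPool& m_pool;
	std::deque<Buffer> m_rx_queue;	// analogue of m_rx_queue
	std::deque<Buffer> m_rx_spare;	// analogue of m_rx_pool
};

int main() {
	GlobalPool pool;
	{
		CqOwner owner(pool);
		owner.hold(Buffer{1});
		owner.hold(Buffer{2});
	}	// destructor runs here and returns both buffers
	std::printf("pool now holds %zu buffers\n", pool.size());
	return 0;
}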
Example #3
int cq_mgr_mlx5::drain_and_proccess(uintptr_t* p_recycle_buffers_last_wr_id /*=NULL*/)
{
	cq_logfuncall("cq was %sdrained. %d processed wce since last check. %zu wce in m_rx_queue", (m_b_was_drained?"":"not "), m_n_wce_counter, m_rx_queue.size());

	/* CQ polling loop until max wce limit is reached for this interval or CQ is drained */
	uint32_t ret_total = 0;
	uint64_t cq_poll_sn = 0;

	if (p_recycle_buffers_last_wr_id != NULL) {
		m_b_was_drained = false;
	}

	while ((m_n_sysvar_progress_engine_wce_max > m_n_wce_counter) &&
		!m_b_was_drained) {
		buff_status_e status = BS_OK;
		mem_buf_desc_t* buff = poll(status);
		if (NULL == buff) {
			update_global_sn(cq_poll_sn, ret_total);
			m_b_was_drained = true;
			m_p_ring->m_gro_mgr.flush_all(NULL);
			return ret_total;
		}

		++m_n_wce_counter;

		if (process_cq_element_rx(buff, status)) {
			if (p_recycle_buffers_last_wr_id) {
				m_p_cq_stat->n_rx_pkt_drop++;
				reclaim_recv_buffer_helper(buff);
			} else {
				bool procces_now = false;
				if (m_transport_type == VMA_TRANSPORT_ETH) {
					procces_now = is_eth_tcp_frame(buff);
				}
				if (m_transport_type == VMA_TRANSPORT_IB) {
					procces_now = is_ib_tcp_frame(buff);
				}
				/* All non-UDP/IP traffic is processed immediately */
				if (procces_now) {
					buff->rx.is_vma_thr = true;
					if (!compensate_qp_poll_success(buff)) {
						process_recv_buffer(buff, NULL);
					}
				}
				else { /* UDP/IP traffic is simply queued on the CQ's Rx queue */
					m_rx_queue.push_back(buff);
					mem_buf_desc_t* buff_cur = m_rx_queue.front();
					m_rx_queue.pop_front();
					if (!compensate_qp_poll_success(buff_cur)) {
						m_rx_queue.push_front(buff_cur);
					}
				}
			}
		}

		if (p_recycle_buffers_last_wr_id) {
			*p_recycle_buffers_last_wr_id = (uintptr_t)buff;
		}

		++ret_total;
	}

	update_global_sn(cq_poll_sn, ret_total);

	m_p_ring->m_gro_mgr.flush_all(NULL);

	m_n_wce_counter = 0;
	m_b_was_drained = false;

	/* Update cq statistics */
	m_p_cq_stat->n_rx_sw_queue_len = m_rx_queue.size();
	m_p_cq_stat->n_rx_drained_at_once_max = max(ret_total, m_p_cq_stat->n_rx_drained_at_once_max);

	return ret_total;
}
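drain_and_proccess() caps each pass at m_n_sysvar_progress_engine_wce_max completions so that a single call cannot monopolize the progress engine, and it uses m_b_was_drained to record whether the CQ emptied before the budget ran out. A minimal sketch of such a budgeted drain loop, using hypothetical names (CompletionSource, budgeted_drain, a budget of 10) not taken from the library, might look like this:

#include <cstdio>
#include <deque>
#include <optional>

// Hypothetical completion source standing in for cq_mgr_mlx5::poll().
class CompletionSource {
public:
	explicit CompletionSource(int pending) {
		for (int i = 0; i < pending; ++i) m_pending.push_back(i);
	}
	std::optional<int> next_completion() {
		if (m_pending.empty()) return std::nullopt;
		int c = m_pending.front();
		m_pending.pop_front();
		return c;
	}
private:
	std::deque<int> m_pending;
};

// Process at most 'budget' completions per call; report how many were handled
// and whether the source drained (mirrors the ret_total / m_b_was_drained pair).
struct DrainResult { unsigned processed; bool drained; };

DrainResult budgeted_drain(CompletionSource& src, unsigned budget) {
	unsigned processed = 0;
	while (processed < budget) {
		std::optional<int> c = src.next_completion();
		if (!c) {
			return {processed, true};	// queue drained before the budget ran out
		}
		// Per-completion work goes here (process_cq_element_rx + dispatch in the example).
		++processed;
	}
	return {processed, false};		// budget exhausted; more work remains
}

int main() {
	CompletionSource src(25);
	for (;;) {
		DrainResult r = budgeted_drain(src, 10);	// budget analogue of progress_engine_wce_max
		std::printf("processed %u, drained=%d\n", r.processed, r.drained);
		if (r.drained) break;
	}
	return 0;
}

Returning both the processed count and the drained flag lets the caller decide whether to schedule another pass, which is effectively what the ret_total return value and m_b_was_drained member do in the function above.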