Example #1
/**
 * Polls the CQ and extracts the needed fields from the CQE.
 * On a CQE in error state it returns -1.
 * On a bad-checksum packet or a filler stride it reports
 * VMA_MP_RQ_BAD_PACKET via the flags output parameter.
 */
int cq_mgr_mp::poll_mp_cq(uint16_t &size, uint32_t &strides_used,
			  uint32_t &flags, struct mlx5_cqe64 *&out_cqe64)
{
	struct mlx5_cqe64 *cqe = check_cqe();
	if (likely(cqe)) {
		if (unlikely(MLX5_CQE_OPCODE(cqe->op_own) != MLX5_CQE_RESP_SEND)) {
			cq_logdbg("Warning op_own is %x", MLX5_CQE_OPCODE(cqe->op_own));
			// optimize checks in the ring layer by setting a non-zero size
			if (MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_RESP_ERR) {
				cq_logdbg("poll_length, CQE response error, "
					 "syndrome=0x%x, vendor syndrome error=0x%x, "
					 "HW syndrome 0x%x, HW syndrome type 0x%x\n",
					 ((struct mlx5_err_cqe *)cqe)->syndrome,
					 ((struct mlx5_err_cqe *)cqe)->vendor_err_synd,
					 ((struct mlx5_err_cqe *)cqe)->hw_err_synd,
					 ((struct mlx5_err_cqe *)cqe)->hw_synd_type);
			}
			size = 1;
			m_p_cq_stat->n_rx_pkt_drop++;
			return -1;
		}
		m_p_cq_stat->n_rx_pkt_drop += cqe->sop_qpn.sop;
		out_cqe64 = cqe;
		uint32_t stride_byte_cnt = ntohl(cqe->byte_cnt);
		strides_used = (stride_byte_cnt & MP_RQ_NUM_STRIDES_FIELD_MASK) >>
				MP_RQ_NUM_STRIDES_FIELD_SHIFT;
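		/* Branchless flag build: !!(bit) yields 0 or 1, which scales the
		 * corresponding checksum-ok flag without a conditional jump */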
		flags = (!!(cqe->hds_ip_ext & MLX5_CQE_L4_OK) * IBV_EXP_CQ_RX_TCP_UDP_CSUM_OK) |
			(!!(cqe->hds_ip_ext & MLX5_CQE_L3_OK) * IBV_EXP_CQ_RX_IP_CSUM_OK);
		if (likely(flags == UDP_OK_FLAGS)) {
			size = stride_byte_cnt & MP_RQ_BYTE_CNT_FIELD_MASK;
		} else {
			// a bad checksum can indicate either a filler stride or a genuinely bad packet
			flags = VMA_MP_RQ_BAD_PACKET;
			size = 1;
			if (stride_byte_cnt & MP_RQ_FILLER_FIELD_MASK) {
				m_p_cq_stat->n_rx_pkt_drop++;
			}
		}
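		/* Consume the CQE: advance the consumer index and prefetch the
		 * next expected entry in the CQ ring */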
		++m_mlx5_cq.cq_ci;
		prefetch((uint8_t*)m_mlx5_cq.cq_buf + ((m_mlx5_cq.cq_ci & (m_mlx5_cq.cqe_count - 1)) << m_mlx5_cq.cqe_size_log));
	} else {
		/* No CQE available: report an empty poll. The original snippet is
		 * truncated here, so this ending is a plausible reconstruction. */
		size = 0;
		flags = 0;
	}
	return 0;
}
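For context, here is a hedged sketch of how a caller might drive this poll routine. drain_mp_cq and process_packet are hypothetical names introduced for illustration; they are not part of the snippet above or of libvma.

/* Hypothetical caller (sketch only): drain the multi-packet CQ until it is
 * empty or an errored CQE is hit. process_packet() is an assumed handler. */
static void drain_mp_cq(cq_mgr_mp &cq)
{
	uint16_t size;
	uint32_t strides_used, flags;
	struct mlx5_cqe64 *cqe = NULL;

	for (;;) {
		size = 0;
		strides_used = 0;
		flags = 0;
		if (cq.poll_mp_cq(size, strides_used, flags, cqe) < 0)
			break; /* CQE in error state */
		if (size == 0)
			break; /* CQ is empty, nothing polled */
		if (flags == VMA_MP_RQ_BAD_PACKET)
			continue; /* filler stride or bad checksum, skip it */
		process_packet(cqe, size, strides_used);
	}
}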
Example #2
mem_buf_desc_t* cq_mgr_mlx5::poll(enum buff_status_e& status)
{
	mem_buf_desc_t *buff = NULL;

#ifdef RDTSC_MEASURE_RX_VMA_TCP_IDLE_POLL
	RDTSC_TAKE_END(RDTSC_FLOW_RX_VMA_TCP_IDLE_POLL);
#endif //RDTSC_MEASURE_RX_VMA_TCP_IDLE_POLL

#if defined(RDTSC_MEASURE_RX_VERBS_READY_POLL) || defined(RDTSC_MEASURE_RX_VERBS_IDLE_POLL)
	RDTSC_TAKE_START_RX_VERBS_POLL(RDTSC_FLOW_RX_VERBS_READY_POLL, RDTSC_FLOW_RX_VERBS_IDLE_POLL);
#endif //RDTSC_MEASURE_RX_VERBS_READY_POLL || RDTSC_MEASURE_RX_VERBS_IDLE_POLL

	if (unlikely(NULL == m_rx_hot_buffer)) {
		if (likely(m_rq->tail != m_rq->head)) {
			uint32_t index = m_rq->tail & (m_qp_rec.qp->m_rx_num_wr - 1);
			m_rx_hot_buffer = (mem_buf_desc_t *)m_p_rq_wqe_idx_to_wrid[index];
			m_p_rq_wqe_idx_to_wrid[index] = 0;
			prefetch((void*)m_rx_hot_buffer);
			prefetch((uint8_t*)m_cqes + ((m_cq_cons_index & (m_cq_size - 1)) << m_cqe_log_sz));
		} else {
#ifdef RDTSC_MEASURE_RX_VERBS_IDLE_POLL
			RDTSC_TAKE_END(RDTSC_FLOW_RX_VERBS_IDLE_POLL);
#endif

#if defined(RDTSC_MEASURE_RX_VMA_TCP_IDLE_POLL) || defined(RDTSC_MEASURE_RX_CQE_RECEIVEFROM)
			RDTSC_TAKE_START_VMA_IDLE_POLL_CQE_TO_RECVFROM(RDTSC_FLOW_RX_VMA_TCP_IDLE_POLL,
					RDTSC_FLOW_RX_CQE_TO_RECEIVEFROM);
#endif //RDTSC_MEASURE_RX_VMA_TCP_IDLE_POLL || RDTSC_MEASURE_RX_CQE_RECEIVEFROM
			/* If rq_tail and rq_head are pointing to the same wqe,
			 * the wq is empty and there is no cqe to be received */
			return NULL;
		}
	}
	mlx5_cqe64 *cqe = check_cqe();
	if (likely(cqe)) {
		/* Update the consumer index */
		++m_cq_cons_index;
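		/* Read barrier: ensure the CQE contents are read only after the
		 * CQE was observed to be valid */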
		rmb();
		cqe64_to_mem_buff_desc(cqe, m_rx_hot_buffer, status);
		++m_rq->tail;
		*m_cq_dbell = htonl(m_cq_cons_index & 0xffffff);
		buff = m_rx_hot_buffer;
		m_rx_hot_buffer = NULL;

#ifdef RDTSC_MEASURE_RX_VERBS_READY_POLL
		RDTSC_TAKE_END(RDTSC_FLOW_RX_VERBS_READY_POLL);
#endif //RDTSC_MEASURE_RX_VERBS_READY_POLL

#ifdef RDTSC_MEASURE_RX_READY_POLL_TO_LWIP
		RDTSC_TAKE_START(RDTSC_FLOW_RX_READY_POLL_TO_LWIP);
#endif
	} else {
#ifdef RDTSC_MEASURE_RX_VERBS_IDLE_POLL
		RDTSC_TAKE_END(RDTSC_FLOW_RX_VERBS_IDLE_POLL);
#endif

#if defined(RDTSC_MEASURE_RX_VMA_TCP_IDLE_POLL) || defined(RDTSC_MEASURE_RX_CQE_RECEIVEFROM)
		RDTSC_TAKE_START_VMA_IDLE_POLL_CQE_TO_RECVFROM(RDTSC_FLOW_RX_VMA_TCP_IDLE_POLL,
			RDTSC_FLOW_RX_CQE_TO_RECEIVEFROM);
#endif //RDTSC_MEASURE_RX_VMA_TCP_IDLE_POLL || RDTSC_MEASURE_RX_CQE_RECEIVEFROM

		prefetch((void*)m_rx_hot_buffer);
	}

	prefetch((uint8_t*)m_cqes + ((m_cq_cons_index & (m_cq_size - 1)) << m_cqe_log_sz));

	return buff;
}
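Similarly, a minimal sketch of a budget-limited receive loop built on this poll() method. poll_rx_budget, reclaim_buffer, and process_rx are assumed names for illustration, and BS_OK is assumed to be the "good completion" enumerator of buff_status_e.

/* Hypothetical receive loop (sketch only): poll up to a fixed budget of
 * completions, recycling descriptors whose status is not OK. */
static int poll_rx_budget(cq_mgr_mlx5 &cq, int budget)
{
	int polled = 0;
	while (polled < budget) {
		enum buff_status_e status = BS_OK;
		mem_buf_desc_t *buff = cq.poll(status);
		if (!buff)
			break; /* CQ is empty */
		if (status != BS_OK) {
			reclaim_buffer(buff); /* assumed helper: recycle the descriptor */
			continue;
		}
		process_rx(buff); /* assumed packet handler */
		++polled;
	}
	return polled;
}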