Example #1
/**
 * This function polls the CQ and extracts the needed fields.
 * On a CQE error it returns -1.
 * A bad-checksum packet or a filler stride is reported as
 * VMA_MP_RQ_BAD_PACKET through the flags argument.
 */
int cq_mgr_mp::poll_mp_cq(uint16_t &size, uint32_t &strides_used,
			  uint32_t &flags, struct mlx5_cqe64 *&out_cqe64)
{
	struct mlx5_cqe64 *cqe = check_cqe();
	if (likely(cqe)) {
		if (unlikely(MLX5_CQE_OPCODE(cqe->op_own) != MLX5_CQE_RESP_SEND)) {
			cq_logdbg("Warning op_own is %x", MLX5_CQE_OPCODE(cqe->op_own));
			// optimize the checks in the ring by reporting a non-zero size
			if (MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_RESP_ERR) {
				cq_logdbg("poll_length, CQE response error, "
					 "syndrome=0x%x, vendor syndrome error=0x%x, "
					 "HW syndrome 0x%x, HW syndrome type 0x%x\n",
					 ((struct mlx5_err_cqe *)cqe)->syndrome,
					 ((struct mlx5_err_cqe *)cqe)->vendor_err_synd,
					 ((struct mlx5_err_cqe *)cqe)->hw_err_synd,
					 ((struct mlx5_err_cqe *)cqe)->hw_synd_type);
			}
			size = 1;
			m_p_cq_stat->n_rx_pkt_drop++;
			return -1;
		}
		m_p_cq_stat->n_rx_pkt_drop += cqe->sop_qpn.sop;
		out_cqe64 = cqe;
		uint32_t stride_byte_cnt = ntohl(cqe->byte_cnt);
		// the MP-RQ CQE packs the number of consumed strides into the upper bits of byte_cnt
		strides_used = (stride_byte_cnt & MP_RQ_NUM_STRIDES_FIELD_MASK) >>
				MP_RQ_NUM_STRIDES_FIELD_SHIFT;
		// build the checksum-OK flags branchlessly from the HW L3/L4 status bits
		flags = (!!(cqe->hds_ip_ext & MLX5_CQE_L4_OK) * IBV_EXP_CQ_RX_TCP_UDP_CSUM_OK) |
			(!!(cqe->hds_ip_ext & MLX5_CQE_L3_OK) * IBV_EXP_CQ_RX_IP_CSUM_OK);
		if (likely(flags == UDP_OK_FLAGS)) {
			size = stride_byte_cnt & MP_RQ_BYTE_CNT_FIELD_MASK;
		} else {
			// if the checksum is bad, this is either a filler stride or a bad packet
			flags = VMA_MP_RQ_BAD_PACKET;
			size = 1;
			if (stride_byte_cnt & MP_RQ_FILLER_FIELD_MASK) {
				m_p_cq_stat->n_rx_pkt_drop++;
			}
		}
		++m_mlx5_cq.cq_ci;
		prefetch((uint8_t*)m_mlx5_cq.cq_buf + ((m_mlx5_cq.cq_ci & (m_mlx5_cq.cqe_count - 1)) << m_mlx5_cq.cqe_size_log));
	} else {
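		/* Minimal sketch of the remaining no-CQE branch, assuming an empty
		 * poll is reported back (size 0, flags 0) with a 0 return value. */
		size = 0;
		flags = 0;
	}
	return 0;
}

A caller of poll_mp_cq() might consume the extracted fields as sketched below; the cq pointer, the surrounding control flow and the variable names are illustrative and not taken from the libvma sources:

	uint16_t size = 0;
	uint32_t strides_used = 0, flags = 0;
	struct mlx5_cqe64 *cqe64 = NULL;

	// cq is assumed to be a cq_mgr_mp* owned by the receiving ring
	if (cq->poll_mp_cq(size, strides_used, flags, cqe64) == -1) {
		// CQE reported an error; it was already counted in n_rx_pkt_drop
	} else if (size && flags == VMA_MP_RQ_BAD_PACKET) {
		// filler stride or bad checksum: reclaim the strides, nothing to deliver
	} else if (size) {
		// valid packet: 'size' bytes spread over 'strides_used' strides,
		// with both L3 and L4 checksums validated by HW (UDP_OK_FLAGS)
	}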
Example #2
inline void cq_mgr_mlx5::cqe64_to_mem_buff_desc(struct mlx5_cqe64 *cqe, mem_buf_desc_t* p_rx_wc_buf_desc, enum buff_status_e &status)
{
	struct mlx5_err_cqe *ecqe;
	ecqe = (struct mlx5_err_cqe *)cqe;

	switch (MLX5_CQE_OPCODE(cqe->op_own)) {
		case MLX5_CQE_RESP_WR_IMM:
			cq_logerr("IBV_WC_RECV_RDMA_WITH_IMM is not supported");
			status = BS_CQE_RESP_WR_IMM_NOT_SUPPORTED;
			break;
		case MLX5_CQE_RESP_SEND:
		case MLX5_CQE_RESP_SEND_IMM:
		case MLX5_CQE_RESP_SEND_INV:
		{
			status = BS_OK;
			p_rx_wc_buf_desc->rx.hw_raw_timestamp = ntohll(cqe->timestamp);
			p_rx_wc_buf_desc->rx.flow_tag_id      = vma_get_flow_tag(cqe);

#ifdef DEFINED_MLX5_HW_ETH_WQE_HEADER
			p_rx_wc_buf_desc->rx.is_sw_csum_need = !(m_b_is_rx_hw_csum_on && (cqe->hds_ip_ext & MLX5_CQE_L4_OK) && (cqe->hds_ip_ext & MLX5_CQE_L3_OK));
#else
			p_rx_wc_buf_desc->rx.is_sw_csum_need = !m_b_is_rx_hw_csum_on; /* we assume that the checksum is ok */
#endif
			p_rx_wc_buf_desc->sz_data = ntohl(cqe->byte_cnt);
			return;
		}
		case MLX5_CQE_INVALID: /* No cqe!*/
		{
			cq_logerr("We should no receive a buffer without a cqe\n");
			status = BS_CQE_INVALID;
			break;
		}
		case MLX5_CQE_REQ:
		case MLX5_CQE_SIG_ERR:
		case MLX5_CQE_REQ_ERR:
		case MLX5_CQE_RESP_ERR:
		default:
		{
			if (MLX5_CQE_SYNDROME_WR_FLUSH_ERR == ecqe->syndrome) {
				status = BS_IBV_WC_WR_FLUSH_ERR;
			} else {
				status = BS_GENERAL_ERR;
			}
			/*
			  IB compliant completion with error syndrome:
			  0x1: Local_Length_Error
			  0x2: Local_QP_Operation_Error
			  0x4: Local_Protection_Error
			  0x5: Work_Request_Flushed_Error
			  0x6: Memory_Window_Bind_Error
			  0x10: Bad_Response_Error
			  0x11: Local_Access_Error
			  0x12: Remote_Invalid_Request_Error
			  0x13: Remote_Access_Error
			  0x14: Remote_Operation_Error
			  0x15: Transport_Retry_Counter_Exceeded
			  0x16: RNR_Retry_Counter_Exceeded
			  0x22: Aborted_Error
			  other: Reserved
			 */
			break;
		}
	}
}
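
The syndrome values listed in the comment above can be rendered as readable text when logging BS_GENERAL_ERR completions; a small helper along these lines would do it (the helper is illustrative and not part of the libvma sources):

#include <stdint.h>

/* Illustrative only: map the IB-compliant completion error syndromes
 * listed above to readable names for logging. */
static const char *ib_err_syndrome_str(uint8_t syndrome)
{
	switch (syndrome) {
	case 0x01: return "Local_Length_Error";
	case 0x02: return "Local_QP_Operation_Error";
	case 0x04: return "Local_Protection_Error";
	case 0x05: return "Work_Request_Flushed_Error";
	case 0x06: return "Memory_Window_Bind_Error";
	case 0x10: return "Bad_Response_Error";
	case 0x11: return "Local_Access_Error";
	case 0x12: return "Remote_Invalid_Request_Error";
	case 0x13: return "Remote_Access_Error";
	case 0x14: return "Remote_Operation_Error";
	case 0x15: return "Transport_Retry_Counter_Exceeded";
	case 0x16: return "RNR_Retry_Counter_Exceeded";
	case 0x22: return "Aborted_Error";
	default:   return "Reserved";
	}
}

For example, the error branch above could log ib_err_syndrome_str(ecqe->syndrome) next to the raw syndrome value.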