/* Handle a TX event on an EF10 VI that has TX timestamping enabled.
 *
 * evq      event-queue VI (asserted to have EF_VI_TX_TIMESTAMPS set)
 * ev       raw hardware event
 * evs      output array cursor for user-visible events
 * evs_len  remaining capacity of the output array
 *
 * NOTE(review): this chunk appears truncated -- the branch handling
 * TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI (which would combine the stored
 * low timestamp with the high part and emit the user event) and the
 * function's closing brace are not visible here.  Code below is kept
 * byte-identical; confirm the missing tail against the full file.
 */
static void ef10_tx_event_ts_enabled(ef_vi* evq, const ef_vi_event* ev,
                                     ef_event** evs, int* evs_len)
{
  EF_VI_ASSERT(evq->vi_flags & EF_VI_TX_TIMESTAMPS);
  /* When TX timestamping is enabled, we get three events for
   * every transmit.  A TX completion and two timestamp events.
   * We ignore the completion and store the first timestamp in
   * the per TXQ state.  On the second timestamp we retrieve the
   * first one and construct a EF_EVENT_TYPE_TX_WITH_TIMESTAMP
   * event to send to the user. */
  if(QWORD_GET_U(ESF_DZ_TX_SOFT1, *ev) ==
     TX_TIMESTAMP_EVENT_TX_EV_COMPLETION) {
    /* TX completion event. Ignored */
  }
  else if(QWORD_GET_U(ESF_DZ_TX_SOFT1, *ev) ==
          TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO) {
    /* First (low) timestamp: stash it in per-TXQ state.  In debug
     * builds, check we are not overwriting an un-consumed value. */
    ef_vi_txq_state* qs = &evq->ep_state->txq;
    EF_VI_DEBUG(
      EF_VI_BUG_ON(qs->ts_nsec != EF_VI_TX_TIMESTAMP_TS_NSEC_INVALID) );
    /* Convert the extracted fractional-second count to nanoseconds
     * (presumably a 2^29-per-second tick: *1e9 then >>29 -- TODO
     * confirm against the datasheet), shift left two bits and pack
     * the event queue's sync flags into the low bits. */
    qs->ts_nsec =
      (((((uint64_t)timestamp_extract(*ev)) * 1000000000UL) >> 29) << 2) |
      evq->ep_state->evq.sync_flags;
  }
/* Decode a Falcon TX event into a user-visible ef_event.
 *
 * Danger danger!  No matter what we ask for wrt batching, we will get
 * a batched event every 16 descriptors, and we also get
 * dma-queue-empty events -- i.e. duplicates are expected.  In
 * addition, if it's been requested in the descriptor, we get an event
 * per descriptor (we don't currently request this).
 *
 * ev_out  user event to fill in
 * ev      raw hardware event
 */
ef_vi_inline void falcon_tx_event(ef_event* ev_out, const ef_vi_event* ev)
{
  /* Fields common to the success and error layouts. */
  ev_out->tx.q_id = QWORD_GET_U(TX_EV_Q_LABEL, *ev);
  ev_out->tx.desc_id = QWORD_GET_U(TX_EV_DESC_PTR, *ev) + 1;

  if( unlikely( ! QWORD_TEST_BIT(TX_EV_COMP, *ev) ) ) {
    ev_out->tx_error.type = EF_EVENT_TYPE_TX_ERROR;
    /* Classify the error from the first matching bit. */
    if( QWORD_TEST_BIT(TX_EV_BUF_OWNER_ID_ERR, *ev) )
      ev_out->tx_error.subtype = EF_EVENT_TX_ERROR_RIGHTS;
    else if( QWORD_TEST_BIT(TX_EV_WQ_FF_FULL, *ev) )
      ev_out->tx_error.subtype = EF_EVENT_TX_ERROR_OFLOW;
    else if( QWORD_TEST_BIT(TX_EV_PKT_TOO_BIG, *ev) )
      ev_out->tx_error.subtype = EF_EVENT_TX_ERROR_2BIG;
    else if( QWORD_TEST_BIT(TX_EV_PKT_ERR, *ev) )
      ev_out->tx_error.subtype = EF_EVENT_TX_ERROR_BUS;
    /* NOTE(review): if none of the above bits is set, subtype is left
     * unset -- presumably the hardware always sets at least one on an
     * error event; confirm. */
    return;
  }

  ev_out->tx.type = EF_EVENT_TYPE_TX;
}
/* Decode an EF10 packed-stream RX event into an
 * EF_EVENT_TYPE_RX_PACKED_STREAM user event.
 *
 * evq_vi   VI owning the event queue the event arrived on
 * ev       raw hardware event
 * evs      output array cursor (advanced by one)
 * evs_len  remaining output capacity (decremented by one)
 *
 * The packet count delivered to the user is the delta between the
 * event's short packet count and the count seen at the previous
 * event, modulo the short-count range.
 */
ef_vi_inline void ef10_packed_stream_rx_event(ef_vi* evq_vi,
                                              const ef_vi_event* ev,
                                              ef_event** evs, int* evs_len)
{
  unsigned q_label = QWORD_GET_U(ESF_DZ_RX_QLABEL, *ev);
  unsigned short_pc = QWORD_GET_U(ESF_DZ_RX_DSC_PTR_LBITS, *ev);
  unsigned pkt_count_range = (1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH);
  /* Little-endian mask of the per-event error bits that indicate a
   * discard condition. */
  const ci_uint32 discard_mask =
    CI_BSWAPC_LE32(1 << ESF_DZ_RX_ECC_ERR_LBN |
                   1 << ESF_DZ_RX_CRC1_ERR_LBN |
                   1 << ESF_DZ_RX_CRC0_ERR_LBN |
                   1 << ESF_DZ_RX_TCPUDP_CKSUM_ERR_LBN |
                   1 << ESF_DZ_RX_IPCKSUM_ERR_LBN |
                   1 << ESF_DZ_RX_ECRC_ERR_LBN);
  /* The RXQ the packets arrived on: looked up by queue label. */
  ef_vi* vi = evq_vi->vi_qs[q_label];
  ef_event* ev_out = (*evs)++;
  --(*evs_len);
  ev_out->rx_packed_stream.type = EF_EVENT_TYPE_RX_PACKED_STREAM;
  ev_out->rx_packed_stream.q_id = q_label;
  ev_out->rx_packed_stream.n_pkts =
    (pkt_count_range + short_pc - vi->ep_state->rxq.rx_ps_pkt_count)
    % pkt_count_range;
  ev_out->rx_packed_stream.flags = 0;
  ev_out->rx_packed_stream.ps_flags = 0;
  vi->ep_state->rxq.rx_ps_pkt_count = short_pc;
  if (unlikely( QWORD_GET_U(ESF_DZ_RX_EV_ROTATE, *ev) )) {
    /* Hardware has moved on to the next packed-stream buffer: retire
     * one descriptor from the RXQ and consume a credit.
     *
     * BUG FIX: the descriptor-ring state belongs to the RXQ's vi, not
     * to the event-queue vi -- previously this read and advanced
     * evq_vi->ep_state->rxq.removed while indexing vi's ring, which
     * is wrong whenever the RXQ vi differs from the event-queue vi. */
    unsigned desc_id;
    desc_id = vi->ep_state->rxq.removed & vi->vi_rxq.mask;
    vi->vi_rxq.ids[desc_id] = EF_REQUEST_ID_MASK;
    ++vi->ep_state->rxq.removed;
    EF_VI_ASSERT(vi->ep_state->rxq.rx_ps_credit_avail > 0);
    --vi->ep_state->rxq.rx_ps_credit_avail;
    ev_out->rx_packed_stream.flags |= EF_EVENT_FLAG_PS_NEXT_BUFFER;
  }
  EF_VI_ASSERT(ev_out->rx_packed_stream.n_pkts <= 8);
  EF_VI_ASSERT(ev_out->rx_packed_stream.n_pkts > 0 ||
               QWORD_GET_U(ESF_DZ_RX_CONT, *ev));
  /* Fast path: no error bits set. */
  if (likely( ! ((ev->u32[0] & discard_mask) ) ))
    return;
  /* Map the individual error bits onto packed-stream flags. */
  if (QWORD_GET_U(ESF_DZ_RX_ECC_ERR, *ev)   |
      QWORD_GET_U(ESF_DZ_RX_CRC1_ERR, *ev)  |
      QWORD_GET_U(ESF_DZ_RX_CRC0_ERR, *ev)  |
      QWORD_GET_U(ESF_DZ_RX_ECRC_ERR, *ev))
    ev_out->rx_packed_stream.ps_flags |= EF_VI_PS_FLAG_BAD_FCS;
  if (QWORD_GET_U(ESF_DZ_RX_TCPUDP_CKSUM_ERR, *ev))
    ev_out->rx_packed_stream.ps_flags |= EF_VI_PS_FLAG_BAD_L4_CSUM;
  if (QWORD_GET_U(ESF_DZ_RX_IPCKSUM_ERR, *ev))
    ev_out->rx_packed_stream.ps_flags |= EF_VI_PS_FLAG_BAD_L3_CSUM;
}
/* Dispatch an EF10 RX event to the RXQ identified by its queue label.
 *
 * The event carries only the low bits of the descriptor pointer, so
 * the full descriptor index is reconstructed from the RXQ's removed
 * count plus the masked delta.  Events with an unknown label are
 * counted as errors and dropped.
 */
ef_vi_inline void ef10_rx_event(ef_vi* evq_vi, const ef_vi_event* ev,
                                ef_event** evs, int* evs_len)
{
  const unsigned lbits_mask =
    __EFVI_MASK(ESF_DZ_RX_DSC_PTR_LBITS_WIDTH, unsigned);
  unsigned label = QWORD_GET_U(ESF_DZ_RX_QLABEL, *ev);
  unsigned short_di, desc_i, removed;
  ef_vi* rx_vi = evq_vi->vi_qs[label];

  if( unlikely(rx_vi == NULL) ) {
    /* No VI registered under this label: account and drop. */
    INC_ERROR_STAT(evq_vi, rx_ev_bad_q_label);
    return;
  }

  short_di = QWORD_GET_U(ESF_DZ_RX_DSC_PTR_LBITS, *ev);
  removed = rx_vi->ep_state->rxq.removed;
  /* Widen the truncated pointer: removed + masked delta, then step
   * back one to get the index of the last consumed descriptor. */
  desc_i = (removed + ((short_di - removed) & lbits_mask) - 1)
           & rx_vi->vi_rxq.mask;
  huntington_rx_desc_consumed(rx_vi, ev, evs, evs_len, label, desc_i);
}
/* Dispatch a Falcon RX event to the RXQ identified by its queue label.
 *
 * The descriptor pointer in the event is compared against the RXQ's
 * removed count: a match is the normal in-order completion; exactly
 * one behind indicates a no-descriptor truncation; anything else is
 * unexpected.  Events with an unknown label are counted and dropped.
 */
ef_vi_inline void falcon_rx_event(ef_vi* evq_vi, const ef_vi_event* ev,
                                  ef_event** evs, int* evs_len)
{
  unsigned label = QWORD_GET_U(RX_EV_Q_LABEL, *ev);
  unsigned ring_mask, di;
  ef_vi* rx_vi = evq_vi->vi_qs[label];

  if( unlikely(rx_vi == NULL) ) {
    INC_ERROR_STAT(evq_vi, rx_ev_bad_q_label);
    return;
  }

  ring_mask = rx_vi->vi_rxq.mask;
  di = ring_mask & CI_QWORD_FIELD(*ev, RX_EV_DESC_PTR);
  if( likely(di == (rx_vi->ep_state->rxq.removed & ring_mask)) )
    falcon_rx_desc_consumed(rx_vi, ev, evs, evs_len, label, di);
  else if( ((di + 1 - rx_vi->ep_state->rxq.removed) & ring_mask) == 0 )
    falcon_rx_no_desc_trunc(evs, evs_len, label);
  else
    falcon_rx_unexpected(rx_vi, ev, evs, evs_len, label, di);
}
/* Turn a normal in-order Falcon RX completion into a user event
 * (EF_EVENT_TYPE_RX or EF_EVENT_TYPE_RX_DISCARD), retiring one
 * descriptor from the RXQ.
 *
 * vi       the RXQ's VI
 * ev       raw hardware event
 * evs      output array cursor (advanced by one)
 * evs_len  remaining output capacity (decremented by one)
 * q_label  queue label copied into the user event
 * desc_i   ring index of the consumed descriptor
 *
 * Note the cross jumps: a multicast packet that fails the hash match
 * is re-classified as a discard (goto discard), and an IP-fragment
 * "error" is not really an error so it is delivered as a normal RX
 * event (goto dont_discard).
 */
ef_vi_inline void falcon_rx_desc_consumed(ef_vi* vi, const ef_vi_event* ev,
                                          ef_event** evs, int* evs_len,
                                          int q_label, int desc_i)
{
  ef_event* ev_out = (*evs)++;
  --(*evs_len);
  ev_out->rx.q_id = q_label;
  ev_out->rx.rq_id = vi->vi_rxq.ids[desc_i];
  vi->vi_rxq.ids[desc_i] = EF_REQUEST_ID_MASK;  /* ?? killme */
  ++vi->ep_state->rxq.removed;
  /* SOP / CONT flags describe jumbo (multi-descriptor) frames. */
  if( QWORD_TEST_BIT(RX_SOP, *ev) )
    ev_out->rx.flags = EF_EVENT_FLAG_SOP;
  else
    ev_out->rx.flags = 0;
  if( QWORD_TEST_BIT(RX_JUMBO_CONT, *ev) )
    ev_out->rx.flags |= EF_EVENT_FLAG_CONT;
  if(likely( QWORD_TEST_BIT(RX_EV_PKT_OK, *ev) )) {
  dont_discard:
    ev_out->rx.len = QWORD_GET_U(RX_EV_BYTE_CNT, *ev);
    ev_out->rx.type = EF_EVENT_TYPE_RX;
    if( QWORD_TEST_BIT(RX_iSCSI_PKT_OK, *ev) )
      ev_out->rx.flags |= EF_EVENT_FLAG_ISCSI_OK;
    if( QWORD_TEST_BIT(RX_EV_MCAST_PKT, *ev) ) {
      int match = QWORD_TEST_BIT(RX_EV_MCAST_HASH_MATCH,*ev);
      ev_out->rx.flags |= EF_EVENT_FLAG_MULTICAST;
      /* Multicast frame that missed the hash filter: discard it. */
      if(unlikely( ! match ))
        goto discard;
    }
  }
  else {
  discard:
    ev_out->rx_discard.len = QWORD_GET_U(RX_EV_BYTE_CNT, *ev);
    ev_out->rx_discard.type = EF_EVENT_TYPE_RX_DISCARD;
    /* Order matters here: more fundamental errors first. */
    if( QWORD_TEST_BIT(RX_EV_BUF_OWNER_ID_ERR, *ev) )
      ev_out->rx_discard.subtype = EF_EVENT_RX_DISCARD_RIGHTS;
    else if( QWORD_TEST_BIT(RX_EV_FRM_TRUNC, *ev) )
      ev_out->rx_discard.subtype = EF_EVENT_RX_DISCARD_TRUNC;
    else if( QWORD_TEST_BIT(RX_EV_ETH_CRC_ERR, *ev) )
      ev_out->rx_discard.subtype = EF_EVENT_RX_DISCARD_CRC_BAD;
    else if( QWORD_TEST_BIT(RX_EV_MCAST_PKT, *ev) &&
             ! QWORD_TEST_BIT(RX_EV_MCAST_HASH_MATCH,*ev) )
      ev_out->rx_discard.subtype = EF_EVENT_RX_DISCARD_MCAST_MISMATCH;
    else if( QWORD_TEST_BIT(RX_EV_IP_HDR_CHKSUM_ERR, *ev) )
      ev_out->rx_discard.subtype = EF_EVENT_RX_DISCARD_CSUM_BAD;
    else if( QWORD_TEST_BIT(RX_EV_TCP_UDP_CHKSUM_ERR, *ev) )
      ev_out->rx_discard.subtype = EF_EVENT_RX_DISCARD_CSUM_BAD;
    else if( QWORD_TEST_BIT(RX_EV_TOBE_DISC, *ev) )
      ev_out->rx_discard.subtype = EF_EVENT_RX_DISCARD_OTHER;
    else if( QWORD_TEST_BIT(RX_EV_IP_FRAG_ERR, *ev) )
      /* IP fragments are flagged by hardware but are not errors:
       * deliver as a normal RX event. */
      goto dont_discard;
    else
      ev_out->rx_discard.subtype = EF_EVENT_RX_DISCARD_OTHER;
  }
}
/* Turn an EF10 (Huntington) RX completion into a user event, handling
 * jumbo (multi-event) frames, discard classification and the implicit
 * frame-truncation case.
 *
 * vi       the RXQ's VI
 * ev       raw hardware event
 * evs      output array cursor (advanced by one)
 * evs_len  remaining output capacity (decremented by one)
 * q_label  queue label copied into the user event
 * desc_i   ring index of the consumed descriptor
 *
 * Byte counts for jumbo frames are accumulated in qs->bytes_acc across
 * successive events; the descriptor is only counted as removed on the
 * non-abort paths (see the rx_bytes == 0 case below).
 */
ef_vi_inline void huntington_rx_desc_consumed(ef_vi* vi, const ef_vi_event* ev,
                                              ef_event** evs, int* evs_len,
                                              int q_label, int desc_i)
{
  ef_vi_rxq_state* qs = &vi->ep_state->rxq;
  /* ABORT bit not included in this as it is not set by fw */
  const ci_uint32 discard_mask =
    CI_BSWAPC_LE32(1 << ESF_DZ_RX_ECC_ERR_LBN |
                   1 << ESF_DZ_RX_CRC1_ERR_LBN |
                   1 << ESF_DZ_RX_CRC0_ERR_LBN |
                   1 << ESF_DZ_RX_TCPUDP_CKSUM_ERR_LBN |
                   1 << ESF_DZ_RX_IPCKSUM_ERR_LBN |
                   1 << ESF_DZ_RX_ECRC_ERR_LBN);
  ef_event* ev_out = (*evs)++;
  unsigned rx_bytes;
  --(*evs_len);
  ev_out->rx.q_id = q_label;
  ev_out->rx.rq_id = vi->vi_rxq.ids[desc_i];
  vi->vi_rxq.ids[desc_i] = EF_REQUEST_ID_MASK;  /* ?? killme */
  rx_bytes = QWORD_GET_U(ESF_DZ_RX_BYTES, *ev);
  ev_out->rx.type = EF_EVENT_TYPE_RX;
  /* First event of a frame starts the byte accumulator; follow-on
   * events of a jumbo frame add to it. */
  if( ! qs->in_jumbo ) {
    ev_out->rx.flags = EF_EVENT_FLAG_SOP;
    qs->bytes_acc = rx_bytes;
  }
  else {
    ev_out->rx.flags = 0;
    qs->bytes_acc += rx_bytes;
  }
  /* CONT bit set means more events follow for this frame. */
  if( ! QWORD_GET_U(ESF_DZ_RX_CONT, *ev) )
    qs->in_jumbo = 0;
  else {
    ev_out->rx.flags |= EF_EVENT_FLAG_CONT;
    ++qs->in_jumbo;
  }
  ev_out->rx.len = qs->bytes_acc;
  if( QWORD_GET_U(ESF_DZ_RX_MAC_CLASS, *ev) == ESE_DZ_MAC_CLASS_MCAST) {
    ev_out->rx.flags |= EF_EVENT_FLAG_MULTICAST;
  }
  /* Consider rx_bytes == 0 to indicate that the abort bit
   * should have been set but wasn't - i.e. it's a frame
   * trunc */
  if(likely( ! ((ev->u32[0] & discard_mask) || (rx_bytes == 0)) )) {
    /* Clean packet: retire the descriptor and we are done. */
    ++vi->ep_state->rxq.removed;
    return;
  }
  if( rx_bytes == 0 ) {
    /* If this is an abort then we didn't really consume a
     * descriptor, so don't increment removed count.
     */
    ev_out->rx_discard.type = EF_EVENT_TYPE_RX_NO_DESC_TRUNC;
    return;
  }
  /* Genuine discard: classify CRC-type errors first, otherwise it
   * must be a checksum error (the only other bits in discard_mask). */
  ev_out->rx_discard.len = qs->bytes_acc;
  ev_out->rx_discard.type = EF_EVENT_TYPE_RX_DISCARD;
  if( QWORD_GET_U(ESF_DZ_RX_ECC_ERR, *ev) |
      QWORD_GET_U(ESF_DZ_RX_CRC1_ERR, *ev) |
      QWORD_GET_U(ESF_DZ_RX_CRC0_ERR, *ev) |
      QWORD_GET_U(ESF_DZ_RX_ECRC_ERR, *ev) )
    ev_out->rx_discard.subtype = EF_EVENT_RX_DISCARD_CRC_BAD;
  else
    /* TCPUDP_CKSUM or IPCKSUM error */
    ev_out->rx_discard.subtype = EF_EVENT_RX_DISCARD_CSUM_BAD;
  ++vi->ep_state->rxq.removed;
}
/* Reassemble the 32-bit partial timestamp carried in a TX timestamp
 * event: the low half comes from the DESCR_INDX field, the high half
 * from the SOFT2 field (assumed 16 bits wide -- TODO confirm against
 * the field definitions). */
static uint32_t timestamp_extract(ef_vi_event ev)
{
  const uint32_t low_half  = QWORD_GET_U(ESF_DZ_TX_DESCR_INDX, ev);
  const uint32_t high_half = QWORD_GET_U(ESF_DZ_TX_SOFT2, ev);
  return low_half | (high_half << 16);
}