/**
 * Burst-transmit entry point for a cxgbe TX queue.
 *
 * Takes the queue lock, reclaims completed descriptors, then pushes
 * packets until either all @nb_pkts are sent or t4_eth_xmit() fails
 * (presumably when the descriptor ring is full — behavior of the helper
 * is defined elsewhere). After each batch it reclaims again and retries;
 * it gives up only when a whole pass sends nothing.
 *
 * Returns the number of packets actually handed to the hardware.
 */
static uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
				uint16_t nb_pkts)
{
	struct sge_eth_txq *txq = (struct sge_eth_txq *)tx_queue;
	uint16_t sent = 0;

	CXGBE_DEBUG_TX(adapter, "%s: txq = %p; tx_pkts = %p; nb_pkts = %d\n",
		       __func__, txq, tx_pkts, nb_pkts);

	t4_os_lock(&txq->txq_lock);
	/* Free descriptors from transmits that already completed. */
	reclaim_completed_tx(&txq->q);

	while (sent < nb_pkts) {
		uint16_t batch = 0;

		/* Push packets until the ring refuses one or we run out. */
		while (sent + batch < nb_pkts &&
		       t4_eth_xmit(txq, tx_pkts[sent + batch]) >= 0)
			batch++;

		/* A fully stalled pass means no progress is possible. */
		if (batch == 0)
			break;

		sent += batch;
		/* Reclaim as much as possible before the next pass. */
		reclaim_completed_tx(&txq->q);
	}

	t4_os_unlock(&txq->txq_lock);
	return sent;
}
/**
 * Allocate an available free filter-TID entry.
 * @adap: the adapter
 * @family: FILTER_TYPE_IPV6 or IPv4
 *
 * An IPv6 filter needs a 4-slot aligned region of the ftid bitmap,
 * while IPv4 needs a single free bit. The search runs under
 * @ftid_lock so concurrent allocators see a consistent bitmap.
 *
 * Returns the index of a free entry, or -1 if none is available
 * (the search helpers are assumed to report failure with a value
 * >= the table size — defined elsewhere).
 */
int cxgbe_alloc_ftid(struct adapter *adap, unsigned int family)
{
	struct tid_info *t = &adap->tids;
	int size = t->nftids;
	int pos;

	t4_os_lock(&t->ftid_lock);
	pos = (family == FILTER_TYPE_IPV6) ?
	      cxgbe_bitmap_find_free_region(t->ftid_bmap, size, 4) :
	      cxgbe_find_first_zero_bit(t->ftid_bmap, size);
	t4_os_unlock(&t->ftid_lock);

	return (pos < size) ? pos : -1;
}
/**
 * Check whether a filter slot range is already in use.
 * @t: the tid info containing the ftid bitmap
 * @fidx: index of the first slot to check
 * @family: FILTER_TYPE_IPV6 or IPv4
 *
 * IPv6 filters occupy four consecutive slots and IPv4 filters occupy
 * one, so the corresponding number of bits starting at @fidx is
 * examined under @ftid_lock.
 *
 * Returns TRUE if any slot in the range is occupied, FALSE otherwise.
 * NOTE(review): assumes the caller guarantees fidx (+3 for IPv6) is
 * within the bitmap — no bounds check is done here.
 */
bool is_filter_set(struct tid_info *t, int fidx, int family)
{
	int nslots = (family == FILTER_TYPE_IPV6) ? 4 : 1;
	bool busy = FALSE;
	int i;

	t4_os_lock(&t->ftid_lock);
	for (i = 0; i < nslots; i++) {
		if (rte_bitmap_get(t->ftid_bmap, fidx + i)) {
			busy = TRUE;
			break;
		}
	}
	t4_os_unlock(&t->ftid_lock);

	return busy;
}