static void
rtwn_usb_reset_tx_list(struct rtwn_usb_softc *uc,
    rtwn_datahead *head, struct ieee80211vap *vap)
{
	struct rtwn_vap *uvp = RTWN_VAP(vap);
	struct rtwn_data *dp, *tmp;
	int id;

	id = (uvp != NULL ? uvp->id : RTWN_VAP_ID_INVALID);

	STAILQ_FOREACH_SAFE(dp, head, next, tmp) {
		if (vap == NULL || (dp->ni == NULL &&
		    (dp->id == id || id == RTWN_VAP_ID_INVALID)) ||
		    (dp->ni != NULL && dp->ni->ni_vap == vap)) {
			if (dp->ni != NULL) {
				ieee80211_free_node(dp->ni);
				dp->ni = NULL;
			}

			if (dp->m != NULL) {
				m_freem(dp->m);
				dp->m = NULL;
			}

			STAILQ_REMOVE(head, dp, rtwn_data, next);
			STAILQ_INSERT_TAIL(&uc->uc_tx_inactive, dp, next);
		}
	}
}
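/*
 * Hedged usage sketch (not part of the driver): a caller tearing down a
 * vap would typically run the reset over both the active and the pending
 * lists under the softc lock, recycling buffers onto uc_tx_inactive.
 * rtwn_usb_reset_lists_sketch() is an illustrative name; uc_tx_active
 * and uc_tx_pending are assumed to be the usual rtwn_usb_softc queue
 * heads.
 */
static void
rtwn_usb_reset_lists_sketch(struct rtwn_softc *sc, struct ieee80211vap *vap)
{
	struct rtwn_usb_softc *uc = RTWN_USB_SOFTC(sc);

	RTWN_ASSERT_LOCKED(sc);

	/* Release frames queued to (or in flight for) this vap. */
	rtwn_usb_reset_tx_list(uc, &uc->uc_tx_active, vap);
	rtwn_usb_reset_tx_list(uc, &uc->uc_tx_pending, vap);
}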
static void
rtwn_pci_beacon_update_begin(struct rtwn_softc *sc, struct ieee80211vap *vap)
{
	struct rtwn_vap *rvp = RTWN_VAP(vap);

	RTWN_ASSERT_LOCKED(sc);

	rtwn_beacon_enable(sc, rvp->id, 0);
}
static void
rtwn_pci_beacon_update_end(struct rtwn_softc *sc, struct ieee80211vap *vap)
{
	struct rtwn_vap *rvp = RTWN_VAP(vap);

	RTWN_ASSERT_LOCKED(sc);

	if (rvp->curr_mode != R92C_MSR_NOLINK)
		rtwn_beacon_enable(sc, rvp->id, 1);
}
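/*
 * Hedged sketch of the expected call order (illustrative only): the
 * begin/end hooks bracket any rewrite of the beacon buffer so the MAC
 * does not DMA a half-updated frame, and update_end deliberately leaves
 * beaconing off while the port is still in R92C_MSR_NOLINK.
 * rtwn_pci_rewrite_beacon_sketch() and the rebuild step in the middle
 * are assumptions, not driver code.
 */
static void
rtwn_pci_rewrite_beacon_sketch(struct rtwn_softc *sc, struct ieee80211vap *vap)
{
	RTWN_ASSERT_LOCKED(sc);

	rtwn_pci_beacon_update_begin(sc, vap);	/* beaconing off */
	/* ... rebuild / copy the beacon frame into the ring here ... */
	rtwn_pci_beacon_update_end(sc, vap);	/* back on, unless NOLINK */
}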
void
r12a_fill_tx_desc_raw(struct rtwn_softc *sc, struct ieee80211_node *ni,
    struct mbuf *m, void *buf, const struct ieee80211_bpf_params *params)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct rtwn_vap *uvp = RTWN_VAP(vap);
	struct ieee80211_frame *wh;
	struct r12a_tx_desc *txd;
	uint8_t ridx;
	int ismcast;

	/* XXX TODO: 11n checks, matching rtwn_fill_tx_desc() */

	wh = mtod(m, struct ieee80211_frame *);
	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
	ridx = rate2ridx(params->ibp_rate0);

	/* Fill Tx descriptor. */
	txd = (struct r12a_tx_desc *)buf;
	txd->flags0 |= R12A_FLAGS0_LSG | R12A_FLAGS0_FSG;
	if (ismcast)
		txd->flags0 |= R12A_FLAGS0_BMCAST;

	if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0) {
		txd->txdw4 = htole32(R12A_TXDW4_RETRY_LMT_ENA);
		txd->txdw4 |= htole32(SM(R12A_TXDW4_RETRY_LMT,
		    params->ibp_try0));
	}

	if (params->ibp_flags & IEEE80211_BPF_RTS)
		r12a_tx_protection(sc, txd, IEEE80211_PROT_RTSCTS, ridx);
	if (params->ibp_flags & IEEE80211_BPF_CTS)
		r12a_tx_protection(sc, txd, IEEE80211_PROT_CTSONLY, ridx);

	txd->txdw1 |= htole32(SM(R12A_TXDW1_MACID, RTWN_MACID_BC));
	txd->txdw1 |= htole32(SM(R12A_TXDW1_QSEL, R12A_TXDW1_QSEL_MGNT));

	/* Set TX rate index. */
	txd->txdw4 |= htole32(SM(R12A_TXDW4_DATARATE, ridx));
	txd->txdw4 |= htole32(SM(R12A_TXDW4_DATARATE_FB_LMT, 0x1f));
	txd->txdw6 |= htole32(SM(R21A_TXDW6_MBSSID, uvp->id));
	txd->txdw3 |= htole32(R12A_TXDW3_DRVRATE);
	r12a_tx_raid(sc, txd, ni, ismcast);

	if (!IEEE80211_QOS_HAS_SEQ(wh)) {
		/* Use HW sequence numbering for non-QoS frames. */
		txd->txdw8 |= htole32(R12A_TXDW8_HWSEQ_EN);
		txd->txdw3 |= htole32(SM(R12A_TXDW3_SEQ_SEL, uvp->id));
	} else {
		/* Set sequence number. */
		txd->txdw9 |= htole32(SM(R12A_TXDW9_SEQ,
		    M_SEQNO_GET(m) % IEEE80211_SEQ_RANGE));
	}
}
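/*
 * For reference, a hedged sketch of the SM() ("shift into mask") idiom
 * used throughout the descriptor setup above.  The rtwn register
 * headers are assumed to define SM()/MS() in the usual FreeBSD wireless
 * driver style, pairing each field mask FOO with a shift constant FOO_S:
 *
 *	#define SM(mask, val)	(((val) << mask##_S) & mask)
 *	#define MS(val, mask)	(((val) & mask) >> mask##_S)
 *
 * e.g. SM(R12A_TXDW4_DATARATE, ridx) shifts the rate index into the
 * DATARATE bit-field and masks off anything that does not fit, and the
 * result is byte-swapped to little-endian with htole32() before being
 * OR-ed into the descriptor word.
 */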
static void
r21au_vap_preattach(struct rtwn_softc *sc, struct ieee80211vap *vap)
{
	struct rtwn_vap *rvp = RTWN_VAP(vap);
	struct r12a_softc *rs = sc->sc_priv;

	r12a_vap_preattach(sc, vap);

	/* Install DFS newstate handler (non-monitor vaps only). */
	if (rvp->id != RTWN_VAP_ID_INVALID) {
		/* NB: the id indexes rs_newstate[], so it must be below
		 * nitems(); the previous '<=' bound was off by one. */
		KASSERT(rvp->id >= 0 && rvp->id < nitems(rs->rs_newstate),
		    ("%s: wrong vap id %d\n", __func__, rvp->id));

		rs->rs_newstate[rvp->id] = vap->iv_newstate;
		vap->iv_newstate = r21au_newstate;
	}
}
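/*
 * Hedged sketch of how the saved pointer is meant to be used: the
 * replacement handler does its DFS bookkeeping and then chains to the
 * original net80211 newstate callback captured above.  The body shown
 * here is illustrative (r21au_newstate_sketch() is not the driver's
 * actual r21au_newstate()), but the chaining call matches the save in
 * r21au_vap_preattach().
 */
static int
r21au_newstate_sketch(struct ieee80211vap *vap, enum ieee80211_state nstate,
    int arg)
{
	struct rtwn_softc *sc = vap->iv_ic->ic_softc;
	struct r12a_softc *rs = sc->sc_priv;
	struct rtwn_vap *rvp = RTWN_VAP(vap);

	/* ... start / stop CAC timers depending on nstate ... */

	/* Chain to the handler saved in r21au_vap_preattach(). */
	return (rs->rs_newstate[rvp->id](vap, nstate, arg));
}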
/*
 * Drop stale entries from a Tx ring before the vap is deleted.
 * If vap is NULL, just free everything and reset the cur / last pointers.
 */
static void
rtwn_pci_reset_tx_list(struct rtwn_softc *sc, struct ieee80211vap *vap,
    int qid)
{
	int i;

	if (vap == NULL) {
		if (qid != RTWN_PCI_BEACON_QUEUE) {
			/*
			 * Device was stopped; just clear all entries.
			 */
			rtwn_pci_reset_tx_ring_stopped(sc, qid);
		} else {
			for (i = 0; i < RTWN_PORT_COUNT; i++)
				rtwn_pci_reset_beacon_ring(sc, i);
		}
	} else if (qid == RTWN_PCI_BEACON_QUEUE &&
	    (vap->iv_opmode == IEEE80211_M_HOSTAP ||
	    vap->iv_opmode == IEEE80211_M_IBSS)) {
		struct rtwn_vap *uvp = RTWN_VAP(vap);

		rtwn_pci_reset_beacon_ring(sc, uvp->id);
	} else {
		struct rtwn_pci_softc *pc = RTWN_PCI_SOFTC(sc);
		struct rtwn_tx_ring *ring = &pc->tx_ring[qid];

		for (i = 0; i < RTWN_PCI_TX_LIST_COUNT; i++) {
			struct rtwn_tx_data *data = &ring->tx_data[i];

			if (data->ni != NULL && data->ni->ni_vap == vap) {
				/*
				 * NB: if some vap is still running,
				 * rtwn_pci_tx_done() will free the mbuf;
				 * otherwise, rtwn_stop() will reset all
				 * rings after device shutdown.
				 */
				ieee80211_free_node(data->ni);
				data->ni = NULL;
			}
		}
	}
}
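/*
 * Hedged caller sketch (illustrative): on vap deletion one would expect
 * every Tx queue, including the beacon queue, to be walked once.
 * RTWN_PCI_NTXQUEUES is assumed to be the driver's Tx queue count and
 * rtwn_pci_reset_lists_sketch() is not a real function name.
 */
static void
rtwn_pci_reset_lists_sketch(struct rtwn_softc *sc, struct ieee80211vap *vap)
{
	int qid;

	RTWN_ASSERT_LOCKED(sc);

	for (qid = 0; qid < RTWN_PCI_NTXQUEUES; qid++)
		rtwn_pci_reset_tx_list(sc, vap, qid);
}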
void
r12a_fill_tx_desc(struct rtwn_softc *sc, struct ieee80211_node *ni,
    struct mbuf *m, void *buf, uint8_t ridx, int maxretry)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = ni->ni_vap;
	struct rtwn_vap *uvp = RTWN_VAP(vap);
	struct ieee80211_frame *wh;
	struct r12a_tx_desc *txd;
	enum ieee80211_protmode prot;
	uint8_t type, tid, qos, qsel;
	int hasqos, ismcast, macid;

	wh = mtod(m, struct ieee80211_frame *);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	hasqos = IEEE80211_QOS_HAS_SEQ(wh);
	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);

	/* Select TX ring for this frame. */
	if (hasqos) {
		qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0];
		tid = qos & IEEE80211_QOS_TID;
	} else {
		qos = 0;
		tid = 0;
	}

	/* Fill Tx descriptor. */
	txd = (struct r12a_tx_desc *)buf;
	txd->flags0 |= R12A_FLAGS0_LSG | R12A_FLAGS0_FSG;
	if (ismcast)
		txd->flags0 |= R12A_FLAGS0_BMCAST;

	if (!ismcast) {
		/* Unicast frame, check if an ACK is expected. */
		if (!qos || (qos & IEEE80211_QOS_ACKPOLICY) !=
		    IEEE80211_QOS_ACKPOLICY_NOACK) {
			txd->txdw4 = htole32(R12A_TXDW4_RETRY_LMT_ENA);
			txd->txdw4 |= htole32(SM(R12A_TXDW4_RETRY_LMT,
			    maxretry));
		}

		struct rtwn_node *un = RTWN_NODE(ni);
		macid = un->id;

		if (type == IEEE80211_FC0_TYPE_DATA) {
			qsel = tid % RTWN_MAX_TID;

			if (m->m_flags & M_AMPDU_MPDU) {
				txd->txdw2 |= htole32(R12A_TXDW2_AGGEN);
				txd->txdw2 |= htole32(SM(R12A_TXDW2_AMPDU_DEN,
				    vap->iv_ampdu_density));
				txd->txdw3 |= htole32(SM(R12A_TXDW3_MAX_AGG,
				    0x1f));	/* XXX */
			} else
				txd->txdw2 |= htole32(R12A_TXDW2_AGGBK);

			if (sc->sc_ratectl == RTWN_RATECTL_NET80211) {
				txd->txdw2 |= htole32(R12A_TXDW2_SPE_RPT);
				sc->sc_tx_n_active++;
			}

			if (RTWN_RATE_IS_CCK(ridx) && ridx != RTWN_RIDX_CCK1 &&
			    (ic->ic_flags & IEEE80211_F_SHPREAMBLE))
				txd->txdw5 |= htole32(R12A_TXDW5_DATA_SHORT);

			prot = IEEE80211_PROT_NONE;
			if (ridx >= RTWN_RIDX_MCS(0)) {
				r12a_tx_set_ht40(sc, txd, ni);
				r12a_tx_set_sgi(sc, txd, ni);
				prot = ic->ic_htprotmode;
			} else if (ic->ic_flags & IEEE80211_F_USEPROT)
				prot = ic->ic_protmode;

			/* XXX fix last comparison for A-MSDU (in net80211) */
			/* XXX A-MPDU? */
			if (m->m_pkthdr.len + IEEE80211_CRC_LEN >
			    vap->iv_rtsthreshold &&
			    vap->iv_rtsthreshold != IEEE80211_RTS_MAX)
				prot = IEEE80211_PROT_RTSCTS;

			if (prot != IEEE80211_PROT_NONE)
				r12a_tx_protection(sc, txd, prot, ridx);
		} else	/* IEEE80211_FC0_TYPE_MGT */
			qsel = R12A_TXDW1_QSEL_MGNT;
	} else {
		macid = RTWN_MACID_BC;
		qsel = R12A_TXDW1_QSEL_MGNT;
	}

	txd->txdw1 |= htole32(SM(R12A_TXDW1_QSEL, qsel));
	txd->txdw1 |= htole32(SM(R12A_TXDW1_MACID, macid));
	txd->txdw4 |= htole32(SM(R12A_TXDW4_DATARATE, ridx));
	/* Data rate fallback limit (max). */
	txd->txdw4 |= htole32(SM(R12A_TXDW4_DATARATE_FB_LMT, 0x1f));
	/* XXX recheck for non-21au */
	txd->txdw6 |= htole32(SM(R21A_TXDW6_MBSSID, uvp->id));
	r12a_tx_raid(sc, txd, ni, ismcast);

	/* Force this rate if needed. */
	if (sc->sc_ratectl != RTWN_RATECTL_FW)
		txd->txdw3 |= htole32(R12A_TXDW3_DRVRATE);

	if (!hasqos) {
		/* Use HW sequence numbering for non-QoS frames. */
		txd->txdw8 |= htole32(R12A_TXDW8_HWSEQ_EN);
		txd->txdw3 |= htole32(SM(R12A_TXDW3_SEQ_SEL, uvp->id));
	} else {
		uint16_t seqno;

		if (m->m_flags & M_AMPDU_MPDU) {
			seqno = ni->ni_txseqs[tid];
			/* NB: clear Fragment Number field. */
			*(uint16_t *)wh->i_seq = 0;
			ni->ni_txseqs[tid]++;
		} else
			seqno = M_SEQNO_GET(m) % IEEE80211_SEQ_RANGE;

		/* Set sequence number. */
		txd->txdw9 |= htole32(SM(R12A_TXDW9_SEQ, seqno));
	}
}
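/*
 * Hedged usage sketch (illustrative, not the driver's actual Tx path):
 * r12a_fill_tx_desc() OR-s bits into most descriptor words without
 * clearing them first, so the caller is expected to hand it a zeroed
 * buffer.  rtwn_tx_example() and the chosen ridx/maxretry values are
 * assumptions for illustration only.
 */
static void
rtwn_tx_example(struct rtwn_softc *sc, struct ieee80211_node *ni,
    struct mbuf *m)
{
	uint8_t desc[sizeof(struct r12a_tx_desc)];

	memset(desc, 0, sizeof(desc));	/* required: fields are OR-ed in */
	r12a_fill_tx_desc(sc, ni, m, desc, RTWN_RIDX_OFDM6,
	    4 /* maxretry, arbitrary for the example */);
	/* ... hand desc + mbuf to the bus glue for transmission ... */
}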