adf_nbuf_t
htt_tx_send_batch(
    htt_pdev_handle pdev,
    adf_nbuf_t head_msdu,
    int num_msdus)
{
    adf_nbuf_t rejected = NULL;
    u_int16_t *msdu_id_storage;
    u_int16_t msdu_id;
    adf_nbuf_t msdu;
    /*
     * FOR NOW, iterate through the batch, sending the frames singly.
     * Eventually HTC and HIF should be able to accept a batch of
     * data frames rather than singles.
     */
    msdu = head_msdu;
    while (num_msdus--) {
        adf_nbuf_t next_msdu = adf_nbuf_next(msdu);
        msdu_id_storage = ol_tx_msdu_id_storage(msdu);
        msdu_id = *msdu_id_storage;
        /* htt_tx_send_base returns 0 on success, 1 on failure */
        if (htt_tx_send_base(pdev, msdu, msdu_id, pdev->download_len,
                             num_msdus))
        {
            adf_nbuf_set_next(msdu, rejected);
            rejected = msdu;
        }
        msdu = next_msdu;
    }
    return rejected;
}
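/*
 * Usage sketch (illustrative only): a hypothetical caller that submits a
 * batch and walks whatever htt_tx_send_batch() hands back.  The rejected
 * frames come back linked through adf_nbuf_next(), in reverse order of
 * submission.  The name example_requeue_one() and the requeue policy are
 * assumptions for illustration, not part of this driver.
 */
static void
example_tx_batch(htt_pdev_handle pdev, adf_nbuf_t head_msdu, int num_msdus)
{
    adf_nbuf_t rejected = htt_tx_send_batch(pdev, head_msdu, num_msdus);
    while (rejected) {
        adf_nbuf_t next = adf_nbuf_next(rejected);
        example_requeue_one(rejected); /* hypothetical requeue helper */
        rejected = next;
    }
}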
void
ol_rx_reorder_flush(
    struct ol_txrx_vdev_t *vdev,
    struct ol_txrx_peer_t *peer,
    unsigned tid,
    unsigned seq_num_start,
    unsigned seq_num_end,
    enum htt_rx_flush_action action)
{
    struct ol_txrx_pdev_t *pdev;
    unsigned win_sz_mask;
    struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
    adf_nbuf_t head_msdu = NULL;
    adf_nbuf_t tail_msdu = NULL;

    pdev = vdev->pdev;
    win_sz_mask = peer->tids_rx_reorder[tid].win_sz_mask;
    seq_num_start &= win_sz_mask;
    seq_num_end &= win_sz_mask;

    do {
        rx_reorder_array_elem =
            &peer->tids_rx_reorder[tid].array[seq_num_start];
        seq_num_start = (seq_num_start + 1) & win_sz_mask;

        if (rx_reorder_array_elem->head) {
            if (head_msdu == NULL) {
                head_msdu = rx_reorder_array_elem->head;
                tail_msdu = rx_reorder_array_elem->tail;
                rx_reorder_array_elem->head = NULL;
                rx_reorder_array_elem->tail = NULL;
                continue;
            }
            adf_nbuf_set_next(tail_msdu, rx_reorder_array_elem->head);
            tail_msdu = rx_reorder_array_elem->tail;
            rx_reorder_array_elem->head = rx_reorder_array_elem->tail = NULL;
        }
    } while (seq_num_start != seq_num_end);

    ol_rx_defrag_waitlist_remove(peer, tid);

    if (head_msdu) {
        /* rx_opt_proc takes a NULL-terminated list of msdu netbufs */
        adf_nbuf_set_next(tail_msdu, NULL);
        if (action == htt_rx_flush_release) {
            peer->rx_opt_proc(vdev, peer, tid, head_msdu);
        } else {
            do {
                adf_nbuf_t next;
                next = adf_nbuf_next(head_msdu);
                htt_rx_desc_frame_free(pdev->htt_pdev, head_msdu);
                head_msdu = next;
            } while (head_msdu);
        }
    }
}
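/*
 * Illustrative sketch of the sequence-number masking used above: the
 * reorder window size is assumed to be a power of two, so "& win_sz_mask"
 * is equivalent to a modulo by the window size and makes the index wrap
 * around the circular reorder array.  Standalone example, not driver code.
 */
#include <assert.h>

static unsigned
example_reorder_next_index(unsigned idx, unsigned win_sz_mask)
{
    return (idx + 1) & win_sz_mask; /* wraps from win_sz_mask back to 0 */
}

static void
example_reorder_index_demo(void)
{
    unsigned win_sz_mask = 64 - 1;  /* a 64-entry reorder window */
    assert(example_reorder_next_index(62, win_sz_mask) == 63);
    assert(example_reorder_next_index(63, win_sz_mask) == 0); /* wraparound */
}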
adf_nbuf_t
ol_rx_pn_check_base(
    struct ol_txrx_vdev_t *vdev,
    struct ol_txrx_peer_t *peer,
    unsigned tid,
    adf_nbuf_t msdu_list)
{
    struct ol_txrx_pdev_t *pdev = vdev->pdev;
    union htt_rx_pn_t *last_pn;
    adf_nbuf_t out_list_head = NULL;
    adf_nbuf_t out_list_tail = NULL;
    adf_nbuf_t mpdu;
    int index; /* unicast vs. multicast */
    int pn_len;
    void *rx_desc;
    int last_pn_valid;

    /* Make sure the host PN check is not redundant */
    if ((adf_os_atomic_read(&peer->fw_pn_check)) ||
        (vdev->opmode == wlan_op_mode_ibss)) {
        return msdu_list;
    }

    /* First, check whether the PN check applies */
    rx_desc = htt_rx_msdu_desc_retrieve(pdev->htt_pdev, msdu_list);
    adf_os_assert(htt_rx_msdu_has_wlan_mcast_flag(pdev->htt_pdev, rx_desc));
    index = htt_rx_msdu_is_wlan_mcast(pdev->htt_pdev, rx_desc) ?
        txrx_sec_mcast : txrx_sec_ucast;
    pn_len = pdev->rx_pn[peer->security[index].sec_type].len;
    if (pn_len == 0) {
        return msdu_list;
    }

    last_pn_valid = peer->tids_last_pn_valid[tid];
    last_pn = &peer->tids_last_pn[tid];
    mpdu = msdu_list;
    while (mpdu) {
        adf_nbuf_t mpdu_tail, next_mpdu;
        union htt_rx_pn_t new_pn;
        int pn_is_replay = 0;

        rx_desc = htt_rx_msdu_desc_retrieve(pdev->htt_pdev, mpdu);
        /*
         * Find the last MSDU within this MPDU, and
         * find the first MSDU within the next MPDU.
         */
        ol_rx_mpdu_list_next(pdev, mpdu, &mpdu_tail, &next_mpdu);

        /* Don't check for PN replay on non-encrypted frames */
        if (!htt_rx_mpdu_is_encrypted(pdev->htt_pdev, rx_desc)) {
            ADD_MPDU_TO_LIST(out_list_head, out_list_tail, mpdu, mpdu_tail);
            mpdu = next_mpdu;
            continue;
        }

        /* retrieve the PN from the rx descriptor */
        htt_rx_mpdu_desc_pn(pdev->htt_pdev, rx_desc, &new_pn, pn_len);

        /* if there was no prior PN, there's nothing to check */
        if (last_pn_valid) {
            pn_is_replay = pdev->rx_pn[peer->security[index].sec_type].cmp(
                &new_pn, last_pn, index == txrx_sec_ucast, vdev->opmode);
        } else {
            last_pn_valid = peer->tids_last_pn_valid[tid] = 1;
        }

        if (pn_is_replay) {
            adf_nbuf_t msdu;
            static u_int32_t last_pncheck_print_time = 0;
            int log_level;
            u_int32_t current_time_ms;

            /*
             * This MPDU failed the PN check:
             * 1. Notify the control SW of the PN failure
             *    (so countermeasures can be taken, if necessary)
             * 2. Discard all the MSDUs from this MPDU.
             */
            msdu = mpdu;
            current_time_ms = adf_os_ticks_to_msecs(adf_os_ticks());
            if (TXRX_PN_CHECK_FAILURE_PRINT_PERIOD_MS <
                (current_time_ms - last_pncheck_print_time)) {
                last_pncheck_print_time = current_time_ms;
                log_level = TXRX_PRINT_LEVEL_WARN;
            } else {
                log_level = TXRX_PRINT_LEVEL_INFO2;
            }
            TXRX_PRINT(log_level,
                "PN check failed - TID %d, peer %p "
                "(%02x:%02x:%02x:%02x:%02x:%02x) %s\n"
                "    old PN (u64 x2) = 0x%08llx %08llx (LSBs = %lld)\n"
                "    new PN (u64 x2) = 0x%08llx %08llx (LSBs = %lld)\n"
                "    new seq num = %d\n",
                tid, peer,
                peer->mac_addr.raw[0], peer->mac_addr.raw[1],
                peer->mac_addr.raw[2], peer->mac_addr.raw[3],
                peer->mac_addr.raw[4], peer->mac_addr.raw[5],
                (index == txrx_sec_ucast) ? "ucast" : "mcast",
                last_pn->pn128[1], last_pn->pn128[0],
                last_pn->pn128[0] & 0xffffffffffffULL,
                new_pn.pn128[1], new_pn.pn128[0],
                new_pn.pn128[0] & 0xffffffffffffULL,
                htt_rx_mpdu_desc_seq_num(pdev->htt_pdev, rx_desc));
#if defined(ENABLE_RX_PN_TRACE)
            ol_rx_pn_trace_display(pdev, 1);
#endif /* ENABLE_RX_PN_TRACE */
            ol_rx_err(
                pdev->ctrl_pdev, vdev->vdev_id, peer->mac_addr.raw, tid,
                htt_rx_mpdu_desc_tsf32(pdev->htt_pdev, rx_desc),
                OL_RX_ERR_PN, mpdu, NULL, 0);
            /* free all MSDUs within this MPDU */
            do {
                adf_nbuf_t next_msdu;
                OL_RX_ERR_STATISTICS_1(pdev, vdev, peer, rx_desc,
                                       OL_RX_ERR_PN);
                next_msdu = adf_nbuf_next(msdu);
                htt_rx_desc_frame_free(pdev->htt_pdev, msdu);
                if (msdu == mpdu_tail) {
                    break;
                } else {
                    msdu = next_msdu;
                }
            } while (1);
        } else {
            ADD_MPDU_TO_LIST(out_list_head, out_list_tail, mpdu, mpdu_tail);
            /*
             * Remember the new PN.
             * For simplicity, just do 2 64-bit word copies to cover the
             * worst case (WAPI), regardless of the length of the PN.
             * This is more efficient than doing a conditional branch to
             * copy only the relevant portion.
             */
            last_pn->pn128[0] = new_pn.pn128[0];
            last_pn->pn128[1] = new_pn.pn128[1];
            OL_RX_PN_TRACE_ADD(pdev, peer, tid, rx_desc);
        }

        mpdu = next_mpdu;
    }
    /* make sure the list is NULL-terminated */
    if (out_list_tail) {
        adf_nbuf_set_next(out_list_tail, NULL);
    }
    return out_list_head;
}
void
ol_rx_fwd_check(
    struct ol_txrx_vdev_t *vdev,
    struct ol_txrx_peer_t *peer,
    unsigned tid,
    adf_nbuf_t msdu_list)
{
    struct ol_txrx_pdev_t *pdev = vdev->pdev;
    adf_nbuf_t deliver_list_head = NULL;
    adf_nbuf_t deliver_list_tail = NULL;
    adf_nbuf_t msdu;

    if (OL_CFG_RAW_RX_LIKELINESS(pdev->rx_decap_mode == htt_pkt_type_raw)) {
        /*
         * Forwarding is not handled since the keys would reside on the
         * Access Controller.
         * Full-fledged Mixed VAP functionality can add the requisite
         * exceptions in this function.
         */
        ol_rx_deliver(vdev, peer, tid, msdu_list);
        return;
    }

    msdu = msdu_list;
    while (msdu) {
        struct ol_txrx_vdev_t *tx_vdev;
        void *rx_desc;
        /*
         * Remember the next list elem, because our processing
         * may cause the MSDU to get linked into a different list.
         */
        msdu_list = adf_nbuf_next(msdu);

        rx_desc = htt_rx_msdu_desc_retrieve(pdev->htt_pdev, msdu);
        if (htt_rx_msdu_forward(pdev->htt_pdev, rx_desc)) {
            /*
             * Use the same vdev that received the frame to
             * transmit the frame.
             * This is exactly what we want for intra-BSS forwarding,
             * like STA-to-STA forwarding and multicast echo.
             * If this were an inter-BSS forwarding case (which is not
             * currently supported), then the tx vdev would be different
             * from the rx vdev.
             * On the LL host the vdevs are not actually used for tx,
             * so it would still work to use the rx vdev rather than
             * the tx vdev.
             * For HL, the tx classification searches for the DA within
             * the given vdev, so we would want to get the DA peer ID
             * from the target, so we could locate the tx vdev.
             */
            tx_vdev = vdev;
            /*
             * This MSDU needs to be forwarded to the tx path.
             * Check whether it also needs to be sent to the OS shim,
             * in which case we need to make a copy (or clone?).
             */
            if (htt_rx_msdu_discard(pdev->htt_pdev, rx_desc)) {
                htt_rx_msdu_desc_free(pdev->htt_pdev, msdu);
                ol_rx_fwd_to_tx(tx_vdev, msdu);
                msdu = NULL; /* already handled this MSDU */
            } else {
                adf_nbuf_t copy;
                copy = adf_nbuf_copy(msdu);
                if (copy) {
                    ol_rx_fwd_to_tx(tx_vdev, copy);
                }
            }
        }
        if (msdu) {
            /* send this frame to the OS */
            OL_TXRX_LIST_APPEND(deliver_list_head, deliver_list_tail, msdu);
        }
        msdu = msdu_list;
    }
    if (deliver_list_head) {
        adf_nbuf_set_next(deliver_list_tail, NULL); /* add NULL terminator */
        ol_rx_deliver(vdev, peer, tid, deliver_list_head);
    }
}
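/*
 * Sketch of what a list-append helper like OL_TXRX_LIST_APPEND could look
 * like, assuming netbufs are chained via adf_nbuf_set_next().  The real
 * macro in this driver may differ; this version is for illustration only.
 */
#define EXAMPLE_LIST_APPEND(head, tail, elem)   \
    do {                                        \
        if (!(head)) {                          \
            (head) = (elem);                    \
        } else {                                \
            adf_nbuf_set_next((tail), (elem));  \
        }                                       \
        (tail) = (elem);                        \
    } while (0)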
adf_nbuf_t
ol_rx_pn_check_base(
    struct ol_txrx_vdev_t *vdev,
    struct ol_txrx_peer_t *peer,
    unsigned tid,
    adf_nbuf_t msdu_list)
{
    struct ol_txrx_pdev_t *pdev = vdev->pdev;
    union htt_rx_pn_t *last_pn, *global_pn, *suspect_pn;
    adf_nbuf_t out_list_head = NULL;
    adf_nbuf_t out_list_tail = NULL;
    adf_nbuf_t mpdu;
    int index; /* unicast vs. multicast */
    int pn_len;
    void *rx_desc;
    int last_pn_valid;

    /* First, check whether the PN check applies */
    rx_desc = htt_rx_msdu_desc_retrieve(pdev->htt_pdev, msdu_list);
    adf_os_assert(htt_rx_msdu_has_wlan_mcast_flag(pdev->htt_pdev, rx_desc));
    index = htt_rx_msdu_is_wlan_mcast(pdev->htt_pdev, rx_desc) ?
        txrx_sec_mcast : txrx_sec_ucast;
    pn_len = pdev->rx_pn[peer->security[index].sec_type].len;
    if (pn_len == 0) {
        return msdu_list;
    }

    last_pn_valid = peer->tids_last_pn_valid[tid];
    last_pn = &peer->tids_last_pn[tid];
    global_pn = &peer->global_pn;
    suspect_pn = &peer->tids_suspect_pn[tid];
    mpdu = msdu_list;
    while (mpdu) {
        adf_nbuf_t mpdu_tail, next_mpdu;
        union htt_rx_pn_t new_pn;
        int pn_is_replay = 0, update_last_pn = 1;
#if ATH_SUPPORT_WAPI
        bool is_mpdu_encrypted = 0;
        bool is_unencrypted_pkt_wai = 0;
#endif

        rx_desc = htt_rx_msdu_desc_retrieve(pdev->htt_pdev, mpdu);
        /*
         * Find the last MSDU within this MPDU, and
         * find the first MSDU within the next MPDU.
         */
        ol_rx_mpdu_list_next(pdev, mpdu, &mpdu_tail, &next_mpdu);

#if ATH_SUPPORT_WAPI
        /*
         * Don't check for PN replay on non-encrypted frames,
         * or if this is a WAI packet.
         */
        is_mpdu_encrypted = htt_rx_mpdu_is_encrypted(pdev->htt_pdev, rx_desc);
        is_unencrypted_pkt_wai = is_mpdu_encrypted ?
            false : vdev->osif_check_wai(vdev->osif_vdev, mpdu, mpdu_tail);
        if ((!vdev->drop_unenc && !is_mpdu_encrypted) ||
            is_unencrypted_pkt_wai) {
#else
        /* Don't check for PN replay on non-encrypted frames */
        if (!vdev->drop_unenc &&
            !htt_rx_mpdu_is_encrypted(pdev->htt_pdev, rx_desc)) {
#endif
            ADD_MPDU_TO_LIST(out_list_head, out_list_tail, mpdu, mpdu_tail);
            mpdu = next_mpdu;
            continue;
        }

        /* retrieve the PN from the rx descriptor */
        htt_rx_mpdu_desc_pn(pdev->htt_pdev, rx_desc, &new_pn, pn_len);

        /* if there was no prior PN, there's nothing to check */
        if (last_pn_valid) {
            pn_is_replay = pdev->rx_pn[peer->security[index].sec_type].cmp(
                &new_pn, last_pn, index == txrx_sec_ucast, vdev->opmode);
        } else if (peer->authorize) {
            last_pn_valid = peer->tids_last_pn_valid[tid] = 1;
        }

        if (peer->authorize &&
            peer->security[index].sec_type == htt_sec_type_aes_ccmp)
        {
            if ((new_pn.pn48 & 0xffffffffffffULL) >
                ((last_pn->pn48 + MAX_CCMP_PN_GAP_ERR_CHECK) &
                 0xffffffffffffULL))
            {
                /*
                 * The PN jump w.r.t. last_pn is > MAX_CCMP_PN_GAP_ERR_CHECK,
                 * so the PN of the current frame is suspect.
                 */
                if (suspect_pn->pn48) {
                    /*
                     * Check whether the PN of the current frame follows
                     * the previous suspect PN sequence or not.
                     */
                    if ((new_pn.pn48 & 0xffffffffffffULL) <
                        (suspect_pn->pn48 & 0xffffffffffffULL))
                    {
                        /*
                         * The PN of the current frame is less than the PN
                         * of the previously received frame.  Since we are
                         * not sure about the previous suspect PN, check
                         * the current PN against the global PN to detect
                         * replay.
                         */
                        if ((new_pn.pn48 & 0xffffffffffffULL) <
                            (global_pn->pn48 & 0xffffffffffffULL))
                        {
                            /* replay violation */
                            pn_is_replay = 1;
                        } else {
                            /*
                             * The current PN follows the global PN, so
                             * mark it as a suspect PN.
                             * Don't update last_pn & global_pn.
                             */
                            suspect_pn->pn128[0] = new_pn.pn128[0];
                            suspect_pn->pn128[1] = new_pn.pn128[1];
                            update_last_pn = 0;
                        }
                    } else if ((new_pn.pn48 & 0xffffffffffffULL) <
                               ((suspect_pn->pn48 +
                                 MAX_CCMP_PN_GAP_ERR_CHECK) &
                                0xffffffffffffULL))
                    {
                        /*
                         * The current PN follows the previous suspect PN
                         * sequence.
                         * Update last_pn & global_pn (update_last_pn = 1).
                         */
                    } else {
                        /*
                         * The current PN follows neither the previous
                         * suspect PN nor last_pn.
                         * Mark this as the new suspect and don't update
                         * last_pn & global_pn.
                         */
                        suspect_pn->pn128[0] = new_pn.pn128[0];
                        suspect_pn->pn128[1] = new_pn.pn128[1];
                        update_last_pn = 0;
                    }
                } else {
                    /*
                     * A new jump in the PN was observed, so mark this PN
                     * as suspect and don't update last_pn/global_pn.
                     */
                    suspect_pn->pn128[0] = new_pn.pn128[0];
                    suspect_pn->pn128[1] = new_pn.pn128[1];
                    update_last_pn = 0;
                }
            } else {
                /* Valid PN; update last_pn & global_pn (update_last_pn = 1). */
            }
        }

        if (pn_is_replay) {
            adf_nbuf_t msdu;
            /*
             * This MPDU failed the PN check:
             * 1. Notify the control SW of the PN failure
             *    (so countermeasures can be taken, if necessary)
             * 2. Discard all the MSDUs from this MPDU.
             */
            msdu = mpdu;
            TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
                "PN check failed on offload path - TID %d, peer %p "
                "(%02x:%02x:%02x:%02x:%02x:%02x) %s\n"
                "    old PN (u64 x2) = 0x%08llx %08llx (LSBs = %lld)\n"
                "    new PN (u64 x2) = 0x%08llx %08llx (LSBs = %lld)\n"
                "    global PN (u64 x2) = 0x%08llx %08llx (LSBs = %lld)\n"
                "    suspect PN (u64 x2) = 0x%08llx %08llx (LSBs = %lld)\n"
#if RX_DEBUG
                "    htt_status = %d\n"
#endif
                "    prev seq num = %d\n"
                "    new seq num = %d\n",
                tid, peer,
                peer->mac_addr.raw[0], peer->mac_addr.raw[1],
                peer->mac_addr.raw[2], peer->mac_addr.raw[3],
                peer->mac_addr.raw[4], peer->mac_addr.raw[5],
                (index == txrx_sec_ucast) ? "ucast" : "mcast",
                last_pn->pn128[1], last_pn->pn128[0],
                last_pn->pn128[0] & 0xffffffffffffULL,
                new_pn.pn128[1], new_pn.pn128[0],
                new_pn.pn128[0] & 0xffffffffffffULL,
                global_pn->pn128[1], global_pn->pn128[0],
                global_pn->pn128[0] & 0xffffffffffffULL,
                suspect_pn->pn128[1], suspect_pn->pn128[0],
                suspect_pn->pn128[0] & 0xffffffffffffULL,
#if RX_DEBUG
                htt_rx_mpdu_status(pdev->htt_pdev),
#endif
                peer->tids_last_seq[tid],
                htt_rx_mpdu_desc_seq_num(pdev->htt_pdev, rx_desc));
            ol_rx_pn_trace_display(pdev, 1);
            ol_rx_err(
                pdev->ctrl_pdev, vdev->vdev_id, peer->mac_addr.raw, tid,
                htt_rx_mpdu_desc_tsf32(pdev->htt_pdev, rx_desc),
                OL_RX_ERR_PN, mpdu);
            /* free all MSDUs within this MPDU */
            do {
                adf_nbuf_t next_msdu;
                next_msdu = adf_nbuf_next(msdu);
                htt_rx_desc_frame_free(pdev->htt_pdev, msdu);
                if (msdu == mpdu_tail) {
                    break;
                } else {
                    msdu = next_msdu;
                }
            } while (1);
        } else {
            ADD_MPDU_TO_LIST(out_list_head, out_list_tail, mpdu, mpdu_tail);
            if (peer->authorize) {
                /*
                 * Remember the new PN.
                 * For simplicity, just do 2 64-bit word copies to cover
                 * the worst case (WAPI), regardless of the length of the
                 * PN.  This is more efficient than doing a conditional
                 * branch to copy only the relevant portion.
                 */
                if (update_last_pn) {
                    last_pn->pn128[0] = new_pn.pn128[0];
                    last_pn->pn128[1] = new_pn.pn128[1];
                    global_pn->pn128[0] = new_pn.pn128[0];
                    global_pn->pn128[1] = new_pn.pn128[1];
                    suspect_pn->pn128[0] = 0;
                    suspect_pn->pn128[1] = 0;
                }
                OL_RX_PN_TRACE_ADD(pdev, peer, tid, rx_desc);
            }
        }

        mpdu = next_mpdu;
    }
    /* make sure the list is NULL-terminated */
    if (out_list_tail) {
        adf_nbuf_set_next(out_list_tail, NULL);
    }
    return out_list_head;
}

void
ol_rx_pn_check(
    struct ol_txrx_vdev_t *vdev,
    struct ol_txrx_peer_t *peer,
    unsigned tid,
    adf_nbuf_t msdu_list)
{
    msdu_list = ol_rx_pn_check_base(vdev, peer, tid, msdu_list);
    ol_rx_fwd_check(vdev, peer, tid, msdu_list);
}
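/*
 * Simplified, standalone sketch of the CCMP "suspect PN" heuristic above,
 * assuming 48-bit PNs and the same MAX_CCMP_PN_GAP_ERR_CHECK threshold.
 * It only distills the decision tree; list handling is omitted.  A SUSPECT
 * verdict means "deliver the frame but do not advance last_pn/global_pn";
 * the example_ names and u_int64_t parameters are assumptions, not part of
 * this file.
 */
#define EXAMPLE_PN48(pn) ((pn) & 0xffffffffffffULL)

enum example_pn_verdict {
    EXAMPLE_PN_ACCEPT,
    EXAMPLE_PN_SUSPECT,
    EXAMPLE_PN_REPLAY,
};

static enum example_pn_verdict
example_ccmp_pn_gap_check(
    u_int64_t new_pn, u_int64_t last_pn,
    u_int64_t global_pn, u_int64_t suspect_pn,
    u_int64_t max_gap)
{
    if (EXAMPLE_PN48(new_pn) <= EXAMPLE_PN48(last_pn + max_gap)) {
        return EXAMPLE_PN_ACCEPT;   /* normal forward progress */
    }
    if (!suspect_pn) {
        return EXAMPLE_PN_SUSPECT;  /* first large jump seen */
    }
    if (EXAMPLE_PN48(new_pn) < EXAMPLE_PN48(suspect_pn)) {
        /* went backwards vs. the suspect; fall back to the global PN */
        return (EXAMPLE_PN48(new_pn) < EXAMPLE_PN48(global_pn)) ?
            EXAMPLE_PN_REPLAY : EXAMPLE_PN_SUSPECT;
    }
    if (EXAMPLE_PN48(new_pn) < EXAMPLE_PN48(suspect_pn + max_gap)) {
        return EXAMPLE_PN_ACCEPT;   /* follows the suspect PN sequence */
    }
    return EXAMPLE_PN_SUSPECT;      /* yet another disjoint jump */
}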
void
ol_rx_fwd_check(
    struct ol_txrx_vdev_t *vdev,
    struct ol_txrx_peer_t *peer,
    unsigned tid,
    adf_nbuf_t msdu_list)
{
    struct ol_txrx_pdev_t *pdev = vdev->pdev;
    adf_nbuf_t deliver_list_head = NULL;
    adf_nbuf_t deliver_list_tail = NULL;
    adf_nbuf_t msdu;

    msdu = msdu_list;
    while (msdu) {
        struct ol_txrx_vdev_t *tx_vdev;
        void *rx_desc;
        /*
         * Remember the next list elem, because our processing
         * may cause the MSDU to get linked into a different list.
         */
        msdu_list = adf_nbuf_next(msdu);

        rx_desc = htt_rx_msdu_desc_retrieve(pdev->htt_pdev, msdu);
        if (!vdev->disable_intrabss_fwd &&
            htt_rx_msdu_forward(pdev->htt_pdev, rx_desc))
        {
#ifdef QCA_ARP_SPOOFING_WAR
            void *filter_cb;
#endif
            int do_not_fwd = 0;
            /*
             * Use the same vdev that received the frame to
             * transmit the frame.
             * This is exactly what we want for intra-BSS forwarding,
             * like STA-to-STA forwarding and multicast echo.
             * If this were an inter-BSS forwarding case (which is not
             * currently supported), then the tx vdev would be different
             * from the rx vdev.
             * On the LL host the vdevs are not actually used for tx,
             * so it would still work to use the rx vdev rather than
             * the tx vdev.
             * For HL, the tx classification searches for the DA within
             * the given vdev, so we would want to get the DA peer ID
             * from the target, so we could locate the tx vdev.
             */
            tx_vdev = vdev;
            /*
             * Copy the TID value of the RX packet to the forwarded
             * packet, unless it is the non-QoS TID.  For the non-QoS
             * TID, fill in an invalid TID so that the FW takes care
             * of filling in the proper TID.
             */
            if (tid != HTT_NON_QOS_TID) {
                adf_nbuf_set_tid(msdu, tid);
            } else {
                adf_nbuf_set_tid(msdu, ADF_NBUF_TX_EXT_TID_INVALID);
            }
#ifdef QCA_ARP_SPOOFING_WAR
            filter_cb = (void *)NBUF_CB_PTR(msdu);
            if (filter_cb) {
                do_not_fwd = (*(hdd_filter_cb_t)filter_cb)(vdev->vdev_id,
                    msdu, RX_INTRA_BSS_FWD);
            }
#endif
            /*
             * This MSDU needs to be forwarded to the tx path.
             * Check whether it also needs to be sent to the OS shim,
             * in which case we need to make a copy (or clone?).
             */
            if (!do_not_fwd) {
                if (htt_rx_msdu_discard(pdev->htt_pdev, rx_desc)) {
                    htt_rx_msdu_desc_free(pdev->htt_pdev, msdu);
                    adf_net_buf_debug_release_skb(msdu);
                    ol_rx_fwd_to_tx(tx_vdev, msdu);
                    msdu = NULL; /* already handled this MSDU */
                    tx_vdev->fwd_tx_packets++;
                    vdev->fwd_rx_packets++;
                    TXRX_STATS_ADD(pdev,
                        pub.rx.intra_bss_fwd.packets_fwd, 1);
                } else {
                    adf_nbuf_t copy;
                    copy = adf_nbuf_copy(msdu);
                    if (copy) {
                        ol_rx_fwd_to_tx(tx_vdev, copy);
                        tx_vdev->fwd_tx_packets++;
                    }
                    TXRX_STATS_ADD(pdev,
                        pub.rx.intra_bss_fwd.packets_stack_n_fwd, 1);
                }
            }
        } else {
            TXRX_STATS_ADD(pdev, pub.rx.intra_bss_fwd.packets_stack, 1);
        }
        if (msdu) {
            /* send this frame to the OS */
            OL_TXRX_LIST_APPEND(deliver_list_head, deliver_list_tail, msdu);
        }
        msdu = msdu_list;
    }
    if (deliver_list_head) {
        adf_nbuf_set_next(deliver_list_tail, NULL); /* add NULL terminator */
        if (ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev)) {
            ol_rx_in_order_deliver(vdev, peer, tid, deliver_list_head);
        } else {
            ol_rx_deliver(vdev, peer, tid, deliver_list_head);
        }
    }
}
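/*
 * Illustrative sketch of a filter callback compatible with the
 * QCA_ARP_SPOOFING_WAR hook above.  From the call site, hdd_filter_cb_t
 * is assumed to take (vdev_id, netbuf, packet_path) and return non-zero
 * to suppress intra-BSS forwarding; the parameter types and the drop
 * policy shown here are assumptions for illustration only.
 */
static int
example_arp_filter_cb(u_int8_t vdev_id, adf_nbuf_t msdu, int path)
{
    if (path != RX_INTRA_BSS_FWD) {
        return 0; /* only interfere with the intra-BSS forwarding path */
    }
    /* a real implementation would inspect the frame for spoofed ARP here */
    return 0; /* 0 = forward as usual, non-zero = do not forward */
}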