/* WMI Event handler register API */

/*
 * Return the registration-table index holding event_id, or -1 if no
 * handler is registered for that event.  Only slots below max_event_idx
 * are live; the unregister path below keeps the table dense.
 */
int wmi_unified_get_event_handler_ix(wmi_unified_t wmi_handle,
                                     WMI_EVT_ID event_id)
{
    u_int32_t idx = 0;
    for (idx = 0; (idx < wmi_handle->max_event_idx &&
                   idx < WMI_UNIFIED_MAX_EVENT); ++idx) {
        if (wmi_handle->event_id[idx] == event_id &&
            wmi_handle->event_handler[idx] != NULL ) {
            return idx;
        }
    }
    return -1;
}

/*
 * Register handler_func for event_id.
 * Returns 0 on success, -1 when the event already has a handler or the
 * table is full.
 */
int wmi_unified_register_event_handler(wmi_unified_t wmi_handle,
                                       WMI_EVT_ID event_id,
                                       wmi_unified_event_handler handler_func)
{
    u_int32_t idx=0;
    if ( wmi_unified_get_event_handler_ix( wmi_handle, event_id) != -1) {
        printk("%s : event handler already registered 0x%x \n",
               __func__, event_id);
        return -1;
    }
    if ( wmi_handle->max_event_idx == WMI_UNIFIED_MAX_EVENT ) {
        printk("%s : no more event handlers 0x%x \n",
               __func__, event_id);
        return -1;
    }
    /* append at the first free slot */
    idx=wmi_handle->max_event_idx;
    wmi_handle->event_handler[idx] = handler_func;
    wmi_handle->event_id[idx] = event_id;
    wmi_handle->max_event_idx++;
    return 0;
}

/*
 * Unregister the handler for event_id.  The vacated slot is back-filled
 * with the last live entry so the table stays dense.
 * NOTE(review): idx is u_int32_t, so the "== -1" test relies on the usual
 * arithmetic conversions (-1 converts to UINT_MAX).  It works, but a
 * signed int would be clearer.
 */
int wmi_unified_unregister_event_handler(wmi_unified_t wmi_handle,
                                         WMI_EVT_ID event_id)
{
    u_int32_t idx=0;
    if ( (idx = wmi_unified_get_event_handler_ix( wmi_handle,
                                                  event_id)) == -1) {
        printk("%s : event handler is not registered: event id 0x%x \n",
               __func__, event_id);
        return -1;
    }
    wmi_handle->event_handler[idx] = NULL;
    wmi_handle->event_id[idx] = 0;
    --wmi_handle->max_event_idx;
    /* back-fill the hole with the (previously) last entry */
    wmi_handle->event_handler[idx] =
        wmi_handle->event_handler[wmi_handle->max_event_idx];
    wmi_handle->event_id[idx] =
        wmi_handle->event_id[wmi_handle->max_event_idx] ;
    return 0;
}

#if 0 /* currently not used */
/*
 * Dispatch a received WMI event buffer to its registered handler.
 * Consumes (frees) evt_buf on all paths; returns the handler's status,
 * or -1 when the header pull fails or no handler is registered.
 * NOTE(review): the matching #endif for this "#if 0" is not visible in
 * this chunk -- confirm it is present later in the file.
 * NOTE(review): idx is u_int32_t here too; "idx == -1" again depends on
 * the unsigned conversion of -1.
 */
static int wmi_unified_event_rx(struct wmi_unified *wmi_handle,
                                wmi_buf_t evt_buf)
{
    u_int32_t id;
    u_int8_t *event;
    u_int16_t len;
    int status = -1;
    u_int32_t idx = 0;

    ASSERT(evt_buf != NULL);

    id = WMI_GET_FIELD(adf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);

    if (adf_nbuf_pull_head(evt_buf, sizeof(WMI_CMD_HDR)) == NULL)
        goto end;

    idx = wmi_unified_get_event_handler_ix(wmi_handle, id);
    if (idx == -1) {
        pr_err("%s : event handler is not registered: event id: 0x%x\n",
               __func__, id);
        goto end;
    }

    event = adf_nbuf_data(evt_buf);
    len = adf_nbuf_len(evt_buf);

    /* Call the WMI registered event handler */
    status = wmi_handle->event_handler[idx](wmi_handle->scn_handle,
                                            event, len);

end:
    adf_nbuf_free(evt_buf);
    return status;
}
/*
 * htt_h2t_sync_msg() - send a one-word HTT SYNC message to the target.
 * @pdev:     HTT physical device handle
 * @sync_cnt: sync count value carried in the message
 *
 * On high-latency targets that do not request tx completions, one target
 * credit is consumed for the message.
 * Returns A_OK on success, A_NO_MEMORY if packet or buffer allocation fails.
 */
A_STATUS
htt_h2t_sync_msg(struct htt_pdev_t *pdev, u_int8_t sync_cnt)
{
    struct htt_htc_pkt *htc_pkt;
    adf_nbuf_t netbuf;
    u_int32_t *word;

    htc_pkt = htt_htc_pkt_alloc(pdev);
    if (!htc_pkt)
        return A_NO_MEMORY;

    /* this is not a tx frame download (not required, but helpful) */
    htc_pkt->msdu_id = HTT_TX_COMPL_INV_MSDU_ID;
    htc_pkt->pdev_ctxt = NULL; /* not used during send-done callback */

    netbuf = adf_nbuf_alloc(
        pdev->osdev,
        HTT_MSG_BUF_SIZE(HTT_H2T_SYNC_MSG_SZ),
        /* reserve room for the HTC header */
        HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, FALSE);
    if (!netbuf) {
        htt_htc_pkt_free(pdev, htc_pkt);
        return A_NO_MEMORY;
    }

    /* set the length of the message */
    adf_nbuf_put_tail(netbuf, HTT_H2T_SYNC_MSG_SZ);

    /* grab the message start before rewinding over the alignment pad */
    word = (u_int32_t *) adf_nbuf_data(netbuf);

    /* rewind beyond alignment pad to get to the HTC header reserved area */
    adf_nbuf_push_head(netbuf, HTC_HDR_ALIGNMENT_PADDING);

    *word = 0;
    HTT_H2T_MSG_TYPE_SET(*word, HTT_H2T_MSG_TYPE_SYNC);
    HTT_H2T_SYNC_COUNT_SET(*word, sync_cnt);

    SET_HTC_PACKET_INFO_TX(
        &htc_pkt->htc_pkt,
        htt_h2t_send_complete_free_netbuf,
        adf_nbuf_data(netbuf),
        adf_nbuf_len(netbuf),
        pdev->htc_endpoint,
        HTC_TX_PACKET_TAG_RUNTIME_PUT);

    SET_HTC_PACKET_NET_BUF_CONTEXT(&htc_pkt->htc_pkt, netbuf);

#ifdef ATH_11AC_TXCOMPACT
    if (HTCSendPkt(pdev->htc_pdev, &htc_pkt->htc_pkt) == A_OK)
        htt_htc_misc_pkt_list_add(pdev, htc_pkt);
#else
    HTCSendPkt(pdev->htc_pdev, &htc_pkt->htc_pkt);
#endif

    if ((pdev->cfg.is_high_latency) &&
        (!pdev->cfg.default_tx_comp_req)) {
        ol_tx_target_credit_update(pdev->txrx_pdev, -1);
    }

    return A_OK;
}
int htt_h2t_ipa_uc_get_stats(struct htt_pdev_t *pdev) { struct htt_htc_pkt *pkt = NULL; adf_nbuf_t msg = NULL; u_int32_t *msg_word; /* New buffer alloc send */ pkt = htt_htc_pkt_alloc(pdev); if (!pkt) { return A_NO_MEMORY; } /* show that this is not a tx frame download (not required, * but helpful) */ pkt->msdu_id = HTT_TX_COMPL_INV_MSDU_ID; pkt->pdev_ctxt = NULL; /* not used during send-done callback */ msg = adf_nbuf_alloc( pdev->osdev, HTT_MSG_BUF_SIZE(HTT_WDI_IPA_OP_REQUEST_SZ), /* reserve room for HTC header */ HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, FALSE); if (!msg) { htt_htc_pkt_free(pdev, pkt); return A_NO_MEMORY; } /* set the length of the message */ adf_nbuf_put_tail(msg, HTT_WDI_IPA_OP_REQUEST_SZ); /* rewind beyond alignment pad to get to the HTC header reserved area */ adf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING); /* fill in the message contents */ msg_word = (u_int32_t *) adf_nbuf_data(msg); *msg_word = 0; HTT_WDI_IPA_OP_REQUEST_OP_CODE_SET(*msg_word, HTT_WDI_IPA_OPCODE_DBG_STATS); HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_WDI_IPA_OP_REQ); SET_HTC_PACKET_INFO_TX( &pkt->htc_pkt, htt_h2t_send_complete_free_netbuf, adf_nbuf_data(msg), adf_nbuf_len(msg), pdev->htc_endpoint, 1); /* tag - not relevant here */ SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg); #ifdef ATH_11AC_TXCOMPACT if (HTCSendPkt(pdev->htc_pdev, &pkt->htc_pkt) == A_OK) htt_htc_misc_pkt_list_add(pdev, pkt); #else HTCSendPkt(pdev->htc_pdev, &pkt->htc_pkt); #endif return A_OK; }
/*
 * Mark a tx frame as a candidate for parallel (HTT2) download.
 * Only unicast, ethernet-framed IPv4 data frames carrying TCP qualify,
 * since the frame re-order burden is offloaded to the L3 protocol and
 * ONLY TCP is supported now.
 */
static void
ol_tx_classify_htt2_frm(
    struct ol_txrx_vdev_t *vdev,
    adf_nbuf_t tx_nbuf,
    struct ol_txrx_msdu_info_t *tx_msdu_info)
{
    struct htt_msdu_info_t *htt = &tx_msdu_info->htt;
    A_UINT8 parallel_ok = 0;
    int eligible;

    /* pre-filter: unicast ethernet IPv4 data frames only */
    eligible =
        (htt->info.l2_hdr_type == htt_pkt_type_ethernet) &&
        (htt->info.frame_type == htt_frm_type_data) &&
        htt->info.is_unicast &&
        (htt->info.ethertype == ETHERTYPE_IPV4);

    if (eligible) {
        struct ipv4_hdr_t *ip_hdr;

        ip_hdr = (struct ipv4_hdr_t *)
            (adf_nbuf_data(tx_nbuf) + htt->info.l3_hdr_offset);
        if (ip_hdr->protocol == IP_PROTOCOL_TCP)
            parallel_ok = 1;
    }

    adf_nbuf_set_tx_parallel_dnload_frm(tx_nbuf, parallel_ok);
}
/* Return the FLUSH_VALID flag from the first word of an rx indication. */
int
htt_rx_ind_flush(htt_pdev_handle pdev, adf_nbuf_t rx_ind_msg)
{
    u_int32_t *first_word;

    first_word = (u_int32_t *) adf_nbuf_data(rx_ind_msg);
    return HTT_RX_IND_FLUSH_VALID_GET(*first_word);
}
/* Return the REL_VALID flag from the first word of an rx indication. */
int
htt_rx_ind_release(htt_pdev_handle pdev, adf_nbuf_t rx_ind_msg)
{
    u_int32_t *first_word;

    first_word = (u_int32_t *) adf_nbuf_data(rx_ind_msg);
    return HTT_RX_IND_REL_VALID_GET(*first_word);
}
/*
 * HTCStart() - finish HTC bring-up by sending the SETUP_COMPLETE_EX
 * message to the target on endpoint 0.
 * Returns A_OK on success; A_NO_MEMORY when no control tx packet is
 * available, or the HTCSendPkt() failure status.
 */
A_STATUS HTCStart(HTC_HANDLE HTCHandle)
{
    adf_nbuf_t netbuf;
    A_STATUS status = A_OK;
    HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
    HTC_SETUP_COMPLETE_EX_MSG *pSetupComp;
    HTC_PACKET *pSendPacket;

    AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("HTCStart Enter\n"));

    /* do/while(FALSE): single-pass block so error paths can "break" out */
    do {
        HTCConfigTargetHIFPipe(target);

        /* allocate a buffer to send */
        pSendPacket = HTCAllocControlTxPacket(target);
        if (NULL == pSendPacket) {
            AR_DEBUG_ASSERT(FALSE);
            adf_os_print("%s: allocControlTxPacket failed\n",__func__);
            status = A_NO_MEMORY;
            break;
        }

        netbuf = (adf_nbuf_t)GET_HTC_PACKET_NET_BUF_CONTEXT(pSendPacket);

        /* assemble setup complete message */
        adf_nbuf_put_tail(netbuf, sizeof(HTC_SETUP_COMPLETE_EX_MSG));
        pSetupComp = (HTC_SETUP_COMPLETE_EX_MSG *) adf_nbuf_data(netbuf);
        A_MEMZERO(pSetupComp,sizeof(HTC_SETUP_COMPLETE_EX_MSG));

        HTC_SET_FIELD(pSetupComp, HTC_SETUP_COMPLETE_EX_MSG,
                      MESSAGEID, HTC_MSG_SETUP_COMPLETE_EX_ID);

        /*
         * The disable-credit-flow branch is currently hard-wired off;
         * the original condition is preserved in the comment below.
         */
        //if (!htc_credit_flow) {
        if (0) {
            AR_DEBUG_PRINTF(ATH_DEBUG_INIT,
                ("HTC will not use TX credit flow control\n"));
            pSetupComp->SetupFlags |=
                HTC_SETUP_COMPLETE_FLAGS_DISABLE_TX_CREDIT_FLOW;
        } else {
            AR_DEBUG_PRINTF(ATH_DEBUG_INIT,
                ("HTC using TX credit flow control\n"));
        }

        SET_HTC_PACKET_INFO_TX(pSendPacket,
                               NULL,
                               (A_UINT8 *)pSetupComp,
                               sizeof(HTC_SETUP_COMPLETE_EX_MSG),
                               ENDPOINT_0,
                               HTC_SERVICE_TX_PACKET_TAG);

        status = HTCSendPkt((HTC_HANDLE)target,pSendPacket);
        if (A_FAILED(status)) {
            break;
        }

    } while (FALSE);

    AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("HTCStart Exit\n"));
    return status;
}
/*
 * ol_rx_err() - rx error notification from the data path.
 * @pdev:          opaque pdev handle (actually ol_ath_softc_net80211)
 * @vdev_id:       id of the vap the frame belongs to
 * @peer_mac_addr: transmitter MAC address
 * @tid:           traffic id of the frame
 * @tsf32:         timestamp of the error
 * @err_type:      kind of rx error
 * @rx_frame:      offending frame (802.3-framed here)
 *
 * Currently only OL_RX_ERR_TKIP_MIC is acted on: an 802.11 header is
 * reconstructed from the ethernet header (the real WLAN header is not
 * available in the HL data path) and a Michael-failure notification is
 * raised so TKIP counter-measures can run.
 */
void
ol_rx_err(
    ol_pdev_handle pdev,
    u_int8_t vdev_id,
    u_int8_t *peer_mac_addr,
    int tid,
    u_int32_t tsf32,
    enum ol_rx_err_type err_type,
    adf_nbuf_t rx_frame)
{
    struct ieee80211_frame wh;
    struct ether_header *eh;
    struct ol_ath_softc_net80211 *scn ;
    struct ieee80211vap *vap;
    enum ieee80211_opmode opmode;
    A_BOOL notify = TRUE;

    eh = (struct ether_header *)adf_nbuf_data(rx_frame);
    scn = (struct ol_ath_softc_net80211 *)pdev;
    vap = ol_ath_vap_get(scn, vdev_id);
    if(vap == NULL) {
        printk("%s: vap is NULL \n", __func__);
        return;
    }
    /* NOTE(review): ol_ath_vap_get() may take a reference on the vap;
     * no matching release is visible here -- confirm against its
     * definition. */
    opmode = ieee80211_vap_get_opmode(vap);

    if (err_type == OL_RX_ERR_TKIP_MIC) {
        /*TODO: Reconstructing the WLAN header for now from ether header
         * since WLAN header is not available for HL case. */
        wh.i_fc[0] = IEEE80211_FC0_VERSION_0 |
            IEEE80211_FC0_TYPE_DATA |
            IEEE80211_FC0_SUBTYPE_DATA;
        wh.i_dur[0] = wh.i_dur[1] = 0;
        wh.i_seq[0] = wh.i_seq[1] = 0;

        adf_os_mem_copy(&wh.i_addr1, &vap->iv_myaddr, IEEE80211_ADDR_LEN);
        adf_os_mem_copy(&wh.i_addr2, peer_mac_addr, IEEE80211_ADDR_LEN);

        /* direction + addr3 depend on the vap role */
        if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_WDS) {
            wh.i_fc[1] = IEEE80211_FC1_DIR_TODS;
            adf_os_mem_copy(&wh.i_addr3, &eh->ether_dhost ,
                            IEEE80211_ADDR_LEN);
        } else if (opmode == IEEE80211_M_STA) {
            wh.i_fc[1] = IEEE80211_FC1_DIR_FROMDS;
            adf_os_mem_copy(&wh.i_addr3, &eh->ether_shost ,
                            IEEE80211_ADDR_LEN);
        } else {
            /*TODO: Handle other cases*/
            notify = FALSE;
        }

        if (notify) {
            printk("%s: TKIP MIC failure \n",__func__);
            ieee80211_notify_michael_failure(vap,
                (const struct ieee80211_frame *)&wh,0);
        }
    }
}
/* Extract the release seq-num window [start, end] from an rx indication;
 * the range lives in the second word of the message. */
void
htt_rx_ind_release_seq_num_range(
    htt_pdev_handle pdev,
    adf_nbuf_t rx_ind_msg,
    unsigned *seq_num_start,
    unsigned *seq_num_end)
{
    u_int32_t *word;

    word = (u_int32_t *) adf_nbuf_data(rx_ind_msg);
    *seq_num_start = HTT_RX_IND_REL_SEQ_NUM_START_GET(word[1]);
    *seq_num_end = HTT_RX_IND_REL_SEQ_NUM_END_GET(word[1]);
}
/*
 * HTC send-done callback for WMI commands: optionally log the completion,
 * then release the command netbuf, the HTC packet wrapper, and one unit
 * of the pending-command count.
 */
void
wmi_htc_tx_complete(void *ctx, HTC_PACKET *htc_pkt)
{
    struct wmi_unified *wmi_handle = (struct wmi_unified *)ctx;
    wmi_buf_t buf = GET_HTC_PACKET_NET_BUF_CONTEXT(htc_pkt);
#ifdef WMI_INTERFACE_EVENT_LOGGING
    u_int32_t cmd_id;
#endif

    ASSERT(buf);

#ifdef WMI_INTERFACE_EVENT_LOGGING
    cmd_id = WMI_GET_FIELD(adf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID);

    adf_os_spin_lock_bh(&wmi_handle->wmi_record_lock);
    /* Record 16 bytes of WMI cmd tx complete data
     * - exclude TLV and WMI headers */
    WMI_COMMAND_TX_CMP_RECORD(cmd_id,
        ((u_int32_t *)adf_nbuf_data(buf) + 2));
    adf_os_spin_unlock_bh(&wmi_handle->wmi_record_lock);
#endif

    adf_nbuf_free(buf);
    adf_os_mem_free(htc_pkt);
    adf_os_atomic_dec(&wmi_handle->pending_cmds);
}
/* Extract the flush seq-num window [start, end] from an rx fragment
 * indication; the range lives in the second word of the message. */
void
htt_rx_frag_ind_flush_seq_num_range(
    htt_pdev_handle pdev,
    adf_nbuf_t rx_frag_ind_msg,
    int *seq_num_start,
    int *seq_num_end)
{
    u_int32_t *word;

    word = (u_int32_t *) adf_nbuf_data(rx_frag_ind_msg);
    *seq_num_start = HTT_RX_FRAG_IND_FLUSH_SEQ_NUM_START_GET(word[1]);
    *seq_num_end = HTT_RX_FRAG_IND_FLUSH_SEQ_NUM_END_GET(word[1]);
}
/* Fetch status and MPDU count of the mpdu_range_num'th mpdu-range record
 * in an rx indication message. */
void
htt_rx_ind_mpdu_range_info(
    struct htt_pdev_t *pdev,
    adf_nbuf_t rx_ind_msg,
    int mpdu_range_num,
    enum htt_rx_status *status,
    int *mpdu_count)
{
    u_int32_t *word;

    /* step over the fixed header to the requested mpdu-range record */
    word = ((u_int32_t *) adf_nbuf_data(rx_ind_msg)) +
        pdev->rx_mpdu_range_offset_words + mpdu_range_num;
    *status = HTT_RX_IND_MPDU_STATUS_GET(*word);
    *mpdu_count = HTT_RX_IND_MPDU_COUNT_GET(*word);
}
/*
 * Determine the TID for a tx MSDU based on the pdev frame format.
 * As a side effect, fills in the l2 header type (and, for non-raw
 * formats, the ethertype / l3 header offset) of tx_msdu_info.
 * Returns the chosen TID, or HTT_TX_EXT_TID_INVALID for an unknown
 * frame format (after asserting).
 */
static A_UINT8
ol_tx_tid(
    struct ol_txrx_pdev_t *pdev,
    adf_nbuf_t tx_nbuf,
    struct ol_txrx_msdu_info_t *tx_msdu_info)
{
    A_UINT8 *frame = adf_nbuf_data(tx_nbuf);
    A_UINT8 tid;

    switch (pdev->frame_format) {
    case wlan_frm_fmt_raw:
        tx_msdu_info->htt.info.l2_hdr_type = htt_pkt_type_raw;
        ol_tx_set_ether_type(frame, tx_msdu_info);
        if (tx_msdu_info->htt.info.ext_tid ==
                ADF_NBUF_TX_EXT_TID_INVALID) {
            tid = ol_tx_tid_by_raw_type(frame, tx_msdu_info);
        } else {
            tid = tx_msdu_info->htt.info.ext_tid;
        }
        break;

    case wlan_frm_fmt_802_3:
        tx_msdu_info->htt.info.l2_hdr_type = htt_pkt_type_ethernet;
        ol_tx_set_ether_type(frame, tx_msdu_info);
        if (tx_msdu_info->htt.info.ext_tid ==
                ADF_NBUF_TX_EXT_TID_INVALID) {
            tid = ol_tx_tid_by_ether_type(frame, tx_msdu_info);
        } else {
            tid = tx_msdu_info->htt.info.ext_tid;
        }
        break;

    case wlan_frm_fmt_native_wifi: {
        struct llc_snap_hdr_t *llc;

        tx_msdu_info->htt.info.l2_hdr_type = htt_pkt_type_native_wifi;
        tx_msdu_info->htt.info.l3_hdr_offset =
            sizeof(struct ieee80211_frame);
        llc = (struct llc_snap_hdr_t *)
            (frame + tx_msdu_info->htt.info.l3_hdr_offset);
        tx_msdu_info->htt.info.ethertype =
            (llc->ethertype[0] << 8) | llc->ethertype[1];
        /*
         * Native WiFi is a special case of "raw" 802.11 header format.
         * However, we expect that for all cases that use native WiFi,
         * the TID will be directly specified out of band.
         */
        tid = tx_msdu_info->htt.info.ext_tid;
        break;
    }

    default:
        VOS_TRACE(VOS_MODULE_ID_TXRX, VOS_TRACE_LEVEL_FATAL,
            "Invalid standard frame type: %d\n", pdev->frame_format);
        adf_os_assert(0);
        tid = HTT_TX_EXT_TID_INVALID;
        break;
    }
    return tid;
}
/*
 * Copy the 802.11 header at the front of msdu into localbuf.
 * *hdsize is set to the header length (the 4-address form for DSTODS
 * frames, the 3-address form otherwise).
 * Returns A_ERROR when the buffer is shorter than the header, else A_OK.
 */
static inline A_STATUS
ol_tx_copy_native_wifi_header(
    adf_nbuf_t msdu,
    u_int8_t *hdsize,
    u_int8_t *localbuf)
{
    struct ieee80211_frame *wh =
        (struct ieee80211_frame *)adf_nbuf_data(msdu);

    /* 4-address (WDS) frames carry the longer header */
    *hdsize = ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) ==
               IEEE80211_FC1_DIR_DSTODS)
        ? sizeof(struct ieee80211_frame_addr4)
        : sizeof(struct ieee80211_frame);

    if (adf_nbuf_len(msdu) < *hdsize)
        return A_ERROR;

    adf_os_mem_copy(localbuf, wh, *hdsize);
    return A_OK;
}
/*
 * Force the last_msdu flag in the rx descriptor of the next rx buffer to
 * be popped, and copy the firmware rx descriptor byte into it, before a
 * fragment indication is processed.  Also computes the mpdu-range offset
 * for the indication message and resets the msdu byte index.
 */
static void
HTT_RX_FRAG_SET_LAST_MSDU(
    struct htt_pdev_t *pdev, adf_nbuf_t msg)
{
    u_int32_t *msg_word;
    unsigned num_msdu_bytes;
    adf_nbuf_t msdu;
    struct htt_host_rx_desc_base *rx_desc;
    int start_idx;
    u_int8_t *p_fw_msdu_rx_desc = 0;

    msg_word = (u_int32_t *) adf_nbuf_data(msg);
    num_msdu_bytes = HTT_RX_FRAG_IND_FW_RX_DESC_BYTES_GET(
        *(msg_word + HTT_RX_FRAG_IND_HDR_PREFIX_SIZE32));
    /*
     * 1 word for the message header,
     * 1 word to specify the number of MSDU bytes,
     * 1 word for every 4 MSDU bytes (round up),
     * 1 word for the MPDU range header
     */
    pdev->rx_mpdu_range_offset_words = 3 + ((num_msdu_bytes + 3) >> 2);
    pdev->rx_ind_msdu_byte_idx = 0;

    p_fw_msdu_rx_desc = ((u_int8_t *)(msg_word) +
        HTT_ENDIAN_BYTE_IDX_SWAP(HTT_RX_FRAG_IND_FW_DESC_BYTE_OFFSET));

    /*
     * Fix for EV126710, in which BSOD occurs due to last_msdu bit
     * not set while the next pointer is deliberately set to NULL
     * before calling ol_rx_pn_check_base()
     *
     * For fragment frames, the HW may not have set the last_msdu bit
     * in the rx descriptor, but the SW expects this flag to be set,
     * since each fragment is in a separate MPDU. Thus, set the flag here,
     * just in case the HW didn't.
     */
    start_idx = pdev->rx_ring.sw_rd_idx.msdu_payld;
    msdu = pdev->rx_ring.buf.netbufs_ring[start_idx];
    adf_nbuf_set_pktlen(msdu, HTT_RX_BUF_SIZE);
    /* unmap so the CPU sees the DMA-written descriptor contents */
    adf_nbuf_unmap(pdev->osdev, msdu, ADF_OS_DMA_FROM_DEVICE);
    rx_desc = htt_rx_desc(msdu);
    *((u_int8_t *) &rx_desc->fw_desc.u.val) = *p_fw_msdu_rx_desc;
    rx_desc->msdu_end.last_msdu = 1;
    /* re-map to hand the buffer back to the device */
    adf_nbuf_map(pdev->osdev, msdu, ADF_OS_DMA_FROM_DEVICE);
}
/*--- rx indication message ---*/

/* Return the FLUSH_VALID flag from the first word of the rx ind message. */
int htt_rx_ind_flush(htt_pdev_handle pdev, adf_nbuf_t rx_ind_msg)
{
    u_int32_t *msg_word;

    msg_word = (u_int32_t *) adf_nbuf_data(rx_ind_msg);
    return HTT_RX_IND_FLUSH_VALID_GET(*msg_word);
}

/* Extract the flush seq-num window [start, end]; the range lives in the
 * second word of the message. */
void
htt_rx_ind_flush_seq_num_range(
    htt_pdev_handle pdev,
    adf_nbuf_t rx_ind_msg,
    unsigned *seq_num_start,
    unsigned *seq_num_end)
{
    u_int32_t *msg_word;

    msg_word = (u_int32_t *) adf_nbuf_data(rx_ind_msg);
    msg_word++;
    *seq_num_start = HTT_RX_IND_FLUSH_SEQ_NUM_START_GET(*msg_word);
    *seq_num_end = HTT_RX_IND_FLUSH_SEQ_NUM_END_GET(*msg_word);
}

/* Return the REL_VALID flag from the first word of the rx ind message. */
int
htt_rx_ind_release(htt_pdev_handle pdev, adf_nbuf_t rx_ind_msg)
{
    u_int32_t *msg_word;

    msg_word = (u_int32_t *) adf_nbuf_data(rx_ind_msg);
    return HTT_RX_IND_REL_VALID_GET(*msg_word);
}

/* Extract the release seq-num window [start, end]; the range lives in
 * the second word of the message. */
void
htt_rx_ind_release_seq_num_range(
    htt_pdev_handle pdev,
    adf_nbuf_t rx_ind_msg,
    unsigned *seq_num_start,
    unsigned *seq_num_end)
{
    u_int32_t *msg_word;

    msg_word = (u_int32_t *) adf_nbuf_data(rx_ind_msg);
    msg_word++;
    *seq_num_start = HTT_RX_IND_REL_SEQ_NUM_START_GET(*msg_word);
    *seq_num_end = HTT_RX_IND_REL_SEQ_NUM_END_GET(*msg_word);
}

/* Fetch status and MPDU count of the mpdu_range_num'th mpdu-range record
 * in the rx indication message. */
void
htt_rx_ind_mpdu_range_info(
    struct htt_pdev_t *pdev,
    adf_nbuf_t rx_ind_msg,
    int mpdu_range_num,
    enum htt_rx_status *status,
    int *mpdu_count)
{
    u_int32_t *msg_word;

    msg_word = (u_int32_t *) adf_nbuf_data(rx_ind_msg);
    msg_word += pdev->rx_mpdu_range_offset_words + mpdu_range_num;
    *status = HTT_RX_IND_MPDU_STATUS_GET(*msg_word);
    *mpdu_count = HTT_RX_IND_MPDU_COUNT_GET(*msg_word);
}

/* approximate noise floor used to convert combined RSSI to dBm */
#define HTT_TGT_NOISE_FLOOR_DBM (-95) /* approx */

/*
 * Return the combined RSSI of the rx PPDU in dBm, or HTT_RSSI_INVALID
 * when the indication carries no valid PPDU-start info or the target
 * reported an invalid RSSI value.
 */
int16_t
htt_rx_ind_rssi_dbm(htt_pdev_handle pdev, adf_nbuf_t rx_ind_msg)
{
    int16_t rssi;
    u_int32_t *msg_word;

    msg_word = (u_int32_t *)
        (adf_nbuf_data(rx_ind_msg) +
         HTT_RX_IND_FW_RX_PPDU_DESC_BYTE_OFFSET);

    /* check if the RX_IND message contains valid rx PPDU start info */
    if (!HTT_RX_IND_START_VALID_GET(*msg_word)) {
        return HTT_RSSI_INVALID;
    }

    rssi = HTT_RX_IND_RSSI_CMB_GET(*msg_word);
    return (HTT_TGT_RSSI_INVALID == rssi) ?
        HTT_RSSI_INVALID : rssi + HTT_TGT_NOISE_FLOOR_DBM;
}
/**
 * epping_tx_dup_pkt() - clone an epping tx packet and send the copy via HTC.
 * @pAdapter: epping adapter (cookie pool and tx stats)
 * @eid:      HTC endpoint to send the duplicate on
 * @skb:      packet to duplicate; caller retains ownership of the original
 *
 * Fix: the HTC packet now points at the data of the duplicated buffer
 * (new_skb) -- the same buffer registered as the packet's net-buf context
 * and freed on send completion.  It previously pointed at the original
 * skb's data, so HTC would transmit from a buffer it does not own while
 * freeing a different one (use-after-free risk if the caller releases or
 * modifies skb before the send completes).
 */
void epping_tx_dup_pkt(epping_adapter_t *pAdapter,
                       HTC_ENDPOINT_ID eid, adf_nbuf_t skb)
{
    struct epping_cookie * cookie = NULL;
    int skb_len, ret;
    adf_nbuf_t new_skb;

    cookie = epping_alloc_cookie(pAdapter->pEpping_ctx);
    if (cookie == NULL) {
        EPPING_LOG(VOS_TRACE_LEVEL_FATAL,
            "%s: epping_alloc_cookie returns no resource\n", __func__);
        return;
    }
    new_skb = adf_nbuf_copy(skb);
    if (!new_skb) {
        EPPING_LOG(VOS_TRACE_LEVEL_FATAL,
            "%s: adf_nbuf_copy returns no resource\n", __func__);
        epping_free_cookie(pAdapter->pEpping_ctx, cookie);
        return;
    }
    /* point the HTC packet at the duplicate, which is also its context */
    SET_HTC_PACKET_INFO_TX(&cookie->HtcPkt,
        cookie, adf_nbuf_data(new_skb), adf_nbuf_len(new_skb),
        eid, 0);
    SET_HTC_PACKET_NET_BUF_CONTEXT(&cookie->HtcPkt, new_skb);

    skb_len = (int)adf_nbuf_len(new_skb);
    /* send the packet */
    ret = HTCSendPkt(pAdapter->pEpping_ctx->HTCHandle, &cookie->HtcPkt);
    if (ret != A_OK) {
        EPPING_LOG(VOS_TRACE_LEVEL_FATAL,
            "%s: HTCSendPkt failed, ret = %d\n", __func__, ret);
        epping_free_cookie(pAdapter->pEpping_ctx, cookie);
        adf_nbuf_free(new_skb);
        return;
    }
    pAdapter->stats.tx_bytes += skb_len;
    ++pAdapter->stats.tx_packets;
    if (((pAdapter->stats.tx_packets +
          pAdapter->stats.tx_dropped) % EPPING_STATS_LOG_COUNT) == 0 &&
        (pAdapter->stats.tx_packets || pAdapter->stats.tx_dropped)) {
        epping_log_stats(pAdapter, __func__);
    }
}
/*
 * Allocate a WMI command buffer with headroom reserved for the WMI/HTC
 * headers.  Returns NULL when the requested length (plus headroom,
 * rounded up to a 4-byte multiple) exceeds the maximum message length,
 * or when the allocation itself fails.  The first len bytes are zeroed
 * and the packet length is set to len.
 */
wmi_buf_t
wmi_buf_alloc(wmi_unified_t wmi_handle, u_int16_t len)
{
    wmi_buf_t wmi_buf;
    /* total allocation: payload + headroom, 4-byte aligned */
    u_int32_t alloc_len = roundup(len + WMI_MIN_HEAD_ROOM, 4);

    if (alloc_len > wmi_handle->max_msg_len) {
        VOS_ASSERT(0);
        return NULL;
    }

    wmi_buf = adf_nbuf_alloc(NULL, alloc_len, WMI_MIN_HEAD_ROOM, 4, FALSE);
    if (!wmi_buf)
        return NULL;

    /* Clear the wmi buffer */
    OS_MEMZERO(adf_nbuf_data(wmi_buf), len);

    /*
     * Set the length of the buffer to match the allocation size.
     */
    adf_nbuf_set_pktlen(wmi_buf, len);
    return wmi_buf;
}
/**
 * HTCRxCompletionHandler() - HIF receive-completion entry point for HTC.
 * @Context: HTC target instance
 * @netbuf:  received network buffer (HTC frame header at the front)
 * @pipeID:  HIF pipe the buffer arrived on
 *
 * Validates the HTC frame header, reassembles scattered receives when
 * RX_SG_SUPPORT is enabled, processes any trailer records, dispatches
 * endpoint-0 control messages internally, and hands data packets up via
 * RecvPacketCompletion().  Consumes netbuf on all paths.
 *
 * Fix: the endpoint pointer is now derived only after the endpoint id has
 * been bounds-checked; previously &target->EndPoint[htc_ep_id] was
 * computed before validating htc_ep_id, forming an out-of-bounds address
 * when a corrupt header carried an id >= ENDPOINT_MAX.
 *
 * Return: A_OK on success, otherwise an error status.
 */
A_STATUS HTCRxCompletionHandler(
    void *Context, adf_nbuf_t netbuf, a_uint8_t pipeID)
{
    A_STATUS status = A_OK;
    HTC_FRAME_HDR *HtcHdr;
    HTC_TARGET *target = (HTC_TARGET *)Context;
    a_uint8_t *netdata;
    a_uint32_t netlen;
    HTC_ENDPOINT *pEndpoint;
    HTC_PACKET *pPacket;
    A_UINT16 payloadLen;
    a_uint32_t trailerlen = 0;
    A_UINT8 htc_ep_id;

#ifdef RX_SG_SUPPORT
    LOCK_HTC_RX(target);
    if (target->IsRxSgInprogress) {
        /* accumulate this fragment of a scattered receive */
        target->CurRxSgTotalLen += adf_nbuf_len(netbuf);
        adf_nbuf_queue_add(&target->RxSgQueue, netbuf);
        if (target->CurRxSgTotalLen == target->ExpRxSgTotalLen) {
            netbuf = RxSgToSingleNetbuf(target);
            if (netbuf == NULL) {
                UNLOCK_HTC_RX(target);
                goto _out;
            }
        }
        else {
            /* still waiting for more fragments */
            netbuf = NULL;
            UNLOCK_HTC_RX(target);
            goto _out;
        }
    }
    UNLOCK_HTC_RX(target);
#endif

    netdata = adf_nbuf_data(netbuf);
    netlen = adf_nbuf_len(netbuf);

    HtcHdr = (HTC_FRAME_HDR *)netdata;

    /* do/while(FALSE): single-pass block so error paths can "break" out */
    do {
        htc_ep_id = HTC_GET_FIELD(HtcHdr, HTC_FRAME_HDR, ENDPOINTID);
        if (htc_ep_id >= ENDPOINT_MAX) {
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("HTC Rx: invalid EndpointID=%d\n",htc_ep_id));
            DebugDumpBytes((A_UINT8 *)HtcHdr,sizeof(HTC_FRAME_HDR),"BAD HTC Header");
            status = A_ERROR;
            break;
        }
        /* safe to index the endpoint array only after the check above */
        pEndpoint = &target->EndPoint[htc_ep_id];

        /*
         * If this endpoint that received a message from the target has
         * a to-target HIF pipe whose send completions are polled rather
         * than interrupt-driven, this is a good point to ask HIF to check
         * whether it has any completed sends to handle.
         */
        if (pEndpoint->ul_is_polled) {
            HTCSendCompleteCheck(pEndpoint, 1);
        }

        payloadLen = HTC_GET_FIELD(HtcHdr, HTC_FRAME_HDR, PAYLOADLEN);

        if (netlen < (payloadLen + HTC_HDR_LENGTH)) {
#ifdef RX_SG_SUPPORT
            /* partial frame: start scatter-gather accumulation */
            LOCK_HTC_RX(target);
            target->IsRxSgInprogress = TRUE;
            adf_nbuf_queue_init(&target->RxSgQueue);
            adf_nbuf_queue_add(&target->RxSgQueue, netbuf);
            target->ExpRxSgTotalLen = (payloadLen + HTC_HDR_LENGTH);
            target->CurRxSgTotalLen += netlen;
            UNLOCK_HTC_RX(target);
            netbuf = NULL;
            break;
#else
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("HTC Rx: insufficient length, got:%d expected =%d\n",
                netlen, payloadLen + HTC_HDR_LENGTH));
            DebugDumpBytes((A_UINT8 *)HtcHdr,sizeof(HTC_FRAME_HDR),"BAD RX packet length");
            status = A_ERROR;
            break;
#endif
        }

#ifdef HTC_EP_STAT_PROFILING
        LOCK_HTC_RX(target);
        INC_HTC_EP_STAT(pEndpoint,RxReceived,1);
        UNLOCK_HTC_RX(target);
#endif

        //if (IS_TX_CREDIT_FLOW_ENABLED(pEndpoint))
        {
            A_UINT8 temp;
            /* get flags to check for trailer */
            temp = HTC_GET_FIELD(HtcHdr, HTC_FRAME_HDR, FLAGS);
            if (temp & HTC_FLAGS_RECV_TRAILER) {
                /* extract the trailer length */
                temp = HTC_GET_FIELD(HtcHdr, HTC_FRAME_HDR, CONTROLBYTES0);
                if ((temp < sizeof(HTC_RECORD_HDR)) || (temp > payloadLen)) {
                    AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
                        ("HTCProcessRecvHeader, invalid header (payloadlength should be :%d, CB[0] is:%d) \n",
                        payloadLen, temp));
                    status = A_EPROTO;
                    break;
                }

                trailerlen = temp;
                /* process trailer data that follows HDR + application payload */
                status = HTCProcessTrailer(target,
                    ((A_UINT8 *)HtcHdr + HTC_HDR_LENGTH + payloadLen - temp),
                    temp, htc_ep_id);
                if (A_FAILED(status)) {
                    break;
                }
            }
        }

        if (((int)payloadLen - (int)trailerlen) <= 0) {
            /* zero length packet with trailer data, just drop these */
            break;
        }

        if (htc_ep_id == ENDPOINT_0) {
            A_UINT16 message_id;
            HTC_UNKNOWN_MSG *htc_msg;

            /* remove HTC header */
            adf_nbuf_pull_head(netbuf, HTC_HDR_LENGTH);
            netdata = adf_nbuf_data(netbuf);
            netlen = adf_nbuf_len(netbuf);

            htc_msg = (HTC_UNKNOWN_MSG*)netdata;
            message_id = HTC_GET_FIELD(htc_msg, HTC_UNKNOWN_MSG, MESSAGEID);

            switch (message_id) {
            default:
                /* handle HTC control message */
                if (target->CtrlResponseProcessing) {
                    /* this is a fatal error, target should not be sending unsolicited messages
                     * on the endpoint 0 */
                    AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("HTC Rx Ctrl still processing\n"));
                    status = A_ERROR;
                    break;
                }

                LOCK_HTC_RX(target);
                target->CtrlResponseLength = min((int)netlen,HTC_MAX_CONTROL_MESSAGE_LENGTH);
                A_MEMCPY(target->CtrlResponseBuffer,netdata,target->CtrlResponseLength);
                UNLOCK_HTC_RX(target);

                /* wake the caller waiting for the control response */
                adf_os_mutex_release(target->osdev, &target->CtrlResponseValid);
                break;
            case HTC_MSG_SEND_SUSPEND_COMPLETE:
                target->HTCInitInfo.TargetSendSuspendComplete(target->HTCInitInfo.pContext);
                break;
            }

            adf_nbuf_free(netbuf);
            netbuf = NULL;
            break;
        }

        /* the current message based HIF architecture allocates net bufs for recv packets
         * since this layer bridges that HIF to upper layers , which expects HTC packets,
         * we form the packets here
         * TODO_FIXME */
        pPacket = AllocateHTCPacketContainer(target);
        if (NULL == pPacket) {
            status = A_NO_RESOURCE;
            break;
        }
        pPacket->Status = A_OK;
        pPacket->Endpoint = htc_ep_id;
        pPacket->pPktContext = netbuf;
        pPacket->pBuffer = adf_nbuf_data(netbuf) + HTC_HDR_LENGTH;
        pPacket->ActualLength = netlen - HTC_HEADER_LEN - trailerlen;

        /* TODO : this is a hack because the driver layer will set the actual length
         * of the skb again which will just double the length */
        //A_NETBUF_TRIM(netbuf,netlen);
        adf_nbuf_trim_tail(netbuf, netlen);

        RecvPacketCompletion(target,pEndpoint,pPacket);
        /* recover the packet container */
        FreeHTCPacketContainer(target,pPacket);
        netbuf = NULL;

    } while(FALSE);

#ifdef RX_SG_SUPPORT
_out:
#endif
    if (netbuf != NULL) {
        adf_nbuf_free(netbuf);
    }
    return status;
}
/*
 * Encapsulate an 802.3-framed tx MSDU into an 802.11 header (with optional
 * QoS control and LLC/SNAP), writing the assembled header into the HTT tx
 * descriptor's MPDU header area and stripping the original ethernet header
 * from the netbuf.  Non-data frames pass through untouched (A_OK).
 * Returns A_ERROR for monitor/unknown vdev opmodes.
 */
static inline A_STATUS
ol_tx_encap_from_8023 (
    struct ol_txrx_vdev_t *vdev,
    struct ol_tx_desc_t *tx_desc,
    adf_nbuf_t msdu,
    struct ol_txrx_msdu_info_t *tx_msdu_info
    )
{
    /* large enough for the biggest 802.11 header plus LLC/SNAP */
    u_int8_t localbuf[ sizeof(struct ieee80211_qosframe_htc_addr4) \
        + sizeof(struct llc_snap_hdr_t)];
    struct llc_snap_hdr_t *llc_hdr;
    struct ethernet_hdr_t *eth_hdr;
    struct ieee80211_frame *wh;
    u_int8_t hdsize, new_l2_hdsize, new_hdsize;
    struct ieee80211_qoscntl *qos_cntl;
    const u_int8_t ethernet_II_llc_snap_header_prefix[] = \
        { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
    struct ol_txrx_peer_t *peer;
    u_int16_t ether_type;

    if (tx_msdu_info->htt.info.frame_type != htt_frm_type_data)
        return A_OK;

    /*
     * for unicast,the peer should not be NULL.
     * for multicast, the peer is AP.
     */
    peer = tx_msdu_info->peer;

    eth_hdr = (struct ethernet_hdr_t *)adf_nbuf_data(msdu);
    hdsize = sizeof(struct ethernet_hdr_t);
    wh = (struct ieee80211_frame *)localbuf;
    wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_DATA;
    *(u_int16_t *)wh->i_dur = 0;
    new_hdsize = 0;

    /* address fields and direction depend on the vdev operating mode */
    switch (vdev->opmode) {
    case wlan_op_mode_ap:
        /* DA , BSSID , SA*/
        adf_os_mem_copy(wh->i_addr1, eth_hdr->dest_addr,
                        IEEE80211_ADDR_LEN);
        adf_os_mem_copy(wh->i_addr2, &vdev->mac_addr.raw,
                        IEEE80211_ADDR_LEN);
        adf_os_mem_copy(wh->i_addr3, eth_hdr->src_addr,
                        IEEE80211_ADDR_LEN);
        wh->i_fc[1] = IEEE80211_FC1_DIR_FROMDS;
        new_hdsize = sizeof(struct ieee80211_frame);
        break;
    case wlan_op_mode_ibss:
        /* DA, SA, BSSID */
        adf_os_mem_copy(wh->i_addr1, eth_hdr->dest_addr,
                        IEEE80211_ADDR_LEN);
        adf_os_mem_copy(wh->i_addr2, eth_hdr->src_addr,
                        IEEE80211_ADDR_LEN);
        /* need to check the bssid behaviour for IBSS vdev */
        adf_os_mem_copy(wh->i_addr3, &vdev->mac_addr.raw,
                        IEEE80211_ADDR_LEN);
        wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
        new_hdsize = sizeof(struct ieee80211_frame);
        break;
    case wlan_op_mode_sta:
        /* BSSID, SA , DA*/
        adf_os_mem_copy(wh->i_addr1, &peer->mac_addr.raw,
                        IEEE80211_ADDR_LEN);
        adf_os_mem_copy(wh->i_addr2, eth_hdr->src_addr,
                        IEEE80211_ADDR_LEN);
        adf_os_mem_copy(wh->i_addr3, eth_hdr->dest_addr,
                        IEEE80211_ADDR_LEN);
        wh->i_fc[1] = IEEE80211_FC1_DIR_TODS;
        new_hdsize = sizeof(struct ieee80211_frame);
        break;
    case wlan_op_mode_monitor:
    default:
        return A_ERROR;
    }
    /*add qos cntl*/
    if (tx_msdu_info->htt.info.is_unicast && peer->qos_capable ) {
        qos_cntl = (struct ieee80211_qoscntl*)(localbuf + new_hdsize);
        qos_cntl->i_qos[0] =
            tx_msdu_info->htt.info.ext_tid & IEEE80211_QOS_TID;
        wh->i_fc[0] |= IEEE80211_FC0_SUBTYPE_QOS;
#if 0
        if ( wmmParam[ac].wmep_noackPolicy ) {
            qos_cntl->i_qos[0]|= 1 << IEEE80211_QOS_ACKPOLICY_S;
        }
#endif
        qos_cntl->i_qos[1] = 0;
        new_hdsize += sizeof(struct ieee80211_qoscntl);
        /*add ht control field if needed */
    }
    /* Set Protected Frame bit in MAC header */
    if (vdev->pdev->sw_pf_proc_enable &&
        tx_msdu_info->htt.action.do_encrypt) {
        wh->i_fc[1] |= IEEE80211_FC1_WEP;
    }
    /* remember where the L2 header ends: LLC/SNAP goes after this point */
    new_l2_hdsize = new_hdsize;
    /* add llc snap if needed */
    if (vdev->pdev->sw_tx_llc_proc_enable) {
        llc_hdr = (struct llc_snap_hdr_t *) (localbuf + new_hdsize);
        ether_type = (eth_hdr->ethertype[0]<<8) |(eth_hdr->ethertype[1]);
        if ( ether_type >= IEEE8023_MAX_LEN ) {
            /* ethernet-II payload: synthesize the LLC/SNAP header */
            adf_os_mem_copy(llc_hdr,
                            ethernet_II_llc_snap_header_prefix,
                            sizeof(ethernet_II_llc_snap_header_prefix));
            if ( ether_type == ETHERTYPE_AARP ||
                 ether_type == ETHERTYPE_IPX) {
                llc_hdr->org_code[2] = BTEP_SNAP_ORGCODE_2;// 0xf8; bridge tunnel header
            }
            llc_hdr->ethertype[0] = eth_hdr->ethertype[0];
            llc_hdr->ethertype[1] = eth_hdr->ethertype[1];
            new_hdsize += sizeof(struct llc_snap_hdr_t);
        }
        else {
            /*llc ready, and it's in payload pdu, do we need to move to BD pdu?*/
        }
    }
    /* write the assembled header into the HTT tx descriptor */
    adf_os_mem_copy(
        (void*)htt_tx_desc_mpdu_header(tx_desc->htt_tx_desc,new_l2_hdsize),
        localbuf, new_hdsize);
    /* strip the original ethernet header from the payload */
    adf_nbuf_pull_head(msdu,hdsize);
    tx_msdu_info->htt.info.l3_hdr_offset = new_l2_hdsize;
    tx_desc->orig_l2_hdr_bytes = hdsize;
    return A_OK;
}
/*
 * htt_tx_ipa_uc_attach() - allocate the IPA micro-controller tx resources:
 * the CE write-index word, the tx completion ring, the tx buffer pool
 * (pre-initialized with HTT headers and DMA-mapped), and the host-side
 * array of buffer virtual addresses.
 * Returns 0 on success; a negative code after freeing whatever was
 * allocated so far on failure.
 */
int htt_tx_ipa_uc_attach(struct htt_pdev_t *pdev,
    unsigned int uc_tx_buf_sz,
    unsigned int uc_tx_buf_cnt,
    unsigned int uc_tx_partition_base)
{
    unsigned int tx_buffer_count;
    unsigned int tx_buffer_count_pwr2;
    adf_nbuf_t buffer_vaddr;
    u_int32_t buffer_paddr;
    u_int32_t *header_ptr;
    u_int32_t *ring_vaddr;
    int return_code = 0;
    uint16_t idx;

    /* Allocate CE Write Index WORD */
    pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr =
        adf_os_mem_alloc_consistent(pdev->osdev,
            4,
            &pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr,
            adf_os_get_dma_mem_context(
                (&pdev->ipa_uc_tx_rsc.tx_ce_idx), memctx));
    if (!pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr) {
        adf_os_print("%s: CE Write Index WORD alloc fail", __func__);
        return -1;
    }

    /* Allocate TX COMP Ring */
    pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr =
        adf_os_mem_alloc_consistent(pdev->osdev,
            uc_tx_buf_cnt * 4,
            &pdev->ipa_uc_tx_rsc.tx_comp_base.paddr,
            adf_os_get_dma_mem_context(
                (&pdev->ipa_uc_tx_rsc.tx_comp_base), memctx));
    if (!pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr) {
        adf_os_print("%s: TX COMP ring alloc fail", __func__);
        return_code = -2;
        goto free_tx_ce_idx;
    }
    adf_os_mem_zero(pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr,
        uc_tx_buf_cnt * 4);

    /* Allocate TX BUF vAddress Storage */
    pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg =
        (adf_nbuf_t *)adf_os_mem_alloc(pdev->osdev,
            uc_tx_buf_cnt * sizeof(adf_nbuf_t));
    if (!pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg) {
        adf_os_print("%s: TX BUF POOL vaddr storage alloc fail",
            __func__);
        return_code = -3;
        goto free_tx_comp_base;
    }
    adf_os_mem_zero(pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg,
        uc_tx_buf_cnt * sizeof(adf_nbuf_t));

    ring_vaddr = pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr;
    /* Allocate TX buffers as many as possible */
    for (tx_buffer_count = 0;
         tx_buffer_count < (uc_tx_buf_cnt - 1);
         tx_buffer_count++) {
        buffer_vaddr = adf_nbuf_alloc(pdev->osdev,
            uc_tx_buf_sz, 0, 4, FALSE);
        if (!buffer_vaddr) {
            adf_os_print("%s: TX BUF alloc fail, allocated buffer count %d",
                __func__, tx_buffer_count);
            break;
        }

        /* Init buffer */
        adf_os_mem_zero(adf_nbuf_data(buffer_vaddr), uc_tx_buf_sz);
        header_ptr = (u_int32_t *)adf_nbuf_data(buffer_vaddr);
        /* pre-stamp the HTT header with the per-buffer msdu id */
        *header_ptr = HTT_IPA_UC_OFFLOAD_TX_HEADER_DEFAULT;
        header_ptr++;
        *header_ptr |= ((u_int16_t)uc_tx_partition_base + tx_buffer_count)
            << 16;

        adf_nbuf_map(pdev->osdev, buffer_vaddr,
            ADF_OS_DMA_BIDIRECTIONAL);
        buffer_paddr = adf_nbuf_get_frag_paddr_lo(buffer_vaddr, 0);
        header_ptr++;
        *header_ptr = (u_int32_t)(buffer_paddr + 16);
        header_ptr++;
        *header_ptr = 0xFFFFFFFF; /* FRAG Header */
        header_ptr++;
        *header_ptr = buffer_paddr + 32;

        *ring_vaddr = buffer_paddr;
        pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[tx_buffer_count] =
            buffer_vaddr;
        /* Memory barrier to ensure actual value updated */

        ring_vaddr++;
    }

    /*
     * Tx complete ring buffer count should be power of 2.
     * So, allocated Tx buffer count should be one less than ring buffer size.
     */
    tx_buffer_count_pwr2 =
        vos_rounddown_pow_of_two(tx_buffer_count + 1) - 1;
    if (tx_buffer_count > tx_buffer_count_pwr2) {
        adf_os_print("%s: Allocated Tx buffer count %d is rounded down to %d",
            __func__, tx_buffer_count, tx_buffer_count_pwr2);

        /* Free over allocated buffers below power of 2 */
        /* NOTE(review): buffers were mapped BIDIRECTIONAL above but are
         * unmapped FROM_DEVICE here -- confirm the adf unmap flag does
         * not need to match the map flag. */
        for(idx = tx_buffer_count_pwr2; idx < tx_buffer_count; idx++) {
            if (pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[idx]) {
                adf_nbuf_unmap(pdev->osdev,
                    pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[idx],
                    ADF_OS_DMA_FROM_DEVICE);
                adf_nbuf_free(
                    pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[idx]);
            }
        }
    }

    /* NOTE(review): tx_buffer_count_pwr2 is unsigned, so this "< 0" test
     * can never be true -- the rounddown-failure path is dead code.  If
     * it could trigger, jumping to free_tx_comp_base here would also leak
     * the buffer pool storage array and any remaining tx buffers. */
    if (tx_buffer_count_pwr2 < 0) {
        adf_os_print("%s: Failed to round down Tx buffer count %d",
            __func__, tx_buffer_count_pwr2);
        goto free_tx_comp_base;
    }

    pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt = tx_buffer_count_pwr2;

    return 0;

free_tx_comp_base:
    adf_os_mem_free_consistent(pdev->osdev,
        ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev) * 4,
        pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr,
        pdev->ipa_uc_tx_rsc.tx_comp_base.paddr,
        adf_os_get_dma_mem_context(
            (&pdev->ipa_uc_tx_rsc.tx_comp_base), memctx));
free_tx_ce_idx:
    adf_os_mem_free_consistent(pdev->osdev,
        4,
        pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr,
        pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr,
        adf_os_get_dma_mem_context(
            (&pdev->ipa_uc_tx_rsc.tx_ce_idx), memctx));
    return return_code;
}
/*
 * epping_tx_send_int() - hand one endpoint-ping frame to HTC for transmit.
 *
 * @skb:      network buffer holding an EPPING_HEADER-prefixed frame
 * @pAdapter: epping adapter owning the HTC handle and endpoint map
 *
 * Reserves a tx cookie, pads the frame so the HTC header lands aligned
 * (stripped again by the target), resolves the endpoint from the stream
 * number, and submits via HTCSendPkt().
 *
 * Returns 0 on success, -1 on failure.  On failure the cookie is released
 * here; the caller retains ownership of @skb.
 *
 * FIX: the original code leaked the cookie on the invalid-eid early
 * return; it is now freed on that path as well.
 */
static int epping_tx_send_int(adf_nbuf_t skb, epping_adapter_t *pAdapter)
{
   EPPING_HEADER *eppingHdr = (EPPING_HEADER *)adf_nbuf_data(skb);
   HTC_ENDPOINT_ID eid = ENDPOINT_UNUSED;
   struct epping_cookie * cookie = NULL;
   A_UINT8 ac = 0;
   A_STATUS ret = A_OK;
   int skb_len;
   /* copy the header now: A_NETBUF_PUSH below moves adf_nbuf_data() */
   EPPING_HEADER tmpHdr = *eppingHdr;

   /* allocate resource for this packet */
   cookie = epping_alloc_cookie(pAdapter->pEpping_ctx);
   /* no resource */
   if (cookie == NULL) {
      EPPING_LOG(VOS_TRACE_LEVEL_FATAL,
         "%s: epping_alloc_cookie returns no resource\n", __func__);
      return -1;
   }

   if (enb_tx_dump)
      epping_hex_dump((void *)eppingHdr, skb->len, __func__);
   /*
    * a quirk of linux, the payload of the frame is 32-bit aligned and thus
    * the addition of the HTC header will mis-align the start of the HTC
    * frame, so we add some padding which will be stripped off in the target
    */
   if (EPPING_ALIGNMENT_PAD > 0) {
      A_NETBUF_PUSH(skb, EPPING_ALIGNMENT_PAD);
   }
   /* prepare ep/HTC information */
   ac = eppingHdr->StreamNo_h;
   eid = pAdapter->pEpping_ctx->EppingEndpoint[ac];
   if (eid < 0 || eid >= EPPING_MAX_NUM_EPIDS) {
      EPPING_LOG(VOS_TRACE_LEVEL_FATAL,
         "%s: invalid eid = %d, ac = %d\n", __func__, eid, ac);
      /* release the reserved cookie; the original code leaked it here */
      epping_free_cookie(pAdapter->pEpping_ctx, cookie);
      return -1;
   }
   if (tmpHdr.Cmd_h == EPPING_CMD_RESET_RECV_CNT ||
       tmpHdr.Cmd_h == EPPING_CMD_CONT_RX_START) {
      epping_set_kperf_flag(pAdapter, eid, tmpHdr.CmdBuffer_t[0]);
   }
   if (pAdapter->pEpping_ctx->kperf[eid]) {
      switch (tmpHdr.Cmd_h) {
      case EPPING_CMD_NO_ECHO:
#ifdef HIF_PCI
         epping_tx_copier_schedule(pAdapter->pEpping_ctx, eid, skb);
#endif /* HIF_PCI */
         break;
      default:
         break;
      }
   }
   if (pAdapter->pEpping_ctx->kperf[eid] &&
       tmpHdr.Cmd_h == EPPING_CMD_NO_ECHO) {
      epping_tx_dup_pkt(pAdapter, eid, skb);
   }
   SET_HTC_PACKET_INFO_TX(&cookie->HtcPkt,
      cookie, adf_nbuf_data(skb), adf_nbuf_len(skb),
      eid, 0);
   SET_HTC_PACKET_NET_BUF_CONTEXT(&cookie->HtcPkt, skb);
   /* cache the length; the completion path may free skb before we log */
   skb_len = skb->len;
   /* send the packet */
   ret = HTCSendPkt(pAdapter->pEpping_ctx->HTCHandle, &cookie->HtcPkt);
   epping_log_packet(pAdapter, &tmpHdr, ret, __func__);
   if (ret != A_OK) {
      EPPING_LOG(VOS_TRACE_LEVEL_FATAL,
         "%s: HTCSendPkt failed, status = %d\n", __func__, ret);
      epping_free_cookie(pAdapter->pEpping_ctx, cookie);
      return -1;
   }
   pAdapter->stats.tx_bytes += skb_len;
   ++pAdapter->stats.tx_packets;
   /* periodic stats dump every EPPING_STATS_LOG_COUNT tx attempts */
   if (((pAdapter->stats.tx_packets +
         pAdapter->stats.tx_dropped) % EPPING_STATS_LOG_COUNT) == 0 &&
         (pAdapter->stats.tx_packets || pAdapter->stats.tx_dropped)) {
      epping_log_stats(pAdapter, __func__);
   }

   return 0;
}
/*
 * ol_rx_decap_to_8023() - convert a received MSDU to 802.3 (Ethernet) format.
 *
 * @vdev:     virtual device the frame arrived on
 * @msdu:     the rx buffer to convert in place
 * @info:     decap context; info->hdr holds the 802.11 MPDU header when the
 *            frame is a normal (non-A-MSDU-subframe) MSDU
 * @ethr_hdr: NULL for a plain 802.11 frame (HW ft disabled); non-NULL when
 *            the frame is a subframe of an A-MSDU and already carries an
 *            Ethernet header
 *
 * Strips the LLC/SNAP header where appropriate (BTEP, or RFC1042 unless the
 * ethertype is AARP/IPX, which must keep their SNAP encapsulation), resizes
 * the buffer head to make room for exactly ETHERNET_HDR_LEN bytes, and —
 * for non-subframe MSDUs — rebuilds the Ethernet addresses from the 802.11
 * header according to the ToDS/FromDS direction bits.
 *
 * NOTE(review): the tail of this function (copying the rebuilt header into
 * the buffer) lies outside this chunk of the file.
 */
static inline void
ol_rx_decap_to_8023 (
    struct ol_txrx_vdev_t *vdev,
    adf_nbuf_t msdu,
    struct ol_rx_decap_info_t *info,
    struct ethernet_hdr_t *ethr_hdr)
{
    struct llc_snap_hdr_t *llc_hdr;
    u_int16_t ether_type;
    u_int16_t l2_hdr_space;
    struct ieee80211_frame_addr4 *wh;
    u_int8_t local_buf[ETHERNET_HDR_LEN];
    u_int8_t *buf;

    /*
     * populate Ethernet header,
     * if ethr_hdr is null, rx frame is 802.11 format(HW ft disabled)
     * if ethr_hdr is not null, rx frame is "subfrm of amsdu".
     */
    buf = (u_int8_t *)adf_nbuf_data(msdu);
    llc_hdr = (struct llc_snap_hdr_t *)buf;
    /* ethertype is carried big-endian inside the SNAP header */
    ether_type = (llc_hdr->ethertype[0] << 8)|llc_hdr->ethertype[1];
    /* do llc remove if needed */
    l2_hdr_space = 0;
    if (IS_SNAP(llc_hdr)) {
        if (IS_BTEP(llc_hdr)) {
            /* remove llc*/
            l2_hdr_space += sizeof(struct llc_snap_hdr_t);
            llc_hdr = NULL;
        } else if (IS_RFC1042(llc_hdr)) {
            /* AARP/IPX must keep SNAP encapsulation (bridge-tunnel rule) */
            if ( !(ether_type == ETHERTYPE_AARP ||
                   ether_type == ETHERTYPE_IPX) ) {
                /* remove llc*/
                l2_hdr_space += sizeof(struct llc_snap_hdr_t);
                llc_hdr = NULL;
            }
        }
    }
    /* resize the head so exactly ETHERNET_HDR_LEN bytes precede payload */
    if (l2_hdr_space > ETHERNET_HDR_LEN) {
        buf = adf_nbuf_pull_head(msdu, l2_hdr_space - ETHERNET_HDR_LEN);
    } else if (l2_hdr_space < ETHERNET_HDR_LEN) {
        buf = adf_nbuf_push_head(msdu, ETHERNET_HDR_LEN - l2_hdr_space);
    }

    /* normal msdu(non-subfrm of A-MSDU) if ethr_hdr is null */
    if (ethr_hdr == NULL) {
        /* mpdu hdr should be present in info,
           re-create ethr_hdr based on mpdu hdr*/
        TXRX_ASSERT2(info->hdr_len != 0);
        wh = (struct ieee80211_frame_addr4 *)info->hdr;
        ethr_hdr = (struct ethernet_hdr_t *)local_buf;
        /* map 802.11 address fields to DA/SA per the DS direction bits */
        switch (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) {
        case IEEE80211_FC1_DIR_NODS:
            adf_os_mem_copy(ethr_hdr->dest_addr, wh->i_addr1,
                            ETHERNET_ADDR_LEN);
            adf_os_mem_copy(ethr_hdr->src_addr, wh->i_addr2,
                            ETHERNET_ADDR_LEN);
            break;
        case IEEE80211_FC1_DIR_TODS:
            adf_os_mem_copy(ethr_hdr->dest_addr, wh->i_addr3,
                            ETHERNET_ADDR_LEN);
            adf_os_mem_copy(ethr_hdr->src_addr, wh->i_addr2,
                            ETHERNET_ADDR_LEN);
            break;
        case IEEE80211_FC1_DIR_FROMDS:
            adf_os_mem_copy(ethr_hdr->dest_addr, wh->i_addr1,
                            ETHERNET_ADDR_LEN);
            adf_os_mem_copy(ethr_hdr->src_addr, wh->i_addr3,
                            ETHERNET_ADDR_LEN);
            break;
        case IEEE80211_FC1_DIR_DSTODS:
            adf_os_mem_copy(ethr_hdr->dest_addr, wh->i_addr3,
                            ETHERNET_ADDR_LEN);
            adf_os_mem_copy(ethr_hdr->src_addr, wh->i_addr4,
                            ETHERNET_ADDR_LEN);
            break;
        }
    }
    /* if the LLC header was stripped, restore the raw ethertype */
    if (llc_hdr == NULL) {
        ethr_hdr->ethertype[0] = (ether_type >> 8) & 0xff;
        ethr_hdr->ethertype[1] = (ether_type) & 0xff;
    }
   /*
    * Call netif_stop_queue frequently will impact the mboxping tx t-put.
    * Return HTC_SEND_FULL_KEEP directly in epping_tx_queue_full to avoid.
    */
   return HTC_SEND_FULL_KEEP;
}
#endif /* HIF_SDIO */

/*
 * epping_tx_complete_multiple() - HTC tx-completion callback for epping.
 *
 * @ctx:          epping_context_t registered with HTC
 * @pPacketQueue: batch of completed HTC_PACKETs
 *
 * For each completed packet: sanity-check the skb against the HTC packet,
 * collect the skb on a local (lock-free) queue, record whether the batch
 * was flushed (A_ECANCELED), and release the tx cookie.  After dropping
 * the data lock, frees all collected skbs and wakes the netdev queue
 * unless the last processed packet indicated a flush in progress.
 *
 * NOTE(review): `flushing` reflects only the status of the final packet
 * examined, since the success branch resets it to FALSE — confirm this is
 * the intended wake-up policy before changing it.
 */
void epping_tx_complete_multiple(void *ctx,
   HTC_PACKET_QUEUE *pPacketQueue)
{
   epping_context_t *pEpping_ctx = (epping_context_t *)ctx;
   epping_adapter_t *pAdapter = pEpping_ctx->epping_adapter;
   struct net_device* dev = pAdapter->dev;
   A_STATUS status;
   HTC_ENDPOINT_ID eid;
   adf_nbuf_t pktSkb;
   struct epping_cookie *cookie;
   A_BOOL flushing = FALSE;
   adf_nbuf_queue_t skb_queue;
   HTC_PACKET *htc_pkt;

   adf_nbuf_queue_init(&skb_queue);

   adf_os_spin_lock_bh(&pAdapter->data_lock);

   while (!HTC_QUEUE_EMPTY(pPacketQueue)) {
      htc_pkt = HTC_PACKET_DEQUEUE(pPacketQueue);
      if (htc_pkt == NULL)
         break;
      status=htc_pkt->Status;
      eid=htc_pkt->Endpoint;
      pktSkb=GET_HTC_PACKET_NET_BUF_CONTEXT(htc_pkt);
      cookie = htc_pkt->pPktContext;

      if (!pktSkb) {
         EPPING_LOG(VOS_TRACE_LEVEL_ERROR,
            "%s: pktSkb is NULL", __func__);
         ASSERT(0);
      } else {
         /* the HTC packet buffer must still point at the skb payload */
         if (htc_pkt->pBuffer != adf_nbuf_data(pktSkb)) {
            EPPING_LOG(VOS_TRACE_LEVEL_ERROR,
               "%s: htc_pkt buffer not equal to skb->data", __func__);
            ASSERT(0);
         }
         /* add this to the list, use faster non-lock API */
         adf_nbuf_queue_add(&skb_queue,pktSkb);

         if (A_SUCCESS(status))
            if (htc_pkt->ActualLength != adf_nbuf_len(pktSkb)) {
               EPPING_LOG(VOS_TRACE_LEVEL_ERROR,
                  "%s: htc_pkt length not equal to skb->len", __func__);
               ASSERT(0);
            }
      }

      EPPING_LOG(VOS_TRACE_LEVEL_INFO,
         "%s skb=%p data=%p len=0x%x eid=%d ",
         __func__, pktSkb, htc_pkt->pBuffer,
         htc_pkt->ActualLength, eid);

      if (A_FAILED(status)) {
         if (status == A_ECANCELED) {
            /* a packet was flushed */
            flushing = TRUE;
         }
         /* A_NO_RESOURCE is an expected transient; don't spam the log */
         if (status != A_NO_RESOURCE) {
            printk("%s() -TX ERROR, status: 0x%x\n", __func__,
               status);
         }
      } else {
         EPPING_LOG(VOS_TRACE_LEVEL_INFO, "%s: OK\n", __func__);
         flushing = FALSE;
      }

      epping_free_cookie(pAdapter->pEpping_ctx, cookie);
   }

   adf_os_spin_unlock_bh(&pAdapter->data_lock);

   /* free all skbs in our local list */
   while (adf_nbuf_queue_len(&skb_queue)) {
      /* use non-lock version */
      pktSkb = adf_nbuf_queue_remove(&skb_queue);
      if (pktSkb == NULL)
         break;
      adf_nbuf_tx_free(pktSkb, ADF_NBUF_PKT_ERROR);
      pEpping_ctx->total_tx_acks++;
   }

   /* don't wake the queue while HTC is flushing packets */
   if (!flushing) {
      netif_wake_queue(dev);
   }
}
/*
 * epping_tx_send() - entry point for transmitting an epping frame.
 *
 * @skb:      buffer containing the candidate epping packet
 * @pAdapter: epping adapter context
 *
 * Validates the epping header and stream-to-AC mapping, drains any queued
 * no-drop control packets first (re-queueing them and arming the retry
 * timer on failure), then transmits @skb itself.  Droppable packets that
 * fail to send are freed and counted in stats.tx_dropped; no-drop packets
 * are queued for the retry timer instead.
 *
 * Returns 0 on success or when the packet was queued; -ENOMEM on
 * validation failure or dropped transmit.
 *
 * FIX: corrected the misspelled log message ("Recived" -> "Received").
 */
int epping_tx_send(adf_nbuf_t skb, epping_adapter_t *pAdapter)
{
   adf_nbuf_t nodrop_skb;
   EPPING_HEADER *eppingHdr;
   A_UINT8 ac = 0;

   eppingHdr = (EPPING_HEADER *)adf_nbuf_data(skb);

   if (!IS_EPPING_PACKET(eppingHdr)) {
      EPPING_LOG(VOS_TRACE_LEVEL_FATAL,
         "%s: Received non endpoint ping packets\n", __func__);
      /* no packet to send, cleanup */
      adf_nbuf_free(skb);
      return -ENOMEM;
   }

   /* the stream ID is mapped to an access class */
   ac = eppingHdr->StreamNo_h;
   /* hard coded two ep ids */
   if (ac != 0 && ac != 1) {
      EPPING_LOG(VOS_TRACE_LEVEL_FATAL,
         "%s: ac %d is not mapped to mboxping service\n", __func__, ac);
      adf_nbuf_free(skb);
      return -ENOMEM;
   }

   /*
    * some EPPING packets cannot be dropped no matter what access class
    * it was sent on. A special care has been taken:
    * 1. when there is no TX resource, queue the control packets to
    *    a special queue
    * 2. when there is TX resource, send the queued control packets first
    *    and then other packets
    * 3. a timer launches to check if there is queued control packets and
    *    flush them
    */

   /* check the nodrop queue first */
   while ((nodrop_skb = adf_nbuf_queue_remove(&pAdapter->nodrop_queue))) {
      HTCSetNodropPkt(pAdapter->pEpping_ctx->HTCHandle, TRUE);
      if (epping_tx_send_int(nodrop_skb, pAdapter)) {
         EPPING_LOG(VOS_TRACE_LEVEL_FATAL,
            "%s: nodrop: %p xmit fail\n", __func__, nodrop_skb);
         /* fail to xmit so put the nodrop packet to the nodrop queue */
         adf_nbuf_queue_insert_head(&pAdapter->nodrop_queue, nodrop_skb);
         /* no cookie so free the current skb */
         goto tx_fail;
      } else {
         HTCSetNodropPkt(pAdapter->pEpping_ctx->HTCHandle, FALSE);
         EPPING_LOG(VOS_TRACE_LEVEL_INFO,
            "%s: nodrop: %p xmit ok\n", __func__, nodrop_skb);
      }
   }

   /* send the original packet */
   if (epping_tx_send_int(skb, pAdapter))
      goto tx_fail;

   return 0;

tx_fail:
   if (!IS_EPING_PACKET_NO_DROP(eppingHdr)) {
      /* allow to drop the skb so drop it */
      adf_nbuf_free(skb);
      ++pAdapter->stats.tx_dropped;
      EPPING_LOG(VOS_TRACE_LEVEL_FATAL,
         "%s: Tx skb %p dropped, stats.tx_dropped = %ld\n",
         __func__, skb, pAdapter->stats.tx_dropped);
      return -ENOMEM;
   } else {
      EPPING_LOG(VOS_TRACE_LEVEL_FATAL,
         "%s: nodrop: %p queued\n", __func__, skb);
      /* keep the no-drop packet and arm the retry timer */
      adf_nbuf_queue_add(&pAdapter->nodrop_queue, skb);
      adf_os_spin_lock_bh(&pAdapter->data_lock);
      if (pAdapter->epping_timer_state != EPPING_TX_TIMER_RUNNING) {
         pAdapter->epping_timer_state = EPPING_TX_TIMER_RUNNING;
         adf_os_timer_mod(&pAdapter->epping_timer, TX_RETRY_TIMEOUT_IN_MS);
      }
      adf_os_spin_unlock_bh(&pAdapter->data_lock);
   }

   return 0;
}
/* WMI command API */
/*
 * wmi_unified_cmd_send() - validate, wrap, and submit a WMI command to HTC.
 *
 * @wmi_handle: WMI layer handle
 * @buf:        command payload (TLV format, no WMI_CMD_HDR yet)
 * @len:        payload length in bytes, excluding WMI_CMD_HDR
 * @cmd_id:     WMI command identifier
 *
 * Rejects commands while the target is suspended (except wakeup/resume),
 * tags PM-related commands during runtime-PM, TLV-checks the payload,
 * prepends WMI_CMD_HDR, and hands the buffer to HTCSendPkt().  Triggers
 * recovery if the pending-command count reaches WMI_MAX_CMDS.
 *
 * Returns EOK on success; -EBUSY / -ENOMEM / -1 on failure.  On failure
 * the caller retains ownership of @buf.
 */
int wmi_unified_cmd_send(wmi_unified_t wmi_handle, wmi_buf_t buf, int len,
			 WMI_CMD_ID cmd_id)
{
	HTC_PACKET *pkt;
	A_STATUS status;
	void *vos_context;
	struct ol_softc *scn;
	A_UINT16 htc_tag = 0;

	/* during runtime-PM, go tag PM-related commands instead of
	 * failing on the suspended check */
	if (wmi_get_runtime_pm_inprogress(wmi_handle))
		goto skip_suspend_check;

	/* only wakeup/resume commands may be sent to a suspended target */
	if (adf_os_atomic_read(&wmi_handle->is_target_suspended) &&
	    ( (WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID != cmd_id) &&
	      (WMI_PDEV_RESUME_CMDID != cmd_id)) ) {
		pr_err("%s: Target is suspended could not send WMI command: %d\n",
		       __func__, cmd_id);
		VOS_ASSERT(0);
		return -EBUSY;
	} else
		goto dont_tag;

skip_suspend_check:
	switch(cmd_id) {
	case WMI_WOW_ENABLE_CMDID:
	case WMI_PDEV_SUSPEND_CMDID:
	case WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID:
	case WMI_WOW_ADD_WAKE_PATTERN_CMDID:
	case WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID:
	case WMI_PDEV_RESUME_CMDID:
	case WMI_WOW_DEL_WAKE_PATTERN_CMDID:
#ifdef FEATURE_WLAN_D0WOW
	case WMI_D0_WOW_ENABLE_DISABLE_CMDID:
#endif
		/* note: intentional fallthrough into default after tagging */
		htc_tag = HTC_TX_PACKET_TAG_AUTO_PM;
	default:
		break;
	}

dont_tag:
	/* Do sanity check on the TLV parameter structure */
	{
		void *buf_ptr = (void *) adf_nbuf_data(buf);

		if (wmitlv_check_command_tlv_params(NULL, buf_ptr, len, cmd_id)
			!= 0) {
			adf_os_print("\nERROR: %s: Invalid WMI Parameter Buffer for Cmd:%d\n",
				     __func__, cmd_id);
			return -1;
		}
	}

	/* reserve headroom for the WMI command header */
	if (adf_nbuf_push_head(buf, sizeof(WMI_CMD_HDR)) == NULL) {
		pr_err("%s, Failed to send cmd %x, no memory\n",
		       __func__, cmd_id);
		return -ENOMEM;
	}

	WMI_SET_FIELD(adf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID, cmd_id);

	/* incremented here; decremented on any failure path below or on
	 * tx completion elsewhere */
	adf_os_atomic_inc(&wmi_handle->pending_cmds);
	if (adf_os_atomic_read(&wmi_handle->pending_cmds) >= WMI_MAX_CMDS) {
		vos_context = vos_get_global_context(VOS_MODULE_ID_WDA, NULL);
		scn = vos_get_context(VOS_MODULE_ID_HIF, vos_context);
		pr_err("\n%s: hostcredits = %d\n", __func__,
		       wmi_get_host_credits(wmi_handle));
		HTC_dump_counter_info(wmi_handle->htc_handle);
		//dump_CE_register(scn);
		//dump_CE_debug_register(scn->hif_sc);
		adf_os_atomic_dec(&wmi_handle->pending_cmds);
		pr_err("%s: MAX 1024 WMI Pending cmds reached.\n", __func__);
		/* firmware is likely wedged; kick off SSR recovery */
		vos_set_logp_in_progress(VOS_MODULE_ID_VOSS, TRUE);
		schedule_work(&recovery_work);
		return -EBUSY;
	}

	pkt = adf_os_mem_alloc(NULL, sizeof(*pkt));
	if (!pkt) {
		adf_os_atomic_dec(&wmi_handle->pending_cmds);
		pr_err("%s, Failed to alloc htc packet %x, no memory\n",
		       __func__, cmd_id);
		return -ENOMEM;
	}

	SET_HTC_PACKET_INFO_TX(pkt,
			NULL,
			adf_nbuf_data(buf),
			len + sizeof(WMI_CMD_HDR),
			/* htt_host_data_dl_len(buf)+20 */
			wmi_handle->wmi_endpoint_id,
			htc_tag);

	SET_HTC_PACKET_NET_BUF_CONTEXT(pkt, buf);

	WMA_LOGD("Send WMI command:%s command_id:%d",
		 get_wmi_cmd_string(cmd_id), cmd_id);

#ifdef WMI_INTERFACE_EVENT_LOGGING
	adf_os_spin_lock_bh(&wmi_handle->wmi_record_lock);
	/*Record 16 bytes of WMI cmd data - exclude TLV and WMI headers*/
	WMI_COMMAND_RECORD(cmd_id ,((u_int32_t *)adf_nbuf_data(buf) + 2));
	adf_os_spin_unlock_bh(&wmi_handle->wmi_record_lock);
#endif

	status = HTCSendPkt(wmi_handle->htc_handle, pkt);

	if (A_OK != status) {
		adf_os_atomic_dec(&wmi_handle->pending_cmds);
		pr_err("%s %d, HTCSendPkt failed\n", __func__, __LINE__);
	}

	return ((status == A_OK) ? EOK : -1);
}
/* Generic Target to host Msg/event handler for low priority messages
  Low priority message are handler in a different handler called from
  this function . So that the most likely succes path like Rx and
  Tx comp has little code foot print
 */
/*
 * htt_t2h_msg_handler() - dispatch a target-to-host HTT message.
 *
 * @context: htt_pdev_t for this device
 * @pkt:     HTC packet whose pPktContext is the HTT message nbuf
 *
 * Handles the hot-path message types inline (RX_IND, TX_COMPL_IND,
 * RX_PN_IND, TX_INSPECT_IND, RX_IN_ORD_PADDR_IND); everything else is
 * deferred to htt_t2h_lp_msg_handler().
 *
 * Buffer ownership: the message nbuf is freed here at the end, EXCEPT on
 * the paths that `return` instead of `break` — the high-latency RX_IND
 * path (ownership passes to ol_rx_indication_handler) and the default
 * case (ownership passes to htt_t2h_lp_msg_handler, which frees it).
 */
void
htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
{
    struct htt_pdev_t *pdev = (struct htt_pdev_t *) context;
    adf_nbuf_t htt_t2h_msg = (adf_nbuf_t) pkt->pPktContext;
    u_int32_t *msg_word;
    enum htt_t2h_msg_type msg_type;

    /* check for successful message reception */
    if (pkt->Status != A_OK) {
        /* A_ECANCELED means a deliberate flush, not an HTC error */
        if (pkt->Status != A_ECANCELED) {
            pdev->stats.htc_err_cnt++;
        }
        adf_nbuf_free(htt_t2h_msg);
        return;
    }
#ifdef HTT_RX_RESTORE
    /* drop everything while the rx ring is being rebuilt */
    if (adf_os_unlikely(pdev->rx_ring.rx_reset)) {
        adf_os_print("rx restore ..\n");
        adf_nbuf_free(htt_t2h_msg);
        return;
    }
#endif

    /* confirm alignment */
    HTT_ASSERT3((((unsigned long) adf_nbuf_data(htt_t2h_msg)) & 0x3) == 0);

    msg_word = (u_int32_t *) adf_nbuf_data(htt_t2h_msg);
    msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
    switch (msg_type) {
    case HTT_T2H_MSG_TYPE_RX_IND:
        {
            unsigned num_mpdu_ranges;
            unsigned num_msdu_bytes;
            u_int16_t peer_id;
            u_int8_t tid;

            if (adf_os_unlikely(pdev->cfg.is_full_reorder_offload)) {
                adf_os_print("HTT_T2H_MSG_TYPE_RX_IND not supported with full "
                             "reorder offload\n");
                break;
            }
            peer_id = HTT_RX_IND_PEER_ID_GET(*msg_word);
            tid = HTT_RX_IND_EXT_TID_GET(*msg_word);

            /* bounds-check the firmware-supplied tid before use */
            if (tid >= OL_TXRX_NUM_EXT_TIDS) {
                adf_os_print("HTT_T2H_MSG_TYPE_RX_IND, invalid tid %d\n", tid);
                break;
            }

            num_msdu_bytes = HTT_RX_IND_FW_RX_DESC_BYTES_GET(
                *(msg_word + 2 + HTT_RX_PPDU_DESC_SIZE32));
            /*
             * 1 word for the message header,
             * HTT_RX_PPDU_DESC_SIZE32 words for the FW rx PPDU desc
             * 1 word to specify the number of MSDU bytes,
             * 1 word for every 4 MSDU bytes (round up),
             * 1 word for the MPDU range header
             */
            pdev->rx_mpdu_range_offset_words =
                (HTT_RX_IND_HDR_BYTES + num_msdu_bytes + 3) >> 2;
            num_mpdu_ranges = HTT_RX_IND_NUM_MPDU_RANGES_GET(*(msg_word + 1));
            pdev->rx_ind_msdu_byte_idx = 0;
            if (pdev->cfg.is_high_latency) {
                /*
                 * TODO: remove copy after stopping reuse skb on HIF layer
                 * because SDIO HIF may reuse skb before upper layer release it
                 */
                /* ownership of htt_t2h_msg transfers to the handler here;
                 * return (not break) so it is not freed below */
                ol_rx_indication_handler(
                    pdev->txrx_pdev, htt_t2h_msg, peer_id, tid,
                    num_mpdu_ranges);
                return;
            } else {
                ol_rx_indication_handler(
                    pdev->txrx_pdev, htt_t2h_msg, peer_id, tid,
                    num_mpdu_ranges);
            }
            break;
        }
    case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
        {
            int num_msdus;
            enum htt_tx_status status;

            /* status - no enum translation needed */
            status = HTT_TX_COMPL_IND_STATUS_GET(*msg_word);
            num_msdus = HTT_TX_COMPL_IND_NUM_GET(*msg_word);
            if (num_msdus & 0x1) {
                struct htt_tx_compl_ind_base *compl = (void *)msg_word;

                /*
                 * Host CPU endianness can be different from FW CPU. This
                 * can result in even and odd MSDU IDs being switched. If
                 * this happens, copy the switched final odd MSDU ID from
                 * location payload[size], to location payload[size-1],
                 * where the message handler function expects to find it
                 */
                if (compl->payload[num_msdus] != HTT_TX_COMPL_INV_MSDU_ID) {
                    compl->payload[num_msdus - 1] =
                        compl->payload[num_msdus];
                }
            }
            if (pdev->cfg.is_high_latency) {
                ol_tx_target_credit_update(
                    pdev->txrx_pdev, num_msdus /* 1 credit per MSDU */);
            }
            ol_tx_completion_handler(
                pdev->txrx_pdev, num_msdus, status, msg_word + 1);
            HTT_TX_SCHED(pdev);
            break;
        }
    case HTT_T2H_MSG_TYPE_RX_PN_IND:
        {
            u_int16_t peer_id;
            u_int8_t tid, pn_ie_cnt, *pn_ie=NULL;
            int seq_num_start, seq_num_end;

            /*First dword */
            peer_id = HTT_RX_PN_IND_PEER_ID_GET(*msg_word);
            tid = HTT_RX_PN_IND_EXT_TID_GET(*msg_word);

            msg_word++;
            /*Second dword */
            seq_num_start = HTT_RX_PN_IND_SEQ_NUM_START_GET(*msg_word);
            seq_num_end = HTT_RX_PN_IND_SEQ_NUM_END_GET(*msg_word);
            pn_ie_cnt = HTT_RX_PN_IND_PN_IE_CNT_GET(*msg_word);

            msg_word++;
            /*Third dword*/
            if (pn_ie_cnt) {
                pn_ie = (u_int8_t *)msg_word;
            }
            ol_rx_pn_ind_handler(
                pdev->txrx_pdev, peer_id, tid, seq_num_start, seq_num_end,
                pn_ie_cnt, pn_ie);
            break;
        }
    case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
        {
            int num_msdus;

            num_msdus = HTT_TX_COMPL_IND_NUM_GET(*msg_word);
            if (num_msdus & 0x1) {
                struct htt_tx_compl_ind_base *compl = (void *)msg_word;

                /*
                 * Host CPU endianness can be different from FW CPU. This
                 * can result in even and odd MSDU IDs being switched. If
                 * this happens, copy the switched final odd MSDU ID from
                 * location payload[size], to location payload[size-1],
                 * where the message handler function expects to find it
                 */
                if (compl->payload[num_msdus] != HTT_TX_COMPL_INV_MSDU_ID) {
                    compl->payload[num_msdus - 1] =
                        compl->payload[num_msdus];
                }
            }
            ol_tx_inspect_handler(pdev->txrx_pdev, num_msdus, msg_word + 1);
            HTT_TX_SCHED(pdev);
            break;
        }
    case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND:
        {
            u_int16_t peer_id;
            u_int8_t tid;
            u_int8_t offload_ind, frag_ind;

            if (adf_os_unlikely(!pdev->cfg.is_full_reorder_offload)) {
                adf_os_print("HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND not supported"
                             " when full reorder offload is disabled\n");
                break;
            }

            if (adf_os_unlikely(pdev->cfg.is_high_latency)) {
                adf_os_print("HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND not supported"
                             " on high latency\n");
                break;
            }

            peer_id = HTT_RX_IN_ORD_PADDR_IND_PEER_ID_GET(*msg_word);
            tid = HTT_RX_IN_ORD_PADDR_IND_EXT_TID_GET(*msg_word);
            offload_ind = HTT_RX_IN_ORD_PADDR_IND_OFFLOAD_GET(*msg_word);
            frag_ind = HTT_RX_IN_ORD_PADDR_IND_FRAG_GET(*msg_word);

            if (adf_os_unlikely(frag_ind)) {
                ol_rx_frag_indication_handler(pdev->txrx_pdev, htt_t2h_msg,
                                              peer_id, tid);
                break;
            }

            ol_rx_in_order_indication_handler(pdev->txrx_pdev, htt_t2h_msg,
                                              peer_id, tid, offload_ind);
            break;
        }
    default:
        /* lp handler takes ownership of (and frees) htt_t2h_msg */
        htt_t2h_lp_msg_handler(context, htt_t2h_msg);
        return ;
    };

    /* Free the indication buffer */
    adf_nbuf_free(htt_t2h_msg);
}
/* Target to host Msg/event handler for low priority messages*/
/*
 * htt_t2h_lp_msg_handler() - handle infrequent (low-priority) HTT messages.
 *
 * @context:     htt_pdev_t for this device
 * @htt_t2h_msg: the HTT message nbuf (ownership transfers to this function)
 *
 * Invoked from htt_t2h_msg_handler()'s default case for message types that
 * are off the rx/tx-completion fast path: version negotiation, rx flush,
 * offload delivery, fragments, ADDBA/DELBA, peer map/unmap, security
 * indication, mgmt tx completion, FW stats, pktlog, credit updates, and
 * IPA uC op responses.  Always frees @htt_t2h_msg before returning.
 */
void
htt_t2h_lp_msg_handler(void *context, adf_nbuf_t htt_t2h_msg )
{
    struct htt_pdev_t *pdev = (struct htt_pdev_t *) context;
    u_int32_t *msg_word;
    enum htt_t2h_msg_type msg_type;

    msg_word = (u_int32_t *) adf_nbuf_data(htt_t2h_msg);
    msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
    switch (msg_type) {
    case HTT_T2H_MSG_TYPE_VERSION_CONF:
        {
            htc_pm_runtime_put(pdev->htc_pdev);
            pdev->tgt_ver.major = HTT_VER_CONF_MAJOR_GET(*msg_word);
            pdev->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word);
            adf_os_print("target uses HTT version %d.%d; host uses %d.%d\n",
                pdev->tgt_ver.major, pdev->tgt_ver.minor,
                HTT_CURRENT_VERSION_MAJOR, HTT_CURRENT_VERSION_MINOR);
            if (pdev->tgt_ver.major != HTT_CURRENT_VERSION_MAJOR) {
                adf_os_print("*** Incompatible host/target HTT versions!\n");
            }
            /* abort if the target is incompatible with the host */
            adf_os_assert(pdev->tgt_ver.major == HTT_CURRENT_VERSION_MAJOR);
            if (pdev->tgt_ver.minor != HTT_CURRENT_VERSION_MINOR) {
                adf_os_print(
                    "*** Warning: host/target HTT versions are different, "
                    "though compatible!\n");
            }
            break;
        }
    case HTT_T2H_MSG_TYPE_RX_FLUSH:
        {
            u_int16_t peer_id;
            u_int8_t tid;
            int seq_num_start, seq_num_end;
            enum htt_rx_flush_action action;

            peer_id = HTT_RX_FLUSH_PEER_ID_GET(*msg_word);
            tid = HTT_RX_FLUSH_TID_GET(*msg_word);
            seq_num_start = HTT_RX_FLUSH_SEQ_NUM_START_GET(*(msg_word+1));
            seq_num_end = HTT_RX_FLUSH_SEQ_NUM_END_GET(*(msg_word+1));
            /* MPDU status 1 == frames are valid: release, else discard */
            action =
                HTT_RX_FLUSH_MPDU_STATUS_GET(*(msg_word+1)) == 1 ?
                htt_rx_flush_release : htt_rx_flush_discard;
            ol_rx_flush_handler(
                pdev->txrx_pdev,
                peer_id, tid,
                seq_num_start,
                seq_num_end,
                action);
            break;
        }
    case HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND:
        {
            int msdu_cnt;

            msdu_cnt = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_CNT_GET(*msg_word);
            ol_rx_offload_deliver_ind_handler(
                pdev->txrx_pdev,
                htt_t2h_msg,
                msdu_cnt);
            break;
        }
    case HTT_T2H_MSG_TYPE_RX_FRAG_IND:
        {
            u_int16_t peer_id;
            u_int8_t tid;

            peer_id = HTT_RX_FRAG_IND_PEER_ID_GET(*msg_word);
            tid = HTT_RX_FRAG_IND_EXT_TID_GET(*msg_word);
            HTT_RX_FRAG_SET_LAST_MSDU(pdev, htt_t2h_msg);
            ol_rx_frag_indication_handler(
                pdev->txrx_pdev,
                htt_t2h_msg,
                peer_id,
                tid);
            break;
        }
    case HTT_T2H_MSG_TYPE_RX_ADDBA:
        {
            u_int16_t peer_id;
            u_int8_t tid;
            u_int8_t win_sz;
            u_int16_t start_seq_num;

            /*
             * FOR NOW, the host doesn't need to know the initial
             * sequence number for rx aggregation.
             * Thus, any value will do - specify 0.
             */
            start_seq_num = 0;
            peer_id = HTT_RX_ADDBA_PEER_ID_GET(*msg_word);
            tid = HTT_RX_ADDBA_TID_GET(*msg_word);
            win_sz = HTT_RX_ADDBA_WIN_SIZE_GET(*msg_word);
            ol_rx_addba_handler(
                pdev->txrx_pdev, peer_id, tid, win_sz, start_seq_num,
                0 /* success */);
            break;
        }
    case HTT_T2H_MSG_TYPE_RX_DELBA:
        {
            u_int16_t peer_id;
            u_int8_t tid;

            peer_id = HTT_RX_DELBA_PEER_ID_GET(*msg_word);
            tid = HTT_RX_DELBA_TID_GET(*msg_word);
            ol_rx_delba_handler(pdev->txrx_pdev, peer_id, tid);
            break;
        }
    case HTT_T2H_MSG_TYPE_PEER_MAP:
        {
            u_int8_t mac_addr_deswizzle_buf[HTT_MAC_ADDR_LEN];
            u_int8_t *peer_mac_addr;
            u_int16_t peer_id;
            u_int8_t vdev_id;

            peer_id = HTT_RX_PEER_MAP_PEER_ID_GET(*msg_word);
            vdev_id = HTT_RX_PEER_MAP_VDEV_ID_GET(*msg_word);
            /* MAC address arrives word-swizzled; normalize it first */
            peer_mac_addr = htt_t2h_mac_addr_deswizzle(
                (u_int8_t *) (msg_word+1), &mac_addr_deswizzle_buf[0]);
            ol_rx_peer_map_handler(
                pdev->txrx_pdev, peer_id, vdev_id, peer_mac_addr,
                1/*can tx*/);
            break;
        }
    case HTT_T2H_MSG_TYPE_PEER_UNMAP:
        {
            u_int16_t peer_id;

            peer_id = HTT_RX_PEER_UNMAP_PEER_ID_GET(*msg_word);
            ol_rx_peer_unmap_handler(pdev->txrx_pdev, peer_id);
            break;
        }
    case HTT_T2H_MSG_TYPE_SEC_IND:
        {
            u_int16_t peer_id;
            enum htt_sec_type sec_type;
            int is_unicast;

            peer_id = HTT_SEC_IND_PEER_ID_GET(*msg_word);
            sec_type = HTT_SEC_IND_SEC_TYPE_GET(*msg_word);
            is_unicast = HTT_SEC_IND_UNICAST_GET(*msg_word);
            msg_word++; /* point to the first part of the Michael key */
            ol_rx_sec_ind_handler(
                pdev->txrx_pdev, peer_id, sec_type, is_unicast,
                msg_word, msg_word+2);
            break;
        }
    case HTT_T2H_MSG_TYPE_MGMT_TX_COMPL_IND:
        {
            struct htt_mgmt_tx_compl_ind *compl_msg;

            compl_msg = (struct htt_mgmt_tx_compl_ind *)(msg_word + 1);
            if (pdev->cfg.is_high_latency) {
                ol_tx_target_credit_update(pdev->txrx_pdev, 1);
            }
            ol_tx_single_completion_handler(
                pdev->txrx_pdev, compl_msg->status, compl_msg->desc_id);
            htc_pm_runtime_put(pdev->htc_pdev);
            HTT_TX_SCHED(pdev);
            break;
        }
#if TXRX_STATS_LEVEL != TXRX_STATS_LEVEL_OFF
    case HTT_T2H_MSG_TYPE_STATS_CONF:
        {
            u_int64_t cookie;
            u_int8_t *stats_info_list;

            /* 64-bit cookie arrives as two 32-bit words (low, high) */
            cookie = *(msg_word + 1);
            cookie |= ((u_int64_t) (*(msg_word + 2))) << 32;

            stats_info_list = (u_int8_t *) (msg_word + 3);
            htc_pm_runtime_put(pdev->htc_pdev);
            ol_txrx_fw_stats_handler(pdev->txrx_pdev, cookie,
                stats_info_list);
            break;
        }
#endif
#ifndef REMOVE_PKT_LOG
    case HTT_T2H_MSG_TYPE_PKTLOG:
        {
            u_int32_t *pl_hdr;
            u_int32_t log_type;

            pl_hdr = (msg_word + 1);
            log_type = (*(pl_hdr + 1) & ATH_PKTLOG_HDR_LOG_TYPE_MASK) >>
                ATH_PKTLOG_HDR_LOG_TYPE_SHIFT;
            /* fan the pktlog record out to the matching WDI event */
            if (log_type == PKTLOG_TYPE_TX_CTRL ||
                (log_type) == PKTLOG_TYPE_TX_STAT ||
                (log_type) == PKTLOG_TYPE_TX_MSDU_ID ||
                (log_type) == PKTLOG_TYPE_TX_FRM_HDR ||
                (log_type) == PKTLOG_TYPE_TX_VIRT_ADDR) {
                wdi_event_handler(WDI_EVENT_TX_STATUS,
                    pdev->txrx_pdev, pl_hdr);
            } else if ((log_type) == PKTLOG_TYPE_RC_FIND) {
                wdi_event_handler(WDI_EVENT_RATE_FIND,
                    pdev->txrx_pdev, pl_hdr);
            } else if ((log_type) == PKTLOG_TYPE_RC_UPDATE) {
                wdi_event_handler(
                    WDI_EVENT_RATE_UPDATE, pdev->txrx_pdev, pl_hdr);
            } else if ((log_type) == PKTLOG_TYPE_RX_STAT) {
                wdi_event_handler(WDI_EVENT_RX_DESC,
                    pdev->txrx_pdev, pl_hdr);
            }
            break;
        }
#endif
    case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
        {
            u_int32_t htt_credit_delta_abs;
            int32_t htt_credit_delta;
            int sign;

            /* delta is carried as (magnitude, sign-bit) — recombine */
            htt_credit_delta_abs = HTT_TX_CREDIT_DELTA_ABS_GET(*msg_word);
            sign = HTT_TX_CREDIT_SIGN_BIT_GET(*msg_word) ? -1 : 1;
            htt_credit_delta = sign * htt_credit_delta_abs;
            ol_tx_credit_completion_handler(pdev->txrx_pdev,
                htt_credit_delta);
            break;
        }
#ifdef IPA_UC_OFFLOAD
    case HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE:
        {
            u_int8_t op_code;
            u_int16_t len;
            u_int8_t *op_msg_buffer;
            u_int8_t *msg_start_ptr;

            htc_pm_runtime_put(pdev->htc_pdev);
            msg_start_ptr = (u_int8_t *)msg_word;
            op_code = HTT_WDI_IPA_OP_RESPONSE_OP_CODE_GET(*msg_word);
            msg_word++;
            len = HTT_WDI_IPA_OP_RESPONSE_RSP_LEN_GET(*msg_word);

            /* copy out: the nbuf is freed below, but the op response is
             * consumed asynchronously by the txrx layer */
            op_msg_buffer = adf_os_mem_alloc(NULL,
                sizeof(struct htt_wdi_ipa_op_response_t) + len);
            if (!op_msg_buffer) {
                adf_os_print("OPCODE messsage buffer alloc fail");
                break;
            }
            adf_os_mem_copy(op_msg_buffer,
                msg_start_ptr,
                sizeof(struct htt_wdi_ipa_op_response_t) + len);
            ol_txrx_ipa_uc_op_response(pdev->txrx_pdev, op_msg_buffer);
            break;
        }
#endif /* IPA_UC_OFFLOAD */
    default:
        break;
    };

    /* Free the indication buffer */
    adf_nbuf_free(htt_t2h_msg);
}
/*
 * __wmi_control_rx() - process one received WMI event buffer.
 *
 * @wmi_handle: WMI layer handle
 * @evt_buf:    event nbuf, WMI_CMD_HDR still attached (consumed here)
 *
 * Strips the WMI header, TLV-validates/pads the payload, then either
 * dispatches to the handler registered for the event id (events at or
 * above WMI_EVT_GRP_START_ID) or handles the legacy SERVICE_READY /
 * READY events inline.  Always frees any TLV allocations and @evt_buf.
 *
 * FIX: idx is now `int` — wmi_unified_get_event_handler_ix() returns a
 * signed -1 sentinel, which the original stored in a u_int32_t and then
 * compared against -1 (worked only via implicit unsigned conversion).
 */
void __wmi_control_rx(struct wmi_unified *wmi_handle, wmi_buf_t evt_buf)
{
	u_int32_t id;
	u_int8_t *data;
	u_int32_t len;
	void *wmi_cmd_struct_ptr = NULL;
	int tlv_ok_status = 0;

	id = WMI_GET_FIELD(adf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);

	if (adf_nbuf_pull_head(evt_buf, sizeof(WMI_CMD_HDR)) == NULL)
		goto end;

	data = adf_nbuf_data(evt_buf);
	len = adf_nbuf_len(evt_buf);

	/* Validate and pad(if necessary) the TLVs */
	tlv_ok_status = wmitlv_check_and_pad_event_tlvs(wmi_handle->scn_handle,
							data, len, id,
							&wmi_cmd_struct_ptr);
	if (tlv_ok_status != 0) {
		pr_err("%s: Error: id=0x%d, wmitlv_check_and_pad_tlvs ret=%d\n",
		       __func__, id, tlv_ok_status);
		goto end;
	}

#ifdef FEATURE_WLAN_D0WOW
	if (wmi_get_d0wow_flag(wmi_handle))
		pr_debug("%s: WMI event ID is 0x%x\n", __func__, id);
#endif

	if (id >= WMI_EVT_GRP_START_ID(WMI_GRP_START)) {
		/* signed: -1 means "no handler registered" */
		int idx = wmi_unified_get_event_handler_ix(wmi_handle, id);

		if (idx == -1) {
			pr_err("%s : event handler is not registered: event id 0x%x\n",
			       __func__, id);
			goto end;
		}
#ifdef WMI_INTERFACE_EVENT_LOGGING
		adf_os_spin_lock_bh(&wmi_handle->wmi_record_lock);
		/* Exclude 4 bytes of TLV header */
		WMI_EVENT_RECORD(id, ((u_int8_t *)data + 4));
		adf_os_spin_unlock_bh(&wmi_handle->wmi_record_lock);
#endif
		/* Call the WMI registered event handler */
		wmi_handle->event_handler[idx](wmi_handle->scn_handle,
					       wmi_cmd_struct_ptr, len);
		goto end;
	}

	switch (id) {
	default:
		pr_info("%s: Unhandled WMI event %d\n", __func__, id);
		break;
	case WMI_SERVICE_READY_EVENTID:
		pr_info("%s: WMI UNIFIED SERVICE READY event\n", __func__);
		wma_rx_service_ready_event(wmi_handle->scn_handle,
					   wmi_cmd_struct_ptr);
		break;
	case WMI_READY_EVENTID:
		pr_info("%s: WMI UNIFIED READY event\n", __func__);
		wma_rx_ready_event(wmi_handle->scn_handle, wmi_cmd_struct_ptr);
		break;
	}
end:
	/* safe even when no TLV allocation happened for this id */
	wmitlv_free_allocated_event_tlvs(id, &wmi_cmd_struct_ptr);
	adf_nbuf_free(evt_buf);
}
/*
 * Temporarily added to support older WMI events. We should move all events
 * to unified when the target is ready to support it.
 */
/*
 * wmi_control_rx() - HTC rx callback for WMI events.
 *
 * @ctx:        struct wmi_unified handle registered with HTC
 * @htc_packet: HTC packet whose pPktContext is the event nbuf
 *
 * TX_PAUSE and WOW_WAKEUP_HOST events are latency-sensitive and are
 * dispatched immediately in this (tasklet) context; every other event is
 * queued to wmi_handle->event_queue and processed by rx_event_work.
 *
 * FIXES vs. original:
 *  - `id`/`data` were declared under
 *    `#if defined(WMI_INTERFACE_EVENT_LOGGING) || !defined(QCA_CONFIG_SMP)`
 *    but used unconditionally — a compile failure when event logging is
 *    off and QCA_CONFIG_SMP is on.  They are now declared unconditionally.
 *  - `idx` is `int` to match the signed -1 sentinel returned by
 *    wmi_unified_get_event_handler_ix().
 *  - the tasklet-context error paths now free @evt_buf instead of
 *    leaking it.
 */
void wmi_control_rx(void *ctx, HTC_PACKET *htc_packet)
{
	struct wmi_unified *wmi_handle = (struct wmi_unified *)ctx;
	wmi_buf_t evt_buf;
	u_int32_t len;
	void *wmi_cmd_struct_ptr = NULL;
	int idx = 0;
	int tlv_ok_status = 0;
	u_int32_t id;
	u_int8_t *data;

	evt_buf = (wmi_buf_t) htc_packet->pPktContext;
	id = WMI_GET_FIELD(adf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);

	/* TX_PAUSE EVENT should be handled with tasklet context */
	if ((WMI_TX_PAUSE_EVENTID == id) ||
	    (WMI_WOW_WAKEUP_HOST_EVENTID == id)) {
		if (adf_nbuf_pull_head(evt_buf, sizeof(WMI_CMD_HDR)) == NULL) {
			/* free the buffer we own; original code leaked it */
			adf_nbuf_free(evt_buf);
			return;
		}

		data = adf_nbuf_data(evt_buf);
		len = adf_nbuf_len(evt_buf);
		tlv_ok_status = wmitlv_check_and_pad_event_tlvs(
				wmi_handle->scn_handle,
				data, len, id, &wmi_cmd_struct_ptr);
		if (tlv_ok_status != 0) {
			if (tlv_ok_status == 1) {
				/* status 1: payload usable as-is, no
				 * separate TLV allocation was made */
				wmi_cmd_struct_ptr = data;
			} else {
				/* invalid TLVs; drop the event */
				adf_nbuf_free(evt_buf);
				return;
			}
		}

		idx = wmi_unified_get_event_handler_ix(wmi_handle, id);
		if (idx == -1) {
			wmitlv_free_allocated_event_tlvs(id,
					&wmi_cmd_struct_ptr);
			adf_nbuf_free(evt_buf);
			return;
		}
		wmi_handle->event_handler[idx](wmi_handle->scn_handle,
					       wmi_cmd_struct_ptr, len);
		wmitlv_free_allocated_event_tlvs(id, &wmi_cmd_struct_ptr);
		adf_nbuf_free(evt_buf);
		return;
	}

#ifdef WMI_INTERFACE_EVENT_LOGGING
	data = adf_nbuf_data(evt_buf);

	adf_os_spin_lock_bh(&wmi_handle->wmi_record_lock);
	/* Exclude 4 bytes of TLV header */
	WMI_RX_EVENT_RECORD(id, ((u_int8_t *)data + 4));
	adf_os_spin_unlock_bh(&wmi_handle->wmi_record_lock);
#endif

	/* defer everything else to the rx event worker */
	adf_os_spin_lock_bh(&wmi_handle->eventq_lock);
	adf_nbuf_queue_add(&wmi_handle->event_queue, evt_buf);
	adf_os_spin_unlock_bh(&wmi_handle->eventq_lock);
	schedule_work(&wmi_handle->rx_event_work);
}