A_STATUS
htt_h2t_sync_msg(struct htt_pdev_t *pdev, u_int8_t sync_cnt)
{
    struct htt_htc_pkt *pkt;
    adf_nbuf_t msg;
    u_int32_t *msg_word;

    pkt = htt_htc_pkt_alloc(pdev);
    if (!pkt) {
        return A_NO_MEMORY;
    }

    /* show that this is not a tx frame download (not required, but helpful) */
    pkt->msdu_id = HTT_TX_COMPL_INV_MSDU_ID;
    pkt->pdev_ctxt = NULL; /* not used during send-done callback */

    msg = adf_nbuf_alloc(
        pdev->osdev,
        HTT_MSG_BUF_SIZE(HTT_H2T_SYNC_MSG_SZ),
        /* reserve room for HTC header */
        HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, FALSE);
    if (!msg) {
        htt_htc_pkt_free(pdev, pkt);
        return A_NO_MEMORY;
    }

    /* set the length of the message */
    adf_nbuf_put_tail(msg, HTT_H2T_SYNC_MSG_SZ);

    /* fill in the message contents */
    msg_word = (u_int32_t *) adf_nbuf_data(msg);

    /* rewind beyond alignment pad to get to the HTC header reserved area */
    adf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);

    *msg_word = 0;
    HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_SYNC);
    HTT_H2T_SYNC_COUNT_SET(*msg_word, sync_cnt);

    SET_HTC_PACKET_INFO_TX(
        &pkt->htc_pkt,
        htt_h2t_send_complete_free_netbuf,
        adf_nbuf_data(msg),
        adf_nbuf_len(msg),
        pdev->htc_endpoint,
        HTC_TX_PACKET_TAG_RUNTIME_PUT);

    SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);

#ifdef ATH_11AC_TXCOMPACT
    if (HTCSendPkt(pdev->htc_pdev, &pkt->htc_pkt) == A_OK) {
        htt_htc_misc_pkt_list_add(pdev, pkt);
    }
#else
    HTCSendPkt(pdev->htc_pdev, &pkt->htc_pkt);
#endif

    if ((pdev->cfg.is_high_latency) &&
        (!pdev->cfg.default_tx_comp_req)) {
        ol_tx_target_credit_update(pdev->txrx_pdev, -1);
    }

    return A_OK;
}
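/*
 * Illustrative sketch (not part of the driver build) of the nbuf layout
 * shared by the H2T message send functions in this file: adf_nbuf_alloc()
 * reserves HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING bytes of headroom,
 * adf_nbuf_put_tail() claims the HTT message body, and adf_nbuf_push_head()
 * rewinds only the alignment padding, leaving HTC_HEADER_LEN bytes of
 * headroom for HTC to prepend its header.  The function name and the
 * assert are hypothetical restatements of that layout, not driver code.
 */
#if 0
static void htt_h2t_buf_layout_sketch(adf_nbuf_t msg, unsigned body_sz)
{
    u_int8_t *body = adf_nbuf_data(msg);    /* start of the HTT message body */

    adf_nbuf_put_tail(msg, body_sz);        /* claim the message body bytes */
    adf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);

    /* data now points HTC_HDR_ALIGNMENT_PADDING bytes before the body,
     * with HTC_HEADER_LEN bytes of headroom still reserved for HTC */
    adf_os_assert(adf_nbuf_data(msg) == body - HTC_HDR_ALIGNMENT_PADDING);
}
#endif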
/*
 * Generic target-to-host msg/event handler.
 * Low priority messages are handled in a separate handler called from
 * this function, so that the most likely success paths (rx and tx
 * completion) have a small code footprint.
 */
void
htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
{
    struct htt_pdev_t *pdev = (struct htt_pdev_t *) context;
    adf_nbuf_t htt_t2h_msg = (adf_nbuf_t) pkt->pPktContext;
    u_int32_t *msg_word;
    enum htt_t2h_msg_type msg_type;

    /* check for successful message reception */
    if (pkt->Status != A_OK) {
        if (pkt->Status != A_ECANCELED) {
            pdev->stats.htc_err_cnt++;
        }
        adf_nbuf_free(htt_t2h_msg);
        return;
    }

#ifdef HTT_RX_RESTORE
    if (adf_os_unlikely(pdev->rx_ring.rx_reset)) {
        adf_os_print("rx restore ..\n");
        adf_nbuf_free(htt_t2h_msg);
        return;
    }
#endif

    /* confirm alignment */
    HTT_ASSERT3((((unsigned long) adf_nbuf_data(htt_t2h_msg)) & 0x3) == 0);

    msg_word = (u_int32_t *) adf_nbuf_data(htt_t2h_msg);
    msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
    switch (msg_type) {
    case HTT_T2H_MSG_TYPE_RX_IND:
        {
            unsigned num_mpdu_ranges;
            unsigned num_msdu_bytes;
            u_int16_t peer_id;
            u_int8_t tid;

            if (adf_os_unlikely(pdev->cfg.is_full_reorder_offload)) {
                adf_os_print("HTT_T2H_MSG_TYPE_RX_IND not supported with full "
                             "reorder offload\n");
                break;
            }
            peer_id = HTT_RX_IND_PEER_ID_GET(*msg_word);
            tid = HTT_RX_IND_EXT_TID_GET(*msg_word);

            if (tid >= OL_TXRX_NUM_EXT_TIDS) {
                adf_os_print("HTT_T2H_MSG_TYPE_RX_IND, invalid tid %d\n", tid);
                break;
            }

            num_msdu_bytes = HTT_RX_IND_FW_RX_DESC_BYTES_GET(
                *(msg_word + 2 + HTT_RX_PPDU_DESC_SIZE32));
            /*
             * 1 word for the message header,
             * HTT_RX_PPDU_DESC_SIZE32 words for the FW rx PPDU desc,
             * 1 word to specify the number of MSDU bytes,
             * 1 word for every 4 MSDU bytes (round up),
             * 1 word for the MPDU range header
             */
            pdev->rx_mpdu_range_offset_words =
                (HTT_RX_IND_HDR_BYTES + num_msdu_bytes + 3) >> 2;
            num_mpdu_ranges = HTT_RX_IND_NUM_MPDU_RANGES_GET(*(msg_word + 1));
            pdev->rx_ind_msdu_byte_idx = 0;

            if (pdev->cfg.is_high_latency) {
                /*
                 * TODO: remove copy after stopping reuse skb on HIF layer
                 * because SDIO HIF may reuse skb before upper layer release it
                 */
                ol_rx_indication_handler(
                    pdev->txrx_pdev, htt_t2h_msg, peer_id, tid,
                    num_mpdu_ranges);
                return;
            } else {
                ol_rx_indication_handler(
                    pdev->txrx_pdev, htt_t2h_msg, peer_id, tid,
                    num_mpdu_ranges);
            }
            break;
        }
    case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
        {
            int num_msdus;
            enum htt_tx_status status;

            /* status - no enum translation needed */
            status = HTT_TX_COMPL_IND_STATUS_GET(*msg_word);
            num_msdus = HTT_TX_COMPL_IND_NUM_GET(*msg_word);
            if (num_msdus & 0x1) {
                struct htt_tx_compl_ind_base *compl = (void *)msg_word;
                /*
                 * Host CPU endianness can be different from FW CPU. This
                 * can result in even and odd MSDU IDs being switched. If
                 * this happens, copy the switched final odd MSDU ID from
                 * location payload[size], to location payload[size-1],
                 * where the message handler function expects to find it
                 */
                if (compl->payload[num_msdus] != HTT_TX_COMPL_INV_MSDU_ID) {
                    compl->payload[num_msdus - 1] = compl->payload[num_msdus];
                }
            }

            if (pdev->cfg.is_high_latency) {
                ol_tx_target_credit_update(
                    pdev->txrx_pdev, num_msdus /* 1 credit per MSDU */);
            }
            ol_tx_completion_handler(
                pdev->txrx_pdev, num_msdus, status, msg_word + 1);
            HTT_TX_SCHED(pdev);
            break;
        }
    case HTT_T2H_MSG_TYPE_RX_PN_IND:
        {
            u_int16_t peer_id;
            u_int8_t tid, pn_ie_cnt, *pn_ie = NULL;
            int seq_num_start, seq_num_end;

            /* first dword */
            peer_id = HTT_RX_PN_IND_PEER_ID_GET(*msg_word);
            tid = HTT_RX_PN_IND_EXT_TID_GET(*msg_word);

            msg_word++;
            /* second dword */
            seq_num_start = HTT_RX_PN_IND_SEQ_NUM_START_GET(*msg_word);
            seq_num_end = HTT_RX_PN_IND_SEQ_NUM_END_GET(*msg_word);
            pn_ie_cnt = HTT_RX_PN_IND_PN_IE_CNT_GET(*msg_word);

            msg_word++;
            /* third dword */
            if (pn_ie_cnt) {
                pn_ie = (u_int8_t *)msg_word;
            }

            ol_rx_pn_ind_handler(
                pdev->txrx_pdev, peer_id, tid, seq_num_start, seq_num_end,
                pn_ie_cnt, pn_ie);
            break;
        }
    case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
        {
            int num_msdus;

            num_msdus = HTT_TX_COMPL_IND_NUM_GET(*msg_word);
            if (num_msdus & 0x1) {
                struct htt_tx_compl_ind_base *compl = (void *)msg_word;
                /*
                 * Host CPU endianness can be different from FW CPU. This
                 * can result in even and odd MSDU IDs being switched. If
                 * this happens, copy the switched final odd MSDU ID from
                 * location payload[size], to location payload[size-1],
                 * where the message handler function expects to find it
                 */
                if (compl->payload[num_msdus] != HTT_TX_COMPL_INV_MSDU_ID) {
                    compl->payload[num_msdus - 1] = compl->payload[num_msdus];
                }
            }
            ol_tx_inspect_handler(pdev->txrx_pdev, num_msdus, msg_word + 1);
            HTT_TX_SCHED(pdev);
            break;
        }
    case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND:
        {
            u_int16_t peer_id;
            u_int8_t tid;
            u_int8_t offload_ind, frag_ind;

            if (adf_os_unlikely(!pdev->cfg.is_full_reorder_offload)) {
                adf_os_print("HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND not "
                             "supported when full reorder offload is "
                             "disabled\n");
                break;
            }

            if (adf_os_unlikely(pdev->cfg.is_high_latency)) {
                adf_os_print("HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND not "
                             "supported on high latency\n");
                break;
            }

            peer_id = HTT_RX_IN_ORD_PADDR_IND_PEER_ID_GET(*msg_word);
            tid = HTT_RX_IN_ORD_PADDR_IND_EXT_TID_GET(*msg_word);
            offload_ind = HTT_RX_IN_ORD_PADDR_IND_OFFLOAD_GET(*msg_word);
            frag_ind = HTT_RX_IN_ORD_PADDR_IND_FRAG_GET(*msg_word);

            if (adf_os_unlikely(frag_ind)) {
                ol_rx_frag_indication_handler(
                    pdev->txrx_pdev, htt_t2h_msg, peer_id, tid);
                break;
            }

            ol_rx_in_order_indication_handler(
                pdev->txrx_pdev, htt_t2h_msg, peer_id, tid, offload_ind);
            break;
        }
    default:
        htt_t2h_lp_msg_handler(context, htt_t2h_msg);
        return;
    }

    /* free the indication buffer */
    adf_nbuf_free(htt_t2h_msg);
}
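/*
 * Illustrative sketch (not part of the driver build) of the odd-count
 * MSDU ID fixup performed for HTT_T2H_MSG_TYPE_TX_COMPL_IND and
 * HTT_T2H_MSG_TYPE_TX_INSPECT_IND above.  The 16-bit MSDU IDs are packed
 * two per 32-bit word; if the host byte-swaps each 32-bit word relative
 * to the firmware, the two half-words of every word trade places.  For
 * an odd ID count the final real ID then lands in payload[num_msdus]
 * while payload[num_msdus - 1] holds the invalid filler, so the handler
 * copies it back into place.  The function name below is hypothetical.
 */
#if 0
static void fixup_odd_msdu_id_sketch(u_int16_t *payload, int num_msdus)
{
    /* firmware packs: word k = { id[2k], id[2k+1] }; an odd count is
     * padded with HTT_TX_COMPL_INV_MSDU_ID.  After a per-word byte swap
     * the pad and the last real ID exchange half-word positions. */
    if ((num_msdus & 0x1) &&
        payload[num_msdus] != HTT_TX_COMPL_INV_MSDU_ID) {
        payload[num_msdus - 1] = payload[num_msdus];
    }
}
#endif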
/* target-to-host msg/event handler for low priority messages */
void
htt_t2h_lp_msg_handler(void *context, adf_nbuf_t htt_t2h_msg)
{
    struct htt_pdev_t *pdev = (struct htt_pdev_t *) context;
    u_int32_t *msg_word;
    enum htt_t2h_msg_type msg_type;

    msg_word = (u_int32_t *) adf_nbuf_data(htt_t2h_msg);
    msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
    switch (msg_type) {
    case HTT_T2H_MSG_TYPE_VERSION_CONF:
        {
            htc_pm_runtime_put(pdev->htc_pdev);
            pdev->tgt_ver.major = HTT_VER_CONF_MAJOR_GET(*msg_word);
            pdev->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word);
            adf_os_print("target uses HTT version %d.%d; host uses %d.%d\n",
                         pdev->tgt_ver.major, pdev->tgt_ver.minor,
                         HTT_CURRENT_VERSION_MAJOR,
                         HTT_CURRENT_VERSION_MINOR);
            if (pdev->tgt_ver.major != HTT_CURRENT_VERSION_MAJOR) {
                adf_os_print("*** Incompatible host/target HTT versions!\n");
            }
            /* abort if the target is incompatible with the host */
            adf_os_assert(pdev->tgt_ver.major == HTT_CURRENT_VERSION_MAJOR);
            if (pdev->tgt_ver.minor != HTT_CURRENT_VERSION_MINOR) {
                adf_os_print(
                    "*** Warning: host/target HTT versions are different, "
                    "though compatible!\n");
            }
            break;
        }
    case HTT_T2H_MSG_TYPE_RX_FLUSH:
        {
            u_int16_t peer_id;
            u_int8_t tid;
            int seq_num_start, seq_num_end;
            enum htt_rx_flush_action action;

            peer_id = HTT_RX_FLUSH_PEER_ID_GET(*msg_word);
            tid = HTT_RX_FLUSH_TID_GET(*msg_word);
            seq_num_start = HTT_RX_FLUSH_SEQ_NUM_START_GET(*(msg_word + 1));
            seq_num_end = HTT_RX_FLUSH_SEQ_NUM_END_GET(*(msg_word + 1));
            action =
                HTT_RX_FLUSH_MPDU_STATUS_GET(*(msg_word + 1)) == 1 ?
                htt_rx_flush_release : htt_rx_flush_discard;
            ol_rx_flush_handler(
                pdev->txrx_pdev, peer_id, tid, seq_num_start, seq_num_end,
                action);
            break;
        }
    case HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND:
        {
            int msdu_cnt;

            msdu_cnt = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_CNT_GET(*msg_word);
            ol_rx_offload_deliver_ind_handler(
                pdev->txrx_pdev, htt_t2h_msg, msdu_cnt);
            break;
        }
    case HTT_T2H_MSG_TYPE_RX_FRAG_IND:
        {
            u_int16_t peer_id;
            u_int8_t tid;

            peer_id = HTT_RX_FRAG_IND_PEER_ID_GET(*msg_word);
            tid = HTT_RX_FRAG_IND_EXT_TID_GET(*msg_word);
            HTT_RX_FRAG_SET_LAST_MSDU(pdev, htt_t2h_msg);
            ol_rx_frag_indication_handler(
                pdev->txrx_pdev, htt_t2h_msg, peer_id, tid);
            break;
        }
    case HTT_T2H_MSG_TYPE_RX_ADDBA:
        {
            u_int16_t peer_id;
            u_int8_t tid;
            u_int8_t win_sz;
            u_int16_t start_seq_num;

            /*
             * FOR NOW, the host doesn't need to know the initial
             * sequence number for rx aggregation.
             * Thus, any value will do - specify 0.
             */
            start_seq_num = 0;
            peer_id = HTT_RX_ADDBA_PEER_ID_GET(*msg_word);
            tid = HTT_RX_ADDBA_TID_GET(*msg_word);
            win_sz = HTT_RX_ADDBA_WIN_SIZE_GET(*msg_word);
            ol_rx_addba_handler(
                pdev->txrx_pdev, peer_id, tid, win_sz, start_seq_num,
                0 /* success */);
            break;
        }
    case HTT_T2H_MSG_TYPE_RX_DELBA:
        {
            u_int16_t peer_id;
            u_int8_t tid;

            peer_id = HTT_RX_DELBA_PEER_ID_GET(*msg_word);
            tid = HTT_RX_DELBA_TID_GET(*msg_word);
            ol_rx_delba_handler(pdev->txrx_pdev, peer_id, tid);
            break;
        }
    case HTT_T2H_MSG_TYPE_PEER_MAP:
        {
            u_int8_t mac_addr_deswizzle_buf[HTT_MAC_ADDR_LEN];
            u_int8_t *peer_mac_addr;
            u_int16_t peer_id;
            u_int8_t vdev_id;

            peer_id = HTT_RX_PEER_MAP_PEER_ID_GET(*msg_word);
            vdev_id = HTT_RX_PEER_MAP_VDEV_ID_GET(*msg_word);
            peer_mac_addr = htt_t2h_mac_addr_deswizzle(
                (u_int8_t *) (msg_word + 1), &mac_addr_deswizzle_buf[0]);
            ol_rx_peer_map_handler(
                pdev->txrx_pdev, peer_id, vdev_id, peer_mac_addr,
                1 /* can tx */);
            break;
        }
    case HTT_T2H_MSG_TYPE_PEER_UNMAP:
        {
            u_int16_t peer_id;

            peer_id = HTT_RX_PEER_UNMAP_PEER_ID_GET(*msg_word);
            ol_rx_peer_unmap_handler(pdev->txrx_pdev, peer_id);
            break;
        }
    case HTT_T2H_MSG_TYPE_SEC_IND:
        {
            u_int16_t peer_id;
            enum htt_sec_type sec_type;
            int is_unicast;

            peer_id = HTT_SEC_IND_PEER_ID_GET(*msg_word);
            sec_type = HTT_SEC_IND_SEC_TYPE_GET(*msg_word);
            is_unicast = HTT_SEC_IND_UNICAST_GET(*msg_word);
            msg_word++; /* point to the first part of the Michael key */
            ol_rx_sec_ind_handler(
                pdev->txrx_pdev, peer_id, sec_type, is_unicast,
                msg_word, msg_word + 2);
            break;
        }
    case HTT_T2H_MSG_TYPE_MGMT_TX_COMPL_IND:
        {
            struct htt_mgmt_tx_compl_ind *compl_msg;

            compl_msg = (struct htt_mgmt_tx_compl_ind *)(msg_word + 1);

            if (pdev->cfg.is_high_latency) {
                ol_tx_target_credit_update(pdev->txrx_pdev, 1);
            }
            ol_tx_single_completion_handler(
                pdev->txrx_pdev, compl_msg->status, compl_msg->desc_id);
            htc_pm_runtime_put(pdev->htc_pdev);
            HTT_TX_SCHED(pdev);
            break;
        }
#if TXRX_STATS_LEVEL != TXRX_STATS_LEVEL_OFF
    case HTT_T2H_MSG_TYPE_STATS_CONF:
        {
            u_int64_t cookie;
            u_int8_t *stats_info_list;

            cookie = *(msg_word + 1);
            cookie |= ((u_int64_t) (*(msg_word + 2))) << 32;

            stats_info_list = (u_int8_t *) (msg_word + 3);
            htc_pm_runtime_put(pdev->htc_pdev);
            ol_txrx_fw_stats_handler(
                pdev->txrx_pdev, cookie, stats_info_list);
            break;
        }
#endif
#ifndef REMOVE_PKT_LOG
    case HTT_T2H_MSG_TYPE_PKTLOG:
        {
            u_int32_t *pl_hdr;
            u_int32_t log_type;

            pl_hdr = (msg_word + 1);
            log_type =
                (*(pl_hdr + 1) & ATH_PKTLOG_HDR_LOG_TYPE_MASK) >>
                ATH_PKTLOG_HDR_LOG_TYPE_SHIFT;
            if (log_type == PKTLOG_TYPE_TX_CTRL ||
                log_type == PKTLOG_TYPE_TX_STAT ||
                log_type == PKTLOG_TYPE_TX_MSDU_ID ||
                log_type == PKTLOG_TYPE_TX_FRM_HDR ||
                log_type == PKTLOG_TYPE_TX_VIRT_ADDR) {
                wdi_event_handler(
                    WDI_EVENT_TX_STATUS, pdev->txrx_pdev, pl_hdr);
            } else if (log_type == PKTLOG_TYPE_RC_FIND) {
                wdi_event_handler(
                    WDI_EVENT_RATE_FIND, pdev->txrx_pdev, pl_hdr);
            } else if (log_type == PKTLOG_TYPE_RC_UPDATE) {
                wdi_event_handler(
                    WDI_EVENT_RATE_UPDATE, pdev->txrx_pdev, pl_hdr);
            } else if (log_type == PKTLOG_TYPE_RX_STAT) {
                wdi_event_handler(
                    WDI_EVENT_RX_DESC, pdev->txrx_pdev, pl_hdr);
            }
            break;
        }
#endif
    case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
        {
            u_int32_t htt_credit_delta_abs;
            int32_t htt_credit_delta;
            int sign;

            htt_credit_delta_abs = HTT_TX_CREDIT_DELTA_ABS_GET(*msg_word);
            sign = HTT_TX_CREDIT_SIGN_BIT_GET(*msg_word) ? -1 : 1;
            htt_credit_delta = sign * htt_credit_delta_abs;
            ol_tx_credit_completion_handler(
                pdev->txrx_pdev, htt_credit_delta);
            break;
        }
#ifdef IPA_UC_OFFLOAD
    case HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE:
        {
            u_int8_t op_code;
            u_int16_t len;
            u_int8_t *op_msg_buffer;
            u_int8_t *msg_start_ptr;

            htc_pm_runtime_put(pdev->htc_pdev);
            msg_start_ptr = (u_int8_t *)msg_word;
            op_code = HTT_WDI_IPA_OP_RESPONSE_OP_CODE_GET(*msg_word);
            msg_word++;
            len = HTT_WDI_IPA_OP_RESPONSE_RSP_LEN_GET(*msg_word);

            op_msg_buffer = adf_os_mem_alloc(NULL,
                sizeof(struct htt_wdi_ipa_op_response_t) + len);
            if (!op_msg_buffer) {
                adf_os_print("OPCODE message buffer alloc fail");
                break;
            }
            adf_os_mem_copy(op_msg_buffer, msg_start_ptr,
                sizeof(struct htt_wdi_ipa_op_response_t) + len);
            ol_txrx_ipa_uc_op_response(pdev->txrx_pdev, op_msg_buffer);
            break;
        }
#endif /* IPA_UC_OFFLOAD */
    default:
        break;
    }

    /* free the indication buffer */
    adf_nbuf_free(htt_t2h_msg);
}
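/*
 * Illustrative sketch (not part of the driver build) of the
 * sign-magnitude decode used for HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND
 * above: the message carries an absolute credit delta plus a separate
 * sign bit, so e.g. abs = 3 with the sign bit set decodes to -3.
 * The function name is hypothetical.
 */
#if 0
static int32_t decode_credit_delta_sketch(u_int32_t msg_word)
{
    u_int32_t delta_abs = HTT_TX_CREDIT_DELTA_ABS_GET(msg_word);
    int sign = HTT_TX_CREDIT_SIGN_BIT_GET(msg_word) ? -1 : 1;

    return sign * (int32_t) delta_abs;
}
#endif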
int
htt_h2t_dbg_stats_get(
    struct htt_pdev_t *pdev,
    u_int32_t stats_type_upload_mask,
    u_int32_t stats_type_reset_mask,
    u_int8_t cfg_stat_type,
    u_int32_t cfg_val,
    u_int64_t cookie)
{
    struct htt_htc_pkt *pkt;
    adf_nbuf_t msg;
    u_int32_t *msg_word;
    uint16_t htc_tag = 1;

    pkt = htt_htc_pkt_alloc(pdev);
    if (!pkt) {
        return -1; /* failure */
    }

    if (stats_type_upload_mask >= 1 << HTT_DBG_NUM_STATS ||
        stats_type_reset_mask >= 1 << HTT_DBG_NUM_STATS) {
        /* FIX THIS - add more details? */
        adf_os_print("%#x %#x stats not supported\n",
                     stats_type_upload_mask, stats_type_reset_mask);
        /* don't leak the pkt allocated above on this error path */
        htt_htc_pkt_free(pdev, pkt);
        return -1; /* failure */
    }

    if (stats_type_reset_mask) {
        htc_tag = HTC_TX_PACKET_TAG_RUNTIME_PUT;
    }

    /* show that this is not a tx frame download (not required, but helpful) */
    pkt->msdu_id = HTT_TX_COMPL_INV_MSDU_ID;
    pkt->pdev_ctxt = NULL; /* not used during send-done callback */

    msg = adf_nbuf_alloc(
        pdev->osdev,
        HTT_MSG_BUF_SIZE(HTT_H2T_STATS_REQ_MSG_SZ),
        /* reserve room for HTC header */
        HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, FALSE);
    if (!msg) {
        htt_htc_pkt_free(pdev, pkt);
        return -1; /* failure */
    }

    /* set the length of the message */
    adf_nbuf_put_tail(msg, HTT_H2T_STATS_REQ_MSG_SZ);

    /* fill in the message contents */
    msg_word = (u_int32_t *) adf_nbuf_data(msg);

    /* rewind beyond alignment pad to get to the HTC header reserved area */
    adf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);

    *msg_word = 0;
    HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_STATS_REQ);
    HTT_H2T_STATS_REQ_UPLOAD_TYPES_SET(*msg_word, stats_type_upload_mask);

    msg_word++;
    *msg_word = 0;
    HTT_H2T_STATS_REQ_RESET_TYPES_SET(*msg_word, stats_type_reset_mask);

    msg_word++;
    *msg_word = 0;
    HTT_H2T_STATS_REQ_CFG_VAL_SET(*msg_word, cfg_val);
    HTT_H2T_STATS_REQ_CFG_STAT_TYPE_SET(*msg_word, cfg_stat_type);

    /* cookie LSBs */
    msg_word++;
    *msg_word = cookie & 0xffffffff;

    /* cookie MSBs */
    msg_word++;
    *msg_word = cookie >> 32;

    SET_HTC_PACKET_INFO_TX(
        &pkt->htc_pkt,
        htt_h2t_send_complete_free_netbuf,
        adf_nbuf_data(msg),
        adf_nbuf_len(msg),
        pdev->htc_endpoint,
        htc_tag);

    SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);

#ifdef ATH_11AC_TXCOMPACT
    if (HTCSendPkt(pdev->htc_pdev, &pkt->htc_pkt) == A_OK) {
        htt_htc_misc_pkt_list_add(pdev, pkt);
    }
#else
    HTCSendPkt(pdev->htc_pdev, &pkt->htc_pkt);
#endif

    if ((pdev->cfg.is_high_latency) &&
        (!pdev->cfg.default_tx_comp_req)) {
        ol_tx_target_credit_update(pdev->txrx_pdev, -1);
    }

    return 0;
}
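/*
 * Illustrative sketch (not part of the driver build): the 64-bit stats
 * cookie is split across two 32-bit message words (LSBs first) by
 * htt_h2t_dbg_stats_get() above and reassembled in the
 * HTT_T2H_MSG_TYPE_STATS_CONF case of htt_t2h_lp_msg_handler().
 * The helper names are hypothetical.
 */
#if 0
static void cookie_split_sketch(u_int64_t cookie,
                                u_int32_t *lsb, u_int32_t *msb)
{
    *lsb = cookie & 0xffffffff; /* word placed first in the request */
    *msb = cookie >> 32;        /* word placed second */
}

static u_int64_t cookie_join_sketch(u_int32_t lsb, u_int32_t msb)
{
    /* matches the STATS_CONF decode: cookie = w1 | ((u64)w2 << 32) */
    return ((u_int64_t) msb << 32) | lsb;
}
#endif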
A_STATUS
htt_h2t_rx_ring_cfg_msg_hl(struct htt_pdev_t *pdev)
{
    struct htt_htc_pkt *pkt;
    adf_nbuf_t msg;
    u_int32_t *msg_word;

    pkt = htt_htc_pkt_alloc(pdev);
    if (!pkt) {
        return A_ERROR; /* failure */
    }

    /* show that this is not a tx frame download (not required, but helpful) */
    pkt->msdu_id = HTT_TX_COMPL_INV_MSDU_ID;
    pkt->pdev_ctxt = NULL; /* not used during send-done callback */

    msg = adf_nbuf_alloc(
        pdev->osdev,
        HTT_MSG_BUF_SIZE(HTT_RX_RING_CFG_BYTES(1)),
        /* reserve room for the HTC header */
        HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
    if (!msg) {
        htt_htc_pkt_free(pdev, pkt);
        return A_ERROR; /* failure */
    }
    /*
     * Set the length of the message.
     * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
     * separately during the below call to adf_nbuf_push_head.
     * The contribution from the HTC header is added separately inside HTC.
     */
    adf_nbuf_put_tail(msg, HTT_RX_RING_CFG_BYTES(1));

    /* fill in the message contents */
    msg_word = (u_int32_t *) adf_nbuf_data(msg);

    /* rewind beyond alignment pad to get to the HTC header reserved area */
    adf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);

    *msg_word = 0;
    HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_RING_CFG);
    HTT_RX_RING_CFG_NUM_RINGS_SET(*msg_word, 1);

    msg_word++;
    *msg_word = 0;
    HTT_RX_RING_CFG_IDX_SHADOW_REG_PADDR_SET(
        *msg_word, pdev->rx_ring.alloc_idx.paddr);

    msg_word++;
    *msg_word = 0;
    HTT_RX_RING_CFG_BASE_PADDR_SET(*msg_word, pdev->rx_ring.base_paddr);

    msg_word++;
    *msg_word = 0;
    HTT_RX_RING_CFG_LEN_SET(*msg_word, pdev->rx_ring.size);
    HTT_RX_RING_CFG_BUF_SZ_SET(*msg_word, HTT_RX_BUF_SIZE);

    /*
     * FIX THIS: if the FW creates a complete translated rx descriptor,
     * then the MAC DMA of the HW rx descriptor should be disabled.
     */
    msg_word++;
    *msg_word = 0;
    HTT_RX_RING_CFG_ENABLED_802_11_HDR_SET(*msg_word, 0);
    HTT_RX_RING_CFG_ENABLED_MSDU_PAYLD_SET(*msg_word, 1);
    HTT_RX_RING_CFG_ENABLED_PPDU_START_SET(*msg_word, 0);
    HTT_RX_RING_CFG_ENABLED_PPDU_END_SET(*msg_word, 0);
    HTT_RX_RING_CFG_ENABLED_MPDU_START_SET(*msg_word, 0);
    HTT_RX_RING_CFG_ENABLED_MPDU_END_SET(*msg_word, 0);
    HTT_RX_RING_CFG_ENABLED_MSDU_START_SET(*msg_word, 0);
    HTT_RX_RING_CFG_ENABLED_MSDU_END_SET(*msg_word, 0);
    HTT_RX_RING_CFG_ENABLED_RX_ATTN_SET(*msg_word, 0);
    HTT_RX_RING_CFG_ENABLED_FRAG_INFO_SET(*msg_word, 0); /* always present? */
    HTT_RX_RING_CFG_ENABLED_UCAST_SET(*msg_word, 1);
    HTT_RX_RING_CFG_ENABLED_MCAST_SET(*msg_word, 1);
    /*
     * Must change to dynamic enable at run time
     * rather than at compile time
     */
    HTT_RX_RING_CFG_ENABLED_CTRL_SET(*msg_word, 0);
    HTT_RX_RING_CFG_ENABLED_MGMT_SET(*msg_word, 0);
    HTT_RX_RING_CFG_ENABLED_NULL_SET(*msg_word, 0);
    HTT_RX_RING_CFG_ENABLED_PHY_SET(*msg_word, 0);

    msg_word++;
    *msg_word = 0;
    HTT_RX_RING_CFG_OFFSET_802_11_HDR_SET(*msg_word, 0);
    HTT_RX_RING_CFG_OFFSET_MSDU_PAYLD_SET(*msg_word, 0);

    msg_word++;
    *msg_word = 0;
    HTT_RX_RING_CFG_OFFSET_PPDU_START_SET(*msg_word, 0);
    HTT_RX_RING_CFG_OFFSET_PPDU_END_SET(*msg_word, 0);

    msg_word++;
    *msg_word = 0;
    HTT_RX_RING_CFG_OFFSET_MPDU_START_SET(*msg_word, 0);
    HTT_RX_RING_CFG_OFFSET_MPDU_END_SET(*msg_word, 0);

    msg_word++;
    *msg_word = 0;
    HTT_RX_RING_CFG_OFFSET_MSDU_START_SET(*msg_word, 0);
    HTT_RX_RING_CFG_OFFSET_MSDU_END_SET(*msg_word, 0);

    msg_word++;
    *msg_word = 0;
    HTT_RX_RING_CFG_OFFSET_RX_ATTN_SET(*msg_word, 0);
    HTT_RX_RING_CFG_OFFSET_FRAG_INFO_SET(*msg_word, 0);

    SET_HTC_PACKET_INFO_TX(
        &pkt->htc_pkt,
        htt_h2t_send_complete_free_netbuf,
        adf_nbuf_data(msg),
        adf_nbuf_len(msg),
        pdev->htc_endpoint,
        1); /* tag - not relevant here */

    SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);

#ifdef ATH_11AC_TXCOMPACT
    if (HTCSendPkt(pdev->htc_pdev, &pkt->htc_pkt) == A_OK) {
        htt_htc_misc_pkt_list_add(pdev, pkt);
    }
#else
    HTCSendPkt(pdev->htc_pdev, &pkt->htc_pkt);
#endif

    if (!pdev->cfg.default_tx_comp_req) {
        ol_tx_target_credit_update(pdev->txrx_pdev, -1);
    }

    return A_OK;
}
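/*
 * Illustrative summary (not part of the driver build) of the word layout
 * that htt_h2t_rx_ring_cfg_msg_hl() above fills in for a single ring, as
 * read off the SET macros:
 *
 *   word 0:    msg type | number of rings (1)
 *   word 1:    rx ring alloc-index shadow register physical address
 *   word 2:    rx ring base physical address
 *   word 3:    ring length | per-buffer size (HTT_RX_BUF_SIZE)
 *   word 4:    per-field "enabled" flags (only MSDU payload, ucast data,
 *              and mcast data are enabled in this high-latency config)
 *   words 5-9: per-field offsets, all 0 here
 */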
A_STATUS
htt_h2t_ver_req_msg(struct htt_pdev_t *pdev)
{
    struct htt_htc_pkt *pkt;
    adf_nbuf_t msg;
    u_int32_t *msg_word;
    u_int32_t msg_size;
    u_int32_t max_tx_group;

    pkt = htt_htc_pkt_alloc(pdev);
    if (!pkt) {
        return A_ERROR; /* failure */
    }

    max_tx_group = OL_TX_GET_MAX_GROUPS(pdev->txrx_pdev);
    if (max_tx_group) {
        msg_size = HTT_VER_REQ_BYTES +
            sizeof(struct htt_option_tlv_mac_tx_queue_groups_t);
    } else {
        msg_size = HTT_VER_REQ_BYTES;
    }

    /* show that this is not a tx frame download (not required, but helpful) */
    pkt->msdu_id = HTT_TX_COMPL_INV_MSDU_ID;
    pkt->pdev_ctxt = NULL; /* not used during send-done callback */

    msg = adf_nbuf_alloc(
        pdev->osdev,
        HTT_MSG_BUF_SIZE(msg_size),
        /* reserve room for the HTC header */
        HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
    if (!msg) {
        htt_htc_pkt_free(pdev, pkt);
        return A_ERROR; /* failure */
    }

    /*
     * Set the length of the message.
     * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
     * separately during the below call to adf_nbuf_push_head.
     * The contribution from the HTC header is added separately inside HTC.
     */
    adf_nbuf_put_tail(msg, msg_size);

    /* fill in the message contents */
    msg_word = (u_int32_t *) adf_nbuf_data(msg);

    /* rewind beyond alignment pad to get to the HTC header reserved area */
    adf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);

    *msg_word = 0;
    HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ);

    if (max_tx_group) {
        *(msg_word + 1) = 0;

        /* fill group info */
        HTT_OPTION_TLV_TAG_SET(*(msg_word + 1),
                               HTT_OPTION_TLV_TAG_MAX_TX_QUEUE_GROUPS);
        HTT_OPTION_TLV_LENGTH_SET(*(msg_word + 1),
            (sizeof(struct htt_option_tlv_mac_tx_queue_groups_t) /
             sizeof(u_int32_t)));
        HTT_OPTION_TLV_VALUE0_SET(*(msg_word + 1), max_tx_group);
    }

    SET_HTC_PACKET_INFO_TX(
        &pkt->htc_pkt,
        htt_h2t_send_complete_free_netbuf,
        adf_nbuf_data(msg),
        adf_nbuf_len(msg),
        pdev->htc_endpoint,
        1); /* tag - not relevant here */

    SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);

#ifdef ATH_11AC_TXCOMPACT
    if (HTCSendPkt(pdev->htc_pdev, &pkt->htc_pkt) == A_OK) {
        htt_htc_misc_pkt_list_add(pdev, pkt);
    }
#else
    HTCSendPkt(pdev->htc_pdev, &pkt->htc_pkt);
#endif

    if ((pdev->cfg.is_high_latency) &&
        (!pdev->cfg.default_tx_comp_req)) {
        ol_tx_target_credit_update(pdev->txrx_pdev, -1);
    }

    return A_OK;
}
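/*
 * Illustrative sketch (not part of the driver build) of the optional TLV
 * appended by htt_h2t_ver_req_msg() above when tx queue groups are in
 * use: the word following the message header carries the tag
 * HTT_OPTION_TLV_TAG_MAX_TX_QUEUE_GROUPS, the TLV length in 32-bit
 * words, and the group count as value 0.  The function name is
 * hypothetical.
 */
#if 0
static void fill_max_tx_group_tlv_sketch(u_int32_t *tlv_word,
                                         u_int32_t max_tx_group)
{
    *tlv_word = 0;
    HTT_OPTION_TLV_TAG_SET(*tlv_word, HTT_OPTION_TLV_TAG_MAX_TX_QUEUE_GROUPS);
    HTT_OPTION_TLV_LENGTH_SET(*tlv_word,
        sizeof(struct htt_option_tlv_mac_tx_queue_groups_t) /
        sizeof(u_int32_t));
    HTT_OPTION_TLV_VALUE0_SET(*tlv_word, max_tx_group);
}
#endif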