A_STATUS htt_h2t_sync_msg(struct htt_pdev_t *pdev, u_int8_t sync_cnt) { struct htt_htc_pkt *pkt; adf_nbuf_t msg; u_int32_t *msg_word; pkt = htt_htc_pkt_alloc(pdev); if (!pkt) { return A_NO_MEMORY; } /* show that this is not a tx frame download (not required, but helpful) */ pkt->msdu_id = HTT_TX_COMPL_INV_MSDU_ID; pkt->pdev_ctxt = NULL; /* not used during send-done callback */ msg = adf_nbuf_alloc( pdev->osdev, HTT_MSG_BUF_SIZE(HTT_H2T_SYNC_MSG_SZ), /* reserve room for HTC header */ HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, FALSE); if (!msg) { htt_htc_pkt_free(pdev, pkt); return A_NO_MEMORY; } /* set the length of the message */ adf_nbuf_put_tail(msg, HTT_H2T_SYNC_MSG_SZ); /* fill in the message contents */ msg_word = (u_int32_t *) adf_nbuf_data(msg); /* rewind beyond alignment pad to get to the HTC header reserved area */ adf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING); *msg_word = 0; HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_SYNC); HTT_H2T_SYNC_COUNT_SET(*msg_word, sync_cnt); SET_HTC_PACKET_INFO_TX( &pkt->htc_pkt, htt_h2t_send_complete_free_netbuf, adf_nbuf_data(msg), adf_nbuf_len(msg), pdev->htc_endpoint, HTC_TX_PACKET_TAG_RUNTIME_PUT); SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg); #ifdef ATH_11AC_TXCOMPACT if (HTCSendPkt(pdev->htc_pdev, &pkt->htc_pkt) == A_OK) htt_htc_misc_pkt_list_add(pdev, pkt); #else HTCSendPkt(pdev->htc_pdev, &pkt->htc_pkt); #endif if ((pdev->cfg.is_high_latency) && (!pdev->cfg.default_tx_comp_req)) { ol_tx_target_credit_update(pdev->txrx_pdev, -1); } return A_OK; }
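/*
 * ath_alloc_skb_tx99() - allocate a network buffer for TX99 test frames.
 * Thin wrapper around adf_nbuf_alloc(); no headroom is reserved and the
 * caller supplies the alignment.
 */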
static adf_nbuf_t ath_alloc_skb_tx99(adf_os_handle_t os_hdl, u_int size,
                                     u_int align)
{
    adf_nbuf_t skb;

    skb = adf_nbuf_alloc(os_hdl, size, 0, align, FALSE);
    return skb;
}
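/*
 * BuildHTCTxCtrlPacket() - allocate an HTC_PACKET plus a control netbuf of
 * HTC_CONTROL_BUFFER_SIZE bytes (20 bytes of headroom, 4-byte aligned) and
 * attach the netbuf as the packet's buffer context.  Returns NULL if either
 * allocation fails.
 */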
static HTC_PACKET *BuildHTCTxCtrlPacket(adf_os_device_t osdev) { HTC_PACKET *pPacket = NULL; adf_nbuf_t netbuf; do { pPacket = (HTC_PACKET *)A_MALLOC(sizeof(HTC_PACKET)); if (NULL == pPacket) { break; } A_MEMZERO(pPacket,sizeof(HTC_PACKET)); #ifdef ATH_USE_NCNB netbuf = adf_nbuf_alloc_ncnb(osdev, HTC_CONTROL_BUFFER_SIZE, 20, 4, TRUE); #else netbuf = adf_nbuf_alloc(osdev, HTC_CONTROL_BUFFER_SIZE, 20, 4, TRUE); #endif if (NULL == netbuf) { A_FREE(pPacket); pPacket = NULL; adf_os_print("%s: nbuf alloc failed\n",__func__); break; } AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("alloc ctrl netbuf :0x%p \n", netbuf)); SET_HTC_PACKET_NET_BUF_CONTEXT(pPacket, netbuf); } while (FALSE); return pPacket; }
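/*
 * HTCStart() - final step of HTC bring-up: configure the HIF pipes, run the
 * host credit distribution callback when HTC_HOST_CREDIT_DIST is enabled,
 * then build and send the HTC_SETUP_COMPLETE message to the target on
 * endpoint 0.
 */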
A_STATUS HTCStart(HTC_HANDLE HTCHandle) { adf_nbuf_t netbuf; A_STATUS status = A_OK; HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle); HTC_SETUP_COMPLETE_MSG *SetupComp; AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("HTCStart Enter\n")); do { HTCConfigTargetHIFPipe(HTCHandle); #ifdef HTC_HOST_CREDIT_DIST adf_os_assert(target->InitCredits != NULL); adf_os_assert(target->EpCreditDistributionListHead != NULL); adf_os_assert(target->EpCreditDistributionListHead->pNext != NULL); /* call init credits callback to do the distribution , * NOTE: the first entry in the distribution list is ENDPOINT_0, so * we pass the start of the list after this one. */ target->InitCredits(target->pCredDistContext, target->EpCreditDistributionListHead->pNext, target->TargetCredits); #if 1 adf_os_timer_init(target->os_handle, &target->host_htc_credit_debug_timer, host_htc_credit_show, target); adf_os_timer_start(&target->host_htc_credit_debug_timer, 10000); #endif #endif /* allocate a buffer to send */ //netbuf = adf_nbuf_alloc(anet, sizeof(HTC_SETUP_COMPLETE_MSG), HTC_HDR_LENGTH, 0); netbuf = adf_nbuf_alloc(50, HTC_HDR_LENGTH, 0); if (netbuf == ADF_NBUF_NULL) { status = A_NO_MEMORY; break; } /* assemble setup complete message */ SetupComp = (HTC_SETUP_COMPLETE_MSG *)adf_nbuf_put_tail(netbuf, sizeof(HTC_SETUP_COMPLETE_MSG)); SetupComp->MessageID = adf_os_htons(HTC_MSG_SETUP_COMPLETE_ID); /* assemble the HTC header and send to HIF layer */ status = HTCIssueSend(target, ADF_NBUF_NULL, netbuf, 0, sizeof(HTC_SETUP_COMPLETE_MSG), ENDPOINT0); if (A_FAILED(status)) { break; } } while (FALSE); AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("HTCStart Exit\n")); return status; }
/* post recv urbs for a given pipe */ static void usb_hif_post_recv_transfers(HIF_USB_PIPE *recv_pipe, int buffer_length) { HIF_URB_CONTEXT *urb_context; a_uint8_t *data; a_uint32_t len; struct urb *urb; int usb_status; AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("+%s\n", __func__)); while (1) { urb_context = usb_hif_alloc_urb_from_pipe(recv_pipe); if (NULL == urb_context) break; urb_context->buf = adf_nbuf_alloc(NULL, buffer_length, 0, 4, FALSE); if (NULL == urb_context->buf) { usb_hif_cleanup_recv_urb(urb_context); break; } adf_nbuf_peek_header(urb_context->buf, &data, &len); urb = urb_context->urb; usb_fill_bulk_urb(urb, recv_pipe->device->udev, recv_pipe->usb_pipe_handle, data, buffer_length, usb_hif_usb_recv_complete, urb_context); AR_DEBUG_PRINTF(USB_HIF_DEBUG_BULK_IN, ( "athusb bulk recv submit:%d, 0x%X (ep:0x%2.2X), %d bytes, buf:0x%p\n", recv_pipe->logical_pipe_num, recv_pipe->usb_pipe_handle, recv_pipe->ep_address, buffer_length, urb_context->buf)); usb_hif_enqueue_pending_transfer(recv_pipe, urb_context); usb_status = usb_submit_urb(urb, GFP_ATOMIC); if (usb_status) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("athusb : usb bulk recv failed %d\n", usb_status)); usb_hif_remove_pending_transfer(urb_context); usb_hif_cleanup_recv_urb(urb_context); break; } } AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("-%s\n", __func__)); }
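/*
 * _buf_pool_dynamic_alloc_buf_align() - allocate a buffer of the size
 * configured for the given pool ID, with caller-supplied reserve and
 * alignment (this path uses the three-argument adf_nbuf_alloc() variant).
 */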
LOCAL adf_nbuf_t _buf_pool_dynamic_alloc_buf_align(pool_handle_t handle,
                                                   BUF_POOL_ID poolId,
                                                   int reserve, int align)
{
    BUF_POOL_DYNAMIC_CONTEXT *ctx = (BUF_POOL_DYNAMIC_CONTEXT *)handle;
    POOL_CONFIG *poolConf = &ctx->poolConf[poolId];

    return adf_nbuf_alloc(poolConf->nSize, reserve, align);
}
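/*
 * htt_h2t_ipa_uc_get_stats() - build and send an HTT WDI_IPA_OP_REQUEST
 * message with the DBG_STATS opcode to request IPA micro-controller
 * statistics from the target.
 */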
int htt_h2t_ipa_uc_get_stats(struct htt_pdev_t *pdev) { struct htt_htc_pkt *pkt = NULL; adf_nbuf_t msg = NULL; u_int32_t *msg_word; /* New buffer alloc send */ pkt = htt_htc_pkt_alloc(pdev); if (!pkt) { return A_NO_MEMORY; } /* show that this is not a tx frame download (not required, * but helpful) */ pkt->msdu_id = HTT_TX_COMPL_INV_MSDU_ID; pkt->pdev_ctxt = NULL; /* not used during send-done callback */ msg = adf_nbuf_alloc( pdev->osdev, HTT_MSG_BUF_SIZE(HTT_WDI_IPA_OP_REQUEST_SZ), /* reserve room for HTC header */ HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, FALSE); if (!msg) { htt_htc_pkt_free(pdev, pkt); return A_NO_MEMORY; } /* set the length of the message */ adf_nbuf_put_tail(msg, HTT_WDI_IPA_OP_REQUEST_SZ); /* rewind beyond alignment pad to get to the HTC header reserved area */ adf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING); /* fill in the message contents */ msg_word = (u_int32_t *) adf_nbuf_data(msg); *msg_word = 0; HTT_WDI_IPA_OP_REQUEST_OP_CODE_SET(*msg_word, HTT_WDI_IPA_OPCODE_DBG_STATS); HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_WDI_IPA_OP_REQ); SET_HTC_PACKET_INFO_TX( &pkt->htc_pkt, htt_h2t_send_complete_free_netbuf, adf_nbuf_data(msg), adf_nbuf_len(msg), pdev->htc_endpoint, 1); /* tag - not relevant here */ SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg); #ifdef ATH_11AC_TXCOMPACT if (HTCSendPkt(pdev->htc_pdev, &pkt->htc_pkt) == A_OK) htt_htc_misc_pkt_list_add(pdev, pkt); #else HTCSendPkt(pdev->htc_pdev, &pkt->htc_pkt); #endif return A_OK; }
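/*
 * ConfigPipeCredits() - send an HTC_CONFIG_PIPE message on endpoint 0 that
 * assigns a credit count to the given pipe, then spin-wait for the target's
 * config-pipe response status.
 */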
A_STATUS ConfigPipeCredits(HTC_TARGET *target, a_uint8_t pipeID, a_uint8_t credits) { A_STATUS status = A_OK; adf_nbuf_t netbuf; HTC_CONFIG_PIPE_MSG *CfgPipeCdt; do { /* allocate a buffer to send */ //netbuf = adf_nbuf_alloc(anet, sizeof(HTC_CONFIG_PIPE_MSG), HTC_HDR_LENGTH, 0); netbuf = adf_nbuf_alloc(50, HTC_HDR_LENGTH, 0); if (netbuf == ADF_NBUF_NULL) { status = A_NO_MEMORY; break; } /* assemble config pipe message */ CfgPipeCdt = (HTC_CONFIG_PIPE_MSG *)adf_nbuf_put_tail(netbuf, sizeof(HTC_CONFIG_PIPE_MSG)); CfgPipeCdt->MessageID = adf_os_htons(HTC_MSG_CONFIG_PIPE_ID); CfgPipeCdt->PipeID = pipeID; CfgPipeCdt->CreditCount = credits; /* assemble the HTC header and send to HIF layer */ #ifndef MAGPIE_SINGLE_CPU_CASE htc_spin_prep(&target->spin); #endif status = HTCIssueSend(target, ADF_NBUF_NULL, netbuf, 0, sizeof(HTC_CONFIG_PIPE_MSG), ENDPOINT0); if (A_FAILED(status)) { break; } htc_spin( &target->spin ); if (target->cfg_pipe_rsp_stat == HTC_CONFIGPIPE_SUCCESS) { status = A_OK; } else { status = A_ERROR; } } while(FALSE); return status; }
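/*
 * RxSgToSingleNetbuf() - coalesce the queued receive scatter-gather
 * fragments into one newly allocated netbuf of ExpRxSgTotalLen bytes,
 * freeing each fragment as it is copied.  On failure the queue is drained
 * and NULL is returned; the SG state is reset in either case.
 */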
adf_nbuf_t RxSgToSingleNetbuf(HTC_TARGET *target)
{
    adf_nbuf_t skb;
    a_uint8_t *anbdata;
    a_uint8_t *anbdata_new;
    a_uint32_t anblen;
    adf_nbuf_t new_skb = NULL;
    a_uint32_t sg_queue_len;
    adf_nbuf_queue_t *rx_sg_queue = &target->RxSgQueue;

    sg_queue_len = adf_nbuf_queue_len(rx_sg_queue);

    if (sg_queue_len <= 1) {
        AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
            ("RxSgToSingleNetbuf: invalid sg queue len %u\n", sg_queue_len));
        goto _failed;
    }

    new_skb = adf_nbuf_alloc(target->ExpRxSgTotalLen, 0, 4, FALSE);
    if (new_skb == NULL) {
        AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
            ("RxSgToSingleNetbuf: can't allocate %u size netbuf\n",
            target->ExpRxSgTotalLen));
        goto _failed;
    }

    adf_nbuf_peek_header(new_skb, &anbdata_new, &anblen);

    skb = adf_nbuf_queue_remove(rx_sg_queue);
    do {
        adf_nbuf_peek_header(skb, &anbdata, &anblen);
        adf_os_mem_copy(anbdata_new, anbdata, adf_nbuf_len(skb));
        adf_nbuf_put_tail(new_skb, adf_nbuf_len(skb));
        anbdata_new += adf_nbuf_len(skb);
        adf_nbuf_free(skb);
        skb = adf_nbuf_queue_remove(rx_sg_queue);
    } while (skb != NULL);

    RESET_RX_SG_CONFIG(target);
    return new_skb;

_failed:
    while ((skb = adf_nbuf_queue_remove(rx_sg_queue)) != NULL) {
        adf_nbuf_free(skb);
    }
    RESET_RX_SG_CONFIG(target);
    return NULL;
}
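/*
 * pci_dma_recv_refill() - replenish one receive descriptor: allocate a
 * PCI_NBUF_ALIGNMENT-aligned buffer (asserting if the allocation fails),
 * link it to the software descriptor and mark the descriptor ready for DMA.
 */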
a_status_t pci_dma_recv_refill(adf_os_device_t osdev, zdma_swdesc_t *swdesc,
                               a_uint32_t size)
{
    adf_nbuf_t buf;

    buf = adf_nbuf_alloc(osdev, size, 0, PCI_NBUF_ALIGNMENT);
    if (!buf)
        adf_os_assert(0);

    pci_dma_link_buf(osdev, swdesc, buf);
    pci_zdma_mark_rdy(swdesc, (ZM_FS_BIT | ZM_LS_BIT));

    return A_STATUS_OK;
}
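/*
 * fwd_send_next() - send the next firmware download chunk over HIF.  The
 * fwd_cmd_t header carries the chunk length, offset and a more-data flag;
 * the first chunk is prefixed with the target upload address and the last
 * chunk is suffixed with the execution jump address (fw_exec_addr).
 */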
a_status_t fwd_send_next(fwd_softc_t *sc) { a_uint32_t len, alloclen; adf_nbuf_t nbuf; fwd_cmd_t *h; a_uint8_t *pld; a_uint32_t target_jmp_loc; len = fwd_chunk_len(sc); alloclen = sizeof(fwd_cmd_t) + len; if (fwd_is_first(sc) || fwd_is_last(sc)) alloclen += 4; nbuf = adf_nbuf_alloc(NULL,alloclen + 20, 20, 0); if (!nbuf) { adf_os_print("FWD: packet allocation failed. \n"); return A_STATUS_ENOMEM; } h = (fwd_cmd_t *)adf_nbuf_put_tail(nbuf, alloclen); h->more_data = adf_os_htons(!fwd_is_last(sc)); h->len = adf_os_htons(len); h->offset = adf_os_htonl(sc->offset); pld = (a_uint8_t *)(h + 1); if (fwd_is_first(sc)) { *(a_uint32_t *)pld = adf_os_htonl(sc->target_upload_addr); pld += 4; } adf_os_mem_copy(pld, &sc->image[sc->offset], len); if(h->more_data == 0) { target_jmp_loc = adf_os_htonl(fw_exec_addr); adf_os_mem_copy(pld+len, (a_uint8_t *)&target_jmp_loc, 4); } HIFSend(sc->hif_handle, sc->tx_pipe, NULL, nbuf); /*adf_os_timer_start(&sc->tmr, FWD_TIMEOUT_MSECS);*/ return A_STATUS_OK; }
/** * @brief Initialize the RX ring * * @param osdev * @param dma_q * @param num_desc * @param buf_size */ void pci_dma_init_rx(adf_os_device_t osdev, pci_dma_softc_t *dma_q, a_uint32_t num_desc, adf_os_size_t buf_size) { a_uint32_t i; zdma_swdesc_t *swdesc; adf_nbuf_t buf; pci_dma_alloc_swdesc(osdev, dma_q, num_desc); swdesc = dma_q->sw_ring; for (i = 0; i < num_desc ; i++) { adf_nbuf_dmamap_create(osdev, &swdesc[i].nbuf_map); buf = adf_nbuf_alloc(osdev, buf_size, 0, PCI_NBUF_ALIGNMENT); adf_os_assert(buf); pci_dma_recv_init(osdev, dma_q, buf, i); } swdesc = &dma_q->sw_ring[dma_q->tail]; }
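/*
 * wmi_buf_alloc() - allocate a WMI message buffer with WMI_MIN_HEAD_ROOM
 * bytes of headroom, rounding the allocation up to a 4-byte multiple and
 * rejecting requests larger than the handle's max_msg_len.  The payload is
 * zeroed and the packet length is set to the caller's len.
 */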
wmi_buf_t wmi_buf_alloc(wmi_unified_t wmi_handle, u_int16_t len) { wmi_buf_t wmi_buf; if (roundup(len + WMI_MIN_HEAD_ROOM, 4) > wmi_handle->max_msg_len) { VOS_ASSERT(0); return NULL; } wmi_buf = adf_nbuf_alloc(NULL, roundup(len + WMI_MIN_HEAD_ROOM, 4), WMI_MIN_HEAD_ROOM, 4, FALSE); if (!wmi_buf) return NULL; /* Clear the wmi buffer */ OS_MEMZERO(adf_nbuf_data(wmi_buf), len); /* * Set the length of the buffer to match the allocation size. */ adf_nbuf_set_pktlen(wmi_buf, len); return wmi_buf; }
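/*
 * Illustrative sketch only (not part of the original source): a typical
 * caller allocates a WMI buffer, fills the payload in place and then hands
 * it to the driver's WMI send path (not shown here).  The 16-byte payload
 * length, the command field and the function name below are made up for
 * the example.
 */
static wmi_buf_t wmi_buf_alloc_example(wmi_unified_t wmi_handle)
{
    wmi_buf_t buf;
    u_int8_t *payload;

    buf = wmi_buf_alloc(wmi_handle, 16);
    if (!buf)
        return NULL;

    /* wmi_buf_alloc() already zeroed the payload; fill command fields */
    payload = (u_int8_t *)adf_nbuf_data(buf);
    payload[0] = 0x1;   /* hypothetical command field */

    return buf;         /* caller passes this to the WMI send routine */
}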
static A_STATUS usb_hif_alloc_pipe_resources(HIF_USB_PIPE *pipe, int urb_cnt) { A_STATUS status = A_OK; int i; HIF_URB_CONTEXT *urb_context; DL_LIST_INIT(&pipe->urb_list_head); DL_LIST_INIT(&pipe->urb_pending_list); for (i = 0; i < urb_cnt; i++) { urb_context = adf_os_mem_alloc(NULL, sizeof(*urb_context)); if (NULL == urb_context) { status = A_NO_MEMORY; AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("urb_context is null\n")); break; } adf_os_mem_zero(urb_context, sizeof(HIF_URB_CONTEXT)); urb_context->pipe = pipe; urb_context->urb = usb_alloc_urb(0, GFP_KERNEL); if (NULL == urb_context->urb) { status = A_NO_MEMORY; adf_os_mem_free(urb_context); AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("urb_context->urb is null\n")); break; } /* note we are only allocate the urb contexts here, the actual * URB is * allocated from the kernel as needed to do a transaction */ pipe->urb_alloc++; if (htc_bundle_send) { /* In tx bundle mode, only pre-allocate bundle buffers * for data * pipes */ if (pipe->logical_pipe_num >= HIF_TX_DATA_LP_PIPE && pipe->logical_pipe_num <= HIF_TX_DATA_HP_PIPE) { urb_context->buf = adf_nbuf_alloc(NULL, HIF_USB_TX_BUNDLE_BUFFER_SIZE, 0, 4, FALSE); if (NULL == urb_context->buf) { status = A_NO_MEMORY; usb_free_urb(urb_context->urb); urb_context->urb = NULL; adf_os_mem_free(urb_context); AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ( "athusb: alloc send bundle buffer %d-byte failed\n", HIF_USB_TX_BUNDLE_BUFFER_SIZE)); break; } } skb_queue_head_init(&urb_context->comp_queue); } usb_hif_free_urb_to_pipe(pipe, urb_context); } AR_DEBUG_PRINTF(USB_HIF_DEBUG_ENUM, ( "athusb: alloc resources lpipe:%d hpipe:0x%X urbs:%d\n", pipe->logical_pipe_num, pipe->usb_pipe_handle, pipe->urb_alloc)); return status; }
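/*
 * htt_h2t_ver_req_msg() - build and send the HTT VERSION_REQ message,
 * appending a MAX_TX_QUEUE_GROUPS option TLV when the txrx pdev reports a
 * non-zero group count.  Follows the same alloc/fill/send pattern as
 * htt_h2t_sync_msg() above.
 */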
A_STATUS htt_h2t_ver_req_msg(struct htt_pdev_t *pdev) { struct htt_htc_pkt *pkt; adf_nbuf_t msg; u_int32_t *msg_word; u_int32_t msg_size; u_int32_t max_tx_group; pkt = htt_htc_pkt_alloc(pdev); if (!pkt) { return A_ERROR; /* failure */ } max_tx_group = OL_TX_GET_MAX_GROUPS(pdev->txrx_pdev); if (max_tx_group) { msg_size = HTT_VER_REQ_BYTES + sizeof(struct htt_option_tlv_mac_tx_queue_groups_t); } else { msg_size = HTT_VER_REQ_BYTES; } /* show that this is not a tx frame download (not required, but helpful) */ pkt->msdu_id = HTT_TX_COMPL_INV_MSDU_ID; pkt->pdev_ctxt = NULL; /* not used during send-done callback */ msg = adf_nbuf_alloc( pdev->osdev, HTT_MSG_BUF_SIZE(msg_size), /* reserve room for the HTC header */ HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE); if (!msg) { htt_htc_pkt_free(pdev, pkt); return A_ERROR; /* failure */ } /* * Set the length of the message. * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added * separately during the below call to adf_nbuf_push_head. * The contribution from the HTC header is added separately inside HTC. */ adf_nbuf_put_tail(msg, msg_size); /* fill in the message contents */ msg_word = (u_int32_t *) adf_nbuf_data(msg); /* rewind beyond alignment pad to get to the HTC header reserved area */ adf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING); *msg_word = 0; HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ); if (max_tx_group) { *(msg_word + 1) = 0; /* Fill Group Info */ HTT_OPTION_TLV_TAG_SET(*(msg_word+1), HTT_OPTION_TLV_TAG_MAX_TX_QUEUE_GROUPS); HTT_OPTION_TLV_LENGTH_SET(*(msg_word+1), (sizeof(struct htt_option_tlv_mac_tx_queue_groups_t)/ sizeof(u_int32_t))); HTT_OPTION_TLV_VALUE0_SET(*(msg_word+1), max_tx_group); } SET_HTC_PACKET_INFO_TX( &pkt->htc_pkt, htt_h2t_send_complete_free_netbuf, adf_nbuf_data(msg), adf_nbuf_len(msg), pdev->htc_endpoint, 1); /* tag - not relevant here */ SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg); #ifdef ATH_11AC_TXCOMPACT if (HTCSendPkt(pdev->htc_pdev, &pkt->htc_pkt) == A_OK) { htt_htc_misc_pkt_list_add(pdev, pkt); } #else HTCSendPkt(pdev->htc_pdev, &pkt->htc_pkt); #endif if ((pdev->cfg.is_high_latency) && (!pdev->cfg.default_tx_comp_req)) { ol_tx_target_credit_update(pdev->txrx_pdev, -1); } return A_OK; }
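/*
 * htt_h2t_rx_ring_cfg_msg_ll() - build and send the RX_RING_CFG message for
 * low-latency targets: programs the rx ring shadow index address, base
 * address, length and buffer size, the per-field enable bits (the control,
 * management, null and PHY data fields depend on whether packet logging is
 * enabled) and the rx descriptor field offsets.
 */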
A_STATUS htt_h2t_rx_ring_cfg_msg_ll(struct htt_pdev_t *pdev) { struct htt_htc_pkt *pkt; adf_nbuf_t msg; u_int32_t *msg_word; int enable_ctrl_data, enable_mgmt_data, enable_null_data, enable_phy_data, enable_hdr, enable_ppdu_start, enable_ppdu_end; pkt = htt_htc_pkt_alloc(pdev); if (!pkt) { return A_ERROR; /* failure */ } /* show that this is not a tx frame download (not required, but helpful) */ pkt->msdu_id = HTT_TX_COMPL_INV_MSDU_ID; pkt->pdev_ctxt = NULL; /* not used during send-done callback */ msg = adf_nbuf_alloc( pdev->osdev, HTT_MSG_BUF_SIZE(HTT_RX_RING_CFG_BYTES(1)), /* reserve room for the HTC header */ HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE); if (!msg) { htt_htc_pkt_free(pdev, pkt); return A_ERROR; /* failure */ } /* * Set the length of the message. * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added * separately during the below call to adf_nbuf_push_head. * The contribution from the HTC header is added separately inside HTC. */ adf_nbuf_put_tail(msg, HTT_RX_RING_CFG_BYTES(1)); /* fill in the message contents */ msg_word = (u_int32_t *) adf_nbuf_data(msg); /* rewind beyond alignment pad to get to the HTC header reserved area */ adf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING); *msg_word = 0; HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_RING_CFG); HTT_RX_RING_CFG_NUM_RINGS_SET(*msg_word, 1); msg_word++; *msg_word = 0; HTT_RX_RING_CFG_IDX_SHADOW_REG_PADDR_SET( *msg_word, pdev->rx_ring.alloc_idx.paddr); msg_word++; *msg_word = 0; HTT_RX_RING_CFG_BASE_PADDR_SET(*msg_word, pdev->rx_ring.base_paddr); msg_word++; *msg_word = 0; HTT_RX_RING_CFG_LEN_SET(*msg_word, pdev->rx_ring.size); HTT_RX_RING_CFG_BUF_SZ_SET(*msg_word, HTT_RX_BUF_SIZE); /* FIX THIS: if the FW creates a complete translated rx descriptor, then the MAC DMA of the HW rx descriptor should be disabled. */ msg_word++; *msg_word = 0; #ifndef REMOVE_PKT_LOG if (ol_cfg_is_packet_log_enabled(pdev->ctrl_pdev)) { enable_ctrl_data = 1; enable_mgmt_data = 1; enable_null_data = 1; enable_phy_data = 1; enable_hdr = 1; enable_ppdu_start= 1; enable_ppdu_end = 1; /* Disable ASPM when pkt log is enabled */ adf_os_print("Pkt log is enabled\n"); htt_htc_disable_aspm(); } else { adf_os_print("Pkt log is disabled\n"); enable_ctrl_data = 0; enable_mgmt_data = 0; enable_null_data = 0; enable_phy_data = 0; enable_hdr = 0; enable_ppdu_start= 0; enable_ppdu_end = 0; } #else enable_ctrl_data = 0; enable_mgmt_data = 0; enable_null_data = 0; enable_phy_data = 0; enable_hdr = 0; enable_ppdu_start= 0; enable_ppdu_end = 0; #endif HTT_RX_RING_CFG_ENABLED_802_11_HDR_SET(*msg_word, enable_hdr); HTT_RX_RING_CFG_ENABLED_MSDU_PAYLD_SET(*msg_word, 1); HTT_RX_RING_CFG_ENABLED_PPDU_START_SET(*msg_word, enable_ppdu_start); HTT_RX_RING_CFG_ENABLED_PPDU_END_SET(*msg_word, enable_ppdu_end); HTT_RX_RING_CFG_ENABLED_MPDU_START_SET(*msg_word, 1); HTT_RX_RING_CFG_ENABLED_MPDU_END_SET(*msg_word, 1); HTT_RX_RING_CFG_ENABLED_MSDU_START_SET(*msg_word, 1); HTT_RX_RING_CFG_ENABLED_MSDU_END_SET(*msg_word, 1); HTT_RX_RING_CFG_ENABLED_RX_ATTN_SET(*msg_word, 1); HTT_RX_RING_CFG_ENABLED_FRAG_INFO_SET(*msg_word, 1); /* always present? 
*/ HTT_RX_RING_CFG_ENABLED_UCAST_SET(*msg_word, 1); HTT_RX_RING_CFG_ENABLED_MCAST_SET(*msg_word, 1); /* Must change to dynamic enable at run time * rather than at compile time */ HTT_RX_RING_CFG_ENABLED_CTRL_SET(*msg_word, enable_ctrl_data); HTT_RX_RING_CFG_ENABLED_MGMT_SET(*msg_word, enable_mgmt_data); HTT_RX_RING_CFG_ENABLED_NULL_SET(*msg_word, enable_null_data); HTT_RX_RING_CFG_ENABLED_PHY_SET(*msg_word, enable_phy_data); HTT_RX_RING_CFG_IDX_INIT_VAL_SET(*msg_word, *pdev->rx_ring.alloc_idx.vaddr); msg_word++; *msg_word = 0; HTT_RX_RING_CFG_OFFSET_802_11_HDR_SET(*msg_word, RX_STD_DESC_HDR_STATUS_OFFSET_DWORD); HTT_RX_RING_CFG_OFFSET_MSDU_PAYLD_SET(*msg_word, HTT_RX_STD_DESC_RESERVATION_DWORD); msg_word++; *msg_word = 0; HTT_RX_RING_CFG_OFFSET_PPDU_START_SET(*msg_word, RX_STD_DESC_PPDU_START_OFFSET_DWORD); HTT_RX_RING_CFG_OFFSET_PPDU_END_SET(*msg_word, RX_STD_DESC_PPDU_END_OFFSET_DWORD); msg_word++; *msg_word = 0; HTT_RX_RING_CFG_OFFSET_MPDU_START_SET(*msg_word, RX_STD_DESC_MPDU_START_OFFSET_DWORD); HTT_RX_RING_CFG_OFFSET_MPDU_END_SET(*msg_word, RX_STD_DESC_MPDU_END_OFFSET_DWORD); msg_word++; *msg_word = 0; HTT_RX_RING_CFG_OFFSET_MSDU_START_SET(*msg_word, RX_STD_DESC_MSDU_START_OFFSET_DWORD); HTT_RX_RING_CFG_OFFSET_MSDU_END_SET(*msg_word, RX_STD_DESC_MSDU_END_OFFSET_DWORD); msg_word++; *msg_word = 0; HTT_RX_RING_CFG_OFFSET_RX_ATTN_SET(*msg_word, RX_STD_DESC_ATTN_OFFSET_DWORD); HTT_RX_RING_CFG_OFFSET_FRAG_INFO_SET(*msg_word, RX_STD_DESC_FRAG_INFO_OFFSET_DWORD); SET_HTC_PACKET_INFO_TX( &pkt->htc_pkt, htt_h2t_send_complete_free_netbuf, adf_nbuf_data(msg), adf_nbuf_len(msg), pdev->htc_endpoint, HTC_TX_PACKET_TAG_RUNTIME_PUT); SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg); #ifdef ATH_11AC_TXCOMPACT if (HTCSendPkt(pdev->htc_pdev, &pkt->htc_pkt) == A_OK) htt_htc_misc_pkt_list_add(pdev, pkt); #else HTCSendPkt(pdev->htc_pdev, &pkt->htc_pkt); #endif return A_OK; }
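/*
 * htt_tx_ipa_uc_attach() - allocate the IPA uC tx resources: the CE write
 * index word, the tx completion ring, the tx buffer pointer pool, and as
 * many DMA-mapped tx buffers as possible, each pre-loaded with the default
 * HTT/IPA offload tx header and its own physical address.  The final buffer
 * count is rounded down so the completion ring size stays a power of two.
 */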
int htt_tx_ipa_uc_attach(struct htt_pdev_t *pdev, unsigned int uc_tx_buf_sz, unsigned int uc_tx_buf_cnt, unsigned int uc_tx_partition_base) { unsigned int tx_buffer_count; unsigned int tx_buffer_count_pwr2; adf_nbuf_t buffer_vaddr; u_int32_t buffer_paddr; u_int32_t *header_ptr; u_int32_t *ring_vaddr; int return_code = 0; uint16_t idx; /* Allocate CE Write Index WORD */ pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr = adf_os_mem_alloc_consistent(pdev->osdev, 4, &pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr, adf_os_get_dma_mem_context( (&pdev->ipa_uc_tx_rsc.tx_ce_idx), memctx)); if (!pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr) { adf_os_print("%s: CE Write Index WORD alloc fail", __func__); return -1; } /* Allocate TX COMP Ring */ pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr = adf_os_mem_alloc_consistent(pdev->osdev, uc_tx_buf_cnt * 4, &pdev->ipa_uc_tx_rsc.tx_comp_base.paddr, adf_os_get_dma_mem_context( (&pdev->ipa_uc_tx_rsc.tx_comp_base), memctx)); if (!pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr) { adf_os_print("%s: TX COMP ring alloc fail", __func__); return_code = -2; goto free_tx_ce_idx; } adf_os_mem_zero(pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr, uc_tx_buf_cnt * 4); /* Allocate TX BUF vAddress Storage */ pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg = (adf_nbuf_t *)adf_os_mem_alloc(pdev->osdev, uc_tx_buf_cnt * sizeof(adf_nbuf_t)); if (!pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg) { adf_os_print("%s: TX BUF POOL vaddr storage alloc fail", __func__); return_code = -3; goto free_tx_comp_base; } adf_os_mem_zero(pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg, uc_tx_buf_cnt * sizeof(adf_nbuf_t)); ring_vaddr = pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr; /* Allocate TX buffers as many as possible */ for (tx_buffer_count = 0; tx_buffer_count < (uc_tx_buf_cnt - 1); tx_buffer_count++) { buffer_vaddr = adf_nbuf_alloc(pdev->osdev, uc_tx_buf_sz, 0, 4, FALSE); if (!buffer_vaddr) { adf_os_print("%s: TX BUF alloc fail, allocated buffer count %d", __func__, tx_buffer_count); break; } /* Init buffer */ adf_os_mem_zero(adf_nbuf_data(buffer_vaddr), uc_tx_buf_sz); header_ptr = (u_int32_t *)adf_nbuf_data(buffer_vaddr); *header_ptr = HTT_IPA_UC_OFFLOAD_TX_HEADER_DEFAULT; header_ptr++; *header_ptr |= ((u_int16_t)uc_tx_partition_base + tx_buffer_count) << 16; adf_nbuf_map(pdev->osdev, buffer_vaddr, ADF_OS_DMA_BIDIRECTIONAL); buffer_paddr = adf_nbuf_get_frag_paddr_lo(buffer_vaddr, 0); header_ptr++; *header_ptr = (u_int32_t)(buffer_paddr + 16); header_ptr++; *header_ptr = 0xFFFFFFFF; /* FRAG Header */ header_ptr++; *header_ptr = buffer_paddr + 32; *ring_vaddr = buffer_paddr; pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[tx_buffer_count] = buffer_vaddr; /* Memory barrier to ensure actual value updated */ ring_vaddr++; } /* * Tx complete ring buffer count should be power of 2. * So, allocated Tx buffer count should be one less than ring buffer size. 
*/ tx_buffer_count_pwr2 = vos_rounddown_pow_of_two(tx_buffer_count + 1) - 1; if (tx_buffer_count > tx_buffer_count_pwr2) { adf_os_print("%s: Allocated Tx buffer count %d is rounded down to %d", __func__, tx_buffer_count, tx_buffer_count_pwr2); /* Free over allocated buffers below power of 2 */ for(idx = tx_buffer_count_pwr2; idx < tx_buffer_count; idx++) { if (pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[idx]) { adf_nbuf_unmap(pdev->osdev, pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[idx], ADF_OS_DMA_FROM_DEVICE); adf_nbuf_free(pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[idx]); } } } if (tx_buffer_count_pwr2 < 0) { adf_os_print("%s: Failed to round down Tx buffer count %d", __func__, tx_buffer_count_pwr2); goto free_tx_comp_base; } pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt = tx_buffer_count_pwr2; return 0; free_tx_comp_base: adf_os_mem_free_consistent(pdev->osdev, ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev) * 4, pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr, pdev->ipa_uc_tx_rsc.tx_comp_base.paddr, adf_os_get_dma_mem_context( (&pdev->ipa_uc_tx_rsc.tx_comp_base), memctx)); free_tx_ce_idx: adf_os_mem_free_consistent(pdev->osdev, 4, pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr, pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr, adf_os_get_dma_mem_context( (&pdev->ipa_uc_tx_rsc.tx_ce_idx), memctx)); return return_code; }
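/*
 * Alternate build of htt_tx_ipa_uc_attach(): same allocation sequence as
 * above, but a tx buffer allocation failure part-way through returns 0
 * immediately, and the allocated count is not rounded down to keep the
 * completion ring size a power of two.
 */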
int htt_tx_ipa_uc_attach(struct htt_pdev_t *pdev, unsigned int uc_tx_buf_sz, unsigned int uc_tx_buf_cnt, unsigned int uc_tx_partition_base) { unsigned int tx_buffer_count; adf_nbuf_t buffer_vaddr; u_int32_t buffer_paddr; u_int32_t *header_ptr; u_int32_t *ring_vaddr; int return_code = 0; /* Allocate CE Write Index WORD */ pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr = adf_os_mem_alloc_consistent(pdev->osdev, 4, &pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr, adf_os_get_dma_mem_context( (&pdev->ipa_uc_tx_rsc.tx_ce_idx), memctx)); if (!pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr) { adf_os_print("%s: CE Write Index WORD alloc fail", __func__); return -1; } /* Allocate TX COMP Ring */ pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr = adf_os_mem_alloc_consistent(pdev->osdev, uc_tx_buf_cnt * 4, &pdev->ipa_uc_tx_rsc.tx_comp_base.paddr, adf_os_get_dma_mem_context( (&pdev->ipa_uc_tx_rsc.tx_comp_base), memctx)); if (!pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr) { adf_os_print("%s: TX COMP ring alloc fail", __func__); return_code = -2; goto free_tx_ce_idx; } adf_os_mem_zero(pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr, uc_tx_buf_cnt * 4); /* Allocate TX BUF vAddress Storage */ pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg = (adf_nbuf_t *)adf_os_mem_alloc(pdev->osdev, uc_tx_buf_cnt * sizeof(adf_nbuf_t)); if (!pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg) { adf_os_print("%s: TX BUF POOL vaddr storage alloc fail", __func__); return_code = -3; goto free_tx_comp_base; } adf_os_mem_zero(pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg, uc_tx_buf_cnt * sizeof(adf_nbuf_t)); ring_vaddr = pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr; /* Allocate TX buffers as many as possible */ for (tx_buffer_count = 0; tx_buffer_count < (uc_tx_buf_cnt - 1); tx_buffer_count++) { buffer_vaddr = adf_nbuf_alloc(pdev->osdev, uc_tx_buf_sz, 0, 4, FALSE); if (!buffer_vaddr) { adf_os_print("%s: TX BUF alloc fail, allocated buffer count %d", __func__, tx_buffer_count); return 0; } /* Init buffer */ adf_os_mem_zero(adf_nbuf_data(buffer_vaddr), uc_tx_buf_sz); header_ptr = (u_int32_t *)adf_nbuf_data(buffer_vaddr); *header_ptr = HTT_IPA_UC_OFFLOAD_TX_HEADER_DEFAULT; header_ptr++; *header_ptr |= ((u_int16_t)uc_tx_partition_base + tx_buffer_count) << 16; adf_nbuf_map(pdev->osdev, buffer_vaddr, ADF_OS_DMA_BIDIRECTIONAL); buffer_paddr = adf_nbuf_get_frag_paddr_lo(buffer_vaddr, 0); header_ptr++; *header_ptr = (u_int32_t)(buffer_paddr + 16); header_ptr++; *header_ptr = 0xFFFFFFFF; /* FRAG Header */ header_ptr++; *header_ptr = buffer_paddr + 32; *ring_vaddr = buffer_paddr; pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[tx_buffer_count] = buffer_vaddr; /* Memory barrier to ensure actual value updated */ ring_vaddr++; } pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt = tx_buffer_count; return 0; free_tx_comp_base: adf_os_mem_free_consistent(pdev->osdev, ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev) * 4, pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr, pdev->ipa_uc_tx_rsc.tx_comp_base.paddr, adf_os_get_dma_mem_context( (&pdev->ipa_uc_tx_rsc.tx_comp_base), memctx)); free_tx_ce_idx: adf_os_mem_free_consistent(pdev->osdev, 4, pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr, pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr, adf_os_get_dma_mem_context( (&pdev->ipa_uc_tx_rsc.tx_ce_idx), memctx)); return return_code; }
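/*
 * htt_h2t_rx_ring_cfg_msg_hl() - high-latency counterpart of the
 * RX_RING_CFG message: only the MSDU payload and unicast/multicast data
 * enable bits are set, all rx descriptor field offsets are left at zero,
 * and one target tx credit is deducted when default tx completions are
 * disabled.
 */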
A_STATUS htt_h2t_rx_ring_cfg_msg_hl(struct htt_pdev_t *pdev) { struct htt_htc_pkt *pkt; adf_nbuf_t msg; u_int32_t *msg_word; pkt = htt_htc_pkt_alloc(pdev); if (!pkt) { return A_ERROR; /* failure */ } /* show that this is not a tx frame download (not required, but helpful) */ pkt->msdu_id = HTT_TX_COMPL_INV_MSDU_ID; pkt->pdev_ctxt = NULL; /* not used during send-done callback */ msg = adf_nbuf_alloc( pdev->osdev, HTT_MSG_BUF_SIZE(HTT_RX_RING_CFG_BYTES(1)), /* reserve room for the HTC header */ HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE); if (!msg) { htt_htc_pkt_free(pdev, pkt); return A_ERROR; /* failure */ } /* * Set the length of the message. * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added * separately during the below call to adf_nbuf_push_head. * The contribution from the HTC header is added separately inside HTC. */ adf_nbuf_put_tail(msg, HTT_RX_RING_CFG_BYTES(1)); /* fill in the message contents */ msg_word = (u_int32_t *) adf_nbuf_data(msg); /* rewind beyond alignment pad to get to the HTC header reserved area */ adf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING); *msg_word = 0; HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_RING_CFG); HTT_RX_RING_CFG_NUM_RINGS_SET(*msg_word, 1); msg_word++; *msg_word = 0; HTT_RX_RING_CFG_IDX_SHADOW_REG_PADDR_SET( *msg_word, pdev->rx_ring.alloc_idx.paddr); msg_word++; *msg_word = 0; HTT_RX_RING_CFG_BASE_PADDR_SET(*msg_word, pdev->rx_ring.base_paddr); msg_word++; *msg_word = 0; HTT_RX_RING_CFG_LEN_SET(*msg_word, pdev->rx_ring.size); HTT_RX_RING_CFG_BUF_SZ_SET(*msg_word, HTT_RX_BUF_SIZE); /* FIX THIS: if the FW creates a complete translated rx descriptor, then the MAC DMA of the HW rx descriptor should be disabled. */ msg_word++; *msg_word = 0; HTT_RX_RING_CFG_ENABLED_802_11_HDR_SET(*msg_word, 0); HTT_RX_RING_CFG_ENABLED_MSDU_PAYLD_SET(*msg_word, 1); HTT_RX_RING_CFG_ENABLED_PPDU_START_SET(*msg_word, 0); HTT_RX_RING_CFG_ENABLED_PPDU_END_SET(*msg_word, 0); HTT_RX_RING_CFG_ENABLED_MPDU_START_SET(*msg_word, 0); HTT_RX_RING_CFG_ENABLED_MPDU_END_SET(*msg_word, 0); HTT_RX_RING_CFG_ENABLED_MSDU_START_SET(*msg_word, 0); HTT_RX_RING_CFG_ENABLED_MSDU_END_SET(*msg_word, 0); HTT_RX_RING_CFG_ENABLED_RX_ATTN_SET(*msg_word, 0); HTT_RX_RING_CFG_ENABLED_FRAG_INFO_SET(*msg_word, 0); /* always present? 
*/ HTT_RX_RING_CFG_ENABLED_UCAST_SET(*msg_word, 1); HTT_RX_RING_CFG_ENABLED_MCAST_SET(*msg_word, 1); /* Must change to dynamic enable at run time * rather than at compile time */ HTT_RX_RING_CFG_ENABLED_CTRL_SET(*msg_word, 0); HTT_RX_RING_CFG_ENABLED_MGMT_SET(*msg_word, 0); HTT_RX_RING_CFG_ENABLED_NULL_SET(*msg_word, 0); HTT_RX_RING_CFG_ENABLED_PHY_SET(*msg_word, 0); msg_word++; *msg_word = 0; HTT_RX_RING_CFG_OFFSET_802_11_HDR_SET(*msg_word, 0); HTT_RX_RING_CFG_OFFSET_MSDU_PAYLD_SET(*msg_word, 0); msg_word++; *msg_word = 0; HTT_RX_RING_CFG_OFFSET_PPDU_START_SET(*msg_word, 0); HTT_RX_RING_CFG_OFFSET_PPDU_END_SET(*msg_word, 0); msg_word++; *msg_word = 0; HTT_RX_RING_CFG_OFFSET_MPDU_START_SET(*msg_word, 0); HTT_RX_RING_CFG_OFFSET_MPDU_END_SET(*msg_word, 0); msg_word++; *msg_word = 0; HTT_RX_RING_CFG_OFFSET_MSDU_START_SET(*msg_word, 0); HTT_RX_RING_CFG_OFFSET_MSDU_END_SET(*msg_word, 0); msg_word++; *msg_word = 0; HTT_RX_RING_CFG_OFFSET_RX_ATTN_SET(*msg_word, 0); HTT_RX_RING_CFG_OFFSET_FRAG_INFO_SET(*msg_word, 0); SET_HTC_PACKET_INFO_TX( &pkt->htc_pkt, htt_h2t_send_complete_free_netbuf, adf_nbuf_data(msg), adf_nbuf_len(msg), pdev->htc_endpoint, 1); /* tag - not relevant here */ SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg); #ifdef ATH_11AC_TXCOMPACT if (HTCSendPkt(pdev->htc_pdev, &pkt->htc_pkt) == A_OK) { htt_htc_misc_pkt_list_add(pdev, pkt); } #else HTCSendPkt(pdev->htc_pdev, &pkt->htc_pkt); #endif if (!pdev->cfg.default_tx_comp_req) { ol_tx_target_credit_update(pdev->txrx_pdev, -1); } return A_OK; }
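/*
 * usb_hif_usb_recv_bundle_complete() - completion handler for bundled
 * bulk-in URBs: walks the HTC frame headers packed into the receive buffer,
 * copies each sub-frame into its own newly allocated netbuf, queues the
 * netbufs for the io-completion work item and recycles the URB context,
 * posting more receive transfers if the free URB count is high enough.
 */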
static void usb_hif_usb_recv_bundle_complete(struct urb *urb) { HIF_URB_CONTEXT *urb_context = (HIF_URB_CONTEXT *) urb->context; A_STATUS status = A_OK; adf_nbuf_t buf = NULL; HIF_USB_PIPE *pipe = urb_context->pipe; A_UINT8 *netdata, *netdata_new; A_UINT32 netlen, netlen_new; HTC_FRAME_HDR *HtcHdr; A_UINT16 payloadLen; adf_nbuf_t new_skb = NULL; AR_DEBUG_PRINTF(USB_HIF_DEBUG_BULK_IN, ( "+%s: recv pipe: %d, stat:%d,len:%d urb:0x%p\n", __func__, pipe->logical_pipe_num, urb->status, urb->actual_length, urb)); /* this urb is not pending anymore */ usb_hif_remove_pending_transfer(urb_context); do { if (urb->status != 0) { status = A_ECOMM; switch (urb->status) { case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: /* NOTE: no need to spew these errors when * device is removed * or urb is killed due to driver shutdown */ status = A_ECANCELED; break; default: AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ( "%s recv pipe: %d (ep:0x%2.2X), failed:%d\n", __func__, pipe->logical_pipe_num, pipe->ep_address, urb->status)); break; } break; } if (urb->actual_length == 0) break; buf = urb_context->buf; if (AR_DEBUG_LVL_CHECK(USB_HIF_DEBUG_DUMP_DATA)) { A_UINT8 *data; A_UINT32 len; adf_nbuf_peek_header(buf, &data, &len); DebugDumpBytes(data, len, "hif recv data"); } adf_nbuf_peek_header(buf, &netdata, &netlen); netlen = urb->actual_length; do { #if defined(AR6004_1_0_ALIGN_WAR) A_UINT8 extra_pad; A_UINT16 act_frame_len; #endif A_UINT16 frame_len; /* Hack into HTC header for bundle processing */ HtcHdr = (HTC_FRAME_HDR *) netdata; if (HtcHdr->EndpointID >= ENDPOINT_MAX) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("athusb: Rx: invalid EndpointID=%d\n", HtcHdr->EndpointID)); break; } payloadLen = HtcHdr->PayloadLen; payloadLen = A_LE2CPU16(payloadLen); #if defined(AR6004_1_0_ALIGN_WAR) act_frame_len = (HTC_HDR_LENGTH + payloadLen); if (HtcHdr->EndpointID == 0 || HtcHdr->EndpointID == 1) { /* assumption: target won't pad on HTC endpoint * 0 & 1. 
*/ extra_pad = 0; } else { extra_pad = A_GET_UINT8_FIELD((A_UINT8 *) HtcHdr, HTC_FRAME_HDR, ControlBytes[1]); } #endif if (payloadLen > HIF_USB_RX_BUFFER_SIZE) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("athusb: payloadLen too long %u\n", payloadLen)); break; } #if defined(AR6004_1_0_ALIGN_WAR) frame_len = (act_frame_len + extra_pad); #else frame_len = (HTC_HDR_LENGTH + payloadLen); #endif if (netlen >= frame_len) { /* allocate a new skb and copy */ #if defined(AR6004_1_0_ALIGN_WAR) new_skb = adf_nbuf_alloc(NULL, act_frame_len, 0, 4, FALSE); if (new_skb == NULL) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ( "athusb: allocate skb (len=%u) failed\n", act_frame_len)); break; } adf_nbuf_peek_header(new_skb, &netdata_new, &netlen_new); adf_os_mem_copy(netdata_new, netdata, act_frame_len); adf_nbuf_put_tail(new_skb, act_frame_len); #else new_skb = adf_nbuf_alloc(NULL, frame_len, 0, 4, FALSE); if (new_skb == NULL) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ( "athusb: allocate skb (len=%u) failed\n", frame_len)); break; } adf_nbuf_peek_header(new_skb, &netdata_new, &netlen_new); adf_os_mem_copy(netdata_new, netdata, frame_len); adf_nbuf_put_tail(new_skb, frame_len); #endif skb_queue_tail(&pipe->io_comp_queue, new_skb); new_skb = NULL; netdata += frame_len; netlen -= frame_len; } else { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ( "athusb: subframe length %d not fitted into bundle packet length %d\n" , netlen, frame_len)); break; } } while (netlen); schedule_work(&pipe->io_complete_work); } while (FALSE); if (urb_context->buf == NULL) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("athusb: buffer in urb_context is NULL\n")); } /* reset urb_context->buf ==> seems not necessary */ usb_hif_free_urb_to_pipe(urb_context->pipe, urb_context); if (A_SUCCESS(status)) { if (pipe->urb_cnt >= pipe->urb_cnt_thresh) { /* our free urbs are piling up, post more transfers */ usb_hif_post_recv_bundle_transfers(pipe, 0 /* pass zero for not allocating urb-buffer again */ ); } } AR_DEBUG_PRINTF(USB_HIF_DEBUG_BULK_IN, ("-%s\n", __func__)); }
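/*
 * htt_h2t_dbg_stats_get() - build and send an HTT STATS_REQ message carrying
 * the upload/reset stats-type masks, the config type/value and a 64-bit
 * caller cookie (split across two words).  Returns 0 on success, -1 if the
 * masks are out of range or an allocation fails.
 */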
int htt_h2t_dbg_stats_get( struct htt_pdev_t *pdev, u_int32_t stats_type_upload_mask, u_int32_t stats_type_reset_mask, u_int8_t cfg_stat_type, u_int32_t cfg_val, u_int64_t cookie) { struct htt_htc_pkt *pkt; adf_nbuf_t msg; u_int32_t *msg_word; uint16_t htc_tag = 1; pkt = htt_htc_pkt_alloc(pdev); if (!pkt) { return -1; /* failure */ } if (stats_type_upload_mask >= 1 << HTT_DBG_NUM_STATS || stats_type_reset_mask >= 1 << HTT_DBG_NUM_STATS) { /* FIX THIS - add more details? */ adf_os_print("%#x %#x stats not supported\n", stats_type_upload_mask, stats_type_reset_mask); return -1; /* failure */ } if (stats_type_reset_mask) htc_tag = HTC_TX_PACKET_TAG_RUNTIME_PUT; /* show that this is not a tx frame download (not required, but helpful) */ pkt->msdu_id = HTT_TX_COMPL_INV_MSDU_ID; pkt->pdev_ctxt = NULL; /* not used during send-done callback */ msg = adf_nbuf_alloc( pdev->osdev, HTT_MSG_BUF_SIZE(HTT_H2T_STATS_REQ_MSG_SZ), /* reserve room for HTC header */ HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, FALSE); if (!msg) { htt_htc_pkt_free(pdev, pkt); return -1; /* failure */ } /* set the length of the message */ adf_nbuf_put_tail(msg, HTT_H2T_STATS_REQ_MSG_SZ); /* fill in the message contents */ msg_word = (u_int32_t *) adf_nbuf_data(msg); /* rewind beyond alignment pad to get to the HTC header reserved area */ adf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING); *msg_word = 0; HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_STATS_REQ); HTT_H2T_STATS_REQ_UPLOAD_TYPES_SET(*msg_word, stats_type_upload_mask); msg_word++; *msg_word = 0; HTT_H2T_STATS_REQ_RESET_TYPES_SET(*msg_word, stats_type_reset_mask); msg_word++; *msg_word = 0; HTT_H2T_STATS_REQ_CFG_VAL_SET(*msg_word, cfg_val); HTT_H2T_STATS_REQ_CFG_STAT_TYPE_SET(*msg_word, cfg_stat_type); /* cookie LSBs */ msg_word++; *msg_word = cookie & 0xffffffff; /* cookie MSBs */ msg_word++; *msg_word = cookie >> 32; SET_HTC_PACKET_INFO_TX( &pkt->htc_pkt, htt_h2t_send_complete_free_netbuf, adf_nbuf_data(msg), adf_nbuf_len(msg), pdev->htc_endpoint, htc_tag); SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg); #ifdef ATH_11AC_TXCOMPACT if (HTCSendPkt(pdev->htc_pdev, &pkt->htc_pkt) == A_OK) { htt_htc_misc_pkt_list_add(pdev, pkt); } #else HTCSendPkt(pdev->htc_pdev, &pkt->htc_pkt); #endif if ((pdev->cfg.is_high_latency) && (!pdev->cfg.default_tx_comp_req)) { ol_tx_target_credit_update(pdev->txrx_pdev, -1); } return 0; }
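/*
 * htt_h2t_ipa_uc_rsc_cfg_msg() - build and send the HTT WDI_IPA_CFG message
 * that hands the IPA uC resource layout to the target: tx packet pool size,
 * tx completion ring base/size and write index addresses, the CE write
 * index address, and the rx indication ring base/size and read/write index
 * addresses.
 */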
int htt_h2t_ipa_uc_rsc_cfg_msg(struct htt_pdev_t *pdev) { struct htt_htc_pkt *pkt; adf_nbuf_t msg; u_int32_t *msg_word; pkt = htt_htc_pkt_alloc(pdev); if (!pkt) { return A_NO_MEMORY; } /* show that this is not a tx frame download (not required, but helpful) */ pkt->msdu_id = HTT_TX_COMPL_INV_MSDU_ID; pkt->pdev_ctxt = NULL; /* not used during send-done callback */ msg = adf_nbuf_alloc( pdev->osdev, HTT_MSG_BUF_SIZE(HTT_WDI_IPA_CFG_SZ), /* reserve room for HTC header */ HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, FALSE); if (!msg) { htt_htc_pkt_free(pdev, pkt); return A_NO_MEMORY; } /* set the length of the message */ adf_nbuf_put_tail(msg, HTT_WDI_IPA_CFG_SZ); /* fill in the message contents */ msg_word = (u_int32_t *) adf_nbuf_data(msg); /* rewind beyond alignment pad to get to the HTC header reserved area */ adf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING); *msg_word = 0; HTT_WDI_IPA_CFG_TX_PKT_POOL_SIZE_SET(*msg_word, pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt); HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_WDI_IPA_CFG); msg_word++; *msg_word = 0; HTT_WDI_IPA_CFG_TX_COMP_RING_BASE_ADDR_SET(*msg_word, (unsigned int)pdev->ipa_uc_tx_rsc.tx_comp_base.paddr); msg_word++; *msg_word = 0; HTT_WDI_IPA_CFG_TX_COMP_RING_SIZE_SET(*msg_word, (unsigned int)ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev)); msg_word++; *msg_word = 0; HTT_WDI_IPA_CFG_TX_COMP_WR_IDX_ADDR_SET(*msg_word, (unsigned int)pdev->ipa_uc_tx_rsc.tx_comp_idx_paddr); msg_word++; *msg_word = 0; HTT_WDI_IPA_CFG_TX_CE_WR_IDX_ADDR_SET(*msg_word, (unsigned int)pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr); msg_word++; *msg_word = 0; HTT_WDI_IPA_CFG_RX_IND_RING_BASE_ADDR_SET(*msg_word, (unsigned int)pdev->ipa_uc_rx_rsc.rx_ind_ring_base.paddr); msg_word++; *msg_word = 0; HTT_WDI_IPA_CFG_RX_IND_RING_SIZE_SET(*msg_word, (unsigned int)ol_cfg_ipa_uc_rx_ind_ring_size(pdev->ctrl_pdev)); msg_word++; *msg_word = 0; HTT_WDI_IPA_CFG_RX_IND_RD_IDX_ADDR_SET(*msg_word, (unsigned int)pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.paddr); msg_word++; *msg_word = 0; HTT_WDI_IPA_CFG_RX_IND_WR_IDX_ADDR_SET(*msg_word, (unsigned int)pdev->ipa_uc_rx_rsc.rx_rdy_idx_paddr); SET_HTC_PACKET_INFO_TX( &pkt->htc_pkt, htt_h2t_send_complete_free_netbuf, adf_nbuf_data(msg), adf_nbuf_len(msg), pdev->htc_endpoint, HTC_TX_PACKET_TAG_RUNTIME_PUT); SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg); #ifdef ATH_11AC_TXCOMPACT if (HTCSendPkt(pdev->htc_pdev, &pkt->htc_pkt) == A_OK) htt_htc_misc_pkt_list_add(pdev, pkt); #else HTCSendPkt(pdev->htc_pdev, &pkt->htc_pkt); #endif return A_OK; }