/* WMI Event handler register API */ int wmi_unified_get_event_handler_ix(wmi_unified_t wmi_handle, WMI_EVT_ID event_id) { u_int32_t idx = 0; for (idx = 0; (idx < wmi_handle->max_event_idx && idx < WMI_UNIFIED_MAX_EVENT); ++idx) { if (wmi_handle->event_id[idx] == event_id && wmi_handle->event_handler[idx] != NULL ) { return idx; } } return -1; } int wmi_unified_register_event_handler(wmi_unified_t wmi_handle, WMI_EVT_ID event_id, wmi_unified_event_handler handler_func) { u_int32_t idx=0; if ( wmi_unified_get_event_handler_ix( wmi_handle, event_id) != -1) { printk("%s : event handler already registered 0x%x \n", __func__, event_id); return -1; } if ( wmi_handle->max_event_idx == WMI_UNIFIED_MAX_EVENT ) { printk("%s : no more event handlers 0x%x \n", __func__, event_id); return -1; } idx=wmi_handle->max_event_idx; wmi_handle->event_handler[idx] = handler_func; wmi_handle->event_id[idx] = event_id; wmi_handle->max_event_idx++; return 0; } int wmi_unified_unregister_event_handler(wmi_unified_t wmi_handle, WMI_EVT_ID event_id) { u_int32_t idx=0; if ( (idx = wmi_unified_get_event_handler_ix( wmi_handle, event_id)) == -1) { printk("%s : event handler is not registered: event id 0x%x \n", __func__, event_id); return -1; } wmi_handle->event_handler[idx] = NULL; wmi_handle->event_id[idx] = 0; --wmi_handle->max_event_idx; wmi_handle->event_handler[idx] = wmi_handle->event_handler[wmi_handle->max_event_idx]; wmi_handle->event_id[idx] = wmi_handle->event_id[wmi_handle->max_event_idx] ; return 0; } #if 0 /* currently not used */ static int wmi_unified_event_rx(struct wmi_unified *wmi_handle, wmi_buf_t evt_buf) { u_int32_t id; u_int8_t *event; u_int16_t len; int status = -1; u_int32_t idx = 0; ASSERT(evt_buf != NULL); id = WMI_GET_FIELD(adf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID); if (adf_nbuf_pull_head(evt_buf, sizeof(WMI_CMD_HDR)) == NULL) goto end; idx = wmi_unified_get_event_handler_ix(wmi_handle, id); if (idx == -1) { pr_err("%s : event handler is not registered: event 
id: 0x%x\n", __func__, id); goto end; } event = adf_nbuf_data(evt_buf); len = adf_nbuf_len(evt_buf); /* Call the WMI registered event handler */ status = wmi_handle->event_handler[idx](wmi_handle->scn_handle, event, len); end: adf_nbuf_free(evt_buf); return status; }
/**
 * HTCIssueSend() - prepend an HTC frame header and hand the frame to HIF.
 * @target:   HTC target instance.
 * @hdr_buf:  optional separate buffer for the HTC header; may be
 *            ADF_NBUF_NULL, in which case the header is pushed onto netbuf.
 * @netbuf:   the payload buffer.
 * @SendFlags: HTC header flags.
 * @len:      payload length (stored big-endian in the header).
 * @EpID:     destination endpoint id.
 *
 * Return: A_OK on success (or whatever HIFSend returns), A_ERROR if the
 * header could not be pushed.
 */
A_STATUS HTCIssueSend(HTC_TARGET *target, adf_nbuf_t hdr_buf,
                      adf_nbuf_t netbuf, a_uint8_t SendFlags,
                      a_uint16_t len, a_uint8_t EpID)
{
    a_uint8_t pipeID;
    A_STATUS status = A_OK;
    HTC_ENDPOINT *pEndpoint = &target->EndPoint[EpID];
    HTC_FRAME_HDR *HtcHdr;
    adf_nbuf_t tmp_nbuf;

    /* The HTC header goes on the separate header buffer when one is
     * provided, otherwise directly on the data nbuf. */
    if (hdr_buf == ADF_NBUF_NULL) {
        tmp_nbuf = netbuf;
    } else {
        tmp_nbuf = hdr_buf;
    }

    /* setup HTC frame header */
    HtcHdr = (HTC_FRAME_HDR *)adf_nbuf_push_head(tmp_nbuf,
                                                 sizeof(HTC_FRAME_HDR));
    adf_os_assert(HtcHdr);
    if (HtcHdr == NULL) {
        adf_os_print("%s_%d: HTC Header is NULL !!!\n",
                     __FUNCTION__, __LINE__);
        return A_ERROR;
    }

    HtcHdr->EndpointID = EpID;
    HtcHdr->Flags = SendFlags;
    HtcHdr->PayloadLen = adf_os_htons(len);
    HTC_ADD_CREDIT_SEQNO(target, pEndpoint, len, HtcHdr);

    /* lookup the pipe id by the endpoint id */
    pipeID = pEndpoint->UL_PipeID;

    /* send the buffer to the HIF layer */
    status = HIFSend(target->hif_dev, pipeID, hdr_buf, netbuf);

#ifdef NBUF_PREALLOC_POOL
    if (A_FAILED(status)) {
        /*
         * Fix: unwind the header from the buffer it was actually pushed
         * onto (tmp_nbuf = hdr_buf when present, else netbuf).  The
         * original always pulled from netbuf, which corrupted the data
         * buffer whenever a separate hdr_buf carried the header.
         */
        adf_nbuf_pull_head(tmp_nbuf, HTC_HDR_LENGTH);
    }
#endif
    return status;
}
/*
 * Completion routine for ALL HIF layer async I/O.
 * Restores/releases the send context and notifies the upper layer.
 */
A_STATUS HIFDevRWCompletionHandler(void *context, A_STATUS status)
{
    struct HIFSendContext *send_ctx = (struct HIFSendContext *)context;
    /* Capture everything needed before the context may be freed below. */
    unsigned int xfer_id = send_ctx->transferID;
    HIF_SDIO_DEVICE *dev = send_ctx->pDev;
    adf_nbuf_t netbuf = send_ctx->netbuf;

    if (!send_ctx->bNewAlloc) {
        /* Context lives in the netbuf headroom: restore the data pointer. */
        adf_nbuf_pull_head(netbuf, send_ctx->head_data_len);
    } else {
        /* Context was separately allocated: release it. */
        adf_os_mem_free((void *)send_ctx);
    }

    if (dev->hif_callbacks.txCompletionHandler) {
        dev->hif_callbacks.txCompletionHandler(dev->hif_callbacks.Context,
                                               netbuf, xfer_id);
    }
    return A_OK;
}
/*
 * Send-completion handler: strip the HTC header that was prepended for
 * this buffer and hand it back to the owning endpoint's service.
 */
LOCAL void HTCSendDoneHandler(adf_nbuf_t buf, void *context)
{
    HTC_CONTEXT *htc = (HTC_CONTEXT *)context;
    HTC_BUF_CONTEXT *buf_ctx;
    HTC_SERVICE *svc;
    A_UINT8 eid;

    /* The endpoint id was stashed in the buffer's private area. */
    buf_ctx = (HTC_BUF_CONTEXT *)adf_nbuf_get_priv(buf);
    eid = buf_ctx->end_point;

    /* Remove the HTC header this layer added on send. */
    adf_nbuf_pull_head(buf, HTC_HDR_LENGTH);

    svc = htc->Endpoints[eid].pService;
    svc->ProcessSendBufferComplete(eid, buf, svc->ServiceCtx);
}
/*
 * Re-inject a received MSDU into the tx path (intra-BSS forwarding).
 * If tx does not accept the frame it is discarded rather than retried.
 */
static inline void
ol_rx_fwd_to_tx(struct ol_txrx_vdev_t *vdev, adf_nbuf_t msdu)
{
    struct ol_txrx_pdev_t *pdev = vdev->pdev;
    adf_nbuf_t rejected;

    if (pdev->frame_format == wlan_frm_fmt_native_wifi) {
        ol_ap_fwd_check(vdev, msdu);
    }

    /* Terminate the (single-buffer) list before handing it to tx. */
    adf_nbuf_set_next(msdu, NULL);

    /* High-latency targets: strip the rx descriptor so tx starts at the
     * payload, and flag the buffer as a forwarded frame. */
    if (pdev->cfg.is_high_latency) {
        void *desc = htt_rx_msdu_desc_retrieve(pdev->htt_pdev, msdu);
        adf_nbuf_pull_head(msdu,
                           htt_rx_msdu_rx_desc_size_hl(pdev->htt_pdev, desc));
        adf_nbuf_set_fwd_flag(msdu, ADF_NBUF_FWD_FLAG);
    }

    rejected = vdev->tx(vdev, msdu);
    if (rejected) {
        /* tx did not take the frame; simplest policy is to drop it. */
        adf_nbuf_tx_free(rejected, ADF_NBUF_PKT_ERROR);
    }
}
/**
 * HIFDevSendBuffer() - send a (possibly fragmented) netbuf over an SDIO
 * mailbox.
 * @pDev:       HIF SDIO device.
 * @transferID: opaque id returned to the tx-completion callback.
 * @pipe:       logical pipe, mapped to a mailbox index.
 * @nbytes:     number of payload bytes to send.
 * @buf:        the netbuf (last fragment is the netbuf's own buffer).
 *
 * A HIFSendContext plus any non-internal fragments are linearized either
 * into the netbuf's headroom (when it is large enough) or into a fresh
 * allocation, then handed to HIFReadWrite asynchronously.
 *
 * Return: A_OK on success (A_PENDING from the async HIF is folded into
 * A_OK), A_ERROR on allocation failure, otherwise the HIF status.
 */
A_STATUS HIFDevSendBuffer(HIF_SDIO_DEVICE *pDev, unsigned int transferID,
                          a_uint8_t pipe, unsigned int nbytes, adf_nbuf_t buf)
{
    A_STATUS status;
    A_UINT32 paddedLength;
    int frag_count = 0, i, head_data_len;
    struct HIFSendContext *pSendContext;
    unsigned char *pData;
    A_UINT32 request = HIF_WR_ASYNC_BLOCK_INC;
    A_UINT8 mboxIndex = HIFDevMapPipeToMailBox(pDev, pipe);

    paddedLength = DEV_CALC_SEND_PADDED_LEN(pDev, nbytes);
#ifdef ENABLE_MBOX_DUMMY_SPACE_FEATURE
    A_ASSERT(paddedLength - nbytes < HIF_DUMMY_SPACE_MASK + 1);
    /*
     * Two most significant bytes carry the dummy-data count; data written
     * into the dummy space is not put into the final mbox FIFO.
     */
    request |= ((paddedLength - nbytes) << 16);
#endif

    frag_count = adf_nbuf_get_num_frags(buf);

    if (frag_count > 1) {
        /*
         * Head data length = total send length minus the length of the
         * netbuf's own (last) fragment:
         * | HIFSendContext | fragments except internal buffer | netbuf->data
         */
        head_data_len = sizeof(struct HIFSendContext) +
            (nbytes - adf_nbuf_get_frag_len(buf, frag_count - 1));
    } else {
        /* | HIFSendContext | netbuf->data */
        head_data_len = sizeof(struct HIFSendContext);
    }

    /* Check whether head room is enough to save extra head data */
    if ((head_data_len <= adf_nbuf_headroom(buf)) &&
        (adf_nbuf_tailroom(buf) >= (paddedLength - nbytes))) {
        pSendContext =
            (struct HIFSendContext *)adf_nbuf_push_head(buf, head_data_len);
        pSendContext->bNewAlloc = FALSE;
    } else {
        pSendContext = (struct HIFSendContext *)adf_os_mem_alloc(NULL,
                           sizeof(struct HIFSendContext) + paddedLength);
        /* Fix: the original dereferenced the allocation with no NULL
         * check, crashing under memory pressure. */
        if (pSendContext == NULL) {
            return A_ERROR;
        }
        pSendContext->bNewAlloc = TRUE;
    }

    pSendContext->netbuf = buf;
    pSendContext->pDev = pDev;
    pSendContext->transferID = transferID;
    pSendContext->head_data_len = head_data_len;

    /*
     * Copy data to the head part of the netbuf, or to the head of the
     * newly allocated buffer (in which case the last fragment -- the
     * netbuf's internal buffer -- must be copied as well).  The total
     * length of the fragments can exceed nbytes, so clamp the copy.
     */
    pData = (unsigned char *)pSendContext + sizeof(struct HIFSendContext);
    for (i = 0;
         i < (pSendContext->bNewAlloc ? frag_count : frag_count - 1);
         i++) {
        int frag_len = adf_nbuf_get_frag_len(buf, i);
        unsigned char *frag_addr = adf_nbuf_get_frag_vaddr(buf, i);
        if (frag_len > nbytes) {
            frag_len = nbytes;
        }
        memcpy(pData, frag_addr, frag_len);
        pData += frag_len;
        nbytes -= frag_len;
        /* nbytes is unsigned; the original "<= 0" was misleading. */
        if (nbytes == 0) {
            break;
        }
    }

    /* Reset pData pointer and send out */
    pData = (unsigned char *)pSendContext + sizeof(struct HIFSendContext);
    status = HIFReadWrite(pDev->HIFDevice,
                          pDev->MailBoxInfo.MboxProp[mboxIndex].ExtendedAddress,
                          (char *)pData,
                          paddedLength,
                          request,
                          (void *)pSendContext);

    if (status == A_PENDING) {
        /*
         * The native async HIF implementation returns A_PENDING, which
         * should be treated as a successful result here.
         */
        status = A_OK;
    }

    /* Release buffer or move back the data pointer on failure. */
    if (status != A_OK) {
        if (pSendContext->bNewAlloc) {
            adf_os_mem_free(pSendContext);
        } else {
            adf_nbuf_pull_head(buf, head_data_len);
        }
    }

    return status;
}
/*
 * HTCMsgRecvHandler() - callback from the mailbox hardware layer when a
 * full message arrives.
 *
 * @hdr_buf: optional buffer holding the HTC header; ADF_NBUF_NULL when
 *           the header is at the front of @buffer.
 * @buffer:  the message payload buffer.
 * @context: the HTC_CONTEXT for this instance.
 *
 * Handles credit-redistribution control frames inline; otherwise updates
 * the per-endpoint credit bookkeeping, strips the HTC header, and passes
 * the message to the endpoint's registered service.
 */
LOCAL void HTCMsgRecvHandler(adf_nbuf_t hdr_buf, adf_nbuf_t buffer, void *context)
{
    A_UINT16 totsz;              /* payload length from the header; currently unused below */
    HTC_ENDPOINT *pEndpoint;
    A_UINT32 eidMask;            /* single-bit mask for this endpoint id */
    int eid;
    a_uint8_t *anbdata;
    a_uint32_t anblen;
    HTC_FRAME_HDR *pHTCHdr;
    HTC_CONTEXT *pHTC = (HTC_CONTEXT *)context;
    adf_nbuf_t tmp_nbuf;         /* whichever buffer actually carries the HTC header */

    if (hdr_buf == ADF_NBUF_NULL) {
        /* HTC hdr is not in the hdr_buf */
        tmp_nbuf = buffer;
    } else {
        tmp_nbuf = hdr_buf;
    }

    adf_nbuf_peek_header(tmp_nbuf, &anbdata, &anblen);
    pHTCHdr = (HTC_FRAME_HDR *)anbdata;

    totsz = adf_os_ntohs(pHTCHdr->PayloadLen);
    eid = pHTCHdr->EndpointID;
    pEndpoint = &pHTC->Endpoints[eid];
    eidMask = 1 << eid;

    if (pHTCHdr->Flags & HTC_FLAGS_CREDIT_REDISTRIBUTION) {
        /* The pipe id where the credit is redistributed to is carried in
         * Control Byte 0.
         * NOTE(review): this early return assumes RedistributeCredit()
         * consumes tmp_nbuf — confirm its ownership contract. */
        RedistributeCredit(tmp_nbuf, pHTCHdr->ControlBytes[0]);
        return;
    }

    if (pHTC->StateFlags & HTC_STATE_SETUP_COMPLETE) {
        /* After setup we track credit consumption so thresholds can be
         * adjusted to reduce credit dribbling. */
        pEndpoint->CreditsConsumed ++;
    }

    /*
     * Per the design document: the host enters a "host-needs-credit"
     * state when a frame arrives with NEED_CREDIT_UPDATE set.  If the
     * host later receives credits opportunistically, it can issue a
     * frame with the bit cleared, signalling the target to clear that
     * state.
     */
    if (pHTCHdr->Flags & HTC_FLAGS_NEED_CREDIT_UPDATE) {
        /* Host is running low (or out) of credits on this endpoint. */
        pHTC->EpHostNeedsCreditMap |= eidMask;
        /* Check and set a new threshold for this low-credit situation. */
        CHECK_AND_ADJUST_CREDIT_THRESHOLD(pEndpoint);
    } else {
        /* Clear the flag and reset the return threshold. */
        pHTC->EpHostNeedsCreditMap &= ~(eidMask);
        pEndpoint->CreditReturnThreshhold = 0;
    }

    /* Advance the header buffer past the HTC header so it points at the
     * start of the actual payload. */
    adf_nbuf_pull_head(tmp_nbuf, HTC_HDR_LENGTH);

    /* NOTE: the callback may re-queue recv buffers or send a response
     * message within this calling context.  Either way, if credits are
     * pending and the host needs them, a credit report goes out via the
     * response trailer or a NULL message through HTC_ReturnBuffers(). */
    pEndpoint->pService->ProcessRecvMsg(eid, hdr_buf, buffer,
                                        pEndpoint->pService->ServiceCtx);

    /* HTC_ReturnBuffers() drives the endpoint credit-reporting state
     * machine; report eagerly here so credits are not delayed while the
     * application holds buffers. */
    HTCCheckAndSendCreditReport(pHTC, eidMask, pEndpoint, eid);
}
/*
 * HTCRxCompletionHandler() - HIF rx-completion entry point for HTC.
 *
 * Validates the HTC frame header, handles scatter-gather reassembly
 * (RX_SG_SUPPORT), processes any credit-report trailer, dispatches
 * endpoint-0 control messages inline, and wraps data frames in an
 * HTC_PACKET for delivery to the endpoint.  Consumes netbuf on every
 * path (either freed here or handed onward).
 */
A_STATUS HTCRxCompletionHandler(
    void *Context, adf_nbuf_t netbuf, a_uint8_t pipeID)
{
    A_STATUS status = A_OK;
    HTC_FRAME_HDR *HtcHdr;
    HTC_TARGET *target = (HTC_TARGET *)Context;
    a_uint8_t *netdata;
    a_uint32_t netlen;
    HTC_ENDPOINT *pEndpoint;
    HTC_PACKET *pPacket;
    A_UINT16 payloadLen;
    a_uint32_t trailerlen = 0;
    A_UINT8 htc_ep_id;

#ifdef RX_SG_SUPPORT
    /* If a scatter-gather reassembly is in progress, accumulate this
     * buffer; only continue below once the full message is assembled. */
    LOCK_HTC_RX(target);
    if (target->IsRxSgInprogress) {
        target->CurRxSgTotalLen += adf_nbuf_len(netbuf);
        adf_nbuf_queue_add(&target->RxSgQueue, netbuf);
        if (target->CurRxSgTotalLen == target->ExpRxSgTotalLen) {
            netbuf = RxSgToSingleNetbuf(target);
            if (netbuf == NULL) {
                UNLOCK_HTC_RX(target);
                goto _out;
            }
        } else {
            /* Still waiting for more fragments. */
            netbuf = NULL;
            UNLOCK_HTC_RX(target);
            goto _out;
        }
    }
    UNLOCK_HTC_RX(target);
#endif

    netdata = adf_nbuf_data(netbuf);
    netlen = adf_nbuf_len(netbuf);

    HtcHdr = (HTC_FRAME_HDR *)netdata;

    /* do { } while(FALSE): single-pass block so error paths can "break"
     * to the common cleanup below. */
    do {
        htc_ep_id = HTC_GET_FIELD(HtcHdr, HTC_FRAME_HDR, ENDPOINTID);
        pEndpoint = &target->EndPoint[htc_ep_id];

        if (htc_ep_id >= ENDPOINT_MAX) {
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("HTC Rx: invalid EndpointID=%d\n",htc_ep_id));
            DebugDumpBytes((A_UINT8 *)HtcHdr,sizeof(HTC_FRAME_HDR),"BAD HTC Header");
            status = A_ERROR;
            break;
        }

        /*
         * If this endpoint's to-target HIF pipe has polled (rather than
         * interrupt-driven) send completions, this is a good point to
         * ask HIF to check for completed sends.
         */
        if (pEndpoint->ul_is_polled) {
            HTCSendCompleteCheck(pEndpoint, 1);
        }

        payloadLen = HTC_GET_FIELD(HtcHdr, HTC_FRAME_HDR, PAYLOADLEN);

        if (netlen < (payloadLen + HTC_HDR_LENGTH)) {
#ifdef RX_SG_SUPPORT
            /* Partial message: start scatter-gather accumulation. */
            LOCK_HTC_RX(target);
            target->IsRxSgInprogress = TRUE;
            adf_nbuf_queue_init(&target->RxSgQueue);
            adf_nbuf_queue_add(&target->RxSgQueue, netbuf);
            target->ExpRxSgTotalLen = (payloadLen + HTC_HDR_LENGTH);
            target->CurRxSgTotalLen += netlen;
            UNLOCK_HTC_RX(target);
            netbuf = NULL;
            break;
#else
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("HTC Rx: insufficient length, got:%d expected =%d\n",
                netlen, payloadLen + HTC_HDR_LENGTH));
            DebugDumpBytes((A_UINT8 *)HtcHdr,sizeof(HTC_FRAME_HDR),"BAD RX packet length");
            status = A_ERROR;
            break;
#endif
        }

#ifdef HTC_EP_STAT_PROFILING
        LOCK_HTC_RX(target);
        INC_HTC_EP_STAT(pEndpoint,RxReceived,1);
        UNLOCK_HTC_RX(target);
#endif

        //if (IS_TX_CREDIT_FLOW_ENABLED(pEndpoint)) {
        {
            A_UINT8 temp;
            /* get flags to check for trailer */
            temp = HTC_GET_FIELD(HtcHdr, HTC_FRAME_HDR, FLAGS);
            if (temp & HTC_FLAGS_RECV_TRAILER) {
                /* extract the trailer length (Control Byte 0) */
                temp = HTC_GET_FIELD(HtcHdr, HTC_FRAME_HDR, CONTROLBYTES0);
                if ((temp < sizeof(HTC_RECORD_HDR)) || (temp > payloadLen)) {
                    AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
                        ("HTCProcessRecvHeader, invalid header (payloadlength should be :%d, CB[0] is:%d) \n",
                         payloadLen, temp));
                    status = A_EPROTO;
                    break;
                }
                trailerlen = temp;
                /* process trailer that follows HDR + application payload */
                status = HTCProcessTrailer(target,
                    ((A_UINT8 *)HtcHdr + HTC_HDR_LENGTH + payloadLen - temp),
                    temp, htc_ep_id);
                if (A_FAILED(status)) {
                    break;
                }
            }
        }

        if (((int)payloadLen - (int)trailerlen) <= 0) {
            /* zero-length packet with trailer data only: drop it */
            break;
        }

        if (htc_ep_id == ENDPOINT_0) {
            A_UINT16 message_id;
            HTC_UNKNOWN_MSG *htc_msg;

            /* remove HTC header */
            adf_nbuf_pull_head(netbuf, HTC_HDR_LENGTH);
            netdata = adf_nbuf_data(netbuf);
            netlen = adf_nbuf_len(netbuf);

            htc_msg = (HTC_UNKNOWN_MSG*)netdata;
            message_id = HTC_GET_FIELD(htc_msg, HTC_UNKNOWN_MSG, MESSAGEID);

            switch (message_id) {
            default:
                /* handle HTC control message */
                if (target->CtrlResponseProcessing) {
                    /* fatal: the target must not send unsolicited
                     * messages on endpoint 0 */
                    AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("HTC Rx Ctrl still processing\n"));
                    status = A_ERROR;
                    break;
                }
                /* Stash the response and signal the waiter. */
                LOCK_HTC_RX(target);
                target->CtrlResponseLength = min((int)netlen,HTC_MAX_CONTROL_MESSAGE_LENGTH);
                A_MEMCPY(target->CtrlResponseBuffer,netdata,target->CtrlResponseLength);
                UNLOCK_HTC_RX(target);
                adf_os_mutex_release(target->osdev, &target->CtrlResponseValid);
                break;
            case HTC_MSG_SEND_SUSPEND_COMPLETE:
                target->HTCInitInfo.TargetSendSuspendComplete(target->HTCInitInfo.pContext);
                break;
            }
            adf_nbuf_free(netbuf);
            netbuf = NULL;
            break;
        }

        /*
         * The message-based HIF architecture allocates net bufs for recv
         * packets; this layer bridges to upper layers that expect
         * HTC packets, so form the packet container here.  TODO_FIXME
         */
        pPacket = AllocateHTCPacketContainer(target);
        if (NULL == pPacket) {
            status = A_NO_RESOURCE;
            break;
        }
        pPacket->Status = A_OK;
        pPacket->Endpoint = htc_ep_id;
        pPacket->pPktContext = netbuf;
        pPacket->pBuffer = adf_nbuf_data(netbuf) + HTC_HDR_LENGTH;
        /* NOTE(review): HTC_HEADER_LEN is used here while HTC_HDR_LENGTH
         * is used everywhere else — confirm the two macros agree. */
        pPacket->ActualLength = netlen - HTC_HEADER_LEN - trailerlen;

        /* TODO: hack — the driver layer will set the skb length again,
         * which would otherwise double it. */
        //A_NETBUF_TRIM(netbuf,netlen);
        adf_nbuf_trim_tail(netbuf, netlen);

        RecvPacketCompletion(target,pEndpoint,pPacket);
        /* recover the packet container */
        FreeHTCPacketContainer(target,pPacket);
        netbuf = NULL;
    } while(FALSE);

#ifdef RX_SG_SUPPORT
_out:
#endif
    if (netbuf != NULL) {
        adf_nbuf_free(netbuf);
    }

    return status;
}
/**
 * __wmi_control_rx() - deserialize and dispatch one WMI event buffer.
 * @wmi_handle: WMI instance.
 * @evt_buf:    event netbuf; consumed (freed) on every path.
 *
 * Strips the WMI_CMD_HDR, TLV-validates/pads the payload, then either
 * dispatches grouped events to the registered per-event handler or
 * handles the fixed SERVICE_READY / READY events inline.
 */
void __wmi_control_rx(struct wmi_unified *wmi_handle, wmi_buf_t evt_buf)
{
    u_int32_t id;
    u_int8_t *data;
    u_int32_t len;
    void *wmi_cmd_struct_ptr = NULL;
    int tlv_ok_status = 0;

    id = WMI_GET_FIELD(adf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);

    if (adf_nbuf_pull_head(evt_buf, sizeof(WMI_CMD_HDR)) == NULL)
        goto end;

    data = adf_nbuf_data(evt_buf);
    len = adf_nbuf_len(evt_buf);

    /* Validate and pad (if necessary) the TLVs */
    tlv_ok_status = wmitlv_check_and_pad_event_tlvs(wmi_handle->scn_handle,
                        data, len, id, &wmi_cmd_struct_ptr);
    if (tlv_ok_status != 0) {
        /* Fix: was "id=0x%d", printing decimal behind a hex prefix. */
        pr_err("%s: Error: id=0x%x, wmitlv_check_and_pad_tlvs ret=%d\n",
               __func__, id, tlv_ok_status);
        goto end;
    }

#ifdef FEATURE_WLAN_D0WOW
    if (wmi_get_d0wow_flag(wmi_handle))
        pr_debug("%s: WMI event ID is 0x%x\n", __func__, id);
#endif

    if (id >= WMI_EVT_GRP_START_ID(WMI_GRP_START)) {
        /* Signed so the -1 "not registered" sentinel compares without an
         * implicit unsigned conversion. */
        int idx;

        idx = wmi_unified_get_event_handler_ix(wmi_handle, id);
        if (idx == -1) {
            pr_err("%s : event handler is not registered: event id 0x%x\n",
                   __func__, id);
            goto end;
        }

#ifdef WMI_INTERFACE_EVENT_LOGGING
        adf_os_spin_lock_bh(&wmi_handle->wmi_record_lock);
        /* Exclude 4 bytes of TLV header */
        WMI_EVENT_RECORD(id, ((u_int8_t *)data + 4));
        adf_os_spin_unlock_bh(&wmi_handle->wmi_record_lock);
#endif
        /* Call the WMI registered event handler */
        wmi_handle->event_handler[idx](wmi_handle->scn_handle,
                                       wmi_cmd_struct_ptr, len);
        goto end;
    }

    switch (id) {
    default:
        pr_info("%s: Unhandled WMI event %d\n", __func__, id);
        break;
    case WMI_SERVICE_READY_EVENTID:
        pr_info("%s: WMI UNIFIED SERVICE READY event\n", __func__);
        wma_rx_service_ready_event(wmi_handle->scn_handle,
                                   wmi_cmd_struct_ptr);
        break;
    case WMI_READY_EVENTID:
        pr_info("%s: WMI UNIFIED READY event\n", __func__);
        wma_rx_ready_event(wmi_handle->scn_handle, wmi_cmd_struct_ptr);
        break;
    }

end:
    wmitlv_free_allocated_event_tlvs(id, &wmi_cmd_struct_ptr);
    adf_nbuf_free(evt_buf);
}
/*
 * Temporarily added to support older WMI events.  We should move all
 * events to unified when the target is ready to support it.
 */
/**
 * wmi_control_rx() - HTC rx callback for WMI control events.
 * @ctx:        the wmi_unified handle.
 * @htc_packet: HTC packet whose pPktContext is the event netbuf.
 *
 * TX_PAUSE and WOW_WAKEUP_HOST events are handled immediately in this
 * (tasklet) context; everything else is queued to the rx work queue.
 * The event buffer is consumed on every path.
 */
void wmi_control_rx(void *ctx, HTC_PACKET *htc_packet)
{
    struct wmi_unified *wmi_handle = (struct wmi_unified *)ctx;
    wmi_buf_t evt_buf;
    u_int32_t len;
    void *wmi_cmd_struct_ptr = NULL;
    /* Signed so the -1 "not registered" sentinel compares cleanly. */
    int idx = 0;
    int tlv_ok_status = 0;
    /*
     * Fix: id/data were declared under
     * "#if defined(WMI_INTERFACE_EVENT_LOGGING) || !defined(QCA_CONFIG_SMP)"
     * but id is used unconditionally below, breaking the build for
     * configurations outside that guard.  Declare them unconditionally.
     */
    u_int32_t id;
    u_int8_t *data;

    evt_buf = (wmi_buf_t) htc_packet->pPktContext;
    id = WMI_GET_FIELD(adf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);

    /* TX_PAUSE EVENT should be handled with tasklet context */
    if ((WMI_TX_PAUSE_EVENTID == id) || (WMI_WOW_WAKEUP_HOST_EVENTID == id)) {
        if (adf_nbuf_pull_head(evt_buf, sizeof(WMI_CMD_HDR)) == NULL) {
            /* Fix: the original returned here without freeing evt_buf. */
            adf_nbuf_free(evt_buf);
            return;
        }

        data = adf_nbuf_data(evt_buf);
        len = adf_nbuf_len(evt_buf);

        tlv_ok_status = wmitlv_check_and_pad_event_tlvs(
                            wmi_handle->scn_handle, data, len, id,
                            &wmi_cmd_struct_ptr);
        if (tlv_ok_status != 0) {
            if (tlv_ok_status == 1) {
                /* Status 1: payload usable as-is, no TLV conversion. */
                wmi_cmd_struct_ptr = data;
            } else {
                /* Fix: the original leaked evt_buf on this path too. */
                adf_nbuf_free(evt_buf);
                return;
            }
        }

        idx = wmi_unified_get_event_handler_ix(wmi_handle, id);
        if (idx == -1) {
            wmitlv_free_allocated_event_tlvs(id, &wmi_cmd_struct_ptr);
            adf_nbuf_free(evt_buf);
            return;
        }
        wmi_handle->event_handler[idx](wmi_handle->scn_handle,
                                       wmi_cmd_struct_ptr, len);
        wmitlv_free_allocated_event_tlvs(id, &wmi_cmd_struct_ptr);
        adf_nbuf_free(evt_buf);
        return;
    }

#ifdef WMI_INTERFACE_EVENT_LOGGING
    id = WMI_GET_FIELD(adf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);
    data = adf_nbuf_data(evt_buf);

    adf_os_spin_lock_bh(&wmi_handle->wmi_record_lock);
    /* Exclude 4 bytes of TLV header */
    WMI_RX_EVENT_RECORD(id, ((u_int8_t *)data + 4));
    adf_os_spin_unlock_bh(&wmi_handle->wmi_record_lock);
#endif

    /* Defer everything else to the rx event work queue. */
    adf_os_spin_lock_bh(&wmi_handle->eventq_lock);
    adf_nbuf_queue_add(&wmi_handle->event_queue, evt_buf);
    adf_os_spin_unlock_bh(&wmi_handle->eventq_lock);
    schedule_work(&wmi_handle->rx_event_work);
}
/*
 * ol_tx_encap_from_native_wifi() - finish 802.11 header preparation for a
 * tx frame already in native-wifi format.
 *
 * For unicast frames to a QoS-capable peer, inserts a QoS control field
 * into the 802.11 header (rebuilt in the HTT tx descriptor) and pulls the
 * original header off the msdu.  When software protected-frame processing
 * is enabled and encryption is requested, also sets the WEP/Protected bit.
 *
 * Returns A_OK on success, A_ERROR if the native-wifi header could not be
 * copied out of the msdu.
 */
static inline A_STATUS
ol_tx_encap_from_native_wifi (
    struct ol_txrx_vdev_t *vdev,
    struct ol_tx_desc_t *tx_desc,
    adf_nbuf_t msdu,
    struct ol_txrx_msdu_info_t *tx_msdu_info
    )
{
    /* Scratch large enough for the biggest 802.11 header variant. */
    u_int8_t localbuf[sizeof(struct ieee80211_qosframe_htc_addr4)];
    struct ieee80211_frame *wh;
    u_int8_t hdsize, new_hdsize;
    struct ieee80211_qoscntl *qos_cntl;
    struct ol_txrx_peer_t *peer;

    if (tx_msdu_info->htt.info.frame_type != htt_frm_type_data) {
        return A_OK;
    }

    /*
     * For unicast, the peer should not be NULL.
     * For multicast, the peer is the AP.
     * NOTE(review): peer is dereferenced below without a NULL check —
     * confirm callers guarantee it is non-NULL for data frames.
     */
    peer = tx_msdu_info->peer;

    if (tx_msdu_info->htt.info.is_unicast && peer->qos_capable) {
        if (A_OK != ol_tx_copy_native_wifi_header(msdu, &hdsize, localbuf))
            return A_ERROR;
        wh = (struct ieee80211_frame*)localbuf;

        /* Append the QoS control field directly after the copied header. */
        qos_cntl = (struct ieee80211_qoscntl*)(localbuf + hdsize);
        qos_cntl->i_qos[0] =
            tx_msdu_info->htt.info.ext_tid & IEEE80211_QOS_TID;
#if 0
        if ( wmmParam[ac].wmep_noackPolicy ) {
            qos_cntl->i_qos[0] |= 1 << IEEE80211_QOS_ACKPOLICY_S;
        }
#endif
        qos_cntl->i_qos[1] = 0;
        wh->i_fc[0] |= IEEE80211_FC0_SUBTYPE_QOS;

        /* Account for the added QoS field in the header size. */
        new_hdsize = hdsize + sizeof(struct ieee80211_qosframe)
            - sizeof(struct ieee80211_frame);
        /* (add HT control field here if ever needed) */

        /* Copy the rebuilt header into the HTT tx descriptor and strip
         * the original header from the msdu. */
        adf_os_mem_copy(
            (void*)htt_tx_desc_mpdu_header(tx_desc->htt_tx_desc, new_hdsize),
            localbuf, new_hdsize);
        adf_nbuf_pull_head(msdu, hdsize);
        tx_msdu_info->htt.info.l3_hdr_offset = new_hdsize;
        tx_desc->orig_l2_hdr_bytes = hdsize;
    }

    /* Set the Protected Frame bit in the MAC header when sw protected-
     * frame processing is on and encryption is requested. */
    if (vdev->pdev->sw_pf_proc_enable && tx_msdu_info->htt.action.do_encrypt) {
        if (tx_desc->orig_l2_hdr_bytes) {
            /* Header was already moved into the descriptor above. */
            wh = (struct ieee80211_frame*)htt_tx_desc_mpdu_header(
                tx_desc->htt_tx_desc, tx_msdu_info->htt.info.l3_hdr_offset);
        } else {
            /* Header still on the msdu: copy it into the descriptor now. */
            if (A_OK != ol_tx_copy_native_wifi_header(msdu, &hdsize, localbuf))
                return A_ERROR;
            wh = (struct ieee80211_frame*)htt_tx_desc_mpdu_header(
                tx_desc->htt_tx_desc, hdsize);
            adf_os_mem_copy((void*)wh, localbuf, hdsize);
            adf_nbuf_pull_head(msdu, hdsize);
            tx_msdu_info->htt.info.l3_hdr_offset = hdsize;
            tx_desc->orig_l2_hdr_bytes = hdsize;
        }
        wh->i_fc[1] |= IEEE80211_FC1_WEP;
    }
    return A_OK;
}
/*
 * ol_rx_decap_to_8023() - convert a received frame to 802.3 (Ethernet)
 * framing.
 *
 * Removes the LLC/SNAP header when appropriate and makes room for (or
 * trims down to) an Ethernet header.  When @ethr_hdr is NULL the frame is
 * a normal MSDU (not an A-MSDU subframe) and the Ethernet addresses are
 * reconstructed from the saved 802.11 header in @info; otherwise the
 * caller-provided header is used.
 */
static inline void
ol_rx_decap_to_8023 (
    struct ol_txrx_vdev_t *vdev,
    adf_nbuf_t msdu,
    struct ol_rx_decap_info_t *info,
    struct ethernet_hdr_t *ethr_hdr)
{
    struct llc_snap_hdr_t *llc_hdr;
    u_int16_t ether_type;
    u_int16_t l2_hdr_space;   /* bytes of L2 header to strip from the msdu */
    struct ieee80211_frame_addr4 *wh;
    u_int8_t local_buf[ETHERNET_HDR_LEN];
    u_int8_t *buf;

    /*
     * Populate the Ethernet header:
     * - ethr_hdr == NULL: rx frame is 802.11 format (HW ft disabled);
     * - ethr_hdr != NULL: rx frame is a subframe of an A-MSDU.
     */
    buf = (u_int8_t *)adf_nbuf_data(msdu);
    llc_hdr = (struct llc_snap_hdr_t *)buf;
    ether_type = (llc_hdr->ethertype[0] << 8) | llc_hdr->ethertype[1];

    /* Remove the LLC/SNAP header if this is a SNAP frame that should be
     * collapsed to plain Ethernet II (llc_hdr becomes NULL when so). */
    l2_hdr_space = 0;
    if (IS_SNAP(llc_hdr)) {
        if (IS_BTEP(llc_hdr)) {
            /* bridge-tunnel encapsulation: remove llc */
            l2_hdr_space += sizeof(struct llc_snap_hdr_t);
            llc_hdr = NULL;
        } else if (IS_RFC1042(llc_hdr)) {
            /* RFC 1042, except AARP/IPX which keep their SNAP header. */
            if ( !(ether_type == ETHERTYPE_AARP ||
                   ether_type == ETHERTYPE_IPX) ) {
                /* remove llc */
                l2_hdr_space += sizeof(struct llc_snap_hdr_t);
                llc_hdr = NULL;
            }
        }
    }

    /* Adjust the buffer so exactly ETHERNET_HDR_LEN bytes of header
     * space precede the payload. */
    if (l2_hdr_space > ETHERNET_HDR_LEN) {
        buf = adf_nbuf_pull_head(msdu, l2_hdr_space - ETHERNET_HDR_LEN);
    } else if (l2_hdr_space < ETHERNET_HDR_LEN) {
        buf = adf_nbuf_push_head(msdu, ETHERNET_HDR_LEN - l2_hdr_space);
    }

    /* Normal msdu (non-subframe of A-MSDU): rebuild the Ethernet header
     * from the saved 802.11 MPDU header. */
    if (ethr_hdr == NULL) {
        TXRX_ASSERT2(info->hdr_len != 0);
        wh = (struct ieee80211_frame_addr4 *)info->hdr;
        ethr_hdr = (struct ethernet_hdr_t *)local_buf;
        /* Address mapping depends on the ToDS/FromDS direction bits. */
        switch (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) {
        case IEEE80211_FC1_DIR_NODS:
            adf_os_mem_copy(ethr_hdr->dest_addr, wh->i_addr1,
                            ETHERNET_ADDR_LEN);
            adf_os_mem_copy(ethr_hdr->src_addr, wh->i_addr2,
                            ETHERNET_ADDR_LEN);
            break;
        case IEEE80211_FC1_DIR_TODS:
            adf_os_mem_copy(ethr_hdr->dest_addr, wh->i_addr3,
                            ETHERNET_ADDR_LEN);
            adf_os_mem_copy(ethr_hdr->src_addr, wh->i_addr2,
                            ETHERNET_ADDR_LEN);
            break;
        case IEEE80211_FC1_DIR_FROMDS:
            adf_os_mem_copy(ethr_hdr->dest_addr, wh->i_addr1,
                            ETHERNET_ADDR_LEN);
            adf_os_mem_copy(ethr_hdr->src_addr, wh->i_addr3,
                            ETHERNET_ADDR_LEN);
            break;
        case IEEE80211_FC1_DIR_DSTODS:
            adf_os_mem_copy(ethr_hdr->dest_addr, wh->i_addr3,
                            ETHERNET_ADDR_LEN);
            adf_os_mem_copy(ethr_hdr->src_addr, wh->i_addr4,
                            ETHERNET_ADDR_LEN);
            break;
        }
    }

    if (llc_hdr == NULL) {
        /* LLC was stripped: write the ethertype into the new header. */
        ethr_hdr->ethertype[0] = (ether_type >> 8) & 0xff;
        ethr_hdr->ethertype[1] = (ether_type) & 0xff;
    }
    /* NOTE(review): the remainder of this function (copying ethr_hdr into
     * the msdu and the closing brace) appears truncated in this chunk of
     * the file. */
/*
 * ol_tx_encap_from_8023() - convert an 802.3 (Ethernet) tx frame into an
 * 802.11 data frame.
 *
 * Builds the 802.11 MAC header (address layout per vdev opmode), appends
 * a QoS control field for QoS-capable unicast peers, optionally sets the
 * Protected bit, and optionally prepends an LLC/SNAP header when software
 * LLC processing is enabled.  The rebuilt header is written into the HTT
 * tx descriptor and the original Ethernet header is pulled off the msdu.
 *
 * Returns A_OK on success, A_ERROR for unsupported opmodes (monitor).
 */
static inline A_STATUS
ol_tx_encap_from_8023 (
    struct ol_txrx_vdev_t *vdev,
    struct ol_tx_desc_t *tx_desc,
    adf_nbuf_t msdu,
    struct ol_txrx_msdu_info_t *tx_msdu_info
    )
{
    /* Scratch space for the largest 802.11 header plus LLC/SNAP. */
    u_int8_t localbuf[ sizeof(struct ieee80211_qosframe_htc_addr4) \
        + sizeof(struct llc_snap_hdr_t)];
    struct llc_snap_hdr_t *llc_hdr;
    struct ethernet_hdr_t *eth_hdr;
    struct ieee80211_frame *wh;
    u_int8_t hdsize, new_l2_hdsize, new_hdsize;
    struct ieee80211_qoscntl *qos_cntl;
    const u_int8_t ethernet_II_llc_snap_header_prefix[] = \
        { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
    struct ol_txrx_peer_t *peer;
    u_int16_t ether_type;

    if (tx_msdu_info->htt.info.frame_type != htt_frm_type_data)
        return A_OK;

    /*
     * For unicast, the peer should not be NULL.
     * For multicast, the peer is the AP.
     * NOTE(review): peer is dereferenced below without a NULL check —
     * confirm callers guarantee it is non-NULL for data frames.
     */
    peer = tx_msdu_info->peer;

    eth_hdr = (struct ethernet_hdr_t *)adf_nbuf_data(msdu);
    hdsize = sizeof(struct ethernet_hdr_t);
    wh = (struct ieee80211_frame *)localbuf;
    wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_DATA;
    *(u_int16_t *)wh->i_dur = 0;
    new_hdsize = 0;

    /* Address layout depends on the operating mode. */
    switch (vdev->opmode) {
    case wlan_op_mode_ap:
        /* DA, BSSID, SA */
        adf_os_mem_copy(wh->i_addr1, eth_hdr->dest_addr,
                        IEEE80211_ADDR_LEN);
        adf_os_mem_copy(wh->i_addr2, &vdev->mac_addr.raw,
                        IEEE80211_ADDR_LEN);
        adf_os_mem_copy(wh->i_addr3, eth_hdr->src_addr,
                        IEEE80211_ADDR_LEN);
        wh->i_fc[1] = IEEE80211_FC1_DIR_FROMDS;
        new_hdsize = sizeof(struct ieee80211_frame);
        break;
    case wlan_op_mode_ibss:
        /* DA, SA, BSSID */
        adf_os_mem_copy(wh->i_addr1, eth_hdr->dest_addr,
                        IEEE80211_ADDR_LEN);
        adf_os_mem_copy(wh->i_addr2, eth_hdr->src_addr,
                        IEEE80211_ADDR_LEN);
        /* need to check the bssid behaviour for IBSS vdev */
        adf_os_mem_copy(wh->i_addr3, &vdev->mac_addr.raw,
                        IEEE80211_ADDR_LEN);
        wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
        new_hdsize = sizeof(struct ieee80211_frame);
        break;
    case wlan_op_mode_sta:
        /* BSSID, SA, DA */
        adf_os_mem_copy(wh->i_addr1, &peer->mac_addr.raw,
                        IEEE80211_ADDR_LEN);
        adf_os_mem_copy(wh->i_addr2, eth_hdr->src_addr,
                        IEEE80211_ADDR_LEN);
        adf_os_mem_copy(wh->i_addr3, eth_hdr->dest_addr,
                        IEEE80211_ADDR_LEN);
        wh->i_fc[1] = IEEE80211_FC1_DIR_TODS;
        new_hdsize = sizeof(struct ieee80211_frame);
        break;
    case wlan_op_mode_monitor:
    default:
        return A_ERROR;
    }

    /* Add the QoS control field for QoS-capable unicast peers. */
    if (tx_msdu_info->htt.info.is_unicast && peer->qos_capable ) {
        qos_cntl = (struct ieee80211_qoscntl*)(localbuf + new_hdsize);
        qos_cntl->i_qos[0] =
            tx_msdu_info->htt.info.ext_tid & IEEE80211_QOS_TID;
        wh->i_fc[0] |= IEEE80211_FC0_SUBTYPE_QOS;
#if 0
        if ( wmmParam[ac].wmep_noackPolicy ) {
            qos_cntl->i_qos[0] |= 1 << IEEE80211_QOS_ACKPOLICY_S;
        }
#endif
        qos_cntl->i_qos[1] = 0;
        new_hdsize += sizeof(struct ieee80211_qoscntl);
        /* (add HT control field here if ever needed) */
    }

    /* Set Protected Frame bit in MAC header */
    if (vdev->pdev->sw_pf_proc_enable &&
        tx_msdu_info->htt.action.do_encrypt) {
        wh->i_fc[1] |= IEEE80211_FC1_WEP;
    }

    /* Remember where the 802.11 header ends before any LLC is added. */
    new_l2_hdsize = new_hdsize;

    /* Add an LLC/SNAP header if software LLC processing is enabled. */
    if (vdev->pdev->sw_tx_llc_proc_enable) {
        llc_hdr = (struct llc_snap_hdr_t *)(localbuf + new_hdsize);
        ether_type = (eth_hdr->ethertype[0]<<8) | (eth_hdr->ethertype[1]);
        if ( ether_type >= IEEE8023_MAX_LEN ) {
            /* Ethernet II frame: prepend RFC 1042 SNAP... */
            adf_os_mem_copy(llc_hdr,
                            ethernet_II_llc_snap_header_prefix,
                            sizeof(ethernet_II_llc_snap_header_prefix));
            if ( ether_type == ETHERTYPE_AARP ||
                 ether_type == ETHERTYPE_IPX) {
                /* ...except AARP/IPX, which use the bridge-tunnel
                 * encapsulation org code. */
                llc_hdr->org_code[2] = BTEP_SNAP_ORGCODE_2; // 0xf8: bridge tunnel header
            }
            llc_hdr->ethertype[0] = eth_hdr->ethertype[0];
            llc_hdr->ethertype[1] = eth_hdr->ethertype[1];
            new_hdsize += sizeof(struct llc_snap_hdr_t);
        } else {
            /* llc ready, and it's in payload pdu;
             * do we need to move to BD pdu? */
        }
    }

    /* Write the rebuilt header into the HTT tx descriptor and strip the
     * original Ethernet header from the msdu. */
    adf_os_mem_copy(
        (void*)htt_tx_desc_mpdu_header(tx_desc->htt_tx_desc,new_l2_hdsize),
        localbuf, new_hdsize);
    adf_nbuf_pull_head(msdu,hdsize);
    tx_msdu_info->htt.info.l3_hdr_offset = new_l2_hdsize;
    tx_desc->orig_l2_hdr_bytes = hdsize;
    return A_OK;
}
/*
 * Tx-completion callback from HIF: strip the HTC header, return the
 * buffer to the owning endpoint (or free it), and — when host credit
 * distribution is disabled — reschedule any endpoint with pending sends.
 */
hif_status_t HTCTxCompletionHandler(void *Context, adf_nbuf_t netbuf)
{
    HTC_TARGET *target = (HTC_TARGET *)Context;
    a_uint8_t *frame_data;
    a_uint32_t frame_len;
    HTC_FRAME_HDR *frame_hdr;
    a_uint8_t ep_id;
    HTC_ENDPOINT *endpoint;
#ifndef HTC_HOST_CREDIT_DIST
    a_int32_t ep_idx;
#endif

    /* Peek at the HTC header to learn which endpoint this send used. */
    adf_nbuf_peek_header(netbuf, &frame_data, &frame_len);
    frame_hdr = (HTC_FRAME_HDR *)frame_data;
    ep_id = frame_hdr->EndpointID;
    endpoint = &target->EndPoint[ep_id];

    if (ep_id == ENDPOINT0) {
        /* Control-endpoint buffers are simply released. */
        adf_nbuf_free(netbuf);
    } else {
        /* Remove the HTC header before handing the buffer back. */
        adf_nbuf_pull_head(netbuf, HTC_HDR_LENGTH);
        if (endpoint->EpCallBacks.EpTxComplete) {
            /* give the packet to the upper layer */
            endpoint->EpCallBacks.EpTxComplete(
                endpoint->EpCallBacks.pContext, netbuf, ep_id);
        } else {
            adf_nbuf_free(netbuf);
        }
    }

#ifndef HTC_HOST_CREDIT_DIST
    /* Check whether any pending buffers still need to be sent. */
    if (endpoint->UL_PipeID == 1) {
        if (HTCNeedReschedule(target, endpoint) == A_OK) {
            for (ep_idx = ENDPOINT_MAX - 1; ep_idx >= 0; ep_idx--) {
                endpoint = &target->EndPoint[ep_idx];
                if (HTCGetTxBufCnt(target, endpoint) > 0) {
                    HTCTrySend(target, NULL, ADF_NBUF_NULL, ADF_NBUF_NULL);
                    break;
                }
            }
        }
    }
#endif
    return HIF_OK;
}