Example #1
adf_nbuf_t RxSgToSingleNetbuf(HTC_TARGET *target)
{
    adf_nbuf_t skb;
    a_uint8_t *anbdata;
    a_uint8_t *anbdata_new;
    a_uint32_t anblen;
    adf_nbuf_t new_skb = NULL;
    a_uint32_t sg_queue_len;
    adf_nbuf_queue_t *rx_sg_queue = &target->RxSgQueue;

    sg_queue_len = adf_nbuf_queue_len(rx_sg_queue);

    if (sg_queue_len <= 1) {
        AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
            ("RxSgToSingleNetbuf: invalid sg queue len %u\n", sg_queue_len));
        goto _failed;
    }

    new_skb = adf_nbuf_alloc(target->ExpRxSgTotalLen, 0, 4, FALSE);
    if (new_skb == NULL) {
        AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
            ("RxSgToSingleNetbuf: can't allocate %u size netbuf\n",
                target->ExpRxSgTotalLen));
        goto _failed;
    }

    adf_nbuf_peek_header(new_skb, &anbdata_new, &anblen);

    skb = adf_nbuf_queue_remove(rx_sg_queue);
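    /* Coalesce: walk the scatter-gather queue, copying each fragment into
     * the single destination buffer and freeing the fragment as we go. */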
    do {
        adf_nbuf_peek_header(skb, &anbdata, &anblen);
        adf_os_mem_copy(anbdata_new, anbdata, adf_nbuf_len(skb));
        adf_nbuf_put_tail(new_skb, adf_nbuf_len(skb));
        anbdata_new += adf_nbuf_len(skb);
        adf_nbuf_free(skb);
        skb = adf_nbuf_queue_remove(rx_sg_queue);
    } while(skb != NULL);

    RESET_RX_SG_CONFIG(target);
    return new_skb;

_failed:

    while ((skb = adf_nbuf_queue_remove(rx_sg_queue)) != NULL) {
        adf_nbuf_free(skb);
    }

    RESET_RX_SG_CONFIG(target);
    return NULL;
}
Example #2
static void usb_hif_free_pipe_resources(HIF_USB_PIPE *pipe)
{
	HIF_URB_CONTEXT *urb_context;
	adf_nbuf_t nbuf;

	if (NULL == pipe->device) {
		/* nothing allocated for this pipe */
		AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("pipe->device is null\n"));
		return;
	}

	AR_DEBUG_PRINTF(ATH_DEBUG_TRC, (
			 "athusb: free resources lpipe:%d hpipe:0x%X urbs:%d avail:%d\n",
			 pipe->logical_pipe_num,
			 pipe->usb_pipe_handle, pipe->urb_alloc,
			 pipe->urb_cnt));

	if (pipe->urb_alloc != pipe->urb_cnt) {
		AR_DEBUG_PRINTF(ATH_DEBUG_ERR, (
				 "athusb: urb leak! lpipe:%d hpipe:0x%X urbs:%d avail:%d\n",
				 pipe->logical_pipe_num,
				 pipe->usb_pipe_handle, pipe->urb_alloc,
				 pipe->urb_cnt));
	}

	while (TRUE) {
		urb_context = usb_hif_alloc_urb_from_pipe(pipe);
		if (NULL == urb_context)
			break;

		if (urb_context->buf) {
			adf_nbuf_free(urb_context->buf);
			urb_context->buf = NULL;
		}

		if (htc_bundle_send) {
			while ((nbuf =
				skb_dequeue(&urb_context->comp_queue)) !=
			       NULL) {
				adf_nbuf_free(nbuf);
			}
		}

		usb_free_urb(urb_context->urb);
		urb_context->urb = NULL;
		adf_os_mem_free(urb_context);
	}

}
Example #3
/**
 * @brief Free the RX ring (S/W & H/W): dequeue all the SKBs
 *        and free them, starting from the head.
 * NOTE: the NULL terminator doesn't have an SKB.
 *
 * @param osdev
 * @param dma_q
 */
void    
pci_dma_deinit_rx(adf_os_device_t osdev, pci_dma_softc_t *dma_q) 
{
    a_uint32_t      i, num_desc;
    zdma_swdesc_t  *swdesc;
    adf_nbuf_t      buf;

    num_desc = dma_q->num_desc;
    swdesc = dma_q->sw_ring;

    for (i = 0; i < num_desc; i++, swdesc++) {
        
        pci_zdma_mark_notrdy(swdesc);

        adf_nbuf_unmap(osdev, swdesc->nbuf_map, ADF_OS_DMA_TO_DEVICE);
        
        buf = pci_dma_unlink_buf(osdev, swdesc);
        
        adf_os_assert(buf);
        
        adf_nbuf_free(buf);

        adf_nbuf_dmamap_destroy(osdev, swdesc->nbuf_map);
    }

    pci_dma_free_swdesc(osdev, dma_q, num_desc);
}
/* WMI event handler registration APIs */
int wmi_unified_get_event_handler_ix(wmi_unified_t wmi_handle,
					WMI_EVT_ID event_id)
{
	u_int32_t idx = 0;
	for (idx = 0; (idx < wmi_handle->max_event_idx &&
		idx < WMI_UNIFIED_MAX_EVENT); ++idx) {
		if (wmi_handle->event_id[idx] == event_id &&
			wmi_handle->event_handler[idx] != NULL ) {
			return idx;
		}
	}
	return  -1;
}

int wmi_unified_register_event_handler(wmi_unified_t wmi_handle,
                                       WMI_EVT_ID event_id,
				       wmi_unified_event_handler handler_func)
{
    u_int32_t idx = 0;

    if (wmi_unified_get_event_handler_ix(wmi_handle, event_id) != -1) {
        printk("%s : event handler already registered 0x%x\n",
                __func__, event_id);
        return -1;
    }
    if (wmi_handle->max_event_idx == WMI_UNIFIED_MAX_EVENT) {
        printk("%s : no more event handlers 0x%x\n",
                __func__, event_id);
        return -1;
    }
    idx = wmi_handle->max_event_idx;
    wmi_handle->event_handler[idx] = handler_func;
    wmi_handle->event_id[idx] = event_id;
    wmi_handle->max_event_idx++;

    return 0;
}

int wmi_unified_unregister_event_handler(wmi_unified_t wmi_handle,
                                       WMI_EVT_ID event_id)
{
    u_int32_t idx = 0;

    if ((idx = wmi_unified_get_event_handler_ix(wmi_handle, event_id)) == -1) {
        printk("%s : event handler is not registered: event id 0x%x\n",
                __func__, event_id);
        return -1;
    }
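    /* Swap-with-last removal: move the final registered handler into the
     * vacated slot so the handler array stays dense. */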
    wmi_handle->event_handler[idx] = NULL;
    wmi_handle->event_id[idx] = 0;
    --wmi_handle->max_event_idx;
    wmi_handle->event_handler[idx] =
        wmi_handle->event_handler[wmi_handle->max_event_idx];
    wmi_handle->event_id[idx] =
        wmi_handle->event_id[wmi_handle->max_event_idx];
    return 0;
}
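
A minimal usage sketch of the registration API above; the handler name is an
illustrative assumption, and its signature mirrors the call made in
wmi_unified_event_rx below:

static int example_ready_handler(void *scn_handle, u_int8_t *event, u_int16_t len)
{
    /* parse the WMI_READY event payload here */
    return 0;
}

/* during attach ... */
wmi_unified_register_event_handler(wmi_handle, WMI_READY_EVENTID, example_ready_handler);
/* ... and during detach */
wmi_unified_unregister_event_handler(wmi_handle, WMI_READY_EVENTID);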

#if 0 /* currently not used */
static int wmi_unified_event_rx(struct wmi_unified *wmi_handle,
				wmi_buf_t evt_buf)
{
	u_int32_t id;
	u_int8_t *event;
	u_int16_t len;
	int status = -1;
	u_int32_t idx = 0;

	ASSERT(evt_buf != NULL);

	id = WMI_GET_FIELD(adf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);

	if (adf_nbuf_pull_head(evt_buf, sizeof(WMI_CMD_HDR)) == NULL)
		goto end;

	idx = wmi_unified_get_event_handler_ix(wmi_handle, id);
	if (idx == -1) {
		pr_err("%s : event handler is not registered: event id: 0x%x\n",
		       __func__, id);
		goto end;
	}

	event = adf_nbuf_data(evt_buf);
	len = adf_nbuf_len(evt_buf);

	/* Call the WMI registered event handler */
	status = wmi_handle->event_handler[idx](wmi_handle->scn_handle,
						event, len);

end:
	adf_nbuf_free(evt_buf);
	return status;
}
#endif
Example #5
/*
 * Terminate continuous transmit operation.
 */
static void
tx99_stop(struct ath_softc *sc, int flag)
{
    struct ath_tx99 *tx99 = sc->sc_tx99;
    struct ieee80211com *ic = (struct ieee80211com *)sc->sc_ieee;
#ifdef ATH_SUPPORT_HTC
    struct ath_hal *ah = sc->sc_ah;
#else
    adf_nbuf_t  skb = tx99->skb;
#endif
    if (tx99->tx99_state == 0)
        return;
#ifdef ATH_SUPPORT_HTC
    ah_tx99_stop(ah);
    ic->ic_reset_start(ic,0);
    ic->ic_reset(ic);
    ic->ic_reset_end(ic,0);
#else
    ath_tx99_tgt_stop(sc);
    if (flag == 0) {
        ic->ic_reset_start(ic,0);
        ic->ic_reset(ic);
        ic->ic_reset_end(ic,0);
    } else if (flag == 1) {
        ic->ic_reset_start(ic,0);
        ic->ic_reset(ic);
    } else if (flag == 2) {
        ic->ic_reset_end(ic,0);
    }
    adf_nbuf_free(skb);
#endif
    tx99->tx99_state = 0;
    adf_os_print("%s: continuous transmit stopped\n", __FUNCTION__);
}
Example #6
static void DestroyHTCTxCtrlPacket(HTC_PACKET *pPacket)
{
    adf_nbuf_t netbuf;
    netbuf = (adf_nbuf_t)GET_HTC_PACKET_NET_BUF_CONTEXT(pPacket);
    AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("free ctrl netbuf :0x%p \n", netbuf));
    if (netbuf != NULL) {
        adf_nbuf_free(netbuf);
    }

    A_FREE(pPacket);
}
Example #7
static void usb_hif_cleanup_recv_urb(HIF_URB_CONTEXT *urb_context)
{
	AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("+%s\n", __func__));

	if (urb_context->buf != NULL) {
		adf_nbuf_free(urb_context->buf);
		urb_context->buf = NULL;
	}

	usb_hif_free_urb_to_pipe(urb_context->pipe, urb_context);
	AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("-%s\n", __func__));
}
Example #8
hif_status_t
fwd_recv(void *context, adf_nbuf_t nbuf, a_uint8_t epid)
{
    fwd_softc_t *sc = (fwd_softc_t *)context;
    a_uint8_t *pld;
    a_uint32_t plen, rsp, offset;
    fwd_rsp_t *h; 

    adf_nbuf_peek_header(nbuf, &pld, &plen);

    h       = (fwd_rsp_t *)pld;
    rsp     = adf_os_ntohl(h->rsp);
    offset  = adf_os_ntohl(h->offset);
    
    /*adf_os_timer_cancel(&sc->tmr);*/

    switch (rsp) {

    case FWD_RSP_ACK:
        if (offset == sc->offset) {
            /* adf_os_printk("ACK for %#x\n", offset); */
            adf_os_print(".");
            sc->offset += fwd_chunk_len(sc);
            fwd_send_next(sc);
        }
        break;

    case FWD_RSP_SUCCESS:
        adf_os_print("done!\n");

        hif_boot_done(sc->hif_handle);
        break;

    case FWD_RSP_FAILED:
        if (sc->ntries < FWD_MAX_TRIES)
            fwd_start_upload(sc);
        else
            adf_os_print("FWD: Error: Max retries exceeded\n");
        break;

    default:
        adf_os_assert(0);
    }

    adf_nbuf_free(nbuf);

    return A_OK;
}
void
wmi_unified_detach(struct wmi_unified *wmi_handle)
{
    wmi_buf_t buf;

    if (wmi_handle == NULL)
        return;

    vos_flush_work(&wmi_handle->rx_event_work);
    adf_os_spin_lock_bh(&wmi_handle->eventq_lock);
    buf = adf_nbuf_queue_remove(&wmi_handle->event_queue);
    while (buf) {
        adf_nbuf_free(buf);
        buf = adf_nbuf_queue_remove(&wmi_handle->event_queue);
    }
    adf_os_spin_unlock_bh(&wmi_handle->eventq_lock);
    OS_FREE(wmi_handle);
}
/**
 * wmi_unified_remove_work() - detach for WMI work
 * @wmi_handle: handle to WMI
 *
 * A function that does not fully detach WMI, but just remove work
 * queue items associated with it. This is used to make sure that
 * before any other processing code that may destroy related contexts
 * (HTC, etc), work queue processing on WMI has already been stopped.
 *
 * Return: void.
 */
void
wmi_unified_remove_work(struct wmi_unified* wmi_handle)
{
	wmi_buf_t buf;

	VOS_TRACE( VOS_MODULE_ID_WDA, VOS_TRACE_LEVEL_INFO,
		"Enter: %s", __func__);
	vos_flush_work(&wmi_handle->rx_event_work);
	adf_os_spin_lock_bh(&wmi_handle->eventq_lock);
	buf = adf_nbuf_queue_remove(&wmi_handle->event_queue);
	while (buf) {
		adf_nbuf_free(buf);
		buf = adf_nbuf_queue_remove(&wmi_handle->event_queue);
	}
	adf_os_spin_unlock_bh(&wmi_handle->eventq_lock);
	VOS_TRACE( VOS_MODULE_ID_WDA, VOS_TRACE_LEVEL_INFO,
		"Done: %s", __func__);
}
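
The kernel-doc above implies an ordering contract. A minimal sketch of a
caller honoring it is below; the wrapper name and the exact neighboring
teardown calls are illustrative assumptions, not taken from the examples:

/* Hypothetical teardown ordering: stop WMI work-queue processing first,
 * then tear down the contexts that the work might still reference. */
static void example_driver_teardown(struct wmi_unified *wmi_handle,
                                    HTC_HANDLE htc_handle)
{
    wmi_unified_remove_work(wmi_handle); /* flush rx_event_work, drain event_queue */
    HTCStop(htc_handle);                 /* safe: no WMI work can reach HTC now */
    wmi_unified_detach(wmi_handle);      /* finally free the WMI handle itself */
}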
void epping_tx_dup_pkt(epping_adapter_t *pAdapter,
                       HTC_ENDPOINT_ID eid, adf_nbuf_t skb)
{
   struct epping_cookie * cookie = NULL;
   int skb_len, ret;
   adf_nbuf_t new_skb;

   cookie = epping_alloc_cookie(pAdapter->pEpping_ctx);
   if (cookie == NULL) {
      EPPING_LOG(VOS_TRACE_LEVEL_FATAL,
         "%s: epping_alloc_cookie returns no resource\n", __func__);
      return;
   }
   new_skb = adf_nbuf_copy(skb);
   if (!new_skb) {
      EPPING_LOG(VOS_TRACE_LEVEL_FATAL,
         "%s: adf_nbuf_copy returns no resource\n", __func__);
      epping_free_cookie(pAdapter->pEpping_ctx, cookie);
      return;
   }
   SET_HTC_PACKET_INFO_TX(&cookie->HtcPkt,
      cookie, adf_nbuf_data(new_skb), adf_nbuf_len(new_skb), eid, 0);
   SET_HTC_PACKET_NET_BUF_CONTEXT(&cookie->HtcPkt, new_skb);
   skb_len = (int)adf_nbuf_len(new_skb);
   /* send the packet */
   ret = HTCSendPkt(pAdapter->pEpping_ctx->HTCHandle, &cookie->HtcPkt);
   if (ret != A_OK) {
      EPPING_LOG(VOS_TRACE_LEVEL_FATAL,
         "%s: HTCSendPkt failed, ret = %d\n", __func__, ret);
      epping_free_cookie(pAdapter->pEpping_ctx, cookie);
      adf_nbuf_free(new_skb);
      return;
   }
   pAdapter->stats.tx_bytes += skb_len;
   ++pAdapter->stats.tx_packets;
   if (((pAdapter->stats.tx_packets +
         pAdapter->stats.tx_dropped) % EPPING_STATS_LOG_COUNT) == 0 &&
       (pAdapter->stats.tx_packets || pAdapter->stats.tx_dropped)) {
       epping_log_stats(pAdapter, __func__);
   }
}
Example #12
/* stop HTC communications, i.e. stop interrupt reception, and flush all queued buffers */
void HTCStop(HTC_HANDLE HTCHandle)
{
    HTC_TARGET    *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
    int           i;
    HTC_ENDPOINT  *pEndpoint;
#ifdef RX_SG_SUPPORT
    adf_nbuf_t netbuf;
    adf_nbuf_queue_t *rx_sg_queue = &target->RxSgQueue;
#endif

    AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("+HTCStop \n"));

        /* cleanup endpoints */
    for (i = 0; i < ENDPOINT_MAX; i++) {
        pEndpoint = &target->EndPoint[i];
        HTCFlushRxHoldQueue(target,pEndpoint);
        HTCFlushEndpointTX(target,pEndpoint,HTC_TX_PACKET_TAG_ALL);
    }

    /* Note: HTCFlushEndpointTX for all endpoints should be called before
     * HIFStop - otherwise HTCTxCompletionHandler, called from
     * hif_send_buffer_cleanup_on_pipe for residual tx frames in the HIF
     * layer, might queue the packet again to the HIF layer - which could
     * cause a tx buffer leak
     */

    HIFStop(target->hif_dev);

#ifdef RX_SG_SUPPORT
    LOCK_HTC_RX(target);
    while ((netbuf = adf_nbuf_queue_remove(rx_sg_queue)) != NULL) {
        adf_nbuf_free(netbuf);
    }
    RESET_RX_SG_CONFIG(target);
    UNLOCK_HTC_RX(target);
#endif

    ResetEndpointStates(target);

    AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("-HTCStop \n"));
}
Example #13
/* cleanup the HTC instance */
static void HTCCleanup(HTC_TARGET *target)
{
    HTC_PACKET *pPacket;
    //adf_nbuf_t netbuf;

    if (target->hif_dev != NULL) {
        HIFDetachHTC(target->hif_dev);
        target->hif_dev = NULL;
    }

    while (TRUE) {
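        /* Drain the free HTC packet-container pool: allocate containers
         * until the pool is empty, freeing each one. */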
        pPacket = AllocateHTCPacketContainer(target);
        if (NULL == pPacket) {
            break;
        }
        A_FREE(pPacket);
    }

#ifdef TODO_FIXME
    while (TRUE) {
        pPacket = HTCAllocControlTxPacket(target);
        if (NULL == pPacket) {
            break;
        }
        netbuf = (adf_nbuf_t)GET_HTC_PACKET_NET_BUF_CONTEXT(pPacket);
        if (netbuf != NULL) {
            adf_nbuf_free(netbuf);
        }

        A_FREE(pPacket);
    }
#endif

    adf_os_spinlock_destroy(&target->HTCLock);
    adf_os_spinlock_destroy(&target->HTCRxLock);
    adf_os_spinlock_destroy(&target->HTCTxLock);

    /* free our instance */
    A_FREE(target);
}
void epping_unregister_tx_copier(HTC_ENDPOINT_ID eid, epping_context_t *pEpping_ctx)
{
   epping_poll_t *epping_poll;

   if (eid < 0 || eid >= EPPING_MAX_NUM_EPIDS) {
      EPPING_LOG(VOS_TRACE_LEVEL_FATAL, "%s: invalid eid = %d",
         __func__, eid);
      return;
   }

   epping_poll = &pEpping_ctx->epping_poll[eid];

   epping_poll->done = true;
   if (epping_poll->inited) {
      epping_tx_copier_schedule(pEpping_ctx, eid, NULL);
      msleep(EPPING_KTID_KILL_WAIT_TIME_MS);
   }
   if (epping_poll->skb)
      adf_nbuf_free(epping_poll->skb);
   OS_MEMZERO(epping_poll, sizeof(epping_poll_t));
   EPPING_LOG(VOS_TRACE_LEVEL_FATAL, "%s: eid = %d",
               __func__, eid);
}
void wmi_htc_tx_complete(void *ctx, HTC_PACKET *htc_pkt)
{
	struct wmi_unified *wmi_handle = (struct wmi_unified *)ctx;
	wmi_buf_t wmi_cmd_buf = GET_HTC_PACKET_NET_BUF_CONTEXT(htc_pkt);
#ifdef WMI_INTERFACE_EVENT_LOGGING
	u_int32_t cmd_id;
#endif

	ASSERT(wmi_cmd_buf);
#ifdef WMI_INTERFACE_EVENT_LOGGING
	cmd_id = WMI_GET_FIELD(adf_nbuf_data(wmi_cmd_buf),
		WMI_CMD_HDR, COMMANDID);
	adf_os_spin_lock_bh(&wmi_handle->wmi_record_lock);
	/* Record 16 bytes of WMI cmd tx complete data
	   - exclude TLV and WMI headers */
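	/* (the "+ 2" 32-bit words below == 8 bytes skipped for those headers) */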
	WMI_COMMAND_TX_CMP_RECORD(cmd_id,
		((u_int32_t *)adf_nbuf_data(wmi_cmd_buf) + 2));
	adf_os_spin_unlock_bh(&wmi_handle->wmi_record_lock);
#endif
	adf_nbuf_free(wmi_cmd_buf);
	adf_os_mem_free(htc_pkt);
	adf_os_atomic_dec(&wmi_handle->pending_cmds);
}
Example #16
int htt_tx_ipa_uc_detach(struct htt_pdev_t *pdev)
{
   u_int16_t idx;

   if (pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr) {
      adf_os_mem_free_consistent(pdev->osdev,
                   4,
                   pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr,
                   pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr,
                   adf_os_get_dma_mem_context(
                      (&pdev->ipa_uc_tx_rsc.tx_ce_idx), memctx));
   }

   if (pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr) {
     adf_os_mem_free_consistent(pdev->osdev,
                   ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev) * 4,
                   pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr,
                   pdev->ipa_uc_tx_rsc.tx_comp_base.paddr,
                   adf_os_get_dma_mem_context(
                      (&pdev->ipa_uc_tx_rsc.tx_comp_base), memctx));
   }

   /* Free each single buffer */
   for(idx = 0; idx < pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt; idx++) {
      if (pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[idx]) {
         adf_nbuf_unmap(pdev->osdev,
            pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[idx],
            ADF_OS_DMA_FROM_DEVICE);
         adf_nbuf_free(pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[idx]);
      }
   }

   /* Free storage */
   adf_os_mem_free(pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg);

   return 0;
}
Example #17
/*
 * Start continuous transmit operation.
 */
static int
tx99_start(struct ath_softc *sc)
{
    static struct ath_tx99_tgt tx99_tgt;
    struct ath_tx99 *tx99 = sc->sc_tx99;
    struct ath_hal *ah = sc->sc_ah;
    int is2GHz = 0;
    adf_nbuf_t 	skb;
    HAL_CHANNEL *c = NULL;
    if(tx99->recv)
    {
        ath_hal_phydisable(ah);
        //adf_os_print("%s: device %s tx99 continuous receive mode\n", __FUNCTION__, adf_net_ifname(sc->sc_ic->ic_dev));
        return 0;
    }
    /* check tx99 running state */
    if(tx99->tx99_state){		/* already active */
        adf_os_print("%s: already running\n", __FUNCTION__);
        return 0;
    }
    /* set tx99 state active */
    tx99->tx99_state = 1;
    /* allocate diag packet buffer */
    tx99->skb = ath_alloc_skb_tx99(sc->sc_osdev, 2000, 32);
    if (tx99->skb == NULL) {
        adf_os_print("%s: unable to allocate skb\n", __FUNCTION__);
        tx99->tx99_state = 0;
        return -ENOMEM;
    }
    skb = tx99->skb;
    /* drain all tx queue */
    ath_drain_txq(sc);
    /*
     * Setup channel using configured frequency+flags.
     */
    if (tx99_channel_setup(sc) != EOK) {
        adf_os_print("%s: unable to setup operation\n", __FUNCTION__);
        tx99->tx99_state = 0;
        adf_nbuf_free(skb);
        return -EIO;
    }
    /*disable desc tpc */
    ath_hal_settpc(ah,0);
    /*
     * Setup tx power limit
     */ 
    c = &sc->sc_curchan; 
    is2GHz = TX99_IS_CHAN_2GHZ(c);
    ath_hal_settxpowlimit(ah,tx99->txpower,0,is2GHz);
    /* set tx99 enable */
    tx99_tgt.txrate = adf_os_htonl(tx99->txrate);
    tx99_tgt.txpower = adf_os_htonl(tx99->txpower);
    tx99_tgt.txchain = adf_os_htonl(tx99->chanmask);
    tx99_tgt.htmode = adf_os_htonl(tx99->htmode);
    tx99_tgt.type = adf_os_htonl(tx99->type);
    tx99_tgt.chtype = adf_os_htonl(TX99_IS_CHAN_5GHZ(c));
    tx99_tgt.txantenna = adf_os_htonl(0);
    if( tx99->txpower < 60 ) /* only update channel pwr if not default MAX power */
        ath_hal_tx99_channel_pwr_update(ah, c, tx99->txpower);
#ifdef ATH_SUPPORT_HTC
    ah_tx99_start(ah, (u_int8_t *)&tx99_tgt);
    /* send diag packet */
    {
        struct ath_txep *txep;
        A_STATUS ret;

        /* append 1500 bytes of payload to the preallocated diag buffer
         * (the outer skb, i.e. tx99->skb) */
        adf_nbuf_put_tail(skb, 1500);
        txep = sc->sc_ac2ep[WME_AC_VO];
        /* send packet to target */
        ret = HTCSendPkt(sc->sc_host_htc_handle, NULL, skb, sc->sc_data_VO_ep);
        if (ret) {
            adf_os_print("%s: tx99 fail\n", __FUNCTION__);
            tx99_stop(sc, 0);
        }
    }
#else
    adf_nbuf_put_tail(skb, 1500);
    if (ath_tx99_tgt_start(sc, tx99_tgt.chtype) != EOK) {
        adf_os_print("%s: tx99 fail \n", __FUNCTION__);
        tx99_stop(sc, 0);
    }
#endif
    /* wait a while to make sure target setting ready */
    adf_os_mdelay(50);
    adf_os_print("%s: continuous transmit started\n", __FUNCTION__);
    return 0;
}
Example #18
hif_status_t
fwd_txdone(void *context, adf_nbuf_t nbuf)
{
  adf_nbuf_free(nbuf);  
  return HIF_OK;
}
Example #19
A_STATUS HIFSend(HIF_HANDLE hHIF, a_uint8_t PipeID, adf_nbuf_t hdr_buf, adf_nbuf_t buf)
{
    A_STATUS status = A_OK;
    HIF_DEVICE_USB *macp = (HIF_DEVICE_USB *)hHIF;
    adf_nbuf_t sendBuf;
    a_uint8_t *data = NULL;
    a_uint32_t len = 0;

    /* If necessary, link hdr_buf & buf */
    if (hdr_buf != NULL) 
    {
        adf_nbuf_cat(hdr_buf, buf);
        sendBuf = hdr_buf;
    }
    else
    {
        sendBuf = buf;
    }

    adf_nbuf_peek_header(sendBuf, &data, &len);
    
    if ( PipeID == HIF_USB_PIPE_COMMAND ) 
    {
#ifdef ATH_WINHTC
        a_uint8_t *data = NULL;
        a_uint32_t len;
        adf_nbuf_peek_header(sendBuf, &data, &len);

        status = OS_Usb_SubmitCmdOutUrb(macp->os_hdl, data, len, (void*)sendBuf);        
#else        
        status = ((osdev_t)macp->os_hdl)->os_usb_submitCmdOutUrb(macp->os_hdl, data, len, (void*)sendBuf);
#endif        
    }
    else if ( PipeID == HIF_USB_PIPE_TX )
    {
#ifdef ATH_WINHTC
        a_uint8_t *data = NULL;
        a_uint32_t len;

        adf_nbuf_peek_header(sendBuf, &data, &len);

        status = OS_Usb_SubmitTxUrb(macp->os_hdl, data, len, (void*)sendBuf);
#else        
        status = ((osdev_t)macp->os_hdl)->os_usb_submitTxUrb(macp->os_hdl, data, len, (void*)sendBuf, &(((osdev_t)macp->os_hdl)->TxPipe));
#endif        
    }
    else if ( PipeID == HIF_USB_PIPE_HP_TX )
    {
#ifdef ATH_WINHTC
        a_uint8_t *data = NULL;
        a_uint32_t len;

        adf_nbuf_peek_header(sendBuf, &data, &len);

        status = OS_Usb_SubmitTxUrb(macp->os_hdl, data, len, (void*)sendBuf);
#else        
        status = ((osdev_t)macp->os_hdl)->os_usb_submitTxUrb(macp->os_hdl, data, len, (void*)sendBuf, &(((osdev_t)macp->os_hdl)->HPTxPipe));        
#endif        
    }
    else
    {
        adf_os_print("Unknown pipe %d\n", PipeID);
        adf_nbuf_free(sendBuf);
    }

    return status;
}
LOCAL void _buf_pool_dynamic_free_buf(pool_handle_t handle, BUF_POOL_ID poolId, adf_nbuf_t buf)
{
    //BUF_POOL_DYNAMIC_CONTEXT *ctx = (BUF_POOL_DYNAMIC_CONTEXT *)handle;
        
    adf_nbuf_free(buf);
}
/* Generic target-to-host msg/event handler for low-priority messages.
 * Low-priority messages are handled in a separate handler called from
 * this function, so that the most likely success paths (Rx and Tx
 * completion) keep a small code footprint.
 */
void
htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
{
    struct htt_pdev_t *pdev = (struct htt_pdev_t *) context;
    adf_nbuf_t htt_t2h_msg = (adf_nbuf_t) pkt->pPktContext;
    u_int32_t *msg_word;
    enum htt_t2h_msg_type msg_type;

    /* check for successful message reception */
    if (pkt->Status != A_OK) {
        if (pkt->Status != A_ECANCELED) {
            pdev->stats.htc_err_cnt++;
        }
        adf_nbuf_free(htt_t2h_msg);
        return;
    }

#ifdef HTT_RX_RESTORE
    if (adf_os_unlikely(pdev->rx_ring.rx_reset)) {
        adf_os_print("rx restore ..\n");
        adf_nbuf_free(htt_t2h_msg);
        return;
    }
#endif

    /* confirm alignment */
    HTT_ASSERT3((((unsigned long) adf_nbuf_data(htt_t2h_msg)) & 0x3) == 0);

    msg_word = (u_int32_t *) adf_nbuf_data(htt_t2h_msg);
    msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
    switch (msg_type) {
    case HTT_T2H_MSG_TYPE_RX_IND:
        {
            unsigned num_mpdu_ranges;
            unsigned num_msdu_bytes;
            u_int16_t peer_id;
            u_int8_t tid;

            if (adf_os_unlikely(pdev->cfg.is_full_reorder_offload)) {
                adf_os_print("HTT_T2H_MSG_TYPE_RX_IND not supported with full "
                             "reorder offload\n");
                break;
            }
            peer_id = HTT_RX_IND_PEER_ID_GET(*msg_word);
            tid = HTT_RX_IND_EXT_TID_GET(*msg_word);

            if (tid >= OL_TXRX_NUM_EXT_TIDS) {
                adf_os_print("HTT_T2H_MSG_TYPE_RX_IND, invalid tid %d\n", tid);
                break;
            }

            num_msdu_bytes = HTT_RX_IND_FW_RX_DESC_BYTES_GET(
                *(msg_word + 2 + HTT_RX_PPDU_DESC_SIZE32));
            /*
             * 1 word for the message header,
             * HTT_RX_PPDU_DESC_SIZE32 words for the FW rx PPDU desc
             * 1 word to specify the number of MSDU bytes,
             * 1 word for every 4 MSDU bytes (round up),
             * 1 word for the MPDU range header
             */
            pdev->rx_mpdu_range_offset_words =
                (HTT_RX_IND_HDR_BYTES + num_msdu_bytes + 3) >> 2;
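            /*
             * Worked example (illustrative numbers only): if
             * HTT_RX_IND_HDR_BYTES were 24 and num_msdu_bytes were 6,
             * the offset would be (24 + 6 + 3) >> 2 = 8 words, i.e.
             * the byte total rounded up to whole 32-bit words.
             */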
            num_mpdu_ranges = HTT_RX_IND_NUM_MPDU_RANGES_GET(*(msg_word + 1));
            pdev->rx_ind_msdu_byte_idx = 0;

            if (pdev->cfg.is_high_latency) {
                /*
                 * TODO: remove copy after stopping reuse skb on HIF layer
                 * because SDIO HIF may reuse skb before upper layer release it
                 */
                ol_rx_indication_handler(
                    pdev->txrx_pdev, htt_t2h_msg, peer_id, tid,
                    num_mpdu_ranges);

                return;
            } else {
                ol_rx_indication_handler(
                    pdev->txrx_pdev, htt_t2h_msg, peer_id, tid,
                    num_mpdu_ranges);
            }
            break;
        }
    case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
        {
            int num_msdus;
            enum htt_tx_status status;

            /* status - no enum translation needed */
            status = HTT_TX_COMPL_IND_STATUS_GET(*msg_word);
            num_msdus = HTT_TX_COMPL_IND_NUM_GET(*msg_word);
            if (num_msdus & 0x1) {
                struct htt_tx_compl_ind_base *compl = (void *)msg_word;

                /*
                 * Host CPU endianness can be different from FW CPU. This
                 * can result in even and odd MSDU IDs being switched. If
                 * this happens, copy the switched final odd MSDU ID from
                 * location payload[size], to location payload[size-1],
                 * where the message handler function expects to find it
                 */
                if (compl->payload[num_msdus] != HTT_TX_COMPL_INV_MSDU_ID) {
                    compl->payload[num_msdus - 1] =
                        compl->payload[num_msdus];
                }
            }
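            /*
             * Illustration (assumes 16-bit MSDU IDs packed two per
             * 32-bit word): with num_msdus = 3 the final word carries
             * one valid ID plus an HTT_TX_COMPL_INV_MSDU_ID pad. A
             * cross-endian halfword swap leaves that valid ID in
             * payload[3], so it is copied back to payload[2] where
             * the handler expects it.
             */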
            if (pdev->cfg.is_high_latency) {
                ol_tx_target_credit_update(
                    pdev->txrx_pdev, num_msdus /* 1 credit per MSDU */);
            }
            ol_tx_completion_handler(
                pdev->txrx_pdev, num_msdus, status, msg_word + 1);
            HTT_TX_SCHED(pdev);
            break;
        }
    case HTT_T2H_MSG_TYPE_RX_PN_IND:
        {
            u_int16_t peer_id;
            u_int8_t tid, pn_ie_cnt, *pn_ie=NULL;
            int seq_num_start, seq_num_end;

            /* first dword */
            peer_id = HTT_RX_PN_IND_PEER_ID_GET(*msg_word);
            tid = HTT_RX_PN_IND_EXT_TID_GET(*msg_word);

            msg_word++;
            /* second dword */
            seq_num_start = HTT_RX_PN_IND_SEQ_NUM_START_GET(*msg_word);
            seq_num_end = HTT_RX_PN_IND_SEQ_NUM_END_GET(*msg_word);
            pn_ie_cnt = HTT_RX_PN_IND_PN_IE_CNT_GET(*msg_word);

            msg_word++;
            /* third dword */
            if (pn_ie_cnt) {
                pn_ie = (u_int8_t *)msg_word;
            }

            ol_rx_pn_ind_handler(
                pdev->txrx_pdev, peer_id, tid, seq_num_start, seq_num_end,
                pn_ie_cnt, pn_ie);

            break;
        }
    case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
        {
            int num_msdus;

            num_msdus = HTT_TX_COMPL_IND_NUM_GET(*msg_word);
            if (num_msdus & 0x1) {
                struct htt_tx_compl_ind_base *compl = (void *)msg_word;

                /*
                 * Host CPU endianness can be different from FW CPU. This
                 * can result in even and odd MSDU IDs being switched. If
                 * this happens, copy the switched final odd MSDU ID from
                 * location payload[size], to location payload[size-1],
                 * where the message handler function expects to find it
                 */
                if (compl->payload[num_msdus] != HTT_TX_COMPL_INV_MSDU_ID) {
                    compl->payload[num_msdus - 1] =
                        compl->payload[num_msdus];
                }
            }
            ol_tx_inspect_handler(pdev->txrx_pdev, num_msdus, msg_word + 1);
            HTT_TX_SCHED(pdev);
            break;
        }
    case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND:
        {
            u_int16_t peer_id;
            u_int8_t tid;
            u_int8_t offload_ind, frag_ind;

            if (adf_os_unlikely(!pdev->cfg.is_full_reorder_offload)) {
                adf_os_print("HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND not supported"
                             " when full reorder offload is disabled\n");
                break;
            }

            if (adf_os_unlikely(pdev->cfg.is_high_latency)) {
                adf_os_print("HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND not supported"
                             " on high latency\n");
                break;
            }

            peer_id = HTT_RX_IN_ORD_PADDR_IND_PEER_ID_GET(*msg_word);
            tid = HTT_RX_IN_ORD_PADDR_IND_EXT_TID_GET(*msg_word);
            offload_ind = HTT_RX_IN_ORD_PADDR_IND_OFFLOAD_GET(*msg_word);
            frag_ind = HTT_RX_IN_ORD_PADDR_IND_FRAG_GET(*msg_word);

            if (adf_os_unlikely(frag_ind)) {
                ol_rx_frag_indication_handler(pdev->txrx_pdev, htt_t2h_msg,
                                               peer_id, tid);
                break;
            }

            ol_rx_in_order_indication_handler(pdev->txrx_pdev, htt_t2h_msg,
                                               peer_id, tid, offload_ind);
            break;
        }

    default:
        htt_t2h_lp_msg_handler(context, htt_t2h_msg);
        return;
    }

    /* Free the indication buffer */
    adf_nbuf_free(htt_t2h_msg);
}
/* Target to host Msg/event  handler  for low priority messages*/
void
htt_t2h_lp_msg_handler(void *context, adf_nbuf_t htt_t2h_msg )
{
    struct htt_pdev_t *pdev = (struct htt_pdev_t *) context;
    u_int32_t *msg_word;
    enum htt_t2h_msg_type msg_type;

    msg_word = (u_int32_t *) adf_nbuf_data(htt_t2h_msg);
    msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
    switch (msg_type) {
    case HTT_T2H_MSG_TYPE_VERSION_CONF:
        {
            htc_pm_runtime_put(pdev->htc_pdev);
            pdev->tgt_ver.major = HTT_VER_CONF_MAJOR_GET(*msg_word);
            pdev->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word);
            adf_os_print("target uses HTT version %d.%d; host uses %d.%d\n",
                pdev->tgt_ver.major, pdev->tgt_ver.minor,
                HTT_CURRENT_VERSION_MAJOR, HTT_CURRENT_VERSION_MINOR);
            if (pdev->tgt_ver.major != HTT_CURRENT_VERSION_MAJOR) {
                adf_os_print("*** Incompatible host/target HTT versions!\n");
            }
            /* abort if the target is incompatible with the host */
            adf_os_assert(pdev->tgt_ver.major == HTT_CURRENT_VERSION_MAJOR);
            if (pdev->tgt_ver.minor != HTT_CURRENT_VERSION_MINOR) {
                adf_os_print(
                    "*** Warning: host/target HTT versions are different, "
                    "though compatible!\n");
            }
            break;
        }
    case HTT_T2H_MSG_TYPE_RX_FLUSH:
        {
            u_int16_t peer_id;
            u_int8_t tid;
            int seq_num_start, seq_num_end;
            enum htt_rx_flush_action action;

            peer_id = HTT_RX_FLUSH_PEER_ID_GET(*msg_word);
            tid = HTT_RX_FLUSH_TID_GET(*msg_word);
            seq_num_start = HTT_RX_FLUSH_SEQ_NUM_START_GET(*(msg_word+1));
            seq_num_end = HTT_RX_FLUSH_SEQ_NUM_END_GET(*(msg_word+1));
            action =
                HTT_RX_FLUSH_MPDU_STATUS_GET(*(msg_word+1)) == 1 ?
                htt_rx_flush_release : htt_rx_flush_discard;
            ol_rx_flush_handler(
                pdev->txrx_pdev,
                peer_id, tid,
                seq_num_start,
                seq_num_end,
                action);
            break;
        }
    case  HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND:
        {
            int msdu_cnt;
            msdu_cnt = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_CNT_GET(*msg_word);
            ol_rx_offload_deliver_ind_handler(
                pdev->txrx_pdev,
                htt_t2h_msg,
                msdu_cnt);
            break;
        }
    case  HTT_T2H_MSG_TYPE_RX_FRAG_IND:
        {
            u_int16_t peer_id;
            u_int8_t tid;

            peer_id = HTT_RX_FRAG_IND_PEER_ID_GET(*msg_word);
            tid = HTT_RX_FRAG_IND_EXT_TID_GET(*msg_word);
            HTT_RX_FRAG_SET_LAST_MSDU(pdev, htt_t2h_msg);

            ol_rx_frag_indication_handler(
                pdev->txrx_pdev,
                htt_t2h_msg,
                peer_id,
                tid);
            break;
        }
    case HTT_T2H_MSG_TYPE_RX_ADDBA:
        {
            u_int16_t peer_id;
            u_int8_t tid;
            u_int8_t win_sz;
            u_int16_t start_seq_num;

            /*
             * FOR NOW, the host doesn't need to know the initial
             * sequence number for rx aggregation.
             * Thus, any value will do - specify 0.
             */
            start_seq_num = 0;
            peer_id = HTT_RX_ADDBA_PEER_ID_GET(*msg_word);
            tid = HTT_RX_ADDBA_TID_GET(*msg_word);
            win_sz = HTT_RX_ADDBA_WIN_SIZE_GET(*msg_word);
            ol_rx_addba_handler(
                pdev->txrx_pdev, peer_id, tid, win_sz, start_seq_num,
                0 /* success */);
            break;
        }
    case HTT_T2H_MSG_TYPE_RX_DELBA:
        {
            u_int16_t peer_id;
            u_int8_t tid;

            peer_id = HTT_RX_DELBA_PEER_ID_GET(*msg_word);
            tid = HTT_RX_DELBA_TID_GET(*msg_word);
            ol_rx_delba_handler(pdev->txrx_pdev, peer_id, tid);
            break;
        }
    case HTT_T2H_MSG_TYPE_PEER_MAP:
        {
            u_int8_t mac_addr_deswizzle_buf[HTT_MAC_ADDR_LEN];
            u_int8_t *peer_mac_addr;
            u_int16_t peer_id;
            u_int8_t vdev_id;

            peer_id = HTT_RX_PEER_MAP_PEER_ID_GET(*msg_word);
            vdev_id = HTT_RX_PEER_MAP_VDEV_ID_GET(*msg_word);
            peer_mac_addr = htt_t2h_mac_addr_deswizzle(
                (u_int8_t *) (msg_word+1), &mac_addr_deswizzle_buf[0]);

            ol_rx_peer_map_handler(
                pdev->txrx_pdev, peer_id, vdev_id, peer_mac_addr, 1/*can tx*/);
            break;
        }
    case HTT_T2H_MSG_TYPE_PEER_UNMAP:
        {
            u_int16_t peer_id;
            peer_id = HTT_RX_PEER_UNMAP_PEER_ID_GET(*msg_word);

            ol_rx_peer_unmap_handler(pdev->txrx_pdev, peer_id);
            break;
        }
    case HTT_T2H_MSG_TYPE_SEC_IND:
        {
            u_int16_t peer_id;
            enum htt_sec_type sec_type;
            int is_unicast;

            peer_id = HTT_SEC_IND_PEER_ID_GET(*msg_word);
            sec_type = HTT_SEC_IND_SEC_TYPE_GET(*msg_word);
            is_unicast = HTT_SEC_IND_UNICAST_GET(*msg_word);
            msg_word++; /* point to the first part of the Michael key */
            ol_rx_sec_ind_handler(
                pdev->txrx_pdev, peer_id, sec_type, is_unicast, msg_word, msg_word+2);
            break;
        }
    case HTT_T2H_MSG_TYPE_MGMT_TX_COMPL_IND:
        {
            struct htt_mgmt_tx_compl_ind *compl_msg;

            compl_msg = (struct htt_mgmt_tx_compl_ind *)(msg_word + 1);
            if (pdev->cfg.is_high_latency) {
                ol_tx_target_credit_update(pdev->txrx_pdev, 1);
            }
            ol_tx_single_completion_handler(
                pdev->txrx_pdev, compl_msg->status, compl_msg->desc_id);
            htc_pm_runtime_put(pdev->htc_pdev);
            HTT_TX_SCHED(pdev);
            break;
        }
#if TXRX_STATS_LEVEL != TXRX_STATS_LEVEL_OFF
    case HTT_T2H_MSG_TYPE_STATS_CONF:
        {
            u_int64_t cookie;
            u_int8_t *stats_info_list;

            cookie = *(msg_word + 1);
            cookie |= ((u_int64_t) (*(msg_word + 2))) << 32;

            stats_info_list = (u_int8_t *) (msg_word + 3);
            htc_pm_runtime_put(pdev->htc_pdev);
            ol_txrx_fw_stats_handler(pdev->txrx_pdev, cookie, stats_info_list);
            break;
        }
#endif
#ifndef REMOVE_PKT_LOG
    case HTT_T2H_MSG_TYPE_PKTLOG:
        {
            u_int32_t *pl_hdr;
            u_int32_t log_type;
            pl_hdr = (msg_word + 1);
            log_type = (*(pl_hdr + 1) & ATH_PKTLOG_HDR_LOG_TYPE_MASK) >>
                                            ATH_PKTLOG_HDR_LOG_TYPE_SHIFT;
            if (log_type == PKTLOG_TYPE_TX_CTRL ||
               (log_type) == PKTLOG_TYPE_TX_STAT ||
               (log_type) == PKTLOG_TYPE_TX_MSDU_ID ||
               (log_type) == PKTLOG_TYPE_TX_FRM_HDR ||
               (log_type) == PKTLOG_TYPE_TX_VIRT_ADDR) {
                wdi_event_handler(WDI_EVENT_TX_STATUS, pdev->txrx_pdev, pl_hdr);
            } else if ((log_type) == PKTLOG_TYPE_RC_FIND) {
                wdi_event_handler(WDI_EVENT_RATE_FIND, pdev->txrx_pdev, pl_hdr);
            } else if ((log_type) == PKTLOG_TYPE_RC_UPDATE) {
                wdi_event_handler(
                    WDI_EVENT_RATE_UPDATE, pdev->txrx_pdev, pl_hdr);
            } else if ((log_type) == PKTLOG_TYPE_RX_STAT) {
                wdi_event_handler(WDI_EVENT_RX_DESC, pdev->txrx_pdev, pl_hdr);
            }
            break;
        }
#endif
    case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
    {
        u_int32_t htt_credit_delta_abs;
        int32_t htt_credit_delta;
        int sign;

        htt_credit_delta_abs = HTT_TX_CREDIT_DELTA_ABS_GET(*msg_word);
        sign = HTT_TX_CREDIT_SIGN_BIT_GET(*msg_word) ? -1 : 1;
        htt_credit_delta = sign * htt_credit_delta_abs;
        ol_tx_credit_completion_handler(pdev->txrx_pdev, htt_credit_delta);
        break;
    }

#ifdef IPA_UC_OFFLOAD
    case HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE:
        {
            u_int8_t op_code;
            u_int16_t len;
            u_int8_t *op_msg_buffer;
            u_int8_t *msg_start_ptr;

            htc_pm_runtime_put(pdev->htc_pdev);
            msg_start_ptr = (u_int8_t *)msg_word;
            op_code = HTT_WDI_IPA_OP_RESPONSE_OP_CODE_GET(*msg_word);
            msg_word++;
            len = HTT_WDI_IPA_OP_RESPONSE_RSP_LEN_GET(*msg_word);

            op_msg_buffer = adf_os_mem_alloc(NULL,
                sizeof(struct htt_wdi_ipa_op_response_t) + len);
            if (!op_msg_buffer) {
                adf_os_print("OPCODE messsage buffer alloc fail");
                break;
            }
            adf_os_mem_copy(op_msg_buffer,
                    msg_start_ptr,
                    sizeof(struct htt_wdi_ipa_op_response_t) + len);
            ol_txrx_ipa_uc_op_response(pdev->txrx_pdev, op_msg_buffer);
            break;
        }
#endif /* IPA_UC_OFFLOAD */

    default:
        break;
    }
    /* Free the indication buffer */
    adf_nbuf_free(htt_t2h_msg);
}
/*
 * Temporarily added to support older WMI events. We should move all events to unified
 * when the target is ready to support it.
 */
void wmi_control_rx(void *ctx, HTC_PACKET *htc_packet)
{
	struct wmi_unified *wmi_handle = (struct wmi_unified *)ctx;
	wmi_buf_t evt_buf;
	u_int32_t len;
	void *wmi_cmd_struct_ptr = NULL;
	u_int32_t idx = 0;
	int tlv_ok_status = 0;

#if  defined(WMI_INTERFACE_EVENT_LOGGING) || !defined(QCA_CONFIG_SMP)
	u_int32_t id;
	u_int8_t *data;
#endif

	evt_buf = (wmi_buf_t) htc_packet->pPktContext;
	id = WMI_GET_FIELD(adf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);
	/* TX_PAUSE and WOW_WAKEUP_HOST events must be handled in tasklet context */
	if ((WMI_TX_PAUSE_EVENTID == id) ||
		(WMI_WOW_WAKEUP_HOST_EVENTID == id)) {
		if (adf_nbuf_pull_head(evt_buf, sizeof(WMI_CMD_HDR)) == NULL)
			return;

		data = adf_nbuf_data(evt_buf);
		len = adf_nbuf_len(evt_buf);
		tlv_ok_status = wmitlv_check_and_pad_event_tlvs(
					wmi_handle->scn_handle,
					data, len, id,
					&wmi_cmd_struct_ptr);
		if (tlv_ok_status != 0) {
			if (tlv_ok_status == 1) {
				wmi_cmd_struct_ptr = data;
			} else {
				return;
			}
		}

		idx = wmi_unified_get_event_handler_ix(wmi_handle, id);
		if (idx == -1) {
			wmitlv_free_allocated_event_tlvs(id,
				&wmi_cmd_struct_ptr);
			adf_nbuf_free(evt_buf);
			return;
		}
		wmi_handle->event_handler[idx](wmi_handle->scn_handle,
			       wmi_cmd_struct_ptr, len);
		wmitlv_free_allocated_event_tlvs(id, &wmi_cmd_struct_ptr);
		adf_nbuf_free(evt_buf);
		return;
	}

#ifdef WMI_INTERFACE_EVENT_LOGGING
	id = WMI_GET_FIELD(adf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);
	data = adf_nbuf_data(evt_buf);

	adf_os_spin_lock_bh(&wmi_handle->wmi_record_lock);
	/* Exclude 4 bytes of TLV header */
	WMI_RX_EVENT_RECORD(id, ((u_int8_t *)data + 4));
	adf_os_spin_unlock_bh(&wmi_handle->wmi_record_lock);
#endif
	adf_os_spin_lock_bh(&wmi_handle->eventq_lock);
	adf_nbuf_queue_add(&wmi_handle->event_queue, evt_buf);
	adf_os_spin_unlock_bh(&wmi_handle->eventq_lock);
	schedule_work(&wmi_handle->rx_event_work);
}
Example #24
A_STATUS
fwd_txdone(void *context, adf_nbuf_t nbuf)
{
  adf_nbuf_free(nbuf);  
  return A_STATUS_OK;
}
Example #25
int htt_tx_ipa_uc_attach(struct htt_pdev_t *pdev,
    unsigned int uc_tx_buf_sz,
    unsigned int uc_tx_buf_cnt,
    unsigned int uc_tx_partition_base)
{
   unsigned int  tx_buffer_count;
   unsigned int  tx_buffer_count_pwr2;
   adf_nbuf_t    buffer_vaddr;
   u_int32_t     buffer_paddr;
   u_int32_t    *header_ptr;
   u_int32_t    *ring_vaddr;
   int           return_code = 0;
   uint16_t     idx;

   /* Allocate CE Write Index WORD */
   pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr =
       adf_os_mem_alloc_consistent(pdev->osdev,
                4,
                &pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr,
                adf_os_get_dma_mem_context(
                   (&pdev->ipa_uc_tx_rsc.tx_ce_idx), memctx));
   if (!pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr) {
      adf_os_print("%s: CE Write Index WORD alloc fail", __func__);
      return -1;
   }

   /* Allocate TX COMP Ring */
   pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr =
       adf_os_mem_alloc_consistent(pdev->osdev,
                uc_tx_buf_cnt * 4,
                &pdev->ipa_uc_tx_rsc.tx_comp_base.paddr,
                adf_os_get_dma_mem_context(
                   (&pdev->ipa_uc_tx_rsc.tx_comp_base), memctx));
   if (!pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr) {
      adf_os_print("%s: TX COMP ring alloc fail", __func__);
      return_code = -2;
      goto free_tx_ce_idx;
   }

   adf_os_mem_zero(pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr, uc_tx_buf_cnt * 4);

   /* Allocate TX BUF vAddress Storage */
   pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg =
         (adf_nbuf_t *)adf_os_mem_alloc(pdev->osdev,
                          uc_tx_buf_cnt * sizeof(adf_nbuf_t));
   if (!pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg) {
      adf_os_print("%s: TX BUF POOL vaddr storage alloc fail",
                   __func__);
      return_code = -3;
      goto free_tx_comp_base;
   }
   adf_os_mem_zero(pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg,
                   uc_tx_buf_cnt * sizeof(adf_nbuf_t));

   ring_vaddr = pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr;
   /* Allocate TX buffers as many as possible */
   for (tx_buffer_count = 0;
        tx_buffer_count < (uc_tx_buf_cnt - 1);
        tx_buffer_count++) {
      buffer_vaddr = adf_nbuf_alloc(pdev->osdev,
                uc_tx_buf_sz, 0, 4, FALSE);
      if (!buffer_vaddr)
      {
         adf_os_print("%s: TX BUF alloc fail, allocated buffer count %d",
                      __func__, tx_buffer_count);
         break;
      }

      /* Init buffer */
      adf_os_mem_zero(adf_nbuf_data(buffer_vaddr), uc_tx_buf_sz);
      header_ptr = (u_int32_t *)adf_nbuf_data(buffer_vaddr);

      *header_ptr = HTT_IPA_UC_OFFLOAD_TX_HEADER_DEFAULT;
      header_ptr++;
      *header_ptr |= ((u_int16_t)uc_tx_partition_base + tx_buffer_count) << 16;

      adf_nbuf_map(pdev->osdev, buffer_vaddr, ADF_OS_DMA_BIDIRECTIONAL);
      buffer_paddr = adf_nbuf_get_frag_paddr_lo(buffer_vaddr, 0);
      header_ptr++;
      *header_ptr = (u_int32_t)(buffer_paddr + 16);

      header_ptr++;
      *header_ptr = 0xFFFFFFFF;

      /* FRAG Header */
      header_ptr++;
      *header_ptr = buffer_paddr + 32;

      *ring_vaddr = buffer_paddr;
      pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[tx_buffer_count] =
            buffer_vaddr;
      /* Memory barrier to ensure actual value updated */

      ring_vaddr++;
   }

   /*
    * Tx complete ring buffer count should be power of 2.
    * So, allocated Tx buffer count should be one less than ring buffer size.
    */
   tx_buffer_count_pwr2 = vos_rounddown_pow_of_two(tx_buffer_count + 1) - 1;
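   /*
    * Illustration: with tx_buffer_count = 10, vos_rounddown_pow_of_two(11)
    * yields 8, so tx_buffer_count_pwr2 = 7; the three surplus buffers
    * (idx 7..9) are unmapped and freed in the loop below.
    */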
   if (tx_buffer_count > tx_buffer_count_pwr2) {
       adf_os_print("%s: Allocated Tx buffer count %d is rounded down to %d",
                   __func__, tx_buffer_count, tx_buffer_count_pwr2);

       /* Free over allocated buffers below power of 2 */
       for(idx = tx_buffer_count_pwr2; idx < tx_buffer_count; idx++) {
           if (pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[idx]) {
               adf_nbuf_unmap(pdev->osdev,
                   pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[idx],
                   ADF_OS_DMA_FROM_DEVICE);
               adf_nbuf_free(pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[idx]);
           }
       }
   }

   if (tx_buffer_count_pwr2 == 0) {
       adf_os_print("%s: Failed to round down Tx buffer count %d",
                   __func__, tx_buffer_count_pwr2);
       return_code = -4;
       goto free_tx_comp_base;
   }

   pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt = tx_buffer_count_pwr2;

   return 0;

free_tx_comp_base:
   adf_os_mem_free_consistent(pdev->osdev,
                   ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev) * 4,
                   pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr,
                   pdev->ipa_uc_tx_rsc.tx_comp_base.paddr,
                   adf_os_get_dma_mem_context(
                      (&pdev->ipa_uc_tx_rsc.tx_comp_base), memctx));
free_tx_ce_idx:
   adf_os_mem_free_consistent(pdev->osdev,
                   4,
                   pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr,
                   pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr,
                   adf_os_get_dma_mem_context(
                      (&pdev->ipa_uc_tx_rsc.tx_ce_idx), memctx));
   return return_code;
}

#ifdef HIF_SDIO
HTC_SEND_FULL_ACTION epping_tx_queue_full(void *Context,
   HTC_PACKET *pPacket)
{
   epping_context_t *pEpping_ctx = (epping_context_t *)Context;
   epping_adapter_t *pAdapter = pEpping_ctx->epping_adapter;
   HTC_SEND_FULL_ACTION action = HTC_SEND_FULL_KEEP;
   netif_stop_queue(pAdapter->dev);
   return action;
}
#endif /* HIF_SDIO */
void epping_tx_complete_multiple(void *ctx,
   HTC_PACKET_QUEUE *pPacketQueue)
{
   epping_context_t *pEpping_ctx = (epping_context_t *)ctx;
   epping_adapter_t *pAdapter = pEpping_ctx->epping_adapter;
   struct net_device* dev = pAdapter->dev;
   A_STATUS status;
   HTC_ENDPOINT_ID eid;
   adf_nbuf_t pktSkb;
   struct epping_cookie *cookie;
   A_BOOL flushing = FALSE;
   adf_nbuf_queue_t skb_queue;
   HTC_PACKET *htc_pkt;

   adf_nbuf_queue_init(&skb_queue);

   adf_os_spin_lock_bh(&pAdapter->data_lock);

   while (!HTC_QUEUE_EMPTY(pPacketQueue)) {
      htc_pkt = HTC_PACKET_DEQUEUE(pPacketQueue);
      if (htc_pkt == NULL)
         break;
      status = htc_pkt->Status;
      eid = htc_pkt->Endpoint;
      pktSkb = GET_HTC_PACKET_NET_BUF_CONTEXT(htc_pkt);
      cookie = htc_pkt->pPktContext;

      ASSERT(pktSkb);
      ASSERT(htc_pkt->pBuffer == adf_nbuf_data(pktSkb));

      /* add this to the list, use faster non-lock API */
      adf_nbuf_queue_add(&skb_queue,pktSkb);

      if (A_SUCCESS(status)) {
         ASSERT(htc_pkt->ActualLength == adf_nbuf_len(pktSkb));
      }
      EPPING_LOG(VOS_TRACE_LEVEL_INFO,
         "%s skb=%p data=%p len=0x%x eid=%d ",
         __func__, pktSkb, htc_pkt->pBuffer,
         htc_pkt->ActualLength, eid);

      if (A_FAILED(status)) {
         if (status == A_ECANCELED) {
            /* a packet was flushed  */
            flushing = TRUE;
         }
         if (status != A_NO_RESOURCE) {
            printk("%s() -TX ERROR, status: 0x%x\n", __func__,
               status);
         }
      } else {
         EPPING_LOG(VOS_TRACE_LEVEL_INFO, "%s: OK\n", __func__);
         flushing = FALSE;
      }

      epping_free_cookie(pAdapter->pEpping_ctx, cookie);
   }

   adf_os_spin_unlock_bh(&pAdapter->data_lock);

   /* free all skbs in our local list */
   while (adf_nbuf_queue_len(&skb_queue)) {
      /* use non-lock version */
      pktSkb = adf_nbuf_queue_remove(&skb_queue);
      if (pktSkb == NULL)
         break;
      adf_nbuf_free(pktSkb);
      pEpping_ctx->total_tx_acks++;
   }

   if (!flushing) {
      netif_wake_queue(dev);
   }
}
int epping_tx_send(adf_nbuf_t skb, epping_adapter_t *pAdapter)
{
   adf_nbuf_t nodrop_skb;
   EPPING_HEADER *eppingHdr;
   A_UINT8 ac = 0;

   eppingHdr = (EPPING_HEADER *)adf_nbuf_data(skb);

   if (!IS_EPPING_PACKET(eppingHdr)) {
      EPPING_LOG(VOS_TRACE_LEVEL_FATAL,
         "%s: Received a non-EPPING packet\n", __func__);
      /* no packet to send, cleanup */
      adf_nbuf_free(skb);
      return -ENOMEM;
   }

   /* the stream ID is mapped to an access class */
   ac = eppingHdr->StreamNo_h;
   /* hard coded two ep ids */
   if (ac != 0 && ac != 1) {
      EPPING_LOG(VOS_TRACE_LEVEL_FATAL,
         "%s: ac %d is not mapped to mboxping service\n", __func__, ac);
      adf_nbuf_free(skb);
      return -ENOMEM;
   }

   /*
    * Some EPPING packets cannot be dropped, no matter which access class
    * they were sent on. Special care is taken:
    * 1. when there is no TX resource, queue the control packets in
    *    a special queue
    * 2. when there is TX resource, send the queued control packets first
    *    and then other packets
    * 3. a timer runs to check for queued control packets and
    *    flush them
    */

   /* check the nodrop queue first */
   while ((nodrop_skb = adf_nbuf_queue_remove(&pAdapter->nodrop_queue))) {
      HTCSetNodropPkt(pAdapter->pEpping_ctx->HTCHandle, TRUE);
      if (epping_tx_send_int(nodrop_skb, pAdapter)) {
         EPPING_LOG(VOS_TRACE_LEVEL_FATAL,
            "%s: nodrop: %p xmit fail\n", __func__, nodrop_skb);
         /* fail to xmit so put the nodrop packet to the nodrop queue */
         adf_nbuf_queue_insert_head(&pAdapter->nodrop_queue, nodrop_skb);
         /* no cookie so free the current skb */
         goto tx_fail;
      } else {
         HTCSetNodropPkt(pAdapter->pEpping_ctx->HTCHandle, FALSE);
         EPPING_LOG(VOS_TRACE_LEVEL_INFO,
            "%s: nodrop: %p xmit ok\n", __func__, nodrop_skb);
      }
   }

   /* send the original packet */
   if (epping_tx_send_int(skb, pAdapter))
      goto tx_fail;

   return 0;

tx_fail:
   if (!IS_EPING_PACKET_NO_DROP(eppingHdr)) {
      /* allow to drop the skb so drop it */
      adf_nbuf_free(skb);
      ++pAdapter->stats.tx_dropped;
      EPPING_LOG(VOS_TRACE_LEVEL_FATAL,
         "%s: Tx skb %p dropped, stats.tx_dropped = %ld\n",
         __func__, skb, pAdapter->stats.tx_dropped);
      return -ENOMEM;
   } else {
      EPPING_LOG(VOS_TRACE_LEVEL_FATAL,
                 "%s: nodrop: %p queued\n", __func__, skb);
      adf_nbuf_queue_add(&pAdapter->nodrop_queue, skb);
      adf_os_spin_lock_bh(&pAdapter->data_lock);
      if (pAdapter->epping_timer_state != EPPING_TX_TIMER_RUNNING) {
         pAdapter->epping_timer_state = EPPING_TX_TIMER_RUNNING;
         adf_os_timer_mod(&pAdapter->epping_timer, TX_RETRY_TIMEOUT_IN_MS);
      }
      adf_os_spin_unlock_bh(&pAdapter->data_lock);
   }

   return 0;
}
Example #28
hif_status_t HTCTxCompletionHandler(void *Context, adf_nbuf_t netbuf)
{
    HTC_TARGET *target = (HTC_TARGET *)Context;
    //adf_os_handle_t os_hdl = target->os_handle;
    a_uint8_t *netdata;
    a_uint32_t netlen;
    HTC_FRAME_HDR *HtcHdr;
    a_uint8_t EpID;
    HTC_ENDPOINT *pEndpoint;
#ifndef HTC_HOST_CREDIT_DIST
    a_int32_t i;
#endif
    
    adf_nbuf_peek_header(netbuf, &netdata, &netlen);

    HtcHdr = (HTC_FRAME_HDR *)netdata;

    EpID = HtcHdr->EndpointID;
    pEndpoint = &target->EndPoint[EpID];

    if (EpID == ENDPOINT0) {
        adf_nbuf_free(netbuf);
    }
    else {
        /* gather tx completion counts */
        //HTC_AGGRNUM_REC *pAggrNumRec;

        //LOCK_HTC_TX(target);
        //pAggrNumRec = HTC_GET_REC_AT_HEAD(&pEndpoint->AggrNumRecQueue);
        
        //aggrNum = pAggrNumRec->AggrNum;
        //UNLOCK_HTC_TX(target);

        //if ((++pEndpoint->CompletedTxCnt) == aggrNum) {
            
            //pEndpoint->CompletedTxCnt = 0;

#if 0
            LOCK_HTC_TX(target);
            
            /* Dequeue from endpoint and then enqueue to target */
            pAggrNumRec = HTC_AGGRNUMREC_DEQUEUE(&pEndpoint->AggrNumRecQueue);
            HTC_AGGRNUMREC_ENQUEUE(&target->FreeAggrNumRecQueue, pAggrNumRec);
            
            UNLOCK_HTC_TX(target);
#endif

            /* remove HTC header */
            adf_nbuf_pull_head(netbuf, HTC_HDR_LENGTH);

            #if 1    /* hand the buffer to the upper layer rather than always freeing it here */
            /* notify upper layer */
            if (pEndpoint->EpCallBacks.EpTxComplete) {
                /* give the packet to the upper layer */
                pEndpoint->EpCallBacks.EpTxComplete(/*dev*/pEndpoint->EpCallBacks.pContext, netbuf, EpID/*aggrNum*/);
            }
            else {
                adf_nbuf_free(netbuf);
            }
            #else
            adf_nbuf_free(netbuf);
            #endif
        //}

    }

#ifndef HTC_HOST_CREDIT_DIST
    /* Check whether any pending buffers still need to be sent */
    if (pEndpoint->UL_PipeID == 1) {
        if (HTCNeedReschedule(target, pEndpoint) == A_OK) {
            for (i = ENDPOINT_MAX - 1; i >= 0; i--) {
                pEndpoint = &target->EndPoint[i];
                
                if (HTCGetTxBufCnt(target, pEndpoint) > 0)  {
                        HTCTrySend(target, NULL, ADF_NBUF_NULL, ADF_NBUF_NULL);
                        break;
                }
            }
        }
    }
#endif    
    
    return HIF_OK;
}
Example #29
A_STATUS HTCRxCompletionHandler(
    void *Context, adf_nbuf_t netbuf, a_uint8_t pipeID)
{
    A_STATUS        status = A_OK;
    HTC_FRAME_HDR   *HtcHdr;
    HTC_TARGET      *target = (HTC_TARGET *)Context;
    a_uint8_t       *netdata;
    a_uint32_t      netlen;
    HTC_ENDPOINT    *pEndpoint;
    HTC_PACKET      *pPacket;
    A_UINT16        payloadLen;
    a_uint32_t      trailerlen = 0;
    A_UINT8         htc_ep_id;

#ifdef RX_SG_SUPPORT
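    /* If a scatter-gather reassembly is in progress, accumulate this
     * fragment; once the expected total length has arrived, coalesce the
     * queue into a single netbuf (see RxSgToSingleNetbuf in Example #1). */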
    LOCK_HTC_RX(target);
    if (target->IsRxSgInprogress) {
        target->CurRxSgTotalLen += adf_nbuf_len(netbuf);
        adf_nbuf_queue_add(&target->RxSgQueue, netbuf);
        if (target->CurRxSgTotalLen == target->ExpRxSgTotalLen) {
            netbuf = RxSgToSingleNetbuf(target);
            if (netbuf == NULL) {
                UNLOCK_HTC_RX(target);
                goto _out;
            }
        }
        else {
            netbuf = NULL;
            UNLOCK_HTC_RX(target);
            goto _out;
        }
    }
    UNLOCK_HTC_RX(target);
#endif

    netdata = adf_nbuf_data(netbuf);
    netlen = adf_nbuf_len(netbuf);

    HtcHdr = (HTC_FRAME_HDR *)netdata;

    do {

        htc_ep_id = HTC_GET_FIELD(HtcHdr, HTC_FRAME_HDR, ENDPOINTID);

        if (htc_ep_id >= ENDPOINT_MAX) {
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("HTC Rx: invalid EndpointID=%d\n",htc_ep_id));
            DebugDumpBytes((A_UINT8 *)HtcHdr,sizeof(HTC_FRAME_HDR),"BAD HTC Header");
            status = A_ERROR;
            break;
        }

        pEndpoint = &target->EndPoint[htc_ep_id];

        /*
         * If this endpoint that received a message from the target has
         * a to-target HIF pipe whose send completions are polled rather
         * than interrupt-driven, this is a good point to ask HIF to check
         * whether it has any completed sends to handle.
         */
        if (pEndpoint->ul_is_polled) {
            HTCSendCompleteCheck(pEndpoint, 1);
        }

        payloadLen = HTC_GET_FIELD(HtcHdr, HTC_FRAME_HDR, PAYLOADLEN);

        if (netlen < (payloadLen + HTC_HDR_LENGTH)) {
#ifdef RX_SG_SUPPORT
            LOCK_HTC_RX(target);
            target->IsRxSgInprogress = TRUE;
            adf_nbuf_queue_init(&target->RxSgQueue);
            adf_nbuf_queue_add(&target->RxSgQueue, netbuf);
            target->ExpRxSgTotalLen = (payloadLen + HTC_HDR_LENGTH);
            target->CurRxSgTotalLen += netlen;
            UNLOCK_HTC_RX(target);
            netbuf = NULL;
            break;
#else
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("HTC Rx: insufficient length, got:%d expected =%d\n",
                netlen, payloadLen + HTC_HDR_LENGTH));
            DebugDumpBytes((A_UINT8 *)HtcHdr,sizeof(HTC_FRAME_HDR),"BAD RX packet length");
            status = A_ERROR;
            break;
#endif
        }

#ifdef HTC_EP_STAT_PROFILING
        LOCK_HTC_RX(target);
        INC_HTC_EP_STAT(pEndpoint,RxReceived,1);
        UNLOCK_HTC_RX(target);
#endif

        //if (IS_TX_CREDIT_FLOW_ENABLED(pEndpoint)) {
        {
            A_UINT8         temp;
                /* get flags to check for trailer */
            temp = HTC_GET_FIELD(HtcHdr, HTC_FRAME_HDR, FLAGS);
            if (temp & HTC_FLAGS_RECV_TRAILER) {
                    /* extract the trailer length */
                temp = HTC_GET_FIELD(HtcHdr, HTC_FRAME_HDR, CONTROLBYTES0);
                if ((temp < sizeof(HTC_RECORD_HDR)) || (temp > payloadLen)) {
                    AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
                        ("HTCProcessRecvHeader, invalid header (payloadlength should be :%d, CB[0] is:%d) \n",
                            payloadLen, temp));
                    status = A_EPROTO;
                    break;
                }

                trailerlen = temp;
                    /* process trailer data that follows HDR + application payload */
                status = HTCProcessTrailer(target,
                                           ((A_UINT8 *)HtcHdr + HTC_HDR_LENGTH + payloadLen - temp),
                                           temp, htc_ep_id);
                if (A_FAILED(status)) {
                    break;
                }

            }
        }

        if (((int)payloadLen - (int)trailerlen) <= 0) {
            /* zero length packet with trailer data, just drop these */
            break;
        }


        if (htc_ep_id == ENDPOINT_0) {
            A_UINT16 message_id;
            HTC_UNKNOWN_MSG *htc_msg;

                /* remove HTC header */
            adf_nbuf_pull_head(netbuf, HTC_HDR_LENGTH);
            netdata = adf_nbuf_data(netbuf);
            netlen = adf_nbuf_len(netbuf);
            htc_msg = (HTC_UNKNOWN_MSG*)netdata;
            message_id = HTC_GET_FIELD(htc_msg, HTC_UNKNOWN_MSG, MESSAGEID);

            switch (message_id) {
            default:
                /* handle HTC control message */
                if (target->CtrlResponseProcessing) {
                    /* this is a fatal error, target should not be sending unsolicited messages
                     * on the endpoint 0 */
                    AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("HTC Rx Ctrl still processing\n"));
                    status = A_ERROR;
                    break;
                }

                LOCK_HTC_RX(target);
                target->CtrlResponseLength = min((int)netlen,HTC_MAX_CONTROL_MESSAGE_LENGTH);
                A_MEMCPY(target->CtrlResponseBuffer,netdata,target->CtrlResponseLength);
                UNLOCK_HTC_RX(target);

                adf_os_mutex_release(target->osdev, &target->CtrlResponseValid);
                break;
            case HTC_MSG_SEND_SUSPEND_COMPLETE:
                target->HTCInitInfo.TargetSendSuspendComplete(target->HTCInitInfo.pContext);
                break;
            }

            adf_nbuf_free(netbuf);
            netbuf = NULL;
            break;
        }

            /* the current message-based HIF architecture allocates net bufs
             * for recv packets; since this layer bridges that HIF to upper
             * layers, which expect HTC packets, we form the packets here
             * TODO_FIXME */
        pPacket  = AllocateHTCPacketContainer(target);
        if (NULL == pPacket) {
            status = A_NO_RESOURCE;
            break;
        }
        pPacket->Status = A_OK;
        pPacket->Endpoint = htc_ep_id; 
        pPacket->pPktContext = netbuf;
        pPacket->pBuffer = adf_nbuf_data(netbuf) + HTC_HDR_LENGTH;
        pPacket->ActualLength = netlen - HTC_HEADER_LEN - trailerlen;

            /* TODO : this is a hack because the driver layer will set the actual length
             * of the skb again which will just double the length */
        //A_NETBUF_TRIM(netbuf,netlen);
        adf_nbuf_trim_tail(netbuf, netlen);

        RecvPacketCompletion(target,pEndpoint,pPacket);
            /* recover the packet container */
        FreeHTCPacketContainer(target,pPacket);
        netbuf = NULL;

    } while(FALSE);

#ifdef RX_SG_SUPPORT
_out:
#endif
    if (netbuf != NULL) {
        adf_nbuf_free(netbuf);
    }

    return status;

}
void __wmi_control_rx(struct wmi_unified *wmi_handle, wmi_buf_t evt_buf)
{
	u_int32_t id;
	u_int8_t *data;
	u_int32_t len;
	void *wmi_cmd_struct_ptr = NULL;
	int tlv_ok_status = 0;

	id = WMI_GET_FIELD(adf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);

	if (adf_nbuf_pull_head(evt_buf, sizeof(WMI_CMD_HDR)) == NULL)
		goto end;

	data = adf_nbuf_data(evt_buf);
	len = adf_nbuf_len(evt_buf);

	/* Validate and pad(if necessary) the TLVs */
	tlv_ok_status = wmitlv_check_and_pad_event_tlvs(wmi_handle->scn_handle,
							data, len, id,
							&wmi_cmd_struct_ptr);
	if (tlv_ok_status != 0) {
		pr_err("%s: Error: id=0x%x, wmitlv_check_and_pad_tlvs ret=%d\n",
			__func__, id, tlv_ok_status);
		goto end;
	}

#ifdef FEATURE_WLAN_D0WOW
	if (wmi_get_d0wow_flag(wmi_handle))
		pr_debug("%s: WMI event ID is 0x%x\n", __func__, id);
#endif

	if (id >= WMI_EVT_GRP_START_ID(WMI_GRP_START)) {
		u_int32_t idx = 0;

		idx = wmi_unified_get_event_handler_ix(wmi_handle, id);
		if (idx == -1) {
			pr_err("%s : event handler is not registered: event id 0x%x\n",
			       __func__, id);
			goto end;
		}

#ifdef WMI_INTERFACE_EVENT_LOGGING
		adf_os_spin_lock_bh(&wmi_handle->wmi_record_lock);
		/* Exclude 4 bytes of TLV header */
		WMI_EVENT_RECORD(id, ((u_int8_t *)data + 4));
		adf_os_spin_unlock_bh(&wmi_handle->wmi_record_lock);
#endif
		/* Call the WMI registered event handler */
		wmi_handle->event_handler[idx](wmi_handle->scn_handle,
					       wmi_cmd_struct_ptr, len);
		goto end;
	}

	switch (id) {
	default:
		pr_info("%s: Unhandled WMI event %d\n", __func__, id);
		break;
	case WMI_SERVICE_READY_EVENTID:
		pr_info("%s: WMI UNIFIED SERVICE READY event\n", __func__);
		wma_rx_service_ready_event(wmi_handle->scn_handle,
					   wmi_cmd_struct_ptr);
		break;
	case WMI_READY_EVENTID:
		pr_info("%s:  WMI UNIFIED READY event\n", __func__);
		wma_rx_ready_event(wmi_handle->scn_handle, wmi_cmd_struct_ptr);
		break;
	}
end:
	wmitlv_free_allocated_event_tlvs(id, &wmi_cmd_struct_ptr);
	adf_nbuf_free(evt_buf);
}