Example #1
File: htc_recv.c Project: KHATEEBNSIT/AP
adf_nbuf_t RxSgToSingleNetbuf(HTC_TARGET *target)
{
    adf_nbuf_t skb;
    a_uint8_t *anbdata;
    a_uint8_t *anbdata_new;
    a_uint32_t anblen;
    adf_nbuf_t new_skb = NULL;
    a_uint32_t sg_queue_len;
    adf_nbuf_queue_t *rx_sg_queue = &target->RxSgQueue;

    sg_queue_len = adf_nbuf_queue_len(rx_sg_queue);

    if (sg_queue_len <= 1) {
        AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
            ("RxSgToSingleNetbuf: invalid sg queue len %u\n", sg_queue_len));
        goto _failed;
    }

    new_skb = adf_nbuf_alloc(target->ExpRxSgTotalLen, 0, 4, FALSE);
    if (new_skb == NULL) {
        AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
            ("RxSgToSingleNetbuf: can't allocate %u size netbuf\n",
                target->ExpRxSgTotalLen));
        goto _failed;
    }

    adf_nbuf_peek_header(new_skb, &anbdata_new, &anblen);

    skb = adf_nbuf_queue_remove(rx_sg_queue);
    do {
        adf_nbuf_peek_header(skb, &anbdata, &anblen);
        adf_os_mem_copy(anbdata_new, anbdata, adf_nbuf_len(skb));
        adf_nbuf_put_tail(new_skb, adf_nbuf_len(skb));
        anbdata_new += adf_nbuf_len(skb);
        adf_nbuf_free(skb);
        skb = adf_nbuf_queue_remove(rx_sg_queue);
    } while (skb != NULL);

    RESET_RX_SG_CONFIG(target);
    return new_skb;

_failed:

    while ((skb = adf_nbuf_queue_remove(rx_sg_queue)) != NULL) {
        adf_nbuf_free(skb);
    }

    RESET_RX_SG_CONFIG(target);
    return NULL;
}
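
Every snippet on this page centers on adf_nbuf_queue_remove(), which pops the head netbuf from an adf_nbuf_queue_t and returns NULL once the queue is empty. The drain-and-free idiom the examples keep repeating reduces to a minimal sketch; drain_and_free itself is a hypothetical helper, while the queue calls are exactly the ones used above:

/* Minimal sketch of the drain-and-free idiom. drain_and_free is a
 * hypothetical helper, not driver code; adf_nbuf_queue_remove() and
 * adf_nbuf_free() are the API calls shown in the examples. */
static void drain_and_free(adf_nbuf_queue_t *q)
{
    adf_nbuf_t buf;

    /* adf_nbuf_queue_remove() returns NULL on an empty queue */
    while ((buf = adf_nbuf_queue_remove(q)) != NULL) {
        adf_nbuf_free(buf);
    }
}

When the queue is shared across contexts, the remove is done under a lock, as in __wmi_rx_event_work and HTCStop below.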
Example #2
void
ieee80211_recv_mgmt_defer(void *arg)
{
    struct ieee80211com *ic = (struct ieee80211com *)arg;
    struct ieee80211_recv_mgt_args *entry = NULL;
    wbuf_t  wbuf = ADF_NBUF_NULL;
    struct ieee80211_rx_status  rs;
    struct ieee80211vap *vap;

    /* Dequeue and process */
    do {

        OS_MGMT_LOCKBH(&ic->ic_mgmt_lock);
        wbuf = adf_nbuf_queue_remove(&ic->ic_mgmt_nbufqueue);
        OS_MGMT_UNLOCKBH(&ic->ic_mgmt_lock);

        if (!wbuf)
            break;
        entry = (struct ieee80211_recv_mgt_args *)adf_nbuf_get_priv(wbuf);
        ieee80211_recv_mgmt(entry->ni, wbuf, entry->subtype, &rs);

        vap = entry->ni->ni_vap;
        if (vap->iv_evtable) {
            vap->iv_evtable->wlan_receive(vap->iv_ifp, wbuf,
                IEEE80211_FC0_TYPE_MGT, entry->subtype, &rs);
        }
    } while (1);
    atomic_set(&ic->ic_mgmt_deferflags, DEFER_DONE);
    return;
}
void __wmi_rx_event_work(struct work_struct *work)
{
	struct wmi_unified *wmi = container_of(work, struct wmi_unified,
					       rx_event_work);
	wmi_buf_t buf;

	adf_os_spin_lock_bh(&wmi->eventq_lock);
	buf = adf_nbuf_queue_remove(&wmi->event_queue);
	adf_os_spin_unlock_bh(&wmi->eventq_lock);
	while (buf) {
		__wmi_control_rx(wmi, buf);
		adf_os_spin_lock_bh(&wmi->eventq_lock);
		buf = adf_nbuf_queue_remove(&wmi->event_queue);
		adf_os_spin_unlock_bh(&wmi->eventq_lock);
	}
}
/**
 * wmi_unified_remove_work() - detach for WMI work
 * @wmi_handle: handle to WMI
 *
 * A function that does not fully detach WMI, but just remove work
 * queue items associated with it. This is used to make sure that
 * before any other processing code that may destroy related contexts
 * (HTC, etc), work queue processing on WMI has already been stopped.
 *
 * Return: void.
 */
void
wmi_unified_remove_work(struct wmi_unified* wmi_handle)
{
	wmi_buf_t buf;

	VOS_TRACE( VOS_MODULE_ID_WDA, VOS_TRACE_LEVEL_INFO,
		"Enter: %s", __func__);
	vos_flush_work(&wmi_handle->rx_event_work);
	adf_os_spin_lock_bh(&wmi_handle->eventq_lock);
	buf = adf_nbuf_queue_remove(&wmi_handle->event_queue);
	while (buf) {
		adf_nbuf_free(buf);
		buf = adf_nbuf_queue_remove(&wmi_handle->event_queue);
	}
	adf_os_spin_unlock_bh(&wmi_handle->eventq_lock);
	VOS_TRACE( VOS_MODULE_ID_WDA, VOS_TRACE_LEVEL_INFO,
		"Done: %s", __func__);
}
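
The kernel-doc above fixes the teardown ordering: stop WMI work-queue processing before destroying the contexts (HTC, etc.) that the work items may still reference. A hedged sketch of a shutdown path that honors this contract; my_shutdown, struct my_ctx and its fields are hypothetical glue, while wmi_unified_remove_work, HTCStop and wmi_unified_detach are the functions shown on this page:

struct my_ctx {                          /* hypothetical container */
	struct wmi_unified *wmi_handle;
	HTC_HANDLE htc_handle;
};

static void my_shutdown(struct my_ctx *ctx)
{
	/* 1. stop WMI rx-event work first, so no work item can run
	 *    against contexts destroyed below */
	wmi_unified_remove_work(ctx->wmi_handle);

	/* 2. now it is safe to stop HTC */
	HTCStop(ctx->htc_handle);

	/* 3. finally flush and free WMI itself */
	wmi_unified_detach(ctx->wmi_handle);
}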
void
wmi_unified_detach(struct wmi_unified* wmi_handle)
{
    wmi_buf_t buf;

    vos_flush_work(&wmi_handle->rx_event_work);
    adf_os_spin_lock_bh(&wmi_handle->eventq_lock);
    buf = adf_nbuf_queue_remove(&wmi_handle->event_queue);
    while (buf) {
        adf_nbuf_free(buf);
        buf = adf_nbuf_queue_remove(&wmi_handle->event_queue);
    }
    adf_os_spin_unlock_bh(&wmi_handle->eventq_lock);
    /* wmi_handle has already been dereferenced above, so a NULL check at
     * this point comes too late, and NULLing the local pointer has no
     * effect on the caller; just free the handle */
    OS_FREE(wmi_handle);
}
void epping_tx_timer_expire(epping_adapter_t *pAdapter)
{
   adf_nbuf_t nodrop_skb;

   EPPING_LOG(VOS_TRACE_LEVEL_INFO, "%s: queue len: %d\n", __func__,
      adf_nbuf_queue_len(&pAdapter->nodrop_queue));

   if (!adf_nbuf_queue_len(&pAdapter->nodrop_queue)) {
      /* nodrop queue is empty so no need to arm timer */
      pAdapter->epping_timer_state = EPPING_TX_TIMER_STOPPED;
      return;
   }

   /* try to flush nodrop queue */
   while ((nodrop_skb = adf_nbuf_queue_remove(&pAdapter->nodrop_queue))) {
      HTCSetNodropPkt(pAdapter->pEpping_ctx->HTCHandle, TRUE);
      if (epping_tx_send_int(nodrop_skb, pAdapter)) {
         EPPING_LOG(VOS_TRACE_LEVEL_FATAL,
            "%s: nodrop: %p xmit fail in timer\n", __func__, nodrop_skb);
         /* fail to xmit so put the nodrop packet to the nodrop queue */
         adf_nbuf_queue_insert_head(&pAdapter->nodrop_queue, nodrop_skb);
         break;
      } else {
         HTCSetNodropPkt(pAdapter->pEpping_ctx->HTCHandle, FALSE);
         EPPING_LOG(VOS_TRACE_LEVEL_INFO,
            "%s: nodrop: %p xmit ok in timer\n", __func__, nodrop_skb);
      }
   }

   /* if nodrop queue is not empty, continue to arm timer */
   if (nodrop_skb) {
      adf_os_spin_lock_bh(&pAdapter->data_lock);
      if (pAdapter->epping_timer_state != EPPING_TX_TIMER_RUNNING) {
         pAdapter->epping_timer_state = EPPING_TX_TIMER_RUNNING;
         adf_os_timer_mod(&pAdapter->epping_timer, TX_RETRY_TIMEOUT_IN_MS);
      }
      adf_os_spin_unlock_bh(&pAdapter->data_lock);
   } else {
      pAdapter->epping_timer_state = EPPING_TX_TIMER_STOPPED;
   }
}
Example #7
File: htc.c Project: KHATEEBNSIT/AP
/* stop HTC communications, i.e. stop interrupt reception, and flush all queued buffers */
void HTCStop(HTC_HANDLE HTCHandle)
{
    HTC_TARGET    *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
    int           i;
    HTC_ENDPOINT  *pEndpoint;
#ifdef RX_SG_SUPPORT
    adf_nbuf_t netbuf;
    adf_nbuf_queue_t *rx_sg_queue = &target->RxSgQueue;
#endif

    AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("+HTCStop \n"));

        /* cleanup endpoints */
    for (i = 0; i < ENDPOINT_MAX; i++) {
        pEndpoint = &target->EndPoint[i];
        HTCFlushRxHoldQueue(target,pEndpoint);
        HTCFlushEndpointTX(target,pEndpoint,HTC_TX_PACKET_TAG_ALL);
    }

    /* Note: HTCFlushEndpointTX for all endpoints should be called before
     * HIFStop - otherwise HTCTxCompletionHandler called from
     * hif_send_buffer_cleanup_on_pipe for residual tx frames in HIF layer,
     * might queue the packet again to HIF Layer - which could cause tx
     * buffer leak
     */

    HIFStop(target->hif_dev);

#ifdef RX_SG_SUPPORT
    LOCK_HTC_RX(target);
    while ((netbuf = adf_nbuf_queue_remove(rx_sg_queue)) != NULL) {
        adf_nbuf_free(netbuf);
    }
    RESET_RX_SG_CONFIG(target);
    UNLOCK_HTC_RX(target);
#endif

    ResetEndpointStates(target);

    AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("-HTCStop \n"));
}
Example #8
void _ath_htc_netdeferfn_cleanup(struct ieee80211com *ic)
{

	wbuf_t wbuf = ADF_NBUF_NULL;
	nawds_dentry_t *nawds_entry = NULL;

	/* Freeing MGMT defer buffer */
	do {
		OS_MGMT_LOCKBH(&ic->ic_mgmt_lock);
		wbuf = adf_nbuf_queue_remove(&ic->ic_mgmt_nbufqueue);
		OS_MGMT_UNLOCKBH(&ic->ic_mgmt_lock);

		if (!wbuf)
			break;
		wbuf_free(wbuf);
	} while (wbuf);

	atomic_set(&ic->ic_mgmt_deferflags, DEFER_DONE);


	do {
		OS_NAWDSDEFER_LOCKBH(&ic->ic_nawdsdefer_lock);
		nawds_entry = TAILQ_FIRST(&ic->ic_nawdslearnlist);
		if (nawds_entry)
			TAILQ_REMOVE(&ic->ic_nawdslearnlist, nawds_entry, nawds_dlist);
		OS_NAWDSDEFER_UNLOCKBH(&ic->ic_nawdsdefer_lock);
		if (!nawds_entry)
			break;

		OS_FREE(nawds_entry);
	} while (1);
	atomic_set(&ic->ic_nawds_deferflags, DEFER_DONE);

}
#ifdef HIF_SDIO
/* Only the tail of this snippet survived extraction; the opening below is
 * a reconstruction, and the signature is an assumption based on the HTC
 * send-full callback convention used elsewhere in this driver. */
HTC_SEND_FULL_ACTION epping_tx_queue_full(void *Context, HTC_PACKET *pPacket)
{
   /*
    * Calling netif_stop_queue frequently would hurt the mboxping tx t-put,
    * so return HTC_SEND_FULL_KEEP directly in epping_tx_queue_full to
    * avoid that.
    */
   return HTC_SEND_FULL_KEEP;
}
#endif /* HIF_SDIO */
void epping_tx_complete_multiple(void *ctx,
   HTC_PACKET_QUEUE *pPacketQueue)
{
   epping_context_t *pEpping_ctx = (epping_context_t *)ctx;
   epping_adapter_t *pAdapter = pEpping_ctx->epping_adapter;
   struct net_device* dev = pAdapter->dev;
   A_STATUS status;
   HTC_ENDPOINT_ID eid;
   adf_nbuf_t pktSkb;
   struct epping_cookie *cookie;
   A_BOOL flushing = FALSE;
   adf_nbuf_queue_t skb_queue;
   HTC_PACKET *htc_pkt;

   adf_nbuf_queue_init(&skb_queue);

   adf_os_spin_lock_bh(&pAdapter->data_lock);

   while (!HTC_QUEUE_EMPTY(pPacketQueue)) {
      htc_pkt = HTC_PACKET_DEQUEUE(pPacketQueue);
      if (htc_pkt == NULL)
         break;
      status = htc_pkt->Status;
      eid = htc_pkt->Endpoint;
      pktSkb = GET_HTC_PACKET_NET_BUF_CONTEXT(htc_pkt);
      cookie = htc_pkt->pPktContext;

      if (!pktSkb) {
         EPPING_LOG(VOS_TRACE_LEVEL_ERROR,
            "%s: pktSkb is NULL", __func__);
         ASSERT(0);
      } else {
         if (htc_pkt->pBuffer != adf_nbuf_data(pktSkb)) {
            EPPING_LOG(VOS_TRACE_LEVEL_ERROR,
               "%s: htc_pkt buffer not equal to skb->data", __func__);
            ASSERT(0);
         }

         /* add this to the list, use faster non-lock API */
         adf_nbuf_queue_add(&skb_queue, pktSkb);

         if (A_SUCCESS(status))
            if (htc_pkt->ActualLength != adf_nbuf_len(pktSkb)) {
               EPPING_LOG(VOS_TRACE_LEVEL_ERROR,
                  "%s: htc_pkt length not equal to skb->len", __func__);
               ASSERT(0);
            }
      }

      EPPING_LOG(VOS_TRACE_LEVEL_INFO,
         "%s skb=%p data=%p len=0x%x eid=%d ",
         __func__, pktSkb, htc_pkt->pBuffer,
         htc_pkt->ActualLength, eid);

      if (A_FAILED(status)) {
         if (status == A_ECANCELED) {
            /* a packet was flushed  */
            flushing = TRUE;
         }
         if (status != A_NO_RESOURCE) {
            printk("%s() -TX ERROR, status: 0x%x\n", __func__,
               status);
         }
      } else {
         EPPING_LOG(VOS_TRACE_LEVEL_INFO, "%s: OK\n", __func__);
         flushing = FALSE;
      }

      epping_free_cookie(pAdapter->pEpping_ctx, cookie);
   }

   adf_os_spin_unlock_bh(&pAdapter->data_lock);

   /* free all skbs in our local list */
   while (adf_nbuf_queue_len(&skb_queue)) {
      /* use non-lock version */
      pktSkb = adf_nbuf_queue_remove(&skb_queue);
      if (pktSkb == NULL)
         break;
      adf_nbuf_tx_free(pktSkb, ADF_NBUF_PKT_ERROR);
      pEpping_ctx->total_tx_acks++;
   }

   if (!flushing) {
      netif_wake_queue(dev);
   }
}
int epping_tx_send(adf_nbuf_t skb, epping_adapter_t *pAdapter)
{
   adf_nbuf_t nodrop_skb;
   EPPING_HEADER *eppingHdr;
   A_UINT8 ac = 0;

   eppingHdr = (EPPING_HEADER *)adf_nbuf_data(skb);

   if (!IS_EPPING_PACKET(eppingHdr)) {
      EPPING_LOG(VOS_TRACE_LEVEL_FATAL,
         "%s: Recived non endpoint ping packets\n", __func__);
      /* no packet to send, cleanup */
      adf_nbuf_free(skb);
      return -ENOMEM;
   }

   /* the stream ID is mapped to an access class */
   ac = eppingHdr->StreamNo_h;
   /* hard coded two ep ids */
   if (ac != 0 && ac != 1) {
      EPPING_LOG(VOS_TRACE_LEVEL_FATAL,
         "%s: ac %d is not mapped to mboxping service\n", __func__, ac);
      adf_nbuf_free(skb);
      return -ENOMEM;
   }

   /*
    * Some EPPING packets cannot be dropped no matter which access class
    * they were sent on. Special care is taken:
    * 1. when there is no TX resource, queue the control packets to
    *    a special queue
    * 2. when there is TX resource, send the queued control packets first
    *    and then other packets
    * 3. a timer runs to check for queued control packets and flush them
    * (a condensed sketch of this flow follows the function)
    */

   /* check the nodrop queue first */
   while ((nodrop_skb = adf_nbuf_queue_remove(&pAdapter->nodrop_queue))) {
      HTCSetNodropPkt(pAdapter->pEpping_ctx->HTCHandle, TRUE);
      if (epping_tx_send_int(nodrop_skb, pAdapter)) {
         EPPING_LOG(VOS_TRACE_LEVEL_FATAL,
            "%s: nodrop: %p xmit fail\n", __func__, nodrop_skb);
         /* fail to xmit so put the nodrop packet to the nodrop queue */
         adf_nbuf_queue_insert_head(&pAdapter->nodrop_queue, nodrop_skb);
         /* no cookie so free the current skb */
         goto tx_fail;
      } else {
         HTCSetNodropPkt(pAdapter->pEpping_ctx->HTCHandle, FALSE);
         EPPING_LOG(VOS_TRACE_LEVEL_INFO,
            "%s: nodrop: %p xmit ok\n", __func__, nodrop_skb);
      }
   }

   /* send the original packet */
   if (epping_tx_send_int(skb, pAdapter))
      goto tx_fail;

   return 0;

tx_fail:
   if (!IS_EPING_PACKET_NO_DROP(eppingHdr)) {
      /* allow to drop the skb so drop it */
      adf_nbuf_free(skb);
      ++pAdapter->stats.tx_dropped;
      EPPING_LOG(VOS_TRACE_LEVEL_FATAL,
         "%s: Tx skb %p dropped, stats.tx_dropped = %ld\n",
         __func__, skb, pAdapter->stats.tx_dropped);
      return -ENOMEM;
   } else {
      EPPING_LOG(VOS_TRACE_LEVEL_FATAL,
                 "%s: nodrop: %p queued\n", __func__, skb);
      adf_nbuf_queue_add(&pAdapter->nodrop_queue, skb);
      adf_os_spin_lock_bh(&pAdapter->data_lock);
      if (pAdapter->epping_timer_state != EPPING_TX_TIMER_RUNNING) {
         pAdapter->epping_timer_state = EPPING_TX_TIMER_RUNNING;
         adf_os_timer_mod(&pAdapter->epping_timer, TX_RETRY_TIMEOUT_IN_MS);
      }
      adf_os_spin_unlock_bh(&pAdapter->data_lock);
   }

   return 0;
}