void __wmi_rx_event_work(struct work_struct *work)
{
	struct wmi_unified *wmi = container_of(work, struct wmi_unified,
					       rx_event_work);
	wmi_buf_t buf;

	adf_os_spin_lock_bh(&wmi->eventq_lock);
	buf = adf_nbuf_queue_remove(&wmi->event_queue);
	adf_os_spin_unlock_bh(&wmi->eventq_lock);
	while (buf) {
		__wmi_control_rx(wmi, buf);
		adf_os_spin_lock_bh(&wmi->eventq_lock);
		buf = adf_nbuf_queue_remove(&wmi->event_queue);
		adf_os_spin_unlock_bh(&wmi->eventq_lock);
	}
}
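/**
 * ol_tx_desc_free() - return a tx descriptor to the pdev freelist
 * @pdev: data path pdev handle
 * @tx_desc: tx descriptor being released
 *
 * Links the descriptor back onto pdev->tx_desc.freelist under
 * pdev->tx_mutex and, when per-vdev tx descriptor pools are enabled,
 * resumes the OS netif queue once the vdev's outstanding descriptor
 * count drops below the low water mark.
 */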
void
ol_tx_desc_free(struct ol_txrx_pdev_t *pdev, struct ol_tx_desc_t *tx_desc)
{
    adf_os_spin_lock_bh(&pdev->tx_mutex);
#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
    tx_desc->pkt_type = ol_tx_frm_freed;
#ifdef QCA_COMPUTE_TX_DELAY
    tx_desc->entry_timestamp_ticks = 0xffffffff;
#endif
#endif
    ((union ol_tx_desc_list_elem_t *)tx_desc)->next =
        pdev->tx_desc.freelist;
    pdev->tx_desc.freelist = (union ol_tx_desc_list_elem_t *) tx_desc;
    pdev->tx_desc.num_free++;
#if defined(CONFIG_PER_VDEV_TX_DESC_POOL)
#ifdef QCA_LL_TX_FLOW_CT
    if (adf_os_atomic_read(&tx_desc->vdev->os_q_paused) &&
        (adf_os_atomic_read(&tx_desc->vdev->tx_desc_count) <
         TXRX_HL_TX_FLOW_CTRL_VDEV_LOW_WATER_MARK)) {
        /* wake up the netif queue */
        adf_os_atomic_set(&tx_desc->vdev->os_q_paused, 0);
        tx_desc->vdev->osif_flow_control_cb(tx_desc->vdev->osif_dev,
                                            tx_desc->vdev->vdev_id, A_TRUE);
    }
#endif /* QCA_LL_TX_FLOW_CT */
    adf_os_atomic_dec(&tx_desc->vdev->tx_desc_count);
#endif
#if defined(CONFIG_HL_SUPPORT)
    tx_desc->vdev = NULL;
#endif
    adf_os_spin_unlock_bh(&pdev->tx_mutex);
}
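/**
 * epping_free_cookie() - return a tx cookie to the free list
 * @pEpping_ctx: epping context
 * @cookie: cookie being released
 *
 * Pushes the cookie onto the head of the context's singly linked free
 * list under cookie_lock.
 */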
void epping_free_cookie(epping_context_t *pEpping_ctx,
                        struct epping_cookie *cookie)
{
   adf_os_spin_lock_bh(&pEpping_ctx->cookie_lock);
   cookie->next = pEpping_ctx->cookie_list;
   pEpping_ctx->cookie_list = cookie;
   pEpping_ctx->cookie_count++;
   adf_os_spin_unlock_bh(&pEpping_ctx->cookie_lock);
}
/* cleanup cookie queue */
void epping_cookie_cleanup(epping_context_t *pEpping_ctx)
{
   int i;
   adf_os_spin_lock_bh(&pEpping_ctx->cookie_lock);
   pEpping_ctx->cookie_list = NULL;
   pEpping_ctx->cookie_count = 0;
   adf_os_spin_unlock_bh(&pEpping_ctx->cookie_lock);
   for (i = 0; i < MAX_COOKIE_SLOTS_NUM; i++) {
      if (pEpping_ctx->s_cookie_mem[i]) {
         vos_mem_free(pEpping_ctx->s_cookie_mem[i]);
         pEpping_ctx->s_cookie_mem[i] = NULL;
      }
   }
}
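/**
 * epping_alloc_cookie() - pop a tx cookie from the free list
 * @pEpping_ctx: epping context
 *
 * Return: a cookie on success, NULL if the free list is empty.
 */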
struct epping_cookie *epping_alloc_cookie(epping_context_t *pEpping_ctx)
{
   struct epping_cookie   *cookie;

   adf_os_spin_lock_bh(&pEpping_ctx->cookie_lock);
   cookie = pEpping_ctx->cookie_list;
   if (cookie != NULL) {
      pEpping_ctx->cookie_list = cookie->next;
      pEpping_ctx->cookie_count--;
   }
   adf_os_spin_unlock_bh(&pEpping_ctx->cookie_lock);
   return cookie;
}
/**
 * wmi_unified_remove_work() - detach for WMI work
 * @wmi_handle: handle to WMI
 *
 * A function that does not fully detach WMI, but just removes work
 * queue items associated with it. This is used to ensure that, before
 * any other processing code destroys related contexts (HTC, etc.),
 * work queue processing on WMI has already been stopped.
 *
 * Return: void.
 */
void
wmi_unified_remove_work(struct wmi_unified* wmi_handle)
{
	wmi_buf_t buf;

	VOS_TRACE(VOS_MODULE_ID_WDA, VOS_TRACE_LEVEL_INFO,
		"Enter: %s", __func__);
	vos_flush_work(&wmi_handle->rx_event_work);
	adf_os_spin_lock_bh(&wmi_handle->eventq_lock);
	buf = adf_nbuf_queue_remove(&wmi_handle->event_queue);
	while (buf) {
		adf_nbuf_free(buf);
		buf = adf_nbuf_queue_remove(&wmi_handle->event_queue);
	}
	adf_os_spin_unlock_bh(&wmi_handle->eventq_lock);
	VOS_TRACE(VOS_MODULE_ID_WDA, VOS_TRACE_LEVEL_INFO,
		"Done: %s", __func__);
}
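/**
 * wmi_unified_detach() - detach WMI completely
 * @wmi_handle: handle to WMI
 *
 * Flushes the rx event work, drains and frees any queued event
 * buffers, and then frees the WMI handle itself.
 *
 * Return: void.
 */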
void
wmi_unified_detach(struct wmi_unified* wmi_handle)
{
    wmi_buf_t buf;

    /* check the handle before it is dereferenced, not after */
    if (wmi_handle == NULL)
        return;

    vos_flush_work(&wmi_handle->rx_event_work);
    adf_os_spin_lock_bh(&wmi_handle->eventq_lock);
    buf = adf_nbuf_queue_remove(&wmi_handle->event_queue);
    while (buf) {
        adf_nbuf_free(buf);
        buf = adf_nbuf_queue_remove(&wmi_handle->event_queue);
    }
    adf_os_spin_unlock_bh(&wmi_handle->eventq_lock);
    OS_FREE(wmi_handle);
}
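/**
 * ol_ath_node_cleanup() - tear down target state for a node
 * @ni: node being cleaned up
 *
 * Flushes all non-management TIDs for the peer in the target, then,
 * once only (a node may be cleaned up several times before it is
 * freed), detaches the txrx peer, clears its keys and deletes the peer
 * in the target, before chaining to the umac cleanup handler.
 */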
static void
ol_ath_node_cleanup(struct ieee80211_node *ni)
{
    struct ieee80211com *ic = ni->ni_ic;
    struct ol_ath_softc_net80211 *scn = OL_ATH_SOFTC_NET80211(ic);
    struct ol_ath_vap_net80211 *avn = OL_ATH_VAP_NET80211(ni->ni_vap);
    u_int32_t peer_tid_bitmap = 0xffffffff; /* TBD : fill with all valid TIDs */

    /* flush all TIDs except MGMT TID for this peer in Target */
    peer_tid_bitmap &= ~(0x1 << WMI_MGMT_TID);
    if (wmi_unified_peer_flush_tids_send(scn->wmi_handle, ni->ni_macaddr, peer_tid_bitmap, avn->av_if_id)) {
        printk("%s : Unable to flush peer TIDs in target\n", __func__);
    }
    /* TBD: Cleanup the key index mapping */

    adf_os_spin_lock_bh(&scn->scn_lock);
    if ((OL_ATH_NODE_NET80211(ni))->an_txrx_handle) {
        ol_txrx_peer_detach( (OL_ATH_NODE_NET80211(ni))->an_txrx_handle);

#if ATH_SUPPORT_GREEN_AP
        if ((ic->ic_opmode == IEEE80211_M_HOSTAP) && (ni != ni->ni_bss_node)) {
            ath_green_ap_state_mc(ic, ATH_PS_EVENT_DEC_STA);
        }
#endif  /* ATH_SUPPORT_GREEN_AP */

        /* Delete key */
        ieee80211_node_clear_keys(ni);
        /* Delete peer in Target */
        if (wmi_unified_peer_delete_send(scn->wmi_handle, ni->ni_macaddr, avn->av_if_id)) {
            printk("%s : Unable to delete peer in target\n", __func__);
        }
        /*
         * It is possible that a node will be cleaned up for multiple times
         * before it is freed. Make sure we only remove TxRx/FW peer once.
         */
        (OL_ATH_NODE_NET80211(ni))->an_txrx_handle = NULL;
    }
    adf_os_spin_unlock_bh(&scn->scn_lock);

    /* Call back the umac node cleanup function */
    scn->net80211_node_cleanup(ni);
}
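/**
 * epping_tx_timer_expire() - retry timer for queued no-drop packets
 * @pAdapter: epping adapter
 *
 * Tries to flush the nodrop queue; if a send fails, the packet is put
 * back at the head of the queue and the timer is re-armed.
 */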
void epping_tx_timer_expire(epping_adapter_t *pAdapter)
{
   adf_nbuf_t nodrop_skb;

   EPPING_LOG(VOS_TRACE_LEVEL_INFO, "%s: queue len: %d\n", __func__,
      adf_nbuf_queue_len(&pAdapter->nodrop_queue));

   if (!adf_nbuf_queue_len(&pAdapter->nodrop_queue)) {
      /* nodrop queue is empty so no need to arm timer */
      pAdapter->epping_timer_state = EPPING_TX_TIMER_STOPPED;
      return;
   }

   /* try to flush nodrop queue */
   while ((nodrop_skb = adf_nbuf_queue_remove(&pAdapter->nodrop_queue))) {
      HTCSetNodropPkt(pAdapter->pEpping_ctx->HTCHandle, TRUE);
      if (epping_tx_send_int(nodrop_skb, pAdapter)) {
         EPPING_LOG(VOS_TRACE_LEVEL_FATAL,
            "%s: nodrop: %p xmit fail in timer\n", __func__, nodrop_skb);
         /* fail to xmit so put the nodrop packet to the nodrop queue */
         adf_nbuf_queue_insert_head(&pAdapter->nodrop_queue, nodrop_skb);
         break;
      } else {
         HTCSetNodropPkt(pAdapter->pEpping_ctx->HTCHandle, FALSE);
         EPPING_LOG(VOS_TRACE_LEVEL_INFO,
            "%s: nodrop: %p xmit ok in timer\n", __func__, nodrop_skb);
      }
   }

   /* if the nodrop queue is not empty, re-arm the timer */
   if (nodrop_skb) {
      adf_os_spin_lock_bh(&pAdapter->data_lock);
      if (pAdapter->epping_timer_state != EPPING_TX_TIMER_RUNNING) {
         pAdapter->epping_timer_state = EPPING_TX_TIMER_RUNNING;
         adf_os_timer_mod(&pAdapter->epping_timer, TX_RETRY_TIMEOUT_IN_MS);
      }
      adf_os_spin_unlock_bh(&pAdapter->data_lock);
   } else {
      pAdapter->epping_timer_state = EPPING_TX_TIMER_STOPPED;
   }
}
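/**
 * ol_ath_node_free() - free an offload node
 * @ni: node being freed
 *
 * Drops the scn peer count (and the extended-stats peer count if the
 * node used it) under scn_lock, then chains to the umac node free
 * handler.
 */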
static void
ol_ath_node_free(struct ieee80211_node *ni)
{
    struct ieee80211com *ic = ni->ni_ic;
    struct ol_ath_softc_net80211 *scn = OL_ATH_SOFTC_NET80211(ic);
    u_int32_t ni_flags = ni->ni_flags;

    adf_os_spin_lock_bh(&scn->scn_lock);
    if(scn->peer_count) {
        scn->peer_count--;
    } else {
        printk("%s ni (%p) vap (%p) scn (%p) Decremnting '0' peer count \n",
                __func__, ni, ni->ni_vap, scn);
    }
    if (ni_flags & IEEE80211_NODE_EXT_STATS) {
        scn->peer_ext_stats_count--;
    }
    adf_os_spin_unlock_bh(&scn->scn_lock);
    /* Call back the umac node free function */
    scn->net80211_node_free(ni);
}
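/**
 * ol_tx_desc_alloc() - allocate a tx descriptor from the pdev freelist
 * @pdev: data path pdev handle
 * @vdev: vdev that will own the descriptor
 *
 * Pops the head of pdev->tx_desc.freelist under pdev->tx_mutex.  When
 * sanity checks are compiled in, asserts that the descriptor was last
 * seen in the freed state, catching double allocation and freelist
 * corruption.
 *
 * Return: the descriptor, or NULL if the freelist is empty.
 */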
static inline struct ol_tx_desc_t *
ol_tx_desc_alloc(struct ol_txrx_pdev_t *pdev, struct ol_txrx_vdev_t *vdev)
{
    struct ol_tx_desc_t *tx_desc = NULL;

    adf_os_spin_lock_bh(&pdev->tx_mutex);
    if (pdev->tx_desc.freelist) {
        pdev->tx_desc.num_free--;
        tx_desc = &pdev->tx_desc.freelist->tx_desc;
        pdev->tx_desc.freelist = pdev->tx_desc.freelist->next;
#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
        if (tx_desc->pkt_type != ol_tx_frm_freed
#ifdef QCA_COMPUTE_TX_DELAY
                || tx_desc->entry_timestamp_ticks != 0xffffffff
#endif
           ) {
            TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
                       "%s Potential tx_desc corruption pkt_type:0x%x pdev:0x%p",
                       __func__, tx_desc->pkt_type, pdev);
#ifdef QCA_COMPUTE_TX_DELAY
            TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "%s Timestamp:0x%x\n",
                       __func__, tx_desc->entry_timestamp_ticks);
#endif
            adf_os_assert(0);
        }
#endif
    }
    adf_os_spin_unlock_bh(&pdev->tx_mutex);
    if (!tx_desc) {
        return NULL;
    }
    tx_desc->vdev = vdev;
#if defined(CONFIG_PER_VDEV_TX_DESC_POOL)
    adf_os_atomic_inc(&vdev->tx_desc_count);
#endif

    OL_TX_TIMESTAMP_SET(tx_desc);

    return tx_desc;
}
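/**
 * wmi_htc_tx_complete() - HTC send-complete callback for WMI commands
 * @ctx: the WMI handle registered with HTC
 * @htc_pkt: completed HTC packet; its net buffer context is the
 *           original WMI command buffer
 *
 * Optionally records the completion for debugging, then frees the
 * command buffer and the HTC packet and decrements pending_cmds.
 */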
void wmi_htc_tx_complete(void *ctx, HTC_PACKET *htc_pkt)
{
	struct wmi_unified *wmi_handle = (struct wmi_unified *)ctx;
	wmi_buf_t wmi_cmd_buf = GET_HTC_PACKET_NET_BUF_CONTEXT(htc_pkt);
#ifdef WMI_INTERFACE_EVENT_LOGGING
	u_int32_t cmd_id;
#endif

	ASSERT(wmi_cmd_buf);
#ifdef WMI_INTERFACE_EVENT_LOGGING
	cmd_id = WMI_GET_FIELD(adf_nbuf_data(wmi_cmd_buf),
		WMI_CMD_HDR, COMMANDID);
	adf_os_spin_lock_bh(&wmi_handle->wmi_record_lock);
	/* Record 16 bytes of WMI cmd tx complete data
	   - exclude TLV and WMI headers */
	WMI_COMMAND_TX_CMP_RECORD(cmd_id,
		((u_int32_t *)adf_nbuf_data(wmi_cmd_buf) + 2));
	adf_os_spin_unlock_bh(&wmi_handle->wmi_record_lock);
#endif
	adf_nbuf_free(wmi_cmd_buf);
	adf_os_mem_free(htc_pkt);
	adf_os_atomic_dec(&wmi_handle->pending_cmds);
}
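/**
 * __wmi_control_rx() - process a single WMI event buffer
 * @wmi_handle: handle to WMI
 * @evt_buf: event buffer, starting with a WMI_CMD_HDR
 *
 * Strips the WMI header, validates and pads the event TLVs, and
 * dispatches to the registered per-event handler; service-ready and
 * ready events are routed to the WMA layer.  The event buffer and any
 * TLV allocations are released before returning.
 */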
void __wmi_control_rx(struct wmi_unified *wmi_handle, wmi_buf_t evt_buf)
{
	u_int32_t id;
	u_int8_t *data;
	u_int32_t len;
	void *wmi_cmd_struct_ptr = NULL;
	int tlv_ok_status = 0;

	id = WMI_GET_FIELD(adf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);

	if (adf_nbuf_pull_head(evt_buf, sizeof(WMI_CMD_HDR)) == NULL)
		goto end;

	data = adf_nbuf_data(evt_buf);
	len = adf_nbuf_len(evt_buf);

	/* Validate and pad(if necessary) the TLVs */
	tlv_ok_status = wmitlv_check_and_pad_event_tlvs(wmi_handle->scn_handle,
							data, len, id,
							&wmi_cmd_struct_ptr);
	if (tlv_ok_status != 0) {
		pr_err("%s: Error: id=0x%x, wmitlv_check_and_pad_tlvs ret=%d\n",
		       __func__, id, tlv_ok_status);
		goto end;
	}

#ifdef FEATURE_WLAN_D0WOW
	if (wmi_get_d0wow_flag(wmi_handle))
		pr_debug("%s: WMI event ID is 0x%x\n", __func__, id);
#endif

	if (id >= WMI_EVT_GRP_START_ID(WMI_GRP_START)) {
		int idx;

		idx = wmi_unified_get_event_handler_ix(wmi_handle, id);
		if (idx == -1) {
			pr_err("%s : event handler is not registered: event id 0x%x\n",
			       __func__, id);
			goto end;
		}

#ifdef WMI_INTERFACE_EVENT_LOGGING
		adf_os_spin_lock_bh(&wmi_handle->wmi_record_lock);
		/* Exclude 4 bytes of TLV header */
		WMI_EVENT_RECORD(id, ((u_int8_t *)data + 4));
		adf_os_spin_unlock_bh(&wmi_handle->wmi_record_lock);
#endif
		/* Call the WMI registered event handler */
		wmi_handle->event_handler[idx](wmi_handle->scn_handle,
					       wmi_cmd_struct_ptr, len);
		goto end;
	}

	switch (id) {
	default:
		pr_info("%s: Unhandled WMI event %d\n", __func__, id);
		break;
	case WMI_SERVICE_READY_EVENTID:
		pr_info("%s: WMI UNIFIED SERVICE READY event\n", __func__);
		wma_rx_service_ready_event(wmi_handle->scn_handle,
					   wmi_cmd_struct_ptr);
		break;
	case WMI_READY_EVENTID:
		pr_info("%s:  WMI UNIFIED READY event\n", __func__);
		wma_rx_ready_event(wmi_handle->scn_handle, wmi_cmd_struct_ptr);
		break;
	}
end:
	wmitlv_free_allocated_event_tlvs(id, &wmi_cmd_struct_ptr);
	adf_nbuf_free(evt_buf);
}
/*
 * Temporarily added to support older WMI events. We should move all events to unified
 * when the target is ready to support it.
 */
void wmi_control_rx(void *ctx, HTC_PACKET *htc_packet)
{
	struct wmi_unified *wmi_handle = (struct wmi_unified *)ctx;
	wmi_buf_t evt_buf;
	u_int32_t len;
	void *wmi_cmd_struct_ptr = NULL;
	int idx;
	int tlv_ok_status = 0;
	/* id and data are used unconditionally below, so do not guard them */
	u_int32_t id;
	u_int8_t *data;

	evt_buf = (wmi_buf_t) htc_packet->pPktContext;
	id = WMI_GET_FIELD(adf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);
	/* TX_PAUSE EVENT should be handled with tasklet context */
	if ((WMI_TX_PAUSE_EVENTID == id) ||
		(WMI_WOW_WAKEUP_HOST_EVENTID == id)) {
		if (adf_nbuf_pull_head(evt_buf, sizeof(WMI_CMD_HDR)) == NULL) {
			adf_nbuf_free(evt_buf);
			return;
		}

		data = adf_nbuf_data(evt_buf);
		len = adf_nbuf_len(evt_buf);
		tlv_ok_status = wmitlv_check_and_pad_event_tlvs(
					wmi_handle->scn_handle,
					data, len, id,
					&wmi_cmd_struct_ptr);
		if (tlv_ok_status != 0) {
			if (tlv_ok_status == 1) {
				wmi_cmd_struct_ptr = data;
			} else {
				adf_nbuf_free(evt_buf);
				return;
			}
		}

		idx = wmi_unified_get_event_handler_ix(wmi_handle, id);
		if (idx == -1) {
			wmitlv_free_allocated_event_tlvs(id,
				&wmi_cmd_struct_ptr);
			adf_nbuf_free(evt_buf);
			return;
		}
		wmi_handle->event_handler[idx](wmi_handle->scn_handle,
			       wmi_cmd_struct_ptr, len);
		wmitlv_free_allocated_event_tlvs(id, &wmi_cmd_struct_ptr);
		adf_nbuf_free(evt_buf);
		return;
	}

#ifdef WMI_INTERFACE_EVENT_LOGGING
	data = adf_nbuf_data(evt_buf);

	adf_os_spin_lock_bh(&wmi_handle->wmi_record_lock);
	/* Exclude 4 bytes of TLV header */
	WMI_RX_EVENT_RECORD(id, ((u_int8_t *)data + 4));
	adf_os_spin_unlock_bh(&wmi_handle->wmi_record_lock);
#endif
	adf_os_spin_lock_bh(&wmi_handle->eventq_lock);
	adf_nbuf_queue_add(&wmi_handle->event_queue, evt_buf);
	adf_os_spin_unlock_bh(&wmi_handle->eventq_lock);
	schedule_work(&wmi_handle->rx_event_work);
}
/* WMI command API */
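/**
 * wmi_unified_cmd_send() - send a WMI command to the target via HTC
 * @wmi_handle: handle to WMI
 * @buf: command buffer holding the TLV payload (without WMI_CMD_HDR)
 * @len: length of the TLV payload in bytes
 * @cmd_id: WMI command id
 *
 * Validates the TLV parameters, prepends the WMI header and queues the
 * packet to HTC.  On success the buffer is owned by HTC and freed in
 * wmi_htc_tx_complete(); on failure the caller keeps ownership.
 *
 * Return: EOK on success, a negative value on failure.
 */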
int wmi_unified_cmd_send(wmi_unified_t wmi_handle, wmi_buf_t buf, int len,
			 WMI_CMD_ID cmd_id)
{
	HTC_PACKET *pkt;
	A_STATUS status;
	void *vos_context;
	struct ol_softc *scn;
	A_UINT16 htc_tag = 0;

	if (wmi_get_runtime_pm_inprogress(wmi_handle))
		goto skip_suspend_check;

	if (adf_os_atomic_read(&wmi_handle->is_target_suspended) &&
			( (WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID != cmd_id) &&
			  (WMI_PDEV_RESUME_CMDID != cmd_id)) ) {
		pr_err("%s: Target is suspended  could not send WMI command: %d\n",
				__func__, cmd_id);
		VOS_ASSERT(0);
		return -EBUSY;
	} else
		goto dont_tag;

skip_suspend_check:
	switch(cmd_id) {
	case WMI_WOW_ENABLE_CMDID:
	case WMI_PDEV_SUSPEND_CMDID:
	case WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID:
	case WMI_WOW_ADD_WAKE_PATTERN_CMDID:
	case WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID:
	case WMI_PDEV_RESUME_CMDID:
	case WMI_WOW_DEL_WAKE_PATTERN_CMDID:
#ifdef FEATURE_WLAN_D0WOW
	case WMI_D0_WOW_ENABLE_DISABLE_CMDID:
#endif
		htc_tag = HTC_TX_PACKET_TAG_AUTO_PM;
		/* fall through */
	default:
		break;
	}

dont_tag:
	/* Do sanity check on the TLV parameter structure */
	{
		void *buf_ptr = (void *) adf_nbuf_data(buf);

		if (wmitlv_check_command_tlv_params(NULL, buf_ptr, len, cmd_id) != 0)
		{
			adf_os_print("\nERROR: %s: Invalid WMI Parameter Buffer for Cmd:%d\n",
				     __func__, cmd_id);
			return -1;
		}
	}

	if (adf_nbuf_push_head(buf, sizeof(WMI_CMD_HDR)) == NULL) {
		pr_err("%s, Failed to send cmd %x, no memory\n",
		       __func__, cmd_id);
		return -ENOMEM;
	}

	WMI_SET_FIELD(adf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID, cmd_id);

	adf_os_atomic_inc(&wmi_handle->pending_cmds);
	if (adf_os_atomic_read(&wmi_handle->pending_cmds) >= WMI_MAX_CMDS) {
		vos_context = vos_get_global_context(VOS_MODULE_ID_WDA, NULL);
		scn = vos_get_context(VOS_MODULE_ID_HIF, vos_context);
		pr_err("\n%s: hostcredits = %d\n", __func__,
		       wmi_get_host_credits(wmi_handle));
		HTC_dump_counter_info(wmi_handle->htc_handle);
		//dump_CE_register(scn);
		//dump_CE_debug_register(scn->hif_sc);
		adf_os_atomic_dec(&wmi_handle->pending_cmds);
		pr_err("%s: MAX 1024 WMI Pending cmds reached.\n", __func__);
		vos_set_logp_in_progress(VOS_MODULE_ID_VOSS, TRUE);
		schedule_work(&recovery_work);
		return -EBUSY;
	}

	pkt = adf_os_mem_alloc(NULL, sizeof(*pkt));
	if (!pkt) {
		adf_os_atomic_dec(&wmi_handle->pending_cmds);
		pr_err("%s, Failed to alloc htc packet %x, no memory\n",
		       __func__, cmd_id);
		return -ENOMEM;
	}

	SET_HTC_PACKET_INFO_TX(pkt,
			NULL,
			adf_nbuf_data(buf),
			len + sizeof(WMI_CMD_HDR),
			/* htt_host_data_dl_len(buf)+20 */
			wmi_handle->wmi_endpoint_id,
			htc_tag);

	SET_HTC_PACKET_NET_BUF_CONTEXT(pkt, buf);

	WMA_LOGD("Send WMI command:%s command_id:%d",
			get_wmi_cmd_string(cmd_id), cmd_id);

#ifdef WMI_INTERFACE_EVENT_LOGGING
	adf_os_spin_lock_bh(&wmi_handle->wmi_record_lock);
	/* Record 16 bytes of WMI cmd data - exclude TLV and WMI headers */
	WMI_COMMAND_RECORD(cmd_id, ((u_int32_t *)adf_nbuf_data(buf) + 2));
	adf_os_spin_unlock_bh(&wmi_handle->wmi_record_lock);
#endif

	status = HTCSendPkt(wmi_handle->htc_handle, pkt);

	if (A_OK != status) {
		adf_os_atomic_dec(&wmi_handle->pending_cmds);
		pr_err("%s %d, HTCSendPkt failed\n", __func__, __LINE__);
	}

	return (status == A_OK) ? EOK : -1;
}
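/*
 * Usage sketch (illustrative only, not part of this file): a typical
 * caller allocates a TLV buffer, fills in the fixed-param TLV and then
 * hands the buffer to wmi_unified_cmd_send(); on failure the caller
 * still owns the buffer and must free it.  The suspend command is used
 * here purely as an example.
 */
#if 0
static int wma_suspend_example(wmi_unified_t wmi_handle, u_int32_t opt)
{
	wmi_pdev_suspend_cmd_fixed_param *cmd;
	wmi_buf_t wmibuf;
	u_int32_t len = sizeof(*cmd);

	wmibuf = wmi_buf_alloc(wmi_handle, len);
	if (wmibuf == NULL)
		return -ENOMEM;

	cmd = (wmi_pdev_suspend_cmd_fixed_param *)wmi_buf_data(wmibuf);
	WMITLV_SET_HDR(&cmd->tlv_header,
		WMITLV_TAG_STRUC_wmi_pdev_suspend_cmd_fixed_param,
		WMITLV_GET_STRUCT_TLVLEN(wmi_pdev_suspend_cmd_fixed_param));
	cmd->suspend_opt = opt;

	if (wmi_unified_cmd_send(wmi_handle, wmibuf, len,
				 WMI_PDEV_SUSPEND_CMDID)) {
		/* send failed, so the buffer is still ours to free */
		adf_nbuf_free(wmibuf);
		return -1;
	}
	return 0;
}
#endif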
/*
 * Process a radar event.
 *
 * If a radar event is found, return 1.  Otherwise, return 0.
 *
 * There is currently no way to specify that a radar event has occurred on
 * a specific channel, so the current methodology is to mark both the pri
 * and ext channels as being unavailable.  This should be fixed for 802.11ac
 * or we'll quickly run out of valid channels to use.
 */
int
dfs_process_radarevent(struct ath_dfs *dfs, struct ieee80211_channel *chan)
{
/* #if 0 commented out for now to validate the radar indication msg to SAP */
    struct dfs_event re,*event;
    struct dfs_state *rs=NULL;
    struct dfs_filtertype *ft;
    struct dfs_filter *rf;
    int found, retval = 0, p, empty;
    int events_processed = 0;
    u_int32_t tabledepth, index;
    u_int64_t deltafull_ts = 0, this_ts, deltaT;
    struct ieee80211_channel *thischan;
    struct dfs_pulseline *pl;
    static u_int32_t  test_ts  = 0;
    static u_int32_t  diff_ts  = 0;
    int ext_chan_event_flag = 0;
#if 0
    int pri_multiplier = 2;
#endif
    int i;

    if (dfs == NULL) {
        VOS_TRACE(VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
                  "%s[%d]: dfs is NULL", __func__, __LINE__);
        return 0;
    }
    pl = dfs->pulses;

    adf_os_spin_lock_bh(&dfs->ic->chan_lock);
    if (!(IEEE80211_IS_CHAN_DFS(dfs->ic->ic_curchan))) {
        adf_os_spin_unlock_bh(&dfs->ic->chan_lock);
        DFS_DPRINTK(dfs, ATH_DEBUG_DFS2, "%s: radar event on non-DFS chan",
                    __func__);
        dfs_reset_radarq(dfs);
        dfs_reset_alldelaylines(dfs);
        return 0;
    }
    adf_os_spin_unlock_bh(&dfs->ic->chan_lock);
#ifndef ATH_DFS_RADAR_DETECTION_ONLY
   /* TEST: simulate a radar bang; make sure we add the channel to the NOL
    * (bug 29968) */
   if (dfs->dfs_bangradar) {
      /* bangradar always simulates radar found on the primary channel */
      rs = &dfs->dfs_radar[dfs->dfs_curchan_radindex];
      dfs->dfs_bangradar = 0; /* reset */
      DFS_DPRINTK(dfs, ATH_DEBUG_DFS, "%s: bangradar", __func__);
      retval = 1;
      goto dfsfound;
   }
#endif

   /*
    * The HW may miss some pulses, especially with high channel loading.
    * This is true for Japan W53, where channel loading is 50%.  For ETSI,
    * where channel loading is 30%, this can be an issue too.  To take care
    * of missing pulses, we introduce the pri_margin multiplier.  This is
    * normally 2 but can be higher for W53.
    */

        if ((dfs->dfsdomain  == DFS_MKK4_DOMAIN) &&
            (dfs->dfs_caps.ath_chip_is_bb_tlv) &&
            (chan->ic_freq < FREQ_5500_MHZ)) {

            dfs->dfs_pri_multiplier = dfs->dfs_pri_multiplier_ini;

            /* do not process W53 pulses,
               unless we have a minimum number of them
             */
            if (dfs->dfs_phyerr_w53_counter >= 5) {
               DFS_DPRINTK(dfs, ATH_DEBUG_DFS1,
                       "%s: w53_counter=%d, freq_max=%d, "
                       "freq_min=%d, pri_multiplier=%d",
                       __func__,
                       dfs->dfs_phyerr_w53_counter,
                       dfs->dfs_phyerr_freq_max,
                       dfs->dfs_phyerr_freq_min,
                       dfs->dfs_pri_multiplier);
                dfs->dfs_phyerr_freq_min     = 0x7fffffff;
                dfs->dfs_phyerr_freq_max     = 0;
            } else {
                return 0;
            }
        }
        DFS_DPRINTK(dfs, ATH_DEBUG_DFS1,
                    "%s: pri_multiplier=%d",
                    __func__,
                    dfs->dfs_pri_multiplier);

   ATH_DFSQ_LOCK(dfs);
   empty = STAILQ_EMPTY(&(dfs->dfs_radarq));
   ATH_DFSQ_UNLOCK(dfs);

   while ((!empty) && (!retval) && (events_processed < MAX_EVENTS)) {
      ATH_DFSQ_LOCK(dfs);
      event = STAILQ_FIRST(&(dfs->dfs_radarq));
      if (event != NULL)
         STAILQ_REMOVE_HEAD(&(dfs->dfs_radarq), re_list);
      ATH_DFSQ_UNLOCK(dfs);

      if (event == NULL) {
         empty = 1;
         VOS_TRACE(VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
                   "%s[%d]: event is NULL", __func__, __LINE__);
         break;
      }
      events_processed++;
      re = *event;

      OS_MEMZERO(event, sizeof(struct dfs_event));
      ATH_DFSEVENTQ_LOCK(dfs);
      STAILQ_INSERT_TAIL(&(dfs->dfs_eventq), event, re_list);
      ATH_DFSEVENTQ_UNLOCK(dfs);

      found = 0;

      adf_os_spin_lock_bh(&dfs->ic->chan_lock);
      if (dfs->ic->disable_phy_err_processing) {
         ATH_DFSQ_LOCK(dfs);
         empty = STAILQ_EMPTY(&(dfs->dfs_radarq));
         ATH_DFSQ_UNLOCK(dfs);
         adf_os_spin_unlock_bh(&dfs->ic->chan_lock);
         continue;
      }

      adf_os_spin_unlock_bh(&dfs->ic->chan_lock);

      if (re.re_chanindex < DFS_NUM_RADAR_STATES)
         rs = &dfs->dfs_radar[re.re_chanindex];
      else {
         ATH_DFSQ_LOCK(dfs);
         empty = STAILQ_EMPTY(&(dfs->dfs_radarq));
         ATH_DFSQ_UNLOCK(dfs);
         continue;
      }
      if (rs->rs_chan.ic_flagext & CHANNEL_INTERFERENCE) {
         ATH_DFSQ_LOCK(dfs);
         empty = STAILQ_EMPTY(&(dfs->dfs_radarq));
         ATH_DFSQ_UNLOCK(dfs);
         continue;
      }

      if (dfs->dfs_rinfo.rn_lastfull_ts == 0) {
         /*
          * Either not started, or 64-bit rollover exactly to zero
          * Just prepend zeros to the 15-bit ts
          */
         dfs->dfs_rinfo.rn_ts_prefix = 0;
      } else {
         /*
          * WAR 23031 - patch duplicate ts on very short pulses.
          * This patch has two problems in a Linux environment:
          * 1) The timestamp created, and hence the PRI, depends entirely on
          *    the latency.  If the latency is high, it can split two
          *    consecutive pulses in the same burst so far apart (by the
          *    amount of the latency) that they look like they are from
          *    different bursts.  This is observed to happen too often, and
          *    it surely makes detection fail.
          * 2) Even if the latency is not that bad, it simply shifts the
          *    duplicate timestamps to new duplicate timestamps based on how
          *    they are processed.  This is no worse, but not good either.
          *
          * Take this pulse as a good one and create a probable PRI later.
          */
         if (re.re_dur == 0 && re.re_ts == dfs->dfs_rinfo.rn_last_unique_ts) {
            debug_dup[debug_dup_cnt++] = '1';
            DFS_DPRINTK(dfs, ATH_DEBUG_DFS1, " %s deltaT is 0 ", __func__);
         } else {
            dfs->dfs_rinfo.rn_last_unique_ts = re.re_ts;
            debug_dup[debug_dup_cnt++] = '0';
         }
         if (debug_dup_cnt >= 32) {
            debug_dup_cnt = 0;
         }

         if (re.re_ts <= dfs->dfs_rinfo.rn_last_ts) {
            dfs->dfs_rinfo.rn_ts_prefix +=
               (((u_int64_t) 1) << DFS_TSSHIFT);
            /* Now, see if it's been more than 1 wrap */
            deltafull_ts = re.re_full_ts - dfs->dfs_rinfo.rn_lastfull_ts;
            if (deltafull_ts >
                ((u_int64_t)((DFS_TSMASK - dfs->dfs_rinfo.rn_last_ts) + 1 + re.re_ts)))
               deltafull_ts -= (DFS_TSMASK - dfs->dfs_rinfo.rn_last_ts) + 1 + re.re_ts;
            deltafull_ts = deltafull_ts >> DFS_TSSHIFT;
            if (deltafull_ts > 1) {
               dfs->dfs_rinfo.rn_ts_prefix +=
                  ((deltafull_ts - 1) << DFS_TSSHIFT);
            }
         } else {
            deltafull_ts = re.re_full_ts - dfs->dfs_rinfo.rn_lastfull_ts;
            if (deltafull_ts > (u_int64_t) DFS_TSMASK) {
               deltafull_ts = deltafull_ts >> DFS_TSSHIFT;
               dfs->dfs_rinfo.rn_ts_prefix +=
                  ((deltafull_ts - 1) << DFS_TSSHIFT);
            }
         }
/* Interface functions */
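/**
 * ol_ath_node_alloc() - allocate an offload node and its target peer
 * @vap: owning vap
 * @macaddr: peer MAC address
 * @tmpnode: true for temporary nodes that need no target peer
 *
 * Bumps the scn peer count against the configured limit, allocates and
 * zeroes the node, and, except for temporary nodes and self-peers,
 * creates the peer in the target and attaches a txrx peer.
 */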
static struct ieee80211_node *
ol_ath_node_alloc(struct ieee80211vap *vap, const u_int8_t *macaddr, bool tmpnode)
{
    struct ieee80211com *ic = vap->iv_ic;
    struct ol_ath_vap_net80211 *avn = OL_ATH_VAP_NET80211(vap);
    struct ol_ath_softc_net80211 *scn = OL_ATH_SOFTC_NET80211(ic);
    struct ol_ath_node_net80211 *anode;

    adf_os_spin_lock_bh(&scn->scn_lock);
    scn->peer_count++;
    if (scn->peer_count > scn->wlan_resource_config.num_peers) {
        adf_os_spin_unlock_bh(&scn->scn_lock);
        printk("%s: vap (%p) scn (%p) the peer count exceeds the supported number %d \n",
                __func__, vap, scn, scn->wlan_resource_config.num_peers);
        goto err_node_alloc;
    }
    adf_os_spin_unlock_bh(&scn->scn_lock);

    anode = (struct ol_ath_node_net80211 *)OS_MALLOC(scn->sc_osdev,
                                                  sizeof(struct ol_ath_node_net80211),
                                                  GFP_ATOMIC);
    if (anode == NULL)
        goto err_node_alloc;

    OS_MEMZERO(anode, sizeof(struct ol_ath_node_net80211));

    anode->an_node.ni_vap = vap;

    /* do not create/delete peer on target for temp nodes and self-peers */
    if (!tmpnode && !is_node_self_peer(vap, macaddr)) {
        if (wmi_unified_peer_create_send(scn->wmi_handle, macaddr, avn->av_if_id)) {
            printk("%s : Unable to create peer in target\n", __func__);
            OS_FREE(anode);
            goto err_node_alloc;
        }

        adf_os_spin_lock_bh(&scn->scn_lock);
        anode->an_txrx_handle = ol_txrx_peer_attach(scn->pdev_txrx_handle,
                avn->av_txrx_handle, (u_int8_t *) macaddr);

        if (anode->an_txrx_handle == NULL) {
            adf_os_spin_unlock_bh(&scn->scn_lock);
            printk("%s : Unable to attach txrx peer\n", __func__);
            OS_FREE(anode);
            goto err_node_alloc;
        }
        adf_os_spin_unlock_bh(&scn->scn_lock);

        /* static wep keys stored in vap needs to be
         * pushed to all nodes except self node
         */
        if(IEEE80211_VAP_IS_PRIVACY_ENABLED(vap) &&
                (OS_MEMCMP(macaddr,vap->iv_myaddr,IEEE80211_ADDR_LEN) != 0 )) {
            set_node_wep_keys(vap,macaddr);
        }
    }

    return &anode->an_node;

err_node_alloc:
    adf_os_spin_lock_bh(&scn->scn_lock);
    scn->peer_count--;
    adf_os_spin_unlock_bh(&scn->scn_lock);
    return NULL;
}
   /*
    * Calling netif_stop_queue frequently will hurt the mboxping tx
    * throughput, so return HTC_SEND_FULL_KEEP directly in
    * epping_tx_queue_full to avoid it.
    */
   return HTC_SEND_FULL_KEEP;
}
#endif /* HIF_SDIO */
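/**
 * epping_tx_complete_multiple() - HTC multi-packet send-complete handler
 * @ctx: epping context registered with HTC
 * @pPacketQueue: queue of completed HTC packets
 *
 * Recycles each packet's cookie, collects the underlying skbs onto a
 * local queue so they can be freed without holding data_lock, and
 * wakes the netif queue unless the completions indicate a flush.
 */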
void epping_tx_complete_multiple(void *ctx,
   HTC_PACKET_QUEUE *pPacketQueue)
{
   epping_context_t *pEpping_ctx = (epping_context_t *)ctx;
   epping_adapter_t *pAdapter = pEpping_ctx->epping_adapter;
   struct net_device* dev = pAdapter->dev;
   A_STATUS status;
   HTC_ENDPOINT_ID eid;
   adf_nbuf_t pktSkb;
   struct epping_cookie *cookie;
   A_BOOL flushing = FALSE;
   adf_nbuf_queue_t skb_queue;
   HTC_PACKET *htc_pkt;

   adf_nbuf_queue_init(&skb_queue);

   adf_os_spin_lock_bh(&pAdapter->data_lock);

   while (!HTC_QUEUE_EMPTY(pPacketQueue)) {
      htc_pkt = HTC_PACKET_DEQUEUE(pPacketQueue);
      if (htc_pkt == NULL)
         break;
      status = htc_pkt->Status;
      eid = htc_pkt->Endpoint;
      pktSkb = GET_HTC_PACKET_NET_BUF_CONTEXT(htc_pkt);
      cookie = htc_pkt->pPktContext;

      if (!pktSkb) {
         EPPING_LOG(VOS_TRACE_LEVEL_ERROR,
            "%s: pktSkb is NULL", __func__);
         ASSERT(0);
      } else {
         if (htc_pkt->pBuffer != adf_nbuf_data(pktSkb)) {
            EPPING_LOG(VOS_TRACE_LEVEL_ERROR,
               "%s: htc_pkt buffer not equal to skb->data", __func__);
            ASSERT(0);
         }

         /* add this to the list, use faster non-lock API */
         adf_nbuf_queue_add(&skb_queue, pktSkb);

         if (A_SUCCESS(status)) {
            if (htc_pkt->ActualLength != adf_nbuf_len(pktSkb)) {
               EPPING_LOG(VOS_TRACE_LEVEL_ERROR,
                  "%s: htc_pkt length not equal to skb->len", __func__);
               ASSERT(0);
            }
         }
      }

      EPPING_LOG(VOS_TRACE_LEVEL_INFO,
         "%s skb=%p data=%p len=0x%x eid=%d ",
         __func__, pktSkb, htc_pkt->pBuffer,
         htc_pkt->ActualLength, eid);

      if (A_FAILED(status)) {
         if (status == A_ECANCELED) {
            /* a packet was flushed  */
            flushing = TRUE;
         }
         if (status != A_NO_RESOURCE) {
            printk("%s() -TX ERROR, status: 0x%x\n", __func__,
               status);
         }
      } else {
         EPPING_LOG(VOS_TRACE_LEVEL_INFO, "%s: OK\n", __func__);
         flushing = FALSE;
      }

      epping_free_cookie(pAdapter->pEpping_ctx, cookie);
   }

   adf_os_spin_unlock_bh(&pAdapter->data_lock);

   /* free all skbs in our local list */
   while (adf_nbuf_queue_len(&skb_queue)) {
      /* use non-lock version */
      pktSkb = adf_nbuf_queue_remove(&skb_queue);
      if (pktSkb == NULL)
         break;
      adf_nbuf_tx_free(pktSkb, ADF_NBUF_PKT_ERROR);
      pEpping_ctx->total_tx_acks++;
   }

   if (!flushing) {
      netif_wake_queue(dev);
   }
}
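/**
 * epping_tx_send() - transmit an epping packet
 * @skb: packet to send
 * @pAdapter: epping adapter
 *
 * Flushes any queued no-drop control packets first, then sends @skb.
 * On failure, droppable packets are freed and counted; no-drop packets
 * are queued and the retry timer is armed.
 */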
int epping_tx_send(adf_nbuf_t skb, epping_adapter_t *pAdapter)
{
   adf_nbuf_t nodrop_skb;
   EPPING_HEADER *eppingHdr;
   A_UINT8 ac = 0;

   eppingHdr = (EPPING_HEADER *)adf_nbuf_data(skb);

   if (!IS_EPPING_PACKET(eppingHdr)) {
      EPPING_LOG(VOS_TRACE_LEVEL_FATAL,
         "%s: Received non endpoint ping packets\n", __func__);
      /* no packet to send, cleanup */
      adf_nbuf_free(skb);
      return -ENOMEM;
   }

   /* the stream ID is mapped to an access class */
   ac = eppingHdr->StreamNo_h;
   /* hard coded two ep ids */
   if (ac != 0 && ac != 1) {
      EPPING_LOG(VOS_TRACE_LEVEL_FATAL,
         "%s: ac %d is not mapped to mboxping service\n", __func__, ac);
      adf_nbuf_free(skb);
      return -ENOMEM;
   }

   /*
    * Some EPPING packets cannot be dropped no matter what access class
    * they were sent on.  Special care is taken:
    * 1. when there is no TX resource, queue the control packets to
    *    a special queue
    * 2. when there is TX resource, send the queued control packets first
    *    and then other packets
    * 3. a timer is launched to check for queued control packets and
    *    flush them
    */

   /* check the nodrop queue first */
   while ((nodrop_skb = adf_nbuf_queue_remove(&pAdapter->nodrop_queue))) {
      HTCSetNodropPkt(pAdapter->pEpping_ctx->HTCHandle, TRUE);
      if (epping_tx_send_int(nodrop_skb, pAdapter)) {
         EPPING_LOG(VOS_TRACE_LEVEL_FATAL,
            "%s: nodrop: %p xmit fail\n", __func__, nodrop_skb);
         /* fail to xmit so put the nodrop packet to the nodrop queue */
         adf_nbuf_queue_insert_head(&pAdapter->nodrop_queue, nodrop_skb);
         /* no cookie so free the current skb */
         goto tx_fail;
      } else {
         HTCSetNodropPkt(pAdapter->pEpping_ctx->HTCHandle, FALSE);
         EPPING_LOG(VOS_TRACE_LEVEL_INFO,
            "%s: nodrop: %p xmit ok\n", __func__, nodrop_skb);
      }
   }

   /* send the original packet */
   if (epping_tx_send_int(skb, pAdapter))
      goto tx_fail;

   return 0;

tx_fail:
   if (!IS_EPING_PACKET_NO_DROP(eppingHdr)) {
      /* allow to drop the skb so drop it */
      adf_nbuf_free(skb);
      ++pAdapter->stats.tx_dropped;
      EPPING_LOG(VOS_TRACE_LEVEL_FATAL,
         "%s: Tx skb %p dropped, stats.tx_dropped = %lu\n",
         __func__, skb, pAdapter->stats.tx_dropped);
      return -ENOMEM;
   } else {
      EPPING_LOG(VOS_TRACE_LEVEL_FATAL,
                 "%s: nodrop: %p queued\n", __func__, skb);
      adf_nbuf_queue_add(&pAdapter->nodrop_queue, skb);
      adf_os_spin_lock_bh(&pAdapter->data_lock);
      if (pAdapter->epping_timer_state != EPPING_TX_TIMER_RUNNING) {
         pAdapter->epping_timer_state = EPPING_TX_TIMER_RUNNING;
         adf_os_timer_mod(&pAdapter->epping_timer, TX_RETRY_TIMEOUT_IN_MS);
      }
      adf_os_spin_unlock_bh(&pAdapter->data_lock);
   }

   return 0;
}