/*
 * Push an updated target-resident structure down to the firmware
 * over the WMI channel.
 */
void
ath_htc_ic_update_target(struct ieee80211com *ic, void *target, int size)
{
    struct ath_softc_net80211 *scn = ATH_SOFTC_NET80211(ic);

    scn->sc_ops->ath_wmi_ic_update_target(scn->sc_dev, target, size);
}
/*
 * Fetch the configured TX chainmask (ATH_PARAM_TXCHAINMASK) into the
 * caller-supplied buffer vtar.
 */
void
ath_get_config_chainmask(struct ieee80211com *ic, void *vtar)
{
    struct ath_softc_net80211 *scn = ATH_SOFTC_NET80211(ic);

    scn->sc_ops->ath_get_config_param(scn->sc_dev, ATH_PARAM_TXCHAINMASK, vtar);
}
/* Forward a node state update to the target firmware via WMI. */
void
ath_update_node_target(struct ieee80211com *ic, void *ntar, int size)
{
    struct ath_softc_net80211 *scn = ATH_SOFTC_NET80211(ic);

    scn->sc_ops->ath_wmi_update_node(scn->sc_dev, ntar, size);
}
int ath_get_num_mapped_dst(struct ieee80211com *ic, int channel) { struct ath_softc_net80211 *scn = ATH_SOFTC_NET80211(ic); struct ath_softc *sc = ATH_DEV_TO_SC(scn->sc_dev); return sc->sc_aow.chan_addr[channel].dst_cnt; }
void ath_remove_audio_channel(struct ieee80211com* ic, int channel, struct ether_addr* macaddr) { struct ath_softc_net80211 *scn = ATH_SOFTC_NET80211(ic); struct ath_softc *sc = ATH_DEV_TO_SC(scn->sc_dev); int i = 0; if ((channel < 0) || (channel > (AOW_MAX_AUDIO_CHANNELS - 1))) { return; } for (i = 0 ; i < AOW_MAX_RECEIVER_COUNT; i++) { if (!IS_ETHER_ADDR_NULL(sc->sc_aow.chan_addr[channel].addr[i].octet)) { if (IEEE80211_ADDR_EQ(&sc->sc_aow.chan_addr[channel].addr[i], macaddr)) { memset(&sc->sc_aow.chan_addr[channel].addr[i], 0 , IEEE80211_ADDR_LEN); sc->sc_aow.chan_addr[channel].dst_cnt--; sc->sc_aow.mapped_recv_cnt--; } } } if (!sc->sc_aow.chan_addr[channel].dst_cnt) { sc->sc_aow.chan_addr[channel].valid = AH_FALSE; sc->sc_aow.chan_addr[channel].seqno = 0; ic->ic_aow.channel_set_flag &= ~(1 << channel); } }
/*
 * Set the AoW playback latency (val in milliseconds).  Rejected while
 * audio is active or when outside the allowed range.  Also derives the
 * VO receive timeout and restarts the active sync mechanism (ES/ESS)
 * so it picks up the new latency.
 */
void
ath_set_aow_latency(struct ieee80211com *ic, u_int32_t val)
{
    struct ath_softc_net80211 *scn = ATH_SOFTC_NET80211(ic);
    struct ath_softc *sc = ATH_DEV_TO_SC(scn->sc_dev);

    /* Latency changes are only allowed while audio is stopped. */
    if (!is_aow_audio_stopped(ic)) {
        IEEE80211_AOW_DPRINTF("Device busy\n");
        return;
    }

    /* AOW_MIN/MAX_LATENCY appear to be microseconds; val is ms. */
    if ((val < (AOW_MIN_LATENCY / 1000)) || (val > (AOW_MAX_LATENCY / 1000))) {
        IEEE80211_AOW_DPRINTF("Invalid value. Min:%ums Max:%ums\n",
                              (AOW_MIN_LATENCY / 1000),
                              (AOW_MAX_LATENCY / 1000));
        return;
    }

    sc->sc_aow.latency = val;
    sc->sc_aow.latency_us = val * 1000;
    sc->sc_rxtimeout[WME_AC_VO] = sc->sc_aow.latency -
        (ic->ic_get_aow_rx_proc_time(ic) + ATH_RXTIMEOUT_BUFFER)/1000;

    if (AOW_ES_ENAB(ic)) {
        /* Reset ES so it runs with the new latency. */
        ic->ic_set_aow_es(ic, 0);
        ic->ic_set_aow_es(ic, 1);
    } else if (AOW_ESS_ENAB(ic)) {
        /* Reset ESS likewise. */
        ic->ic_set_aow_ess(ic, 0);
        ic->ic_set_aow_ess(ic, 1);
    }
}
/* Return the configured AoW software retry limit. */
u_int32_t
ath_get_swretries(struct ieee80211com *ic)
{
    struct ath_softc_net80211 *scn = ATH_SOFTC_NET80211(ic);
    struct ath_softc *sc = ATH_DEV_TO_SC(scn->sc_dev);

    return sc->sc_aow.sw_retry_limit;
}
/* Announce a new VAP to the target firmware over WMI. */
void
ath_add_vap_target(struct ieee80211com *ic, void *vtar, int size)
{
    struct ath_softc_net80211 *scn = ATH_SOFTC_NET80211(ic);

    scn->sc_ops->ath_wmi_add_vap(scn->sc_dev, vtar, size);
}
/*
 * Return a rate statistic for a node.  IEEE80211_RATE_TX refreshes the
 * cached average TX rate from the target firmware (queried by target
 * node index over WMI); the other selectors return cached values.
 * Unknown selectors yield 0.
 */
u_int32_t
ath_net80211_htc_node_getrate(const struct ieee80211_node *ni, u_int8_t type)
{
    struct ieee80211com *ic = ni->ni_ic;
    struct ath_softc_net80211 *scn = ATH_SOFTC_NET80211(ic);
    u_int8_t tgt_index;

    /* Look up the target-side node index (needed by the RATE_TX query). */
    tgt_index = ath_find_tgt_node_index((struct ieee80211_node *)ni);

    switch (type) {
    case IEEE80211_RATE_TX:
        ATH_NODE_NET80211(ni)->an_avgtxrate =
            scn->sc_ops->ath_wmi_node_getrate(scn->sc_dev, &tgt_index,
                                              sizeof(tgt_index));
        return ATH_NODE_NET80211(ni)->an_avgtxrate;
    case IEEE80211_LASTRATE_TX:
        return ATH_NODE_NET80211(ni)->an_lasttxrate;
    case IEEE80211_RATECODE_TX:
        return ATH_NODE_NET80211(ni)->an_txratecode;
    case IEEE80211_RATE_RX:
        return ATH_RATE_OUT(ATH_NODE_NET80211(ni)->an_avgrxrate);
    default:
        return 0;
    }
}
/*
 * U-APSD credit update hook: on MAGPIE_HIF_GMAC builds, walk every
 * node and drain its pending U-APSD frames.  No-op otherwise.
 */
void
ath_net80211_uapsd_creditupdate(ieee80211_handle_t ieee)
{
#ifdef MAGPIE_HIF_GMAC
    struct ieee80211com *ic = NET80211_HANDLE(ieee);
    struct ath_softc_net80211 *scn = ATH_SOFTC_NET80211(ic);

    ieee80211_iterate_node(ic, ath_net80211_drain_uapsd, scn);
#endif
}
/*
 * Set the AoW Error Recovery (ER) flag.  ER is not supported in this
 * release, so the requested value is ignored and the feature is forced
 * off.
 *
 * Fix: the original stored `val ? 1 : 0` into sc_aow.er and then
 * immediately overwrote it with 0 — a dead store.  The behavior
 * (er always ends up 0, message always printed) is unchanged.
 */
void
ath_set_aow_er(struct ieee80211com *ic, u_int32_t val)
{
    struct ath_softc_net80211 *scn = ATH_SOFTC_NET80211(ic);
    struct ath_softc *sc = ATH_DEV_TO_SC(scn->sc_dev);

    (void)val; /* ER not supported in this release */
    IEEE80211_AOW_DPRINTF("Error Recovery : Not supported\n");
    sc->sc_aow.er = 0;
}
/*
 * Return TX rate info for a node from the lower layer; the op is
 * optional, so report 0 when it is not provided.
 */
u_int32_t
ath_get_aow_tx_rate_info(struct ieee80211_node* ni)
{
    struct ieee80211com *ic = ni->ni_ic;
    struct ath_softc_net80211 *scn = ATH_SOFTC_NET80211(ic);
    struct ath_node_net80211 *an = (struct ath_node_net80211 *)ni;

    if (!scn->sc_ops->ath_get_tx_rate_info) {
        return 0;
    }
    return scn->sc_ops->ath_get_tx_rate_info(scn->sc_dev, an->an_sta);
}
/*
 * This function is called when we have successsfully transmitted EOSP.
 * It clears the SP flag so that we are ready to accept more triggers
 * from this node.
 *
 * Fixes: removed a duplicate `node == NULL` check that was dead code
 * (it repeated the first check, after the pointer had already been
 * used), and removed the shadowed/unused inner `scn`/`ic` locals —
 * the outer `scn` is now the one used by the debug print.
 */
void
ath_net80211_uapsd_eospindicate(ieee80211_node_t node, wbuf_t wbuf, int txok, int force_eosp)
{
    struct ieee80211_qosframe *qwh;
    struct ieee80211_node *ni;
    struct ath_softc_net80211 *scn;

    if (node == NULL)
        return;

    qwh = (struct ieee80211_qosframe *)wbuf_header(wbuf);
    ni = (struct ieee80211_node *)node;
    scn = ATH_SOFTC_NET80211(ni->ni_ic);

    if ((qwh->i_fc[0] == (IEEE80211_FC0_SUBTYPE_QOS|IEEE80211_FC0_TYPE_DATA)) ||
        (qwh->i_fc[0] == (IEEE80211_FC0_SUBTYPE_QOS_NULL|IEEE80211_FC0_TYPE_DATA)) ||
        force_eosp)
    {
        if (
#if ATH_SUPPORT_WIFIPOS
            (ni->ni_flags & IEEE80211_NODE_WAKEUP) ||
#endif
            (qwh->i_qos[0] & IEEE80211_QOS_EOSP) || force_eosp)
        {
            /* Service period complete: accept new triggers from this node. */
            ni->ni_flags &= ~IEEE80211_NODE_UAPSD_SP;
            DPRINTF(scn, ATH_DEBUG_UAPSD, "%s : End SP\n", __func__);
            if (!txok)
                ni->ni_stats.ns_tx_eosplost++;
        }
    }

    if ((qwh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_MGT) {
        /* clear the SP for node */
        ni->ni_flags &= ~IEEE80211_NODE_UAPSD_SP;
    }

    return;
}
/*
 * Read the short (RTS) retry limit from the VO hardware queue and
 * convert it back to the user-visible retry count (the queue property
 * stores tries, i.e. retries + 1 — see ath_set_aow_rtsretries).
 */
u_int16_t
ath_get_aow_rtsretries(struct ieee80211com *ic)
{
    struct ath_softc_net80211 *scn = ATH_SOFTC_NET80211(ic);
    struct ath_softc *sc = ATH_DEV_TO_SC(scn->sc_dev);
    u_int16_t tries = 0;

    scn->sc_ops->get_txqproperty(scn->sc_dev,
                                 sc->sc_haltype2q[HAL_WME_AC_VO],
                                 TXQ_PROP_SHORT_RETRY_LIMIT,
                                 &tries);
    return (tries - 1);
}
/*
 * Enable/disable local AoW playback: bring the I2S interface up when
 * enabling, tear it down (and clear its stop flag) when disabling.
 */
void
ath_set_aow_playlocal(struct ieee80211com *ic, u_int32_t val)
{
    struct ath_softc_net80211 *scn = ATH_SOFTC_NET80211(ic);
    struct ath_softc *sc = ATH_DEV_TO_SC(scn->sc_dev);

    sc->sc_aow.playlocal = val;

    if (!val) {
        aow_i2s_deinit(ic);
        CLR_I2S_STOP_FLAG(ic->ic_aow.i2s_flags);
        return;
    }

    aow_i2s_init(ic);
}
/*
 * Arm the WMM parameter update.  Windows builds have a dedicated thread
 * that polls sc_htc_wmm_update_enabled; the Linux driver uses a work
 * queue, so the update must also be scheduled explicitly here.
 */
int
ath_htc_wmm_update_enable(struct ieee80211com *ic)
{
    struct ath_softc_net80211 *scn = ATH_SOFTC_NET80211(ic);

    scn->sc_htc_wmm_update_enabled = 1;
    scn->sc_ops->ath_schedule_wmm_update(scn->sc_dev);

    return 0;
}
/*
 * Fetch (and post-increment) the sequence number for an AoW audio
 * channel.  Returns AH_TRUE and stores the value in *seqno on success;
 * AH_FALSE on an out-of-range or unconfigured channel.
 *
 * Fix: the original returned AH_TRUE even when the channel mapping was
 * not valid, leaving *seqno untouched — a caller trusting the return
 * value would read an uninitialized/stale sequence number.
 */
int
ath_get_aow_chan_seqno(struct ieee80211com *ic, int channel, int* seqno)
{
    struct ath_softc_net80211 *scn = ATH_SOFTC_NET80211(ic);
    struct ath_softc *sc = ATH_DEV_TO_SC(scn->sc_dev);

    if ((channel < 0) || (channel > (AOW_MAX_AUDIO_CHANNELS - 1))) {
        return AH_FALSE;
    }

    if (!sc->sc_aow.chan_addr[channel].valid) {
        return AH_FALSE;
    }

    *seqno = sc->sc_aow.chan_addr[channel].seqno++;
    return AH_TRUE;
}
int ath_get_aow_macaddr(struct ieee80211com *ic, int channel, int index, struct ether_addr *macaddr) { struct ath_softc_net80211 *scn = ATH_SOFTC_NET80211(ic); struct ath_softc *sc = ATH_DEV_TO_SC(scn->sc_dev); int ret = AH_FALSE; if ((channel < 0) || (channel > (AOW_MAX_AUDIO_CHANNELS - 1))) { return AH_FALSE; } if (sc->sc_aow.chan_addr[channel].valid) { OS_MEMCPY(macaddr, &sc->sc_aow.chan_addr[channel].addr[index], sizeof(struct ether_addr)); ret = AH_TRUE; } return ret; }
/*
 * Clear AoW channel-to-receiver mappings.  A `val` below
 * AOW_MAX_AUDIO_CHANNELS clears that single channel; any larger value
 * clears every channel.  The global mapped receiver count is reset in
 * both cases.
 *
 * Fix: the original tested `if (val < 0)` — but val is u_int32_t, so
 * the comparison was always false (dead branch, -Wtype-limits warning).
 * The dead check and its error print have been removed; observable
 * behavior is unchanged.
 */
void
ath_clear_audio_channel_list(struct ieee80211com *ic, u_int32_t val)
{
    struct ath_softc_net80211 *scn = ATH_SOFTC_NET80211(ic);
    struct ath_softc *sc = ATH_DEV_TO_SC(scn->sc_dev);
    int i;
    int j;

    /* clear the mapped receiver count */
    sc->sc_aow.mapped_recv_cnt = 0;

    if (val < AOW_MAX_AUDIO_CHANNELS) {
        /* Clear a single channel entry. */
        sc->sc_aow.chan_addr[val].channel = 0;
        sc->sc_aow.chan_addr[val].valid = AH_FALSE;
        sc->sc_aow.chan_addr[val].seqno = 0;
        sc->sc_aow.chan_addr[val].dst_cnt = 0;
        for (j = 0; j < AOW_MAX_RECEIVER_COUNT; j++) {
            OS_MEMSET(&sc->sc_aow.chan_addr[val].addr[j], 0x00,
                      sizeof(struct ether_addr));
        }
        /* clear the set channel map */
        ic->ic_aow.channel_set_flag &= ~(1 << val);
    } else {
        /* Clear every channel entry. */
        for (i = 0; i < AOW_MAX_AUDIO_CHANNELS; i++) {
            sc->sc_aow.chan_addr[i].channel = 0;
            sc->sc_aow.chan_addr[i].valid = AH_FALSE;
            sc->sc_aow.chan_addr[i].seqno = 0;
            sc->sc_aow.chan_addr[i].dst_cnt = 0;
            for (j = 0; j < AOW_MAX_RECEIVER_COUNT; j++) {
                OS_MEMSET(&sc->sc_aow.chan_addr[i].addr[j], 0x00,
                          sizeof(struct ether_addr));
            }
        }
        /* clear the set channel map */
        ic->ic_aow.channel_set_flag = 0;
    }
}
void ath_list_audio_channel(struct ieee80211com *ic) { struct ath_softc_net80211 *scn = ATH_SOFTC_NET80211(ic); struct ath_softc *sc = ATH_DEV_TO_SC(scn->sc_dev); int i = 0; int j = 0; for (i = 0; i < AOW_MAX_AUDIO_CHANNELS; i++) { if (sc->sc_aow.chan_addr[i].valid) { IEEE80211_AOW_DPRINTF("\nAudio channel = %d\n", sc->sc_aow.chan_addr[i].channel); IEEE80211_AOW_DPRINTF("-------------------\n"); //for (j = 0; j < sc->sc_aow.chan_addr[i].dst_cnt; j++) { for (j = 0; j < AOW_MAX_RECEIVER_COUNT; j++) { if (!IS_ETHER_ADDR_NULL(sc->sc_aow.chan_addr[i].addr[j].octet)) IEEE80211_AOW_DPRINTF("%s\n",ether_sprintf((char*)&sc->sc_aow.chan_addr[i].addr[j])); } } } }
/*
 * Hand a U-APSD trigger for node `ni` down to the ath layer.  When
 * enforce_max_sp is set the node's advertised max service period is
 * used; otherwise the queue-depth limit applies and the "send more
 * data" variant of the trigger is issued.
 */
void
ath_net80211_uapsd_process_uapsd_trigger(ieee80211_handle_t ieee,
                                         struct ieee80211_node *ni,
                                         bool enforce_max_sp, bool *sent_eosp)
{
    struct ieee80211com *ic = NET80211_HANDLE(ieee);
    struct ath_softc_net80211 *scn = ATH_SOFTC_NET80211(ic);

    scn->sc_ops->process_uapsd_trigger(
        scn->sc_dev,
        ATH_NODE_NET80211(ni)->an_sta,
        enforce_max_sp ? ni->ni_uapsd_maxsp : WME_UAPSD_NODE_MAXQDEPTH,
        0,
        enforce_max_sp ? 0 : 1,
        sent_eosp,
        WME_UAPSD_NODE_MAXQDEPTH);
}
/*
 * Enable/disable AoW Extended Stats (ES).  ES and ESS are mutually
 * exclusive, so enabling ES first switches ESS off; if ES init fails
 * the feature stays disabled.
 */
void
ath_set_aow_es(struct ieee80211com *ic, u_int32_t val)
{
    struct ath_softc_net80211 *scn = ATH_SOFTC_NET80211(ic);
    struct ath_softc *sc = ATH_DEV_TO_SC(scn->sc_dev);

    if (!val) {
        /* Disable: tear the ES machinery down. */
        aow_es_base_deinit(ic);
        sc->sc_aow.es = 0;
        return;
    }

    if (sc->sc_aow.ess) {
        ic->ic_set_aow_ess(ic, 0);
    }

    if (aow_es_base_init(ic) < 0) {
        IEEE80211_AOW_DPRINTF("Ext Stats Init failed. Turning off ES\n");
        sc->sc_aow.es = 0;
    } else {
        sc->sc_aow.es = 1;
    }
}
/*
 * Set the RTS retry count for the VO hardware queue.  The queue
 * property stores tries (= retries + 1).  Out-of-range values are
 * rejected with a diagnostic.
 */
void
ath_set_aow_rtsretries(struct ieee80211com *ic, u_int16_t val)
{
    struct ath_softc_net80211 *scn = ATH_SOFTC_NET80211(ic);
    struct ath_softc *sc = ATH_DEV_TO_SC(scn->sc_dev);
    u_int16_t tries = (val + 1);
    /* Signed copy so the lower-bound compare can't trip an
     * unsigned-comparison warning when the minimum is 0. */
    int signed_val = val;

    if ((signed_val < ATH_AOW_MIN_RTS_RETRIES) ||
        (val > ATH_AOW_MAX_RTS_RETRIES)) {
        IEEE80211_AOW_DPRINTF("Invalid value (%u) for rts_retries. "
                              "Min: %u Max: %u\n",
                              val,
                              ATH_AOW_MIN_RTS_RETRIES,
                              ATH_AOW_MAX_RTS_RETRIES);
        return;
    }

    scn->sc_ops->update_txqproperty(scn->sc_dev,
                                    sc->sc_haltype2q[HAL_WME_AC_VO],
                                    TXQ_PROP_SHORT_RETRY_LIMIT,
                                    &tries);
}
void ath_set_audio_channel(struct ieee80211com *ic, int channel, struct ether_addr *macaddr) { struct ath_softc_net80211 *scn = ATH_SOFTC_NET80211(ic); struct ath_softc *sc = ATH_DEV_TO_SC(scn->sc_dev); int index = 0; u_int8_t event_subtype = CM_CHANNEL_ADDRESS_SET_PASS; if ((channel < 0) || (channel > (AOW_MAX_AUDIO_CHANNELS - 1))) { event_subtype = CM_CHANNEL_ADDRESS_SET_FAIL; goto err; return; } if ((sc->sc_aow.mapped_recv_cnt >= AOW_MAX_RECEIVER_COUNT) || (sc->sc_aow.chan_addr[channel].dst_cnt >= AOW_MAX_RECEIVER_COUNT)) { IEEE80211_AOW_DPRINTF("\nSet error : Max Limit reached\n"); ath_list_audio_channel(ic); event_subtype = CM_CHANNEL_ADDRESS_SET_FAIL; goto err; return; } sc->sc_aow.chan_addr[channel].channel = channel; sc->sc_aow.chan_addr[channel].valid = AH_TRUE; index = sc->sc_aow.chan_addr[channel].dst_cnt; OS_MEMCPY(&sc->sc_aow.chan_addr[channel].addr[index], macaddr, sizeof(struct ether_addr)); sc->sc_aow.chan_addr[channel].dst_cnt++; sc->sc_aow.mapped_recv_cnt++; /* set the channel bit flag to optimize the check on transmit */ ic->ic_aow.channel_set_flag = ic->ic_aow.channel_set_flag | (1 << channel); err: ieee80211_aow_send_to_host(ic, &event_subtype, sizeof(event_subtype), AOW_HOST_PKT_EVENT, event_subtype, NULL); }
/* Return the recorded AoW receive-path processing time. */
u_int32_t
ath_aow_get_rx_proc_time(struct ieee80211com *ic)
{
    struct ath_softc_net80211 *scn = ATH_SOFTC_NET80211(ic);
    struct ath_softc *sc = ATH_DEV_TO_SC(scn->sc_dev);

    return sc->sc_aow.rx_proc_time;
}
/* Trigger an internal reset of the underlying ath device for AoW. */
void
ath_aow_reset(struct ieee80211com *ic)
{
    struct ath_softc_net80211 *scn = ATH_SOFTC_NET80211(ic);
    struct ath_softc *sc = ATH_DEV_TO_SC(scn->sc_dev);

    ath_internal_reset(sc);
}
/* Set the period of the AoW processing timer. */
void
if_ath_aow_proc_timer_set_period(struct ieee80211com *ic, u_int32_t period)
{
    struct ath_softc_net80211 *scn = ATH_SOFTC_NET80211(ic);

    scn->sc_ops->ath_aow_proc_timer_set_period(scn->sc_dev, period);
}
/* Stop the AoW processing timer. */
void
if_ath_aow_proc_timer_stop(struct ieee80211com *ic)
{
    struct ath_softc_net80211 *scn = ATH_SOFTC_NET80211(ic);

    scn->sc_ops->ath_aow_proc_timer_stop(scn->sc_dev);
}
/* Toggle GPIO pin 11 according to flg. */
void
if_ath_gpio11_toggle(struct ieee80211com *ic, u_int16_t flg)
{
    struct ath_softc_net80211 *scn = ATH_SOFTC_NET80211(ic);

    scn->sc_ops->ath_gpio11_toggle_ptr(scn->sc_dev, flg);
}
/* Start the AoW timer with the given start time and period. */
void
if_ath_start_aow_timer(struct ieee80211com *ic, u_int32_t startTime,
                       u_int32_t period)
{
    struct ath_softc_net80211 *scn = ATH_SOFTC_NET80211(ic);

    scn->sc_ops->ath_start_aow_timer(scn->sc_dev, startTime, period);
}