int
wmi_unified_connect_htc_service(struct wmi_unified *wmi_handle, void *htc_handle)
{
    int status;
    HTC_SERVICE_CONNECT_RESP response;
    HTC_SERVICE_CONNECT_REQ connect;

    OS_MEMZERO(&connect, sizeof(connect));
    OS_MEMZERO(&response, sizeof(response));

    /* meta data is unused for now */
    connect.pMetaData = NULL;
    connect.MetaDataLength = 0;

    /* these fields are the same for all service endpoints */
    connect.EpCallbacks.pContext = wmi_handle;
    connect.EpCallbacks.EpTxCompleteMultiple = NULL;        /* ar6000_tx_complete */
    connect.EpCallbacks.EpRecv = wmi_control_rx;            /* control path rx */
    connect.EpCallbacks.EpRecvRefill = NULL;                /* ar6000_rx_refill */
    connect.EpCallbacks.EpSendFull = NULL;                  /* ar6000_tx_queue_full */
    connect.EpCallbacks.EpTxComplete = wmi_htc_tx_complete; /* control path tx completion */

    /* connect to the WMI control service */
    connect.ServiceID = WMI_CONTROL_SVC;
    status = HTCConnectService(htc_handle, &connect, &response);
    if (status != EOK) {
        printk("Failed to connect to WMI CONTROL service, status: %d\n", status);
        return -1;
    }

    wmi_handle->wmi_endpoint_id = response.Endpoint;
    wmi_handle->htc_handle = htc_handle;

    return EOK;
}
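/*
 * Hedged usage sketch (not from the source): the natural bring-up order
 * implied above is wmi_unified_attach() followed by
 * wmi_unified_connect_htc_service() once HTC is initialized. The caller
 * name and error values here are assumptions for illustration.
 */
static int
wmi_bringup_sketch(ol_scn_t scn, void *htc_handle, wma_wow_tx_complete_cbk cb)
{
    struct wmi_unified *wmi;

    /* allocates and zeroes the handle (see wmi_unified_attach below) */
    wmi = (struct wmi_unified *)wmi_unified_attach(scn, cb);
    if (wmi == NULL)
        return -1;

    /* binds the WMI_CONTROL_SVC endpoint and records its endpoint id */
    if (wmi_unified_connect_htc_service(wmi, htc_handle) != EOK)
        return -1;

    return EOK;
}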
void
ar9300_ani_poll_freebsd(struct ath_hal *ah, const struct ieee80211_channel *chan)
{
    HAL_NODE_STATS stats;
    HAL_ANISTATS anistats;
    HAL_SURVEY_SAMPLE survey;

    OS_MEMZERO(&stats, sizeof(stats));
    OS_MEMZERO(&anistats, sizeof(anistats));
    OS_MEMZERO(&survey, sizeof(survey));

    ar9300_ani_ar_poll(ah, &stats, chan, &anistats);

    /*
     * If the ANI stats are valid, use them to update the
     * channel survey.
     */
    if (anistats.valid) {
        survey.cycle_count = anistats.cyclecnt_diff;
        survey.chan_busy = anistats.rxclr_cnt;
        survey.ext_chan_busy = anistats.extrxclr_cnt;
        survey.tx_busy = anistats.txframecnt_diff;
        survey.rx_busy = anistats.rxframecnt_diff;
        ath_hal_survey_add_sample(ah, &survey);
    }
}
static void
ieee80211_resmgr_clear_event(ieee80211_resmgr_t resmgr,
                             ieee80211_resmgr_vap_priv_t resmgr_vap)
{
    if (resmgr_vap) {
        /* VAP-related event */
        OS_MEMZERO(&resmgr_vap->def_event,
                   sizeof(struct ieee80211_resmgr_sm_event));
        resmgr_vap->def_event_type = 0;
    } else {
        OS_MEMZERO(&resmgr->scandata.def_event,
                   sizeof(struct ieee80211_resmgr_sm_event));
        resmgr->scandata.def_event_type = 0;
    }
}
/*
 * Clear all delay lines for all filter types.
 *
 * This may be called before any radar pulses are configured
 * (e.g. on a non-DFS channel, with radar PHY errors still showing up).
 * In that case, just drop out early.
 */
void
dfs_reset_alldelaylines(struct ath_dfs *dfs)
{
    struct dfs_filtertype *ft = NULL;
    struct dfs_filter *rf;
    struct dfs_delayline *dl;
    struct dfs_pulseline *pl;
    int i, j;

    if (dfs == NULL) {
        VOS_TRACE(VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
                  "%s[%d]: sc_dfs is NULL", __func__, __LINE__);
        return;
    }
    pl = dfs->pulses;
    if (pl == NULL) {
        VOS_TRACE(VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
                  "%s[%d]: pl==NULL, dfs=%p", __func__, __LINE__, dfs);
        return;
    }
    if (dfs->dfs_b5radars == NULL) {
        VOS_TRACE(VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
                  "%s[%d]: b5radars==NULL, dfs=%p", __func__, __LINE__, dfs);
        return;
    }

    /* reset the pulse log */
    pl->pl_firstelem = pl->pl_numelems = 0;
    pl->pl_lastelem = DFS_MAX_PULSE_BUFFER_MASK;

    /* reset each configured filter's delay line */
    for (i = 0; i < DFS_MAX_RADAR_TYPES; i++) {
        ft = dfs->dfs_radarf[i];
        if (ft == NULL)
            continue;
        for (j = 0; j < ft->ft_numfilters; j++) {
            rf = &(ft->ft_filters[j]);
            dl = &(rf->rf_dl);
            OS_MEMZERO(dl, sizeof(struct dfs_delayline));
            dl->dl_lastelem = (0xFFFFFFFF) & DFS_MAX_DL_MASK;
        }
    }

    /* reset the bin5 radar element buffers */
    for (i = 0; i < dfs->dfs_rinfo.rn_numbin5radars; i++) {
        OS_MEMZERO(&(dfs->dfs_b5radars[i].br_elems[0]),
                   sizeof(struct dfs_bin5elem) * DFS_MAX_B5_SIZE);
        dfs->dfs_b5radars[i].br_firstelem = 0;
        dfs->dfs_b5radars[i].br_numelems = 0;
        dfs->dfs_b5radars[i].br_lastelem = (0xFFFFFFFF) & DFS_MAX_B5_MASK;
    }
}
void
ar9300_ani_poll_freebsd(struct ath_hal *ah, const struct ieee80211_channel *chan)
{
    HAL_NODE_STATS stats;
    HAL_ANISTATS anistats;

    OS_MEMZERO(&stats, sizeof(stats));
    OS_MEMZERO(&anistats, sizeof(anistats));

    ar9300_ani_ar_poll(ah, &stats, chan, &anistats);
}
void
ar9300_get_spectral_params(struct ath_hal *ah, HAL_SPECTRAL_PARAM *ss)
{
    u_int32_t val;
    HAL_CHANNEL_INTERNAL *chan = NULL;
    const struct ieee80211_channel *c;
    int i, ichain, rx_chain_status;
    struct ath_hal_9300 *ahp = AH9300(ah);
    HAL_BOOL asleep = ahp->ah_chip_full_sleep;

    c = AH_PRIVATE(ah)->ah_curchan;
    if (c != NULL)
        chan = ath_hal_checkchannel(ah, c);

    /* XXX TODO: just always wake up all chips? */
    if ((AR_SREV_WASP(ah) || AR_SREV_SCORPION(ah)) && asleep) {
        ar9300_set_power_mode(ah, HAL_PM_AWAKE, AH_TRUE);
    }

    val = OS_REG_READ(ah, AR_PHY_SPECTRAL_SCAN);

    ss->ss_fft_period = MS(val, AR_PHY_SPECTRAL_SCAN_FFT_PERIOD);
    ss->ss_period = MS(val, AR_PHY_SPECTRAL_SCAN_PERIOD);
    ss->ss_count = MS(val, AR_PHY_SPECTRAL_SCAN_COUNT);
    ss->ss_short_report = (val & AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT) ? 1 : 0;
    ss->ss_spectral_pri = (val & AR_PHY_SPECTRAL_SCAN_PRIORITY_HI) ? 1 : 0;

    OS_MEMZERO(ss->ss_nf_cal, sizeof(ss->ss_nf_cal));
    OS_MEMZERO(ss->ss_nf_pwr, sizeof(ss->ss_nf_pwr));
    ss->ss_nf_temp_data = 0;

    if (chan != NULL) {
        rx_chain_status = OS_REG_READ(ah, AR_PHY_RX_CHAINMASK) & 0x7;
        for (i = 0; i < HAL_NUM_NF_READINGS; i++) {
            ichain = i % 3;
            if (rx_chain_status & (1 << ichain)) {
                ss->ss_nf_cal[i] =
                    ar9300_noise_floor_get(ah, chan->channel, ichain);
                ss->ss_nf_pwr[i] =
                    ar9300_noise_floor_power_get(ah, chan->channel, ichain);
            }
        }
        ss->ss_nf_temp_data = OS_REG_READ_FIELD(ah, AR_PHY_BB_THERM_ADC_4,
                                  AR_PHY_BB_THERM_ADC_4_LATEST_THERM);
    } else {
        HALDEBUG(AH_NULL, HAL_DEBUG_UNMASKABLE,
                 "%s: chan is NULL - no ss nf values\n", __func__);
    }

    if ((AR_SREV_WASP(ah) || AR_SREV_SCORPION(ah)) && asleep) {
        ar9300_set_power_mode(ah, HAL_PM_FULL_SLEEP, AH_TRUE);
    }
}
wmi_buf_t
wmi_buf_alloc(wmi_unified_t wmi_handle, int len)
{
    wmi_buf_t wmi_buf;

    /*
     * NOTE: the wbuf type is WBUF_TX_CTL for now, but this needs to be
     * changed appropriately to reserve proper headroom for wmi buffers.
     */
    wmi_buf = wbuf_alloc(wmi_handle->osdev, WBUF_TX_CTL, len);
    if (NULL == wmi_buf) {
        /*
         * wbuf_alloc returns NULL if the internal pool in
         * wmi_handle->osdev is empty.
         */
        return NULL;
    }

    /* Clear the wmi buffer */
    OS_MEMZERO(wbuf_header(wmi_buf), len);

    /* Set the length of the buffer to match the allocation size. */
    wbuf_set_pktlen(wmi_buf, len);

    return wmi_buf;
}
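/*
 * Hedged usage sketch of the allocate-zero-fill pattern above. The
 * command id, the payload layout, and the wmi_unified_cmd_send()
 * signature are assumptions for illustration.
 */
static int
wmi_send_cmd_sketch(wmi_unified_t wmi_handle, u_int32_t param)
{
    wmi_buf_t buf;
    u_int32_t *payload;
    int len = sizeof(*payload);

    buf = wmi_buf_alloc(wmi_handle, len);
    if (buf == NULL)
        return -1;

    /* the buffer is already zeroed by wmi_buf_alloc() */
    payload = (u_int32_t *)wbuf_header(buf);
    *payload = param;

    /* WMI_EXAMPLE_CMDID is a hypothetical command id */
    return wmi_unified_cmd_send(wmi_handle, buf, len, WMI_EXAMPLE_CMDID);
}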
void
pktlog_init(void *_scn)
{
    struct ath_softc_net80211 *scn = (struct ath_softc_net80211 *)_scn;
    struct pktlog_handle_t *pl_dev = (scn) ? scn->pl_dev : NULL;
    struct ath_pktlog_info *pl_info = (pl_dev) ? pl_dev->pl_info
                                               : g_pktlog_info;
    struct ath_softc *sc;

    if (scn) {
        sc = (struct ath_softc *)scn->sc_dev;
        sc->pl_info = pl_info;
    }

    OS_MEMZERO(pl_info, sizeof(*pl_info));
    PKTLOG_LOCK_INIT(pl_info);

    if (pl_dev) {
        pl_dev->tgt_pktlog_enabled = false;
    }

    pl_info->buf_size = PKTLOG_DEFAULT_BUFSIZE;
    pl_info->buf = NULL;
    pl_info->log_state = 0;
    pl_info->sack_thr = PKTLOG_DEFAULT_SACK_THR;
    pl_info->tail_length = PKTLOG_DEFAULT_TAIL_LENGTH;
    pl_info->thruput_thresh = PKTLOG_DEFAULT_THRUPUT_THRESH;
    pl_info->per_thresh = PKTLOG_DEFAULT_PER_THRESH;
    pl_info->phyerr_thresh = PKTLOG_DEFAULT_PHYERR_THRESH;
    pl_info->trigger_interval = PKTLOG_DEFAULT_TRIGGER_INTERVAL;
    pl_info->pktlen = 0;
    pl_info->start_time_thruput = 0;
    pl_info->start_time_per = 0;
}
static void
hsm_save_memento(ieee80211_hsm_t hsm, hsm_trace_type_t trace_type,
                 u_int8_t initial_state, u_int8_t final_state, int event_type)
{
    hsm_history_t *p_hsm_history = &(hsm->history);
    hsm_history_info_t *p_memento;

    /*
     * History is saved in a circular buffer.
     * Save a pointer to the next write location and increment the index.
     */
    spin_lock(&p_hsm_history->hh_lock);
    p_memento = &(p_hsm_history->data[p_hsm_history->index]);
    p_hsm_history->index++;
    if (p_hsm_history->index >= IEEE80211_N(p_hsm_history->data)) {
        p_hsm_history->index = 0;
    }
    spin_unlock(&p_hsm_history->hh_lock);

    OS_MEMZERO(p_memento, sizeof(*p_memento));
    p_memento->trace_type = trace_type;
    p_memento->initial_state = initial_state;
    p_memento->final_state = final_state;
    p_memento->event_type = event_type;
}
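/*
 * Hedged companion sketch (not from the source): a dump routine would
 * read the circular buffer oldest-to-newest, starting at the current
 * write index, which points at the oldest memento once the buffer has
 * wrapped. The print helper is hypothetical.
 */
static void
hsm_dump_history_sketch(ieee80211_hsm_t hsm)
{
    hsm_history_t *h = &(hsm->history);
    u_int32_t i, idx;

    spin_lock(&h->hh_lock);
    idx = h->index;
    for (i = 0; i < IEEE80211_N(h->data); i++) {
        hsm_print_memento(&h->data[idx]);   /* assumed helper */
        idx = (idx + 1) % IEEE80211_N(h->data);
    }
    spin_unlock(&h->hh_lock);
}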
wmi_buf_t
wmi_buf_alloc_debug(wmi_unified_t wmi_handle, uint16_t len,
                    uint8_t *file_name, uint32_t line_num)
{
    wmi_buf_t wmi_buf;

    if (roundup(len + WMI_MIN_HEAD_ROOM, 4) > wmi_handle->max_msg_len) {
        CDF_ASSERT(0);
        return NULL;
    }

    wmi_buf = cdf_nbuf_alloc_debug(NULL, roundup(len + WMI_MIN_HEAD_ROOM, 4),
                                   WMI_MIN_HEAD_ROOM, 4, false,
                                   file_name, line_num);
    if (!wmi_buf)
        return NULL;

    /* Clear the wmi buffer */
    OS_MEMZERO(cdf_nbuf_data(wmi_buf), len);

    /* Set the length of the buffer to match the allocation size. */
    cdf_nbuf_set_pktlen(wmi_buf, len);

    return wmi_buf;
}
/*
 * vap attach. (VAP Module)
 */
void
ieee80211_resmgr_vattach(ieee80211_resmgr_t resmgr, ieee80211_vap_t vap)
{
    int retval;
    ieee80211_resmgr_vap_priv_t resmgr_vap;

    /* Allocate and store ResMgr private data in the VAP structure */
    resmgr_vap = (ieee80211_resmgr_vap_priv_t)OS_MALLOC(vap->iv_ic->ic_osdev,
                     sizeof(struct ieee80211_resmgr_vap_priv), 0);
    ASSERT(resmgr_vap != NULL);
    if (!resmgr_vap)
        return;

    OS_MEMZERO(resmgr_vap, sizeof(struct ieee80211_resmgr_vap_priv));
    resmgr_vap->state = VAP_STOPPED;
    resmgr_vap->resmgr = resmgr;

    /* Setup Off-Channel Scheduler data in the ResMgr private data */
    ieee80211_resmgr_oc_sched_vattach(resmgr, vap, resmgr_vap);

    /* Register an event handler with the VAP module */
    retval = ieee80211_vap_register_event_handler(vap,
                 ieee80211_resmgr_vap_evhandler, (void *)resmgr);
    ASSERT(retval == 0);

    ieee80211vap_set_resmgr(vap, resmgr_vap);
}
void *
wmi_unified_attach(ol_scn_t scn_handle, wma_wow_tx_complete_cbk func)
{
    struct wmi_unified *wmi_handle;

    wmi_handle = (struct wmi_unified *)OS_MALLOC(NULL,
                     sizeof(struct wmi_unified), GFP_ATOMIC);
    if (wmi_handle == NULL) {
        printk("allocation of wmi handle failed %zu\n",
               sizeof(struct wmi_unified));
        return NULL;
    }
    OS_MEMZERO(wmi_handle, sizeof(struct wmi_unified));

    wmi_handle->scn_handle = scn_handle;
    adf_os_atomic_init(&wmi_handle->pending_cmds);
    adf_os_atomic_init(&wmi_handle->is_target_suspended);
#ifdef FEATURE_RUNTIME_PM
    adf_os_atomic_init(&wmi_handle->runtime_pm_inprogress);
#endif
    adf_os_spinlock_init(&wmi_handle->eventq_lock);
    adf_nbuf_queue_init(&wmi_handle->event_queue);
#ifdef CONFIG_CNSS
    cnss_init_work(&wmi_handle->rx_event_work, wmi_rx_event_work);
#else
    INIT_WORK(&wmi_handle->rx_event_work, wmi_rx_event_work);
#endif
#ifdef WMI_INTERFACE_EVENT_LOGGING
    adf_os_spinlock_init(&wmi_handle->wmi_record_lock);
#endif
    wmi_handle->wma_wow_tx_complete_cbk = func;
    return wmi_handle;
}
ieee80211_pwrsave_smps_t
ieee80211_pwrsave_smps_attach(struct ieee80211vap *vap, u_int32_t smpsDynamic)
{
    ieee80211_pwrsave_smps_t smps;
    osdev_t os_handle = vap->iv_ic->ic_osdev;

    smps = (ieee80211_pwrsave_smps_t)OS_MALLOC(os_handle,
               sizeof(struct ieee80211_pwrsave_smps), 0);
    if (smps) {
        OS_MEMZERO(smps, sizeof(struct ieee80211_pwrsave_smps));

        /* Initialize the pwrsave timer */
        OS_INIT_TIMER(os_handle, &smps->ips_timer,
                      ieee80211_pwrsave_smps_timer, smps);

        if (smpsDynamic && IEEE80211_HAS_DYNAMIC_SMPS_CAP(vap->iv_ic)) {
            ieee80211_vap_dynamic_mimo_ps_set(vap);
        } else {
            ieee80211_vap_dynamic_mimo_ps_clear(vap);
        }

        smps->ips_smPowerSaveState = IEEE80211_SM_PWRSAVE_DISABLED;
        smps->ips_connected = false;
        smps->ips_vap = vap;
        ieee80211_vap_register_event_handler(vap,
            ieee80211_pwrsave_smps_vap_event_handler, (void *)smps);
    }
    return smps;
}
static void
ieee80211_reset_stats(struct ieee80211vap *vap, int reset_hw)
{
    struct ieee80211com *ic = vap->iv_ic;

    OS_MEMZERO(&vap->iv_unicast_stats, sizeof(struct ieee80211_mac_stats));
    OS_MEMZERO(&vap->iv_multicast_stats, sizeof(struct ieee80211_mac_stats));

    if (reset_hw) {
        OS_MEMZERO(&ic->ic_phy_stats[0],
                   sizeof(struct ieee80211_phy_stats) * IEEE80211_MODE_MAX);
        /* clear H/W phy counters */
        ic->ic_clear_phystats(ic);
    }
}
int
ath_rfkill_attach(struct ath_softc *sc)
{
    u_int32_t rfsilent = 0;
    struct ath_hal *ah = sc->sc_ah;
    struct ath_rfkill_info *rfkill = &sc->sc_rfkill;

    OS_MEMZERO(rfkill, sizeof(struct ath_rfkill_info));

    ath_hal_getrfkillinfo(ah, &rfsilent);
    rfkill->rf_gpio_select = rfsilent & 0xffff;
    rfkill->rf_gpio_polarity = (rfsilent >> 16) & 0xffff;

    rfkill->rf_hasint = ath_hal_hasrfkillInt(ah) ? AH_TRUE : AH_FALSE;

    ath_hal_enable_rfkill(ah, AH_TRUE);

    ath_initialize_timer(sc->sc_osdev, &rfkill->rf_timer,
                         ATH_RFKILL_POLL_INTERVAL,
                         (timer_handler_function)ath_rfkill_poll, sc);

    /*
     * This flag is used by the WAR for the RfKill delay during power
     * resume; it skips the initial check after system boot.
     */
    rfkill->delay_chk_start = AH_FALSE;

    return 0;
}
int
tx99_attach(struct ath_softc *sc)
{
    struct ath_tx99 *tx99 = sc->sc_tx99;

    if (tx99 != NULL) {
        DPRINTF(sc, ATH_DEBUG_TX99, "%s: sc_tx99 was not NULL\n",
                __FUNCTION__);
        return EINVAL;
    }

    tx99 = (struct ath_tx99 *)OS_MALLOC(sc->sc_osdev,
               sizeof(struct ath_tx99), GFP_KERNEL);
    if (tx99 == NULL) {
        DPRINTF(sc, ATH_DEBUG_TX99, "%s: no memory for tx99 attach\n",
                __FUNCTION__);
        return ENOMEM;
    }
    OS_MEMZERO(tx99, sizeof(struct ath_tx99));
    sc->sc_tx99 = tx99;

    tx99->stop = tx99_stop;
    tx99->start = tx99_start;
    tx99->tx99_state = 0;
    tx99->txpower = 60;
    tx99->txrate = 54000;
    tx99->txrc = 0x0c;
    tx99->txfreq = 2412;    /* ieee channel frequency */
    tx99->txmode = 0;
    tx99->chanmask = 7;
    tx99->recv = 0;
    tx99->htmode = 0;
    tx99->htext = 0;

    return 0;
}
/*
 * Returns the number of vaps ready.
 */
u_int16_t
ieee80211_vaps_ready(struct ieee80211com *ic, enum ieee80211_opmode opmode)
{
    struct ieee80211_iter_vaps_ready_arg params;
    u_int16_t nready = 0;

    OS_MEMZERO(&params, sizeof(params));
    wlan_iterate_vap_list(ic, ieee80211_vap_iter_ready_vaps, (void *)&params);

    switch (opmode) {
    case IEEE80211_M_HOSTAP:
    case IEEE80211_M_BTAMP:
        nready = params.num_ap_vaps_ready;
        break;
    case IEEE80211_M_IBSS:
        nready = params.num_ibss_vaps_ready;
        break;
    case IEEE80211_M_STA:
        nready = params.num_sta_vaps_ready;
        break;
    default:
        break;
    }
    return nready;
}
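/*
 * Hedged usage sketch: gate an operation on at least one AP-mode VAP
 * being in the ready state. The wrapper and its purpose are assumptions;
 * the iterate-then-count pattern is what the function above provides.
 */
static int
any_ap_vap_ready_sketch(struct ieee80211com *ic)
{
    return (ieee80211_vaps_ready(ic, IEEE80211_M_HOSTAP) > 0);
}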
u_int8_t *
ret_byte_copied_fft_data(struct ath_softc *sc, struct ath_desc *ds,
                         struct ath_buf *bf)
{
#define ALLOC_FFT_DATA_SIZE 256
    struct ath_spectral *spectral = sc->sc_spectral;
    u_int16_t datalen = ds->ds_rxstat.rs_datalen;
    u_int8_t *pfft_data = NULL, *byte_ptr = NULL, bmapwt = 0, maxindex = 0;
    int i = 0;

    pfft_data = (u_int8_t *)OS_MALLOC(sc->sc_osdev, ALLOC_FFT_DATA_SIZE, 0);
    if (!pfft_data)
        return NULL;
    if (datalen > spectral->spectral_data_len + 2) {
        /* don't leak the buffer on an oversized payload */
        OS_FREE(pfft_data);
        return NULL;
    }

    OS_MEMZERO(pfft_data, ALLOC_FFT_DATA_SIZE);

    for (i = 0; i < datalen; i++) {
        byte_ptr = (u_int8_t *)((u_int8_t *)bf->bf_vdata + i);
        pfft_data[i] = ((*byte_ptr) & 0xFF);
    }

    get_ht20_bmap_maxindex(sc, pfft_data, datalen, &bmapwt, &maxindex);
    SPECTRAL_DPRINTK(sc, ATH_DEBUG_SPECTRAL1,
                     "%s %d HT20 bmap=%d maxindex=%d\n",
                     __func__, __LINE__, bmapwt, maxindex);

    return pfft_data;
#undef ALLOC_FFT_DATA_SIZE
}
int
ieee80211_power_alloc_tim_bitmap(struct ieee80211vap *vap)
{
    u_int8_t *tim_bitmap = NULL;
    u_int32_t old_len = vap->iv_tim_len;

    vap->iv_tim_len = howmany(vap->iv_max_aid, 8) * sizeof(u_int8_t);
    tim_bitmap = OS_MALLOC(vap->iv_ic->ic_osdev, vap->iv_tim_len, 0);
    if (!tim_bitmap) {
        /* restore the old length on allocation failure */
        vap->iv_tim_len = old_len;
        return -1;
    }

    OS_MEMZERO(tim_bitmap, vap->iv_tim_len);

    /* preserve the existing bits, then release the old bitmap */
    if (vap->iv_tim_bitmap) {
        OS_MEMCPY(tim_bitmap, vap->iv_tim_bitmap,
                  vap->iv_tim_len > old_len ? old_len : vap->iv_tim_len);
        OS_FREE(vap->iv_tim_bitmap);
    }
    vap->iv_tim_bitmap = tim_bitmap;

    return 0;
}
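/*
 * Hedged caller sketch (not from the source): the grow-and-copy behavior
 * above is what makes it safe to call on an AID-limit change. The wrapper
 * name is hypothetical.
 */
static int
ieee80211_set_max_aid_sketch(struct ieee80211vap *vap, u_int16_t max_aid)
{
    vap->iv_max_aid = max_aid;
    /* reallocates the TIM bitmap, preserving existing bits on success
     * and restoring the old length on failure */
    return ieee80211_power_alloc_tim_bitmap(vap);
}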
/* Detach function for green_ap */
int
ath_green_ap_detach(struct ieee80211com *ic)
{
    struct ath_green_ap *green_ap = ic->ic_green_ap;

    /* Sanity check */
    if (green_ap == NULL) {
        GREEN_AP_INFO("Module not loaded\n");
        return -1;
    }

    spin_lock(&ic->green_ap_ps_lock);

    /* Delete the timer if it is running */
    if (ath_timer_is_initialized(&green_ap->ps_timer)) {
        ath_cancel_timer(&green_ap->ps_timer, CANCEL_NO_SLEEP);
        ath_free_timer(&green_ap->ps_timer);
    }

    /* Release the memory */
    OS_MEMZERO(green_ap, sizeof(*green_ap));
    OS_FREE(green_ap);
    ic->ic_green_ap = NULL;

    spin_unlock(&ic->green_ap_ps_lock);
    spin_lock_destroy(&ic->green_ap_ps_lock);

    GREEN_AP_INFO("Green-AP : Detached\n");
    return 0;
}
EXPORT_SYMBOL(ath_green_ap_detach);
/*
 * Update rate-control state on station associate/reassociate.
 */
static int
ath_rate_newassoc_11n(struct ath_softc *sc, struct ath_node *an, int isnew,
                      unsigned int capflag,
                      struct ieee80211_rateset *negotiated_rates,
                      struct ieee80211_rateset *negotiated_htrates)
{
    struct atheros_node *oan = ATH_NODE_ATHEROS(an);

    if (isnew) {
        u_int32_t node_maxRate = (u_int32_t)(-1);
        MAX_RATES maxRates;
        OS_MEMZERO(&maxRates, sizeof(maxRates));

        /* FIXME: looks like this is not used at all. */
        oan->htcap =
            ((capflag & ATH_RC_DS_FLAG) ? WLAN_RC_DS_FLAG : 0) |
            ((capflag & ATH_RC_TS_FLAG) ? WLAN_RC_TS_FLAG : 0) |
            ((capflag & ATH_RC_SGI_FLAG) ? WLAN_RC_SGI_FLAG : 0) |
            ((capflag & ATH_RC_HT_FLAG) ? WLAN_RC_HT_FLAG : 0) |
#ifdef ATH_SUPPORT_TxBF
            ((capflag & ATH_RC_TXBF_FLAG) ? WLAN_RC_TxBF_FLAG : 0) |
#endif
            ((capflag & ATH_RC_CW40_FLAG) ? WLAN_RC_40_FLAG : 0);

        /*
         * Rx STBC is a 2-bit mask; convert from the ath definition
         * to the wlan definition.
         */
        oan->htcap |= (((capflag & ATH_RC_RX_STBC_FLAG) >>
                        ATH_RC_RX_STBC_FLAG_S) << WLAN_RC_STBC_FLAG_S);

        /* Enable STBC only when there is more than one tx chain */
        if (sc->sc_txstbcsupport && (sc->sc_tx_chainmask != 1)) {
            oan->stbc = (capflag & ATH_RC_RX_STBC_FLAG) >>
                        ATH_RC_RX_STBC_FLAG_S;
        } else {
/* Start monitor mode */
int
wlan_mlme_start_monitor(wlan_if_t vaphandle)
{
    struct ieee80211vap *vap = vaphandle;
    struct ieee80211com *ic = vap->iv_ic;
    ieee80211_reset_request req;

    ASSERT(vap->iv_opmode == IEEE80211_M_MONITOR);

    /* Monitor mode goes directly to the ready state */
    ieee80211_vap_start(vap);

    OS_MEMZERO(&req, sizeof(req));
    req.reset_hw = 1;
    req.type = IEEE80211_RESET_TYPE_INTERNAL;
    req.no_flush = 0;
    wlan_reset_start(vap, &req);
    wlan_reset(vap, &req);
    wlan_reset_end(vap, &req);

    if (vap->iv_des_chan[vap->iv_des_mode] != IEEE80211_CHAN_ANYC) {
        ieee80211_set_channel(ic, vap->iv_des_chan[vap->iv_des_mode]);
        vap->iv_bsschan = ic->ic_curchan;
    }

    return 0;
}
struct atheros_node *
ath_rate_node_alloc(struct atheros_vap *avp)
{
    struct atheros_node *anode;

    RC_OS_MALLOC(&anode, sizeof(struct atheros_node), RATECTRL_MEMTAG);
    if (anode == NULL)
        return NULL;

    OS_MEMZERO(anode, sizeof(struct atheros_node));

    anode->avp = avp;
    anode->asc = avp->asc;

#if ATH_SUPPORT_IQUE
    /*
     * Transplanted from build 717: the ViVo-specific callbacks were
     * replaced with the generic 11n ones for the find-rate reset fix.
     */
    anode->rcFunc[WME_AC_VI].rcUpdate = &rcUpdate_11n;  /* was rcUpdate_11nViVo */
    anode->rcFunc[WME_AC_VO].rcUpdate = &rcUpdate_11n;  /* was rcUpdate_11nViVo */
    anode->rcFunc[WME_AC_BE].rcUpdate = &rcUpdate_11n;
    anode->rcFunc[WME_AC_BK].rcUpdate = &rcUpdate_11n;
    anode->rcFunc[WME_AC_VI].rcFind = &rcRateFind_11n;  /* was rcRateFind_11nViVo */
    anode->rcFunc[WME_AC_VO].rcFind = &rcRateFind_11n;  /* was rcRateFind_11nViVo */
    anode->rcFunc[WME_AC_BE].rcFind = &rcRateFind_11n;
    anode->rcFunc[WME_AC_BK].rcFind = &rcRateFind_11n;
#endif

    return anode;
}
/*
 * BSS channel request. (Scan Module)
 */
int
ieee80211_resmgr_request_bsschan(ieee80211_resmgr_t resmgr, u_int16_t reqid)
{
    struct ieee80211_resmgr_sm_event event;
    struct vap_iter_check_state_params params;
    struct ieee80211com *ic = resmgr->ic;

    event.vap = NULL;
    event.chan = NULL;

    if (ieee80211_resmgr_active(ic)) {
        /* ResMgr SM active, check for active VAPs */
        OS_MEMZERO(&params, sizeof(struct vap_iter_check_state_params));
        wlan_iterate_vap_list(ic, ieee80211_vap_iter_check_state, &params);

        /*
         * If no VAPs are active, there is no BSS channel and the
         * scanner can continuously scan all channels.
         */
        if (!params.vaps_running && !params.vap_starting) {
            return EINVAL;
        }
    }

    /* Post event to ResMgr SM */
    if (ieee80211_resmgr_sm_dispatch(resmgr,
            IEEE80211_RESMGR_EVENT_BSSCHAN_REQUEST, &event) == EOK) {
        return EBUSY;   /* processing request asynchronously */
    } else {
        return EOK;     /* caller can change channel */
    }
}
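/*
 * Hedged sketch of scanner-side handling of the tri-state return value:
 * EOK means the caller may switch channels itself, EBUSY means the SM
 * will complete the request asynchronously, and EINVAL means there is no
 * BSS channel to return to. All helper names here are assumptions.
 */
static void
scanner_goto_bsschan_sketch(ieee80211_resmgr_t resmgr, u_int16_t reqid)
{
    switch (ieee80211_resmgr_request_bsschan(resmgr, reqid)) {
    case EOK:
        scanner_switch_channel_now(reqid);      /* assumed helper */
        break;
    case EBUSY:
        scanner_wait_for_bsschan_event(reqid);  /* assumed helper */
        break;
    default: /* EINVAL: no active VAP, keep scanning all channels */
        scanner_continue_full_scan(reqid);      /* assumed helper */
        break;
    }
}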
void *
wmi_unified_attach(ol_scn_t scn_handle, wma_process_fw_event_handler_cbk func)
{
    struct wmi_unified *wmi_handle;

    wmi_handle = (struct wmi_unified *)os_malloc(NULL,
                     sizeof(struct wmi_unified), GFP_ATOMIC);
    if (wmi_handle == NULL) {
        printk("allocation of wmi handle failed %zu\n",
               sizeof(struct wmi_unified));
        return NULL;
    }
    OS_MEMZERO(wmi_handle, sizeof(struct wmi_unified));

    wmi_handle->scn_handle = scn_handle;
    cdf_atomic_init(&wmi_handle->pending_cmds);
    cdf_atomic_init(&wmi_handle->is_target_suspended);
    cdf_spinlock_init(&wmi_handle->eventq_lock);
    cdf_nbuf_queue_init(&wmi_handle->event_queue);
#ifdef CONFIG_CNSS
    cnss_init_work(&wmi_handle->rx_event_work, wmi_rx_event_work);
#else
    INIT_WORK(&wmi_handle->rx_event_work, wmi_rx_event_work);
#endif
#ifdef WMI_INTERFACE_EVENT_LOGGING
    cdf_spinlock_init(&wmi_handle->wmi_record_lock);
#endif
    wmi_handle->wma_process_fw_event_handler_cbk = func;
    return wmi_handle;
}
/*
 * Attach the "VAP_ATH_INFO" manager.
 */
ieee80211_vap_ath_info_t
ieee80211_vap_ath_info_attach(wlan_if_t vap)
{
    ieee80211_vap_ath_info_t h_mgr;

    if (vap->iv_vap_ath_info_handle) {
        IEEE80211_DPRINTF(vap, IEEE80211_MSG_VAP_ATH_INFO,
                          "%s: Error: already attached.\n", __func__);
        return NULL;
    }

    /* Allocate the manager's data structures */
    h_mgr = (ieee80211_vap_ath_info_t)OS_MALLOC(vap->iv_ic->ic_osdev,
                sizeof(struct ieee80211_vap_ath_info), 0);
    if (h_mgr == NULL) {
        IEEE80211_DPRINTF(vap, IEEE80211_MSG_VAP_ATH_INFO,
                          "%s: memory allocation failed. size=%zu\n",
                          __func__, sizeof(struct ieee80211_vap_ath_info));
        return NULL;
    }
    OS_MEMZERO(h_mgr, sizeof(struct ieee80211_vap_ath_info));

    h_mgr->vap = vap;
    spin_lock_init(&(h_mgr->lock));

    return h_mgr;
}
void
start_spectral_scan(struct ath_softc *sc)
{
    HAL_SPECTRAL_PARAM spectral_params;
    struct ath_spectral *spectral = sc->sc_spectral;

    if (spectral == NULL) {
        SPECTRAL_DPRINTK(sc, ATH_DEBUG_SPECTRAL1,
                         "%s: sc_spectral is NULL, HW may not be spectral capable\n",
                         __func__);
        return;
    }

    OS_MEMZERO(&spectral_params, sizeof(HAL_SPECTRAL_PARAM));
    spectral_params.ss_count = 128;
    spectral_params.ss_short_report = spectral->params.ss_short_report;
    spectral_params.ss_period = spectral->params.ss_period;
    spectral_params.ss_fft_period = spectral->params.ss_fft_period;

    ath_hal_configure_spectral(sc->sc_ah, &spectral_params);
    spectral_get_thresholds(sc, &spectral->params);

    spectral->this_scan_phy_err = 0;
    spectral->send_single_packet = 1;
    spectral->spectral_sent_msg = 0;
    spectral->classify_scan = 0;
    spectral->num_spectral_data = 0;
    spectral->eacs_this_scan_spectral_data = 0;

    ath_hal_start_spectral_scan(sc->sc_ah);
}
/*
 * Setup ANI handling. Sets all thresholds and resets the
 * channel statistics. Note that ar5416AniReset should be
 * called by ar5416Reset before anything else happens, and
 * that's where we force initial settings.
 */
void
ar5416AniAttach(struct ath_hal *ah, const struct ar5212AniParams *params24,
                const struct ar5212AniParams *params5, HAL_BOOL enable)
{
    struct ath_hal_5212 *ahp = AH5212(ah);

    if (params24 != AH_NULL) {
        OS_MEMCPY(&ahp->ah_aniParams24, params24, sizeof(*params24));
        setPhyErrBase(ah, &ahp->ah_aniParams24);
    }
    if (params5 != AH_NULL) {
        OS_MEMCPY(&ahp->ah_aniParams5, params5, sizeof(*params5));
        setPhyErrBase(ah, &ahp->ah_aniParams5);
    }

    OS_MEMZERO(ahp->ah_ani, sizeof(ahp->ah_ani));
    /* Enable MIB Counters */
    enableAniMIBCounters(ah, &ahp->ah_aniParams24 /* XXX */);

    if (enable) {
        /* Enable ANI now */
        HALASSERT(params24 != AH_NULL && params5 != AH_NULL);
        ahp->ah_procPhyErr |= HAL_ANI_ENA;
    } else {
        ahp->ah_procPhyErr &= ~HAL_ANI_ENA;
    }
}
/*
 * Find the next events for requests, starting from the highest priority
 * one down to the first active one. Subsequent lower priority requests
 * are not processed.
 * Note: earliest_next_event returns the request that has the earliest
 * next event, but only among requests at or above highest_pri_active.
 */
static void
find_all_next_events(ieee80211_p2p_go_schedule_t go_schedule,
                     u_int32_t event_time, int *highest_pri_active,
                     int *earliest_next_event)
{
    int i, earliest_event;
    u_int32_t earliest_next_time = 0;

    earliest_event = -1;
    OS_MEMZERO(&go_schedule->req_state[0],
               P2P_GO_PS_MAX_NUM_SCHEDULE_REQ * sizeof(struct request_state));

    for (i = (go_schedule->num_schedules - 1); i >= 0; i--) {
        int sort_idx;
        struct request_state *req = &go_schedule->req_state[i];

        sort_idx = go_schedule->sorted_sch_idx[i];
        req->valid = true;
        req->expired = find_next_event(go_schedule, event_time,
                                       &(go_schedule->request[sort_idx]),
                                       &req->is_active,
                                       &req->next_event_time);
        if (req->expired) {
            continue;
        }

        if (earliest_event == -1) {
            /* Initialize the earliest time for the next event */
            earliest_next_time = req->next_event_time;
            earliest_event = i;
        } else if (TSFTIME_IS_GREATER(earliest_next_time,
                                      req->next_event_time)) {
            /* Found an earlier next event */
            earliest_event = i;
            earliest_next_time = req->next_event_time;
        }

        if (req->is_active) {
            /* Found the highest priority request that is active */
            *highest_pri_active = i;
            *earliest_next_event = earliest_event;
            IEEE80211_DPRINTF_IC(go_schedule->ic,
                IEEE80211_VERBOSE_DETAILED, IEEE80211_MSG_P2P_GO_SCH,
                "%s: highest_pri_active=%d, earliest_next_event=%d\n",
                __func__, *highest_pri_active, *earliest_next_event);
            return;
        }
    }

    *highest_pri_active = -1;
    *earliest_next_event = earliest_event;
    IEEE80211_DPRINTF_IC(go_schedule->ic,
        IEEE80211_VERBOSE_FUNCTION, IEEE80211_MSG_P2P_GO_SCH,
        "%s: highest_pri_active=%d, earliest_next_event=%d\n",
        __func__, *highest_pri_active, *earliest_next_event);
}
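/*
 * Hedged caller sketch (not from the source): one plausible way the two
 * out-parameters could be consumed by a scheduler tick. The helpers
 * other than find_all_next_events() are assumptions.
 */
static void
go_schedule_tick_sketch(ieee80211_p2p_go_schedule_t go_schedule, u_int32_t now)
{
    int highest_pri_active, earliest_next_event;

    find_all_next_events(go_schedule, now, &highest_pri_active,
                         &earliest_next_event);

    if (highest_pri_active >= 0) {
        /* some request is active: apply its state (assumed helper) */
        go_apply_active_request(go_schedule, highest_pri_active);
    }
    if (earliest_next_event >= 0) {
        /* re-arm a one-shot timer for the next event (assumed helper) */
        go_arm_event_timer(go_schedule,
            go_schedule->req_state[earliest_next_event].next_event_time);
    }
}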
void
init_classifier(struct ath_softc *sc)
{
    struct ath_spectral *spectral = sc->sc_spectral;
    struct ss *lwrband, *uprband;

    lwrband = &spectral->bd_lower;
    uprband = &spectral->bd_upper;

    OS_MEMZERO(lwrband, sizeof(struct ss));
    OS_MEMZERO(uprband, sizeof(struct ss));

    if (spectral->sc_spectral_20_40_mode) {
        /*
         * AP is in 20/40 mode, so we need to process both the extension
         * and the primary channel spectral data.
         */
        if (sc->sc_extchan.channel < sc->sc_curchan.channel) {
            lwrband->b_chan_in_mhz = sc->sc_extchan.channel;
            uprband->b_chan_in_mhz = sc->sc_curchan.channel;
            uprband->dc_in_mhz = (uprband->b_chan_in_mhz - 10);
            uprband->lwr_band_data = 1;
            lwrband->lwr_band_data = 0;
        } else {
            lwrband->b_chan_in_mhz = sc->sc_curchan.channel;
            uprband->b_chan_in_mhz = sc->sc_extchan.channel;
            uprband->dc_in_mhz = (uprband->b_chan_in_mhz + 10);
            uprband->lwr_band_data = 0;
            lwrband->lwr_band_data = 1;
        }
        uprband->dynamic_ht2040_en = spectral->sc_spectral_20_40_mode;
        uprband->dc_index = spectral->spectral_dc_index;
        SPECTRAL_DPRINTK(sc, ATH_DEBUG_SPECTRAL1,
                         "uprband->b_chan_in_mhz = %d\n",
                         uprband->b_chan_in_mhz);
        SPECTRAL_DPRINTK(sc, ATH_DEBUG_SPECTRAL1,
                         "uprband->dc_in_mhz = %d\n", uprband->dc_in_mhz);
        lwrband->dc_in_mhz = uprband->dc_in_mhz;
    } else {
        /* AP is in 20MHz mode, so only primary channel spectral data counts */
        lwrband->b_chan_in_mhz = sc->sc_curchan.channel;
        lwrband->dc_in_mhz = lwrband->b_chan_in_mhz;
        lwrband->lwr_band_data = 1;
    }

    lwrband->dynamic_ht2040_en = spectral->sc_spectral_20_40_mode;
    lwrband->dc_index = spectral->spectral_dc_index;
    SPECTRAL_DPRINTK(sc, ATH_DEBUG_SPECTRAL1,
                     "lwrband->b_chan_in_mhz = %d\n", lwrband->b_chan_in_mhz);
    SPECTRAL_DPRINTK(sc, ATH_DEBUG_SPECTRAL1,
                     "lwrband->dc_in_mhz = %d\n", lwrband->dc_in_mhz);
}