/* * ASSOC */ static void ieee80211_assoc_state_assoc_entry(void *ctx) { wlan_assoc_sm_t sm = (wlan_assoc_sm_t) ctx; ++sm->cur_assoc_attempts; if (wlan_scan_entry_assoc_state(sm->scan_entry) == AP_ASSOC_STATE_ASSOC) { if (wlan_mlme_reassoc_request(sm->vap_handle,sm->prev_bssid, sm->max_mgmt_time) !=0 ) { IEEE80211_DPRINTF(sm->vap_handle,IEEE80211_MSG_STATE,"%s: reassoc request failed retrying ...\n",__func__); sm->timeout_event = IEEE80211_ASSOC_EVENT_TIMEOUT, OS_SET_TIMER(&sm->sm_timer,ASSOC_RETRY_TIME); return; } } else { if (wlan_mlme_assoc_request(sm->vap_handle, sm->max_mgmt_time) !=0 ) { IEEE80211_DPRINTF(sm->vap_handle,IEEE80211_MSG_STATE,"%s: assoc request failed retrying ...\n",__func__); sm->timeout_event = IEEE80211_ASSOC_EVENT_TIMEOUT, OS_SET_TIMER(&sm->sm_timer,ASSOC_RETRY_TIME); return; } } }
/* * AUTH */ static void ieee80211_assoc_state_auth_entry(void *ctx) { wlan_assoc_sm_t sm = (wlan_assoc_sm_t) ctx; ++sm->cur_auth_attempts; if (wlan_mlme_auth_request(sm->vap_handle,sm->max_mgmt_time) !=0 ) { IEEE80211_DPRINTF(sm->vap_handle,IEEE80211_MSG_STATE,"%s: auth_request failed retrying ...\n",__func__); sm->timeout_event = IEEE80211_ASSOC_EVENT_TIMEOUT, OS_SET_TIMER(&sm->sm_timer,AUTH_RETRY_TIME); return; } }
/*
 * Deferred radar-processing timer callback.
 *
 * Runs the DFS pulse-pattern matcher over the queued radar events for the
 * current channel; on a confirmed radar detection it either notifies the
 * umac (NOL mode) or arms the self-test timer (test mode). Clears
 * ath_radar_tasksched on exit so the ISR path can reschedule this task.
 */
static OS_TIMER_FUNC(dfs_task)
{
    struct ieee80211com *ic;
    struct ath_dfs *dfs = NULL;

    OS_GET_TIMER_ARG(ic, struct ieee80211com *);
    dfs = (struct ath_dfs *)ic->ic_dfs;
    /*
     * XXX no locking?!
     */
    if (dfs_process_radarevent(dfs, ic->ic_curchan)) {
#ifndef ATH_DFS_RADAR_DETECTION_ONLY
        /*
         * This marks the channel (and the extension channel, if HT40) as
         * having seen a radar event.  It marks CHAN_INTERFERENCE and
         * will add it to the local NOL implementation.
         *
         * This is only done for 'usenol=1', as the other two modes
         * don't do radar notification or CAC/CSA/NOL; it just notes
         * there was a radar.
         */
        if (dfs->dfs_rinfo.rn_use_nol == 1) {
            /* NOL marking is currently disabled here; see the umac path below. */
            //dfs_channel_mark_radar(dfs, ic->ic_curchan);
        }
#endif /* ATH_DFS_RADAR_DETECTION_ONLY */
        /*
         * This calls into the umac DFS code, which sets the umac related
         * radar flags and begins the channel change machinery.
         *
         * XXX TODO: the umac NOL code isn't used, but IEEE80211_CHAN_RADAR
         * still gets set.  Since the umac NOL code isn't used, that flag
         * is never cleared.  This needs to be fixed.  See EV 105776.
         */
        if (dfs->dfs_rinfo.rn_use_nol == 1) {
            ic->ic_dfs_notify_radar(ic, ic->ic_curchan);
        } else if (dfs->dfs_rinfo.rn_use_nol == 0) {
            /*
             * For the test mode, don't do a CSA here; but setup the
             * test timer so we get a CSA _back_ to the original channel.
             */
            OS_CANCEL_TIMER(&dfs->ath_dfstesttimer);
            dfs->ath_dfstest = 1;
            dfs->ath_dfstest_ieeechan = ic->ic_curchan->ic_ieee;
            dfs->ath_dfstesttime = 1; /* 1ms */
            OS_SET_TIMER(&dfs->ath_dfstesttimer, dfs->ath_dfstesttime);
        }
    }
    /* Allow the PHY-error ISR path to schedule this task again. */
    dfs->ath_radar_tasksched = 0;
}
/*
 * Timer callback: blink the WPS LED once per second until WPS_TIME_OUT
 * seconds have elapsed, then restore the LED to its initial state and
 * clear the global blinking flag.
 */
static OS_TIMER_FUNC(wps_led_blink)
{
    /* Persist blink phase and elapsed seconds across callbacks. */
    static int led_val = WPS_LED_ON;
    static int elapsed = 0;

    ath_gpio_out_val(WPS_LED_GPIO, led_val);
    led_val = !led_val;

    if (++elapsed < WPS_TIME_OUT) {
        /* Re-arm for the next 1 s tick. */
        OS_SET_TIMER(&os_timer_t, 1000);
        return;
    }

    /* Timed out: stop blinking and restore the original LED state. */
    elapsed = 0;
    wps_led_blinking = 0;
    OS_CANCEL_TIMER(&os_timer_t);
    ath_gpio_out_val(WPS_LED_GPIO, initial_led_state);
}
static OS_TIMER_FUNC(wps_led_fail) { static int WPSled = WPS_LED_ON, sec = 0; ath_gpio_out_val(WPS_LED_GPIO, WPSled); WPSled = !WPSled; sec++; if (sec < 250 * 5) {//Keep blinking for 250 seconds & timer callback kicks in every 200 ms OS_SET_TIMER(&os_timer_t, 200); } else { sec = 0; wps_led_blinking = 0; OS_CANCEL_TIMER(&os_timer_t); ath_gpio_out_val(WPS_LED_GPIO, initial_led_state); } }
/* END ADD: c00217102 2012-8-12 FOR WS323 */
/*
 * Timer callback (WS323 variant): blink the WPS LED once per second for
 * up to 130 seconds, then turn it off and mark simple-config as idle.
 *
 * NOTE(review): WPSled and sec are not declared in this function —
 * presumably file-scope state shared with the other WPS LED handlers;
 * verify against the full file.
 */
static OS_TIMER_FUNC(wps_led_blink)
{
    gpio_wps_other_led_off();               /* make sure only the WPS LED is driven */
    ath_gpio_out_val(wps_led_gpio, WPSled);
    WPSled = !WPSled;                       /* toggle for the next tick */
    sec++;
    if (sec < 130) {
        OS_SET_TIMER(&os_timer_t, 1000);    /* re-arm for another 1 s tick */
    } else {
        /* 130 s elapsed: force LED off and end the simple-config session. */
        sec = 0;
        OS_CANCEL_TIMER(&os_timer_t);
        WPSled=WPS_LED_OFF;
        ath_gpio_out_val(wps_led_gpio, WPSled);
        simple_config_led_state = SIMPLE_CONFIG_OFF;
    }
}
static OS_TIMER_FUNC(power_led_blink) { static int power_led_status = POWER_LED_OFF, power_on_timeout = 0; OS_CANCEL_TIMER(&power_on_timer); if (power_on_finish) { ath_gpio_out_val(POWER_ON_GLED_GPIO, POWER_LED_ON); } else if (++power_on_timeout >= POWER_ON_TIMEOUT) { ath_gpio_out_val(POWER_ON_GLED_GPIO, POWER_LED_OFF); ath_gpio_config_input(POWER_ON_GLED_GPIO); ath_gpio_config_output(POWER_ON_RLED_GPIO); ath_gpio_out_val(POWER_ON_RLED_GPIO, POWER_LED_ON); } else { ath_gpio_out_val(POWER_ON_GLED_GPIO, power_led_status); power_led_status = !power_led_status; OS_SET_TIMER(&power_on_timer, POWER_LED_BLINK_INTERVAL); } }
/* START ADD: c00217102 2012-8-12 FOR WS323 */ static OS_TIMER_FUNC(wps_led_on) { static int WPSled = WPS_LED_ON, secd = 0; gpio_wps_other_led_off(); ath_gpio_out_val(wps_led_gpio, WPSled); if (secd < 100) { secd++; OS_SET_TIMER(&os_timer_t, 100); } else { secd = 0; OS_CANCEL_TIMER(&os_timer_t); ath_gpio_out_val(wps_led_gpio, WPS_LED_OFF); simple_config_led_state = SIMPLE_CONFIG_OFF; printk("OS_TIMER_FUNC(wps_led_on) over\n"); } }
/* * JOIN */ static void ieee80211_btamp_conn_state_join_entry(void *ctx) { wlan_btamp_conn_sm_t sm = (wlan_btamp_conn_sm_t) ctx; struct ieee80211_node *ni = NULL; struct ieee80211vap *vap = sm->vap_handle; sm->is_join = 1; ni = ieee80211_vap_find_node(vap, sm->peer); OS_SET_TIMER(&sm->sm_timer, sm->max_mgmt_time); if (ni) { ieee80211_send_probereq(ni, vap->iv_myaddr, sm->peer, sm->peer, ni->ni_essid, ni->ni_esslen, NULL, 0); ieee80211_free_node(ni); IEEE80211_DPRINTF(sm->vap_handle,IEEE80211_MSG_STATE,"%s: probe request", __func__); } }
/*
 * VAP event handler for SM power save (SMPS).
 *
 * On VAP_UP, starts the periodic SMPS timer exactly once per connection
 * (guarded by ips_connected). On VAP_DOWN or VAP_FULL_SLEEP, cancels the
 * timer and clears the connected flag. All other events are ignored.
 */
static void ieee80211_pwrsave_smps_vap_event_handler (ieee80211_vap_t vap, ieee80211_vap_event *event, void *arg)
{
    ieee80211_pwrsave_smps_t smps = (ieee80211_pwrsave_smps_t) arg;

    if (event->type == IEEE80211_VAP_UP) {
        if (!smps->ips_connected) {
            smps->ips_connected = TRUE;
            OS_SET_TIMER(&smps->ips_timer, IEEE80211_PWRSAVE_TIMER_INTERVAL);
        }
    } else if (event->type == IEEE80211_VAP_FULL_SLEEP ||
               event->type == IEEE80211_VAP_DOWN) {
        OS_CANCEL_TIMER(&smps->ips_timer);
        smps->ips_connected = FALSE;
    }
    /* other VAP events: nothing to do */
}
/* * Periodically gather performance statistics. Currently, * we measure throughput and PER over a time window. */ static OS_TIMER_FUNC(ieee80211_prdperfstats_gather) { struct ieee80211com *ic; OS_GET_TIMER_ARG(ic, struct ieee80211com *); IEEE80211_PRDPERFSTATS_THRPUT_LOCK(ic); if (ic->ic_thrput.is_enab) { if (unlikely(!ic->ic_thrput.is_started)) { ieee80211_prdperfstat_thrput_start(ic); } else { ic->ic_thrput.timer_count++; if (ic->ic_thrput.timer_count == PRDPERFSTAT_THRPUT_INTERVAL_MULT) { ieee80211_prdperfstat_thrput_update_hist(ic); ic->ic_thrput.timer_count = 0; } } } IEEE80211_PRDPERFSTATS_THRPUT_UNLOCK(ic); IEEE80211_PRDPERFSTATS_PER_LOCK(ic); if (ic->ic_per.is_enab) { if (unlikely(!ic->ic_per.is_started)) { ieee80211_prdperfstat_per_start(ic); } else { ic->ic_per.timer_count++; if (ic->ic_per.timer_count == PRDPERFSTAT_PER_INTERVAL_MULT) { ieee80211_prdperfstat_per_update_hist(ic); ic->ic_per.timer_count = 0; } } } IEEE80211_PRDPERFSTATS_PER_UNLOCK(ic); OS_SET_TIMER(&ic->ic_prdperfstats_timer, PRDPERFSTATS_PERIOD_MS); }
/*
 * Send an open-system authentication request to a BT-AMP peer.
 *
 * Records the pending MLME request, arms the authentication timeout timer,
 * then transmits the AUTH frame. If the transmit fails, the pending request
 * state is rolled back and the timer cancelled.
 *
 * Returns 0 on success, EINVAL if the peer node is unknown, or the
 * transmit error code.
 */
int ieee80211_mlme_auth_request_btamp(wlan_if_t vaphandle, u_int8_t *peer_addr, u_int32_t timeout)
{
    struct ieee80211vap *vap = vaphandle;
    struct ieee80211_mlme_priv *mlme_priv = vap->iv_mlme_priv;
    struct ieee80211_node *ni;
    int status;

    IEEE80211_DPRINTF(vap, IEEE80211_MSG_MLME, "%s\n", __func__);

    ni = ieee80211_find_txnode(vap, peer_addr);
    if (ni == NULL) {
        return EINVAL;
    }

    /* Wait for auth seq number 2 (open response). */
    ASSERT(mlme_priv->im_request_type == MLME_REQ_NONE);
    mlme_priv->im_request_type = MLME_REQ_AUTH;
    mlme_priv->im_expected_auth_seq_number = IEEE80211_AUTH_OPEN_RESPONSE;

    /* Arm the authenticate-failure timeout before transmitting. */
    OS_SET_TIMER(&mlme_priv->im_timeout_timer, timeout);

    status = ieee80211_send_auth(ni, IEEE80211_AUTH_OPEN_REQUEST, 0, NULL, 0);
    ieee80211_free_node(ni);

    if (status) {
        /* Transmit failed: roll back the pending request state. */
        mlme_priv->im_request_type = MLME_REQ_NONE;
        OS_CANCEL_TIMER(&mlme_priv->im_timeout_timer);
    }
    return status;
}
/*
 * Send an association or reassociation request to a BT-AMP peer.
 *
 * Records the pending MLME request (ASSOC or REASSOC depending on the
 * reassoc flag), arms the association timeout timer, then transmits the
 * frame. If the transmit fails, the pending request state is rolled back
 * and the timer cancelled.
 *
 * Returns 0 on success, EINVAL if the peer node is unknown, or the
 * transmit error code.
 */
static int mlme_assoc_reassoc_request_btamp(wlan_if_t vaphandle, u_int8_t *mac_addr, int reassoc, u_int8_t *prev_bssid, u_int32_t timeout)
{
    struct ieee80211vap *vap = vaphandle;
    struct ieee80211_mlme_priv *mlme_priv = vap->iv_mlme_priv;
    struct ieee80211_node *ni;
    int status;

    IEEE80211_DPRINTF(vap, IEEE80211_MSG_MLME, "%s\n", __func__);

    ni = ieee80211_find_txnode(vap, mac_addr);
    if (ni == NULL) {
        return EINVAL;
    }

    ASSERT(mlme_priv->im_request_type == MLME_REQ_NONE);
    mlme_priv->im_request_type = reassoc ? MLME_REQ_REASSOC : MLME_REQ_ASSOC;

    /* Arm the association-failure timeout before transmitting. */
    OS_SET_TIMER(&mlme_priv->im_timeout_timer, timeout);

    status = ieee80211_send_assoc(ni, reassoc, prev_bssid);
    ieee80211_free_node(ni);

    if (status) {
        /* Transmit failed: roll back the pending request state. */
        mlme_priv->im_request_type = MLME_REQ_NONE;
        OS_CANCEL_TIMER(&mlme_priv->im_timeout_timer);
    }
    return status;
}
/*
 * Continue the infrastructure-join sequence after ieee80211_sta_join.
 *
 * On success: syncs ERP protection / preamble / slot-time settings from the
 * BSS node, moves the VAP (and H/W) into JOIN state, sends a directed probe
 * request, arms the join-failure timeout, and waits for the join beacon.
 * On failure: clears the pending request and delivers JOIN_COMPLETE with an
 * UNSPECIFIED status.
 */
void ieee80211_mlme_join_infra_continue(struct ieee80211vap *vap, int32_t status)
{
    struct ieee80211com *ic = vap->iv_ic;
    struct ieee80211_node *ni;
    struct ieee80211_mlme_priv *mlme_priv = vap->iv_mlme_priv;
    u_int32_t join_timeout_ms;

    /* Ignore stale completions: a JOIN_INFRA request must be pending. */
    if (mlme_priv->im_request_type != MLME_REQ_JOIN_INFRA) {
        IEEE80211_DPRINTF(vap, IEEE80211_MSG_MLME,
            "%s : im_request_type != JOIN_INFRA\n",
            __func__);
        return;
    }

    if (status != EOK) {
        mlme_priv->im_request_type = MLME_REQ_NONE;
        IEEE80211_DELIVER_EVENT_MLME_JOIN_COMPLETE_INFRA(vap, IEEE80211_STATUS_UNSPECIFIED);
        return;
    }

    /* iv_bss is valid only after ieee80211_sta_join */
    ni = vap->iv_bss;

    IEEE80211_DPRINTF(vap, IEEE80211_MSG_MLME, "%s ni=%02X:%02X:%02X:%02X:%02X:%02X\n",
        __func__,
        ni->ni_macaddr[0], ni->ni_macaddr[1], ni->ni_macaddr[2],
        ni->ni_macaddr[3], ni->ni_macaddr[4], ni->ni_macaddr[5]);

    /* Update erp info */
    if (ni->ni_erp & IEEE80211_ERP_USE_PROTECTION)
        ic->ic_flags |= IEEE80211_F_USEPROT;
    else
        ic->ic_flags &= ~IEEE80211_F_USEPROT;
    ic->ic_update_protmode(ic);

    /* Long preamble (Barker) if the AP's ERP IE demands it. */
    if(ni->ni_erp & IEEE80211_ERP_LONG_PREAMBLE)
        ic->ic_flags |= IEEE80211_F_USEBARKER;
    else
        ic->ic_flags &= (~IEEE80211_F_USEBARKER);

    /* Update slot time info */
    ieee80211_set_shortslottime(ic,
        IEEE80211_IS_CHAN_A(vap->iv_bsschan) ||
        IEEE80211_IS_CHAN_11NA(vap->iv_bsschan) ||
        (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_SLOTTIME));

    /* Put underlying H/W to JOIN state */
    ieee80211_vap_join(vap);

#ifdef ATH_SUPPORT_TxBF
    ieee80211_init_txbf(ic, ni);
#endif

    /* Send a direct probe to increase the odds of receiving a probe response */
    ieee80211_send_probereq(ni, ic->ic_myaddr, ni->ni_bssid,
                            ni->ni_bssid, ni->ni_essid, ni->ni_esslen,
                            vap->iv_opt_ie.ie, vap->iv_opt_ie.length);

    /* Set the timeout timer for Join Failure case.
     * Timeout is im_timeout beacon intervals, converted from TU to ms. */
    join_timeout_ms = IEEE80211_TU_TO_MS(mlme_priv->im_timeout * ni->ni_intval);
    IEEE80211_DPRINTF(vap, IEEE80211_MSG_MLME,
        "%s: Setting Join Timeout timer for %d ms\n", __func__, join_timeout_ms);
    OS_SET_TIMER(&mlme_priv->im_timeout_timer, join_timeout_ms);

    /* Set the appropriate filtering function and wait for Join Beacon */
    MLME_WAIT_FOR_BSS_JOIN(mlme_priv);
}
/*
 * This is called each time a channel change occurs, to (potentially) enable
 * the radar code.
 *
 * Loads the radar filter tables for the (possibly new) regulatory domain,
 * and — for AP/IBSS (and STA when STA-DFS is supported) operating on a DFS
 * channel — programs the PHY-error thresholds for the primary (and HT40
 * extension) channel state and enables radar detection.
 *
 * Returns 0 on success, -EIO if DFS is not attached.
 */
int dfs_radar_enable(struct ieee80211com *ic,
    struct ath_dfs_radar_tab_info *radar_info, int no_cac)
{
    int is_ext_ch=IEEE80211_IS_CHAN_11N_HT40(ic->ic_curchan);
    int is_fastclk = 0;
    //u_int32_t rfilt;
    struct ath_dfs *dfs=(struct ath_dfs *)ic->ic_dfs;
    struct ieee80211_channel *chan=ic->ic_curchan, *ext_ch = NULL;

    if (dfs == NULL) {
        DFS_DPRINTK(dfs, ATH_DEBUG_DFS, "%s: ic_dfs is NULL\n", __func__);
        return -EIO;
    }

    /* Disable detection while we reprogram the filters below. */
    ic->ic_dfs_disable(ic, no_cac);

    /*
     * Setting country code might change the DFS domain
     * so initialize the DFS Radar filters
     */
    dfs_init_radar_filters(ic, radar_info);

#if ATH_SUPPORT_DFS && ATH_SUPPORT_STA_DFS
    if ((ic->ic_opmode == IEEE80211_M_HOSTAP || ic->ic_opmode == IEEE80211_M_IBSS ||
         (ic->ic_opmode == IEEE80211_M_STA &&
          ieee80211com_has_cap_ext(dfs->ic,IEEE80211_CEXT_STADFS)))) {
#else
    if ((ic->ic_opmode == IEEE80211_M_HOSTAP || ic->ic_opmode == IEEE80211_M_IBSS)) {
#endif
        if (IEEE80211_IS_CHAN_DFS(chan)) {
            struct dfs_state *rs_pri=NULL, *rs_ext=NULL;
            u_int8_t index_pri, index_ext;
#ifdef ATH_ENABLE_AR
            dfs->dfs_proc_phyerr |= DFS_AR_EN;
#endif
            dfs->dfs_proc_phyerr |= DFS_RADAR_EN;
            //printk( "%s[%d]: ==== 0x%08x\n", __func__, __LINE__, dfs->dfs_proc_phyerr);
            if (is_ext_ch) {
                ext_ch = ieee80211_get_extchan(ic);
            }
            dfs_reset_alldelaylines(dfs);

            /* Look up per-channel radar state for primary (and extension). */
            rs_pri = dfs_getchanstate(dfs, &index_pri, 0);
            if (ext_ch) {
                rs_ext = dfs_getchanstate(dfs, &index_ext, 1);
            }
            if (rs_pri != NULL && ((ext_ch==NULL)||(rs_ext != NULL))) {
                struct ath_dfs_phyerr_param pe;

                OS_MEMSET(&pe, '\0', sizeof(pe));

                /* Channel changed: discard stale pulse history. */
                if (index_pri != dfs->dfs_curchan_radindex)
                    dfs_reset_alldelaylines(dfs);
                dfs->dfs_curchan_radindex = (int16_t) index_pri;
                if (rs_ext)
                    dfs->dfs_extchan_radindex = (int16_t) index_ext;

                ath_dfs_phyerr_param_copy(&pe, &rs_pri->rs_param);
                DFS_DPRINTK(dfs, ATH_DEBUG_DFS3,
                    "%s: firpwr=%d, rssi=%d, height=%d, "
                    "prssi=%d, inband=%d, relpwr=%d, "
                    "relstep=%d, maxlen=%d\n",
                    __func__,
                    pe.pe_firpwr,
                    pe.pe_rrssi,
                    pe.pe_height,
                    pe.pe_prssi,
                    pe.pe_inband,
                    pe.pe_relpwr,
                    pe.pe_relstep,
                    pe.pe_maxlen
                    );
#if 0 //Not needed
                /* Disable strong signal fast antenna diversity */
                ath_hal_setcapability(ah, HAL_CAP_DIVERSITY,
                    HAL_CAP_STRONG_DIV, 1, NULL);
#endif
                /* Program the HW thresholds and learn the clock rate. */
                ic->ic_dfs_enable(ic, &is_fastclk, &pe);
                DFS_DPRINTK(dfs, ATH_DEBUG_DFS,
                    "Enabled radar detection on channel %d\n", chan->ic_freq);
                dfs->dur_multiplier =
                    is_fastclk ? DFS_FAST_CLOCK_MULTIPLIER : DFS_NO_FAST_CLOCK_MULTIPLIER;
                DFS_DPRINTK(dfs, ATH_DEBUG_DFS3,
                    "%s: duration multiplier is %d\n", __func__, dfs->dur_multiplier);
            } else
                DFS_DPRINTK(dfs, ATH_DEBUG_DFS,
                    "%s: No more radar states left\n", __func__);
        }
    }
    return 0;
}

/*
 * DFS ioctl dispatcher (umac/ieee80211com flavor).
 *
 * Handles get/set of detection thresholds, debug level, NOL contents and
 * timeout, CAC parameters, test ("mute") time, and the BANGRADAR self-test.
 * Enable/disable of detection is also handled before DFS attach (dfs==NULL)
 * via the ic_dfs_state.ignore_dfs flag.
 *
 * Returns 0 on success, -EINVAL for bad arguments or unknown ids.
 */
int dfs_control(struct ieee80211com *ic, u_int id,
    void *indata, u_int32_t insize,
    void *outdata, u_int32_t *outsize)
{
    int error = 0;
    struct ath_dfs_phyerr_param peout;
    struct ath_dfs *dfs = (struct ath_dfs *)ic->ic_dfs;
    struct dfs_ioctl_params *dfsparams;
    u_int32_t val=0;
#ifndef ATH_DFS_RADAR_DETECTION_ONLY
    struct dfsreq_nolinfo *nol;
    u_int32_t *data = NULL;
#endif /* ATH_DFS_RADAR_DETECTION_ONLY */
    int i;

    if (dfs == NULL) {
        DFS_DPRINTK(dfs, ATH_DEBUG_DFS1, "%s DFS is null\n", __func__);
        /* Enable/Disable DFS can be done prior to attach, So handle here */
        switch (id) {
        case DFS_DISABLE_DETECT:
            ic->ic_dfs_state.ignore_dfs = 1;
            /* NOTE(review): message says "enable detects" on the disable
             * path — looks copy-pasted; confirm before changing. */
            DFS_PRINTK("%s enable detects, ignore_dfs %d\n",
                __func__, ic->ic_dfs_state.ignore_dfs ? 1:0);
            break;
        case DFS_ENABLE_DETECT:
            ic->ic_dfs_state.ignore_dfs = 0;
            DFS_PRINTK("%s enable detects, ignore_dfs %d\n",
                __func__, ic->ic_dfs_state.ignore_dfs ? 1:0);
            break;
        default:
            error = -EINVAL;
            break;
        }
        goto bad;
    }
    //printk("%s[%d] id =%d\n", __func__, __LINE__, id);
    switch (id) {
    case DFS_SET_THRESH:
        if (insize < sizeof(struct dfs_ioctl_params) || !indata) {
            DFS_DPRINTK(dfs, ATH_DEBUG_DFS1,
                "%s: insize=%d, expected=%d bytes, indata=%p\n",
                __func__, insize, sizeof(struct dfs_ioctl_params), indata);
            error = -EINVAL;
            break;
        }
        dfsparams = (struct dfs_ioctl_params *) indata;
        /* Each threshold is validated individually; any rejection -> EINVAL. */
        if (!dfs_set_thresholds(ic, DFS_PARAM_FIRPWR, dfsparams->dfs_firpwr))
            error = -EINVAL;
        if (!dfs_set_thresholds(ic, DFS_PARAM_RRSSI, dfsparams->dfs_rrssi))
            error = -EINVAL;
        if (!dfs_set_thresholds(ic, DFS_PARAM_HEIGHT, dfsparams->dfs_height))
            error = -EINVAL;
        if (!dfs_set_thresholds(ic, DFS_PARAM_PRSSI, dfsparams->dfs_prssi))
            error = -EINVAL;
        if (!dfs_set_thresholds(ic, DFS_PARAM_INBAND, dfsparams->dfs_inband))
            error = -EINVAL;
        /* 5413 speicfic */
        if (!dfs_set_thresholds(ic, DFS_PARAM_RELPWR, dfsparams->dfs_relpwr))
            error = -EINVAL;
        if (!dfs_set_thresholds(ic, DFS_PARAM_RELSTEP, dfsparams->dfs_relstep))
            error = -EINVAL;
        if (!dfs_set_thresholds(ic, DFS_PARAM_MAXLEN, dfsparams->dfs_maxlen))
            error = -EINVAL;
        break;
    case DFS_GET_THRESH:
        if (!outdata || !outsize || *outsize <sizeof(struct dfs_ioctl_params)) {
            error = -EINVAL;
            break;
        }
        *outsize = sizeof(struct dfs_ioctl_params);
        dfsparams = (struct dfs_ioctl_params *) outdata;
        /*
         * Fetch the DFS thresholds using the internal representation.
         */
        (void) dfs_get_thresholds(ic, &peout);
        /*
         * Convert them to the dfs IOCTL representation.
         */
        ath_dfs_dfsparam_to_ioctlparam(&peout, dfsparams);
        break;
    case DFS_RADARDETECTS:
        if (!outdata || !outsize || *outsize < sizeof(u_int32_t)) {
            error = -EINVAL;
            break;
        }
        *outsize = sizeof (u_int32_t);
        *((u_int32_t *)outdata) = dfs->ath_dfs_stats.num_radar_detects;
        break;
    case DFS_DISABLE_DETECT:
        dfs->dfs_proc_phyerr &= ~DFS_RADAR_EN;
        dfs->ic->ic_dfs_state.ignore_dfs = 1;
        /* NOTE(review): same copy-pasted "enable detects" wording as above. */
        DFS_PRINTK("%s enable detects, ignore_dfs %d\n",
            __func__, dfs->ic->ic_dfs_state.ignore_dfs ? 1:0);
        break;
    case DFS_ENABLE_DETECT:
        dfs->dfs_proc_phyerr |= DFS_RADAR_EN;
        dfs->ic->ic_dfs_state.ignore_dfs = 0;
        DFS_PRINTK("%s enable detects, ignore_dfs %d\n",
            __func__, dfs->ic->ic_dfs_state.ignore_dfs ? 1:0);
        break;
    case DFS_DISABLE_FFT:
        /* FFT config not wired up for this target yet; val stays 0. */
        //UMACDFS: TODO: val = ath_hal_dfs_config_fft(sc->sc_ah, false);
        DFS_PRINTK("%s TODO disable FFT val=0x%x \n", __func__, val);
        break;
    case DFS_ENABLE_FFT:
        //UMACDFS TODO: val = ath_hal_dfs_config_fft(sc->sc_ah, true);
        DFS_PRINTK("%s TODO enable FFT val=0x%x \n", __func__, val);
        break;
    case DFS_SET_DEBUG_LEVEL:
        if (insize < sizeof(u_int32_t) || !indata) {
            error = -EINVAL;
            break;
        }
        dfs->dfs_debug_mask= *(u_int32_t *)indata;
        DFS_PRINTK("%s debug level now = 0x%x \n",
            __func__, dfs->dfs_debug_mask);
        if (dfs->dfs_debug_mask & ATH_DEBUG_DFS3) {
            /* Enable debug Radar Event */
            dfs->dfs_event_log_on = 1;
        } else {
            dfs->dfs_event_log_on = 0;
        }
        break;
    case DFS_SET_FALSE_RSSI_THRES:
        if (insize < sizeof(u_int32_t) || !indata) {
            error = -EINVAL;
            break;
        }
        dfs->ath_dfs_false_rssi_thres= *(u_int32_t *)indata;
        DFS_PRINTK("%s false RSSI threshold now = 0x%x \n",
            __func__, dfs->ath_dfs_false_rssi_thres);
        break;
    case DFS_SET_PEAK_MAG:
        if (insize < sizeof(u_int32_t) || !indata) {
            error = -EINVAL;
            break;
        }
        dfs->ath_dfs_peak_mag= *(u_int32_t *)indata;
        DFS_PRINTK("%s peak_mag now = 0x%x \n",
            __func__, dfs->ath_dfs_peak_mag);
        break;
    case DFS_GET_CAC_VALID_TIME:
        if (!outdata || !outsize || *outsize < sizeof(u_int32_t)) {
            error = -EINVAL;
            break;
        }
        *outsize = sizeof (u_int32_t);
        *((u_int32_t *)outdata) = dfs->ic->ic_dfs_state.cac_valid_time;
        break;
    case DFS_SET_CAC_VALID_TIME:
        if (insize < sizeof(u_int32_t) || !indata) {
            error = -EINVAL;
            break;
        }
        dfs->ic->ic_dfs_state.cac_valid_time = *(u_int32_t *)indata;
        DFS_PRINTK("%s dfs timeout = %d \n",
            __func__, dfs->ic->ic_dfs_state.cac_valid_time);
        break;
    case DFS_IGNORE_CAC:
        if (insize < sizeof(u_int32_t) || !indata) {
            error = -EINVAL;
            break;
        }
        if (*(u_int32_t *)indata) {
            dfs->ic->ic_dfs_state.ignore_cac= 1;
        }
        else {
            dfs->ic->ic_dfs_state.ignore_cac= 0;
        }
        DFS_PRINTK("%s ignore cac = 0x%x \n",
            __func__, dfs->ic->ic_dfs_state.ignore_cac);
        break;
    case DFS_SET_NOL_TIMEOUT:
        if (insize < sizeof(u_int32_t) || !indata) {
            error = -EINVAL;
            break;
        }
        /* Zero means "use the default NOL timeout". */
        if (*(int *)indata) {
            dfs->ath_dfs_nol_timeout= *(int *)indata;
        } else {
            dfs->ath_dfs_nol_timeout= DFS_NOL_TIMEOUT_S;
        }
        DFS_PRINTK("%s nol timeout = %d sec \n",
            __func__, dfs->ath_dfs_nol_timeout);
        break;
#ifndef ATH_DFS_RADAR_DETECTION_ONLY
    case DFS_MUTE_TIME:
        if (insize < sizeof(u_int32_t) || !indata) {
            error = -EINVAL;
            break;
        }
        data = (u_int32_t *) indata;
        dfs->ath_dfstesttime = *data;
        dfs->ath_dfstesttime *= (1000); //convert sec into ms
        break;
    case DFS_GET_USENOL:
        if (!outdata || !outsize || *outsize < sizeof(u_int32_t)) {
            error = -EINVAL;
            break;
        }
        *outsize = sizeof(u_int32_t);
        *((u_int32_t *)outdata) = dfs->dfs_rinfo.rn_use_nol;
        /* As a side effect, dump and reset the PHY-error / radar event log. */
        printk("%s:#Phyerr=%d, #false detect=%d, #queued=%d\n",
            __func__,dfs->dfs_phyerr_count, dfs->dfs_phyerr_reject_count,
            dfs->dfs_phyerr_queued_count);
        printk("%s:dfs_phyerr_freq_min=%d, dfs_phyerr_freq_max=%d\n",
            __func__,dfs->dfs_phyerr_freq_min, dfs->dfs_phyerr_freq_max);
        printk("%s:Total radar events detected=%d, entries in the radar queue follows:\n",
            __func__,dfs->dfs_event_log_count);
        for (i = 0; (i < DFS_EVENT_LOG_SIZE) && (i < dfs->dfs_event_log_count); i++) {
            //DFS_DPRINTK(sc, ATH_DEBUG_DFS,"ts=%llu diff_ts=%u rssi=%u dur=%u\n", dfs->radar_log[i].ts, dfs->radar_log[i].diff_ts, dfs->radar_log[i].rssi, dfs->radar_log[i].dur);
            printk("ts=%llu diff_ts=%u rssi=%u dur=%u\n",
                dfs->radar_log[i].ts, dfs->radar_log[i].diff_ts,
                dfs->radar_log[i].rssi, dfs->radar_log[i].dur);
        }
        dfs->dfs_event_log_count = 0;
        dfs->dfs_phyerr_count = 0;
        dfs->dfs_phyerr_reject_count = 0;
        dfs->dfs_phyerr_queued_count = 0;
        dfs->dfs_phyerr_freq_min = 0x7fffffff;
        dfs->dfs_phyerr_freq_max = 0;
        break;
    case DFS_SET_USENOL:
        if (insize < sizeof(u_int32_t) || !indata) {
            error = -EINVAL;
            break;
        }
        dfs->dfs_rinfo.rn_use_nol = *(u_int32_t *)indata;
        /* iwpriv markdfs in linux can do the same thing... */
        break;
    case DFS_GET_NOL:
        if (!outdata || !outsize || *outsize < sizeof(struct dfsreq_nolinfo)) {
            error = -EINVAL;
            break;
        }
        *outsize = sizeof(struct dfsreq_nolinfo);
        nol = (struct dfsreq_nolinfo *)outdata;
        dfs_get_nol(dfs, (struct dfsreq_nolelem *)nol->dfs_nol, &nol->ic_nchans);
        dfs_print_nol(dfs);
        break;
    case DFS_SET_NOL:
        if (insize < sizeof(struct dfsreq_nolinfo) || !indata) {
            error = -EINVAL;
            break;
        }
        nol = (struct dfsreq_nolinfo *) indata;
        dfs_set_nol(dfs, (struct dfsreq_nolelem *)nol->dfs_nol, nol->ic_nchans);
        break;
    case DFS_SHOW_NOL:
        dfs_print_nol(dfs);
        break;
#if ATH_SUPPORT_DFS && ATH_SUPPORT_STA_DFS
    case DFS_SHOW_NOLHISTORY:
        dfs_print_nolhistory(ic,dfs);
        break;
#endif
    case DFS_BANGRADAR:
#if 0 //MERGE_TBD
        if(sc->sc_nostabeacons) {
            printk("No radar detection Enabled \n");
            break;
        }
#endif
        /* Inject a fake radar detection and kick the DFS task immediately. */
        dfs->dfs_bangradar = 1;
        dfs->ath_radar_tasksched = 1;
        OS_SET_TIMER(&dfs->ath_dfs_task_timer, 0);
        break;
#endif /* ATH_DFS_RADAR_DETECTION_ONLY */
    default:
        error = -EINVAL;
    }
bad:
    return error;
}
/*
 * Add a channel to the Non-Occupancy List (NOL).
 *
 * If the channel (freq + width) is already on the NOL, its start time and
 * timeout are refreshed and its expiry timer re-armed. Otherwise a new NOL
 * element is allocated, appended to the singly-linked list, and an expiry
 * timer (dfs_remove_from_nol) is armed for it. The timer-arg allocation is
 * owned by the timer callback once armed. dfs_nol_timeout is in seconds.
 */
void dfs_nol_addchan(struct ath_dfs *dfs, struct ieee80211_channel *chan,
    u_int32_t dfs_nol_timeout)
{
#define TIME_IN_MS 1000
#define TIME_IN_US (TIME_IN_MS * 1000)
    struct dfs_nolelem *nol, *elem, *prev;
    struct dfs_nol_timer_arg *dfs_nol_arg;
    /* XXX for now, assume all events are 20MHz wide */
    int ch_width = 20;

    if (dfs == NULL) {
        DFS_DPRINTK(dfs, ATH_DEBUG_DFS_NOL, "%s: sc_dfs is NULL\n", __func__);
        return;
    }

    /* Scan the list for an existing entry for this channel. */
    nol = dfs->dfs_nol;
    prev = dfs->dfs_nol;
    elem = NULL;
    while (nol != NULL) {
        if ((nol->nol_freq == chan->ic_freq) &&
            (nol->nol_chwidth == ch_width)) {
            /* Already listed: refresh the start time and re-arm expiry. */
            nol->nol_start_ticks = adf_os_ticks();
            nol->nol_timeout_ms = dfs_nol_timeout*TIME_IN_MS;
            DFS_DPRINTK(dfs, ATH_DEBUG_DFS_NOL,
                "%s: Update OS Ticks for NOL %d MHz / %d MHz\n",
                __func__, nol->nol_freq, nol->nol_chwidth);
            OS_CANCEL_TIMER(&nol->nol_timer);
            OS_SET_TIMER(&nol->nol_timer, dfs_nol_timeout*TIME_IN_MS);
            return;
        }
        prev = nol;
        nol = nol->nol_next;
    }

    /* Add a new element to the NOL*/
    elem = (struct dfs_nolelem *)OS_MALLOC(dfs->ic->ic_osdev,
        sizeof(struct dfs_nolelem),GFP_ATOMIC);
    if (elem == NULL) {
        goto bad;
    }
    dfs_nol_arg = (struct dfs_nol_timer_arg *)OS_MALLOC(dfs->ic->ic_osdev,
        sizeof(struct dfs_nol_timer_arg), GFP_ATOMIC);
    if (dfs_nol_arg == NULL) {
        OS_FREE(elem);
        goto bad;
    }
    elem->nol_freq = chan->ic_freq;
    elem->nol_chwidth = ch_width;
    elem->nol_start_ticks = adf_os_ticks();
    elem->nol_timeout_ms = dfs_nol_timeout*TIME_IN_MS;
    elem->nol_next = NULL;
    if (prev) {
        prev->nol_next = elem;
    } else {
        /* This is the first element in the NOL */
        dfs->dfs_nol = elem;
    }

    /* Timer argument identifies which entry to remove on expiry. */
    dfs_nol_arg->dfs = dfs;
    dfs_nol_arg->delfreq = elem->nol_freq;
    dfs_nol_arg->delchwidth = elem->nol_chwidth;

    OS_INIT_TIMER(dfs->ic->ic_osdev, &elem->nol_timer, dfs_remove_from_nol,
        dfs_nol_arg);
    OS_SET_TIMER(&elem->nol_timer, dfs_nol_timeout*TIME_IN_MS);

    /* Update the NOL counter */
    dfs->dfs_nol_count++;

    DFS_DPRINTK(dfs, ATH_DEBUG_DFS_NOL,
        "%s: new NOL channel %d MHz / %d MHz\n",
        __func__, elem->nol_freq, elem->nol_chwidth);
    return;

bad:
    DFS_DPRINTK(dfs, ATH_DEBUG_DFS_NOL | ATH_DEBUG_DFS,
        "%s: failed to allocate memory for nol entry\n", __func__);

#undef TIME_IN_MS
#undef TIME_IN_US
}
/*
 * DFS ioctl dispatcher (ath_dev/softc flavor).
 *
 * Handles get/set of detection thresholds, NOL contents, usenol mode,
 * debug level, FFT enable/disable (via direct radar register pokes),
 * test ("mute") time, and the BANGRADAR self-test.
 *
 * Returns 0 on success, -EINVAL for bad arguments or unknown ids.
 */
int dfs_control(ath_dev_t dev, u_int id,
                void *indata, u_int32_t insize,
                void *outdata, u_int32_t *outsize)
{
    struct ath_softc *sc = ATH_DEV_TO_SC(dev);
    int error = 0;
    u_int32_t *data = NULL;
    HAL_PHYERR_PARAM peout;
    struct dfsreq_nolinfo *nol;
    struct ath_dfs *dfs = sc->sc_dfs;
    struct dfs_ioctl_params *dfsparams;
    u_int32_t val=0;

    if (dfs == NULL) {
        error = -EINVAL;
        DFS_DPRINTK(sc, ATH_DEBUG_DFS, "%s DFS is null\n", __func__);
        goto bad;
    }

    switch (id) {
    case DFS_MUTE_TIME:
        if (insize < sizeof(u_int32_t) || !indata) {
            error = -EINVAL;
            break;
        }
        data = (u_int32_t *) indata;
        sc->sc_dfs->sc_dfstesttime = *data;
        sc->sc_dfs->sc_dfstesttime *= (1000); //convert sec into ms
        break;
    case DFS_SET_THRESH:
        /* NOTE(review): size is checked against HAL_PHYERR_PARAM but the
         * payload is read as struct dfs_ioctl_params — confirm the two
         * layouts match before relying on this check. */
        if (insize < sizeof(HAL_PHYERR_PARAM) || !indata) {
            error = -EINVAL;
            break;
        }
        dfsparams = (struct dfs_ioctl_params *) indata;
        if (!dfs_set_thresholds(sc, DFS_PARAM_FIRPWR, dfsparams->dfs_firpwr))
            error = -EINVAL;
        if (!dfs_set_thresholds(sc, DFS_PARAM_RRSSI, dfsparams->dfs_rrssi))
            error = -EINVAL;
        if (!dfs_set_thresholds(sc, DFS_PARAM_HEIGHT, dfsparams->dfs_height))
            error = -EINVAL;
        if (!dfs_set_thresholds(sc, DFS_PARAM_PRSSI, dfsparams->dfs_prssi))
            error = -EINVAL;
        if (!dfs_set_thresholds(sc, DFS_PARAM_INBAND, dfsparams->dfs_inband))
            error = -EINVAL;
        /* 5413 speicfic */
        if (!dfs_set_thresholds(sc, DFS_PARAM_RELPWR, dfsparams->dfs_relpwr))
            error = -EINVAL;
        if (!dfs_set_thresholds(sc, DFS_PARAM_RELSTEP, dfsparams->dfs_relstep))
            error = -EINVAL;
        if (!dfs_set_thresholds(sc, DFS_PARAM_MAXLEN, dfsparams->dfs_maxlen))
            error = -EINVAL;
        break;
    case DFS_GET_THRESH:
        if (!outdata || !outsize || *outsize <sizeof(struct dfs_ioctl_params)) {
            error = -EINVAL;
            break;
        }
        *outsize = sizeof(struct dfs_ioctl_params);
        /* Read the HAL representation, then copy field-by-field. */
        ath_hal_getdfsthresh(sc->sc_ah, &peout);
        dfsparams = (struct dfs_ioctl_params *) outdata;
        dfsparams->dfs_firpwr = peout.pe_firpwr;
        dfsparams->dfs_rrssi = peout.pe_rrssi;
        dfsparams->dfs_height = peout.pe_height;
        dfsparams->dfs_prssi = peout.pe_prssi;
        dfsparams->dfs_inband = peout.pe_inband;
        /* 5413 specific */
        dfsparams->dfs_relpwr = peout.pe_relpwr;
        dfsparams->dfs_relstep = peout.pe_relstep;
        dfsparams->dfs_maxlen = peout.pe_maxlen;
        break;
    case DFS_GET_USENOL:
        if (!outdata || !outsize || *outsize < sizeof(u_int32_t)) {
            error = -EINVAL;
            break;
        }
        *outsize = sizeof(u_int32_t);
        *((u_int32_t *)outdata) = dfs->dfs_rinfo.rn_use_nol;
        break;
    case DFS_SET_USENOL:
        if (insize < sizeof(u_int32_t) || !indata) {
            error = -EINVAL;
            break;
        }
        dfs->dfs_rinfo.rn_use_nol = *(u_int32_t *)indata;
        /* iwpriv markdfs in linux can do the same thing... */
        break;
    case DFS_SHOW_NOL:
        dfs_print_nol(sc);
        break;
    case DFS_BANGRADAR:
        /* Fake-radar self-test is meaningless without STA beacons. */
        if(sc->sc_nostabeacons) {
            printk("No radar detection Enabled \n");
            break;
        }
        dfs->dfs_bangradar = 1;
        sc->sc_rtasksched = 1;
        OS_SET_TIMER(&sc->sc_dfs->sc_dfs_task_timer, 0);
        break;
    case DFS_RADARDETECTS:
        if (!outdata || !outsize || *outsize < sizeof(u_int32_t)) {
            error = -EINVAL;
            break;
        }
        *outsize = sizeof (u_int32_t);
        *((u_int32_t *)outdata) = dfs->ath_dfs_stats.num_radar_detects;
        break;
    case DFS_DISABLE_DETECT:
        /*zhaoyang1 transplant from 717*/
        /*zhaoyang modfiy for disable DFS PCAPVXN-85*/
        dfs->sc_dfs_isdfsregdomain = 0;
        /*zhaoyang modify end*/
        dfs->dfs_proc_phyerr &= ~DFS_RADAR_EN;
        printk("%s disable detects\n", __func__);
        break;
    case DFS_ENABLE_DETECT:
        /*zhaoyang modfiy for disable DFS PCAPVXN-85*/
        dfs->sc_dfs_isdfsregdomain = 1;
        /*zhaoyang modify end*/
        /*zhaoyang1 transplant end*/
        dfs->dfs_proc_phyerr |= DFS_RADAR_EN;
        printk("%s enable detects\n", __func__);
        break;
    case DFS_DISABLE_FFT:
        /* Clear the FFT-enable bit (bit 31) in the radar config register. */
#define AR_PHY_RADAR_0      0x9954      /* radar detection settings */
#define AR_PHY_RADAR_DISABLE_FFT 0x7FFFFFFF
        val = OS_REG_READ(sc->sc_ah, AR_PHY_RADAR_0);
        val &= AR_PHY_RADAR_DISABLE_FFT;
        OS_REG_WRITE(sc->sc_ah, AR_PHY_RADAR_0, val);
        val = OS_REG_READ(sc->sc_ah, AR_PHY_RADAR_0);
#undef AR_PHY_RADAR_0
#undef AR_PHY_RADAR_DISABLE_FFT
        printk("%s disable FFT val=0x%x \n", __func__, val);
        break;
    case DFS_ENABLE_FFT:
        /* Set the FFT-enable bit (bit 31) in the radar config register. */
#define AR_PHY_RADAR_0      0x9954      /* radar detection settings */
#define AR_PHY_RADAR_ENABLE_FFT 0x80000000
        val = OS_REG_READ(sc->sc_ah, AR_PHY_RADAR_0);
        val |= AR_PHY_RADAR_ENABLE_FFT;
        OS_REG_WRITE(sc->sc_ah, AR_PHY_RADAR_0, val);
        val = OS_REG_READ(sc->sc_ah, AR_PHY_RADAR_0);
#undef AR_PHY_RADAR_ENABLE_FFT
#undef AR_PHY_RADAR_0
        printk("%s enable FFT val=0x%x \n", __func__, val);
        break;
    case DFS_SET_DEBUG_LEVEL:
        if (insize < sizeof(u_int32_t) || !indata) {
            error = -EINVAL;
            break;
        }
        dfs_debug_level = *(u_int32_t *)indata;
        dfs_debug_level = (ATH_DEBUG_DFS << dfs_debug_level);
        printk("%s debug level now = 0x%x \n",
            __func__, dfs_debug_level);
        break;
    case DFS_GET_NOL:
        if (!outdata || !outsize || *outsize < sizeof(struct dfsreq_nolinfo)) {
            error = -EINVAL;
            break;
        }
        *outsize = sizeof(struct dfsreq_nolinfo);
        nol = (struct dfsreq_nolinfo *)outdata;
        dfs_get_nol(sc, (struct dfsreq_nolelem *)nol->dfs_nol, &nol->ic_nchans);
        break;
    case DFS_SET_NOL:
        if (insize < sizeof(struct dfsreq_nolinfo) || !indata) {
            error = -EINVAL;
            break;
        }
        nol = (struct dfsreq_nolinfo *) indata;
        dfs_set_nol(sc, (struct dfsreq_nolelem *)nol->dfs_nol, nol->ic_nchans);
        break;
    default:
        error = -EINVAL;
    }
bad:
    return error;
}
/*
 * Process credit reports from the target and call the distribution function.
 *
 * Parses RecLen bytes of HTC_CREDIT_REPORT_1_1 entries. For endpoint 0 the
 * credits are returned directly; for all other endpoints they accumulate in
 * TxCreditsToDist and the distribution callback hands them back out. Credit
 * deltas are derived from target sequence numbers with wraparound-safe
 * masking (& (HTC_SEQ_MAX - 1)). FromEndpoint is unused in this body.
 */
void HTCProcessCreditRpt(HTC_TARGET *target, HTC_CREDIT_REPORT *pRpt0,
    a_uint32_t RecLen, HTC_ENDPOINT_ID FromEndpoint)
{
    a_uint32_t i;
    a_uint32_t NumEntries;
#ifdef HTC_HOST_CREDIT_DIST
    a_uint32_t totalCredits = 0;
    a_uint8_t doDist = FALSE;
    HTC_ENDPOINT *pEndpoint;
    a_uint16_t seq_diff;
    a_uint16_t tgt_seq;
#endif
    HTC_CREDIT_REPORT_1_1 *pRpt = (HTC_CREDIT_REPORT_1_1 *)pRpt0 ;

    NumEntries = RecLen / (sizeof(HTC_CREDIT_REPORT_1_1)) ;

    /* lock out TX while we update credits */
    LOCK_HTC_TX(target);

    for (i = 0; i < NumEntries; i++, pRpt++) {
        /* Malformed report: endpoint id out of range — stop processing. */
        if (pRpt->EndpointID >= ENDPOINT_MAX) {
            adf_os_assert(0);
            break;
        }
#ifdef HTC_HOST_CREDIT_DIST
        pEndpoint = &target->EndPoint[pRpt->EndpointID];
        tgt_seq = adf_os_ntohs(pRpt->TgtCreditSeqNo);

        if (ENDPOINT0 == pRpt->EndpointID) {
            /* always give endpoint 0 credits back */
            seq_diff = (tgt_seq - pEndpoint->LastCreditSeq) & (HTC_SEQ_MAX -1);
            pEndpoint->CreditDist.TxCredits += seq_diff;
            pEndpoint->LastCreditSeq = tgt_seq;
        } else {
            /* for all other endpoints, update credits to distribute, the distribution function
             * will handle giving out credits back to the endpoints */
#ifdef MAGPIE_HIF_GMAC
            /* NOTE(review): hard-coded endpoint 6 — presumably the GMAC data
             * endpoint; arms a credit-seek timer when it reports credits. */
            if ( pRpt->EndpointID == 6 )
                OS_SET_TIMER(&host_seek_credit_timer, 1000);
#endif
            seq_diff = (tgt_seq - pEndpoint->LastCreditSeq) & (HTC_SEQ_MAX -1);
            pEndpoint->CreditDist.TxCreditsToDist += seq_diff;
            pEndpoint->LastCreditSeq = tgt_seq;
            /* flag that we have to do the distribution */
            doDist = TRUE;
        }
        totalCredits += seq_diff;
#endif
    }

#ifdef HTC_HOST_CREDIT_DIST
    if (doDist) {
        /* this was a credit return based on a completed send operations
         * note, this is done with the lock held */
        DO_DISTRIBUTION(target,
                        HTC_CREDIT_DIST_SEND_COMPLETE,
                        "Send Complete",
                        target->EpCreditDistributionListHead->pNext);
    }
#endif
    UNLOCK_HTC_TX(target);
}
/*
 * ath_simple_config_init - module init for the "simple config" (WPS /
 * factory-reset) GPIO driver.
 *
 * Registers the misc device, configures the jumpstart / factory-reset /
 * S17-interrupt GPIOs as level-triggered interrupt inputs, installs their
 * IRQ handlers, sets up the WPS/power LED GPIOs and, when a red power LED
 * is present, starts the power-LED blink timer.
 *
 * Returns 0 on success, -1 on any registration/IRQ failure (with all
 * previously acquired resources released).
 */
int __init ath_simple_config_init(void)
{
#ifdef CONFIG_CUS100
	u32 mask = 0;
#endif
#ifdef JUMPSTART_GPIO
	int req;
#endif
	int ret;
#ifdef AP_RESET_GPIO
	int req2;
#endif

	ret = misc_register(&athfr_miscdev);
	if (ret < 0) {
		printk("*** ath misc_register failed %d *** \n", ret);
		return -1;
	}

#ifdef AP_RESET_GPIO
	ath_gpio_config_input(AP_RESET_GPIO);
	ath_gpio_config_int(AP_RESET_GPIO, INT_TYPE_LEVEL, INT_POL_ACTIVE_LOW);
	printk("%s (%s) AP_RESET_GPIO: %d\n", __FILE__, __func__, AP_RESET_GPIO);
#endif

#ifdef JUMPSTART_GPIO
#ifdef CONFIG_CUS100
	mask = ath_reg_rd(ATH_MISC_INT_MASK);
	ath_reg_wr(ATH_MISC_INT_MASK, mask | (1 << 2));
	ath_gpio_config_int(JUMPSTART_GPIO, INT_TYPE_LEVEL, INT_POL_ACTIVE_HIGH);
	ath_gpio_intr_enable(JUMPSTART_GPIO);
	ath_gpio_config_input(JUMPSTART_GPIO);
#else
	ath_gpio_config_input(JUMPSTART_GPIO);
	/* configure Jumpstart GPIO as level triggered interrupt */
	ath_gpio_config_int(JUMPSTART_GPIO, INT_TYPE_LEVEL, INT_POL_ACTIVE_LOW);
	printk("%s (%s) JUMPSTART_GPIO: %d\n", __FILE__, __func__,
	       JUMPSTART_GPIO);
#ifndef CONFIG_MACH_AR934x
	/* Release the GPIO lines from their alternate functions. */
	ath_reg_rmw_clear(ATH_GPIO_FUNCTIONS, (1 << 2));
	ath_reg_rmw_clear(ATH_GPIO_FUNCTIONS, (1 << 16));
	ath_reg_rmw_clear(ATH_GPIO_FUNCTIONS, (1 << 20));
#endif
#endif

	req = request_irq(ATH_GPIO_IRQn(JUMPSTART_GPIO), jumpstart_irq, 0,
#ifdef AP_RESET_GPIO
			  "SW JUMPSTART", NULL);
#else
			  "SW JUMPSTART/FACTORY RESET", NULL);
#endif
	if (req != 0) {
		printk("request_irq for jumpstart failed (error %d)\n", req);
		misc_deregister(&athfr_miscdev);
		ath_gpio_intr_shutdown(ATH_GPIO_IRQn(JUMPSTART_GPIO));
		return -1;
	}
#endif /* #ifdef JUMPSTART_GPIO */

#ifdef AP_RESET_GPIO
	req2 = request_irq(ATH_GPIO_IRQn(AP_RESET_GPIO), ath_reset_irq, 0,
			   "FACTORY RESET", NULL);
	if (req2 != 0) {
		/*
		 * BUGFIX: report req2 (this request's error); the old code
		 * printed 'req', which is the jumpstart result and does not
		 * even exist when JUMPSTART_GPIO is not defined.
		 */
		printk("request_irq for factory reset failed (error %d)\n",
		       req2);
		misc_deregister(&athfr_miscdev);
#ifdef JUMPSTART_GPIO
		/*
		 * BUGFIX: free_irq() takes the IRQ number, not the
		 * request_irq() return value (which is 0 on success, so the
		 * old free_irq(req, NULL) released IRQ 0).
		 */
		free_irq(ATH_GPIO_IRQn(JUMPSTART_GPIO), NULL);
#endif
		return -1;
	}
#endif

#ifdef ATH_S17INT_GPIO
	ath_gpio_config_input(ATH_S17INT_GPIO);
	/* configure S17 interrupt GPIO as level triggered interrupt */
	ath_gpio_config_int(ATH_S17INT_GPIO, INT_TYPE_LEVEL,
			    INT_POL_ACTIVE_LOW);
	printk("%s (%s) ATH_S17INT_GPIO: %d\n", __FILE__, __func__,
	       ATH_S17INT_GPIO);
#endif

#if !defined(CONFIG_I2S) && defined(AP_USB_LED_GPIO)
	ath_gpio_config_output(AP_USB_LED_GPIO);
#endif

	init_waitqueue_head(&ath_fr_wq);

#ifdef WPS_LED_GPIO
	create_simple_config_led_proc_entry();
#endif

#ifdef POWER_ON_GLED_GPIO
	/* Green power LED: switch on solid. */
	printk("%s (%s) POWER_ON_GLED_GPIO: %d\n", __FILE__, __func__,
	       POWER_ON_GLED_GPIO);
	ath_gpio_config_output(POWER_ON_GLED_GPIO);
	ath_gpio_out_val(POWER_ON_GLED_GPIO, POWER_LED_ON);
#endif
#ifdef POWER_ON_RLED_GPIO
	/* Red power LED: start off and blink via the power-on timer. */
	printk("%s (%s) POWER_ON_RLED_GPIO: %d\n", __FILE__, __func__,
	       POWER_ON_RLED_GPIO);
	ath_gpio_config_output(POWER_ON_RLED_GPIO);
	ath_gpio_out_val(POWER_ON_RLED_GPIO, POWER_LED_OFF);
	OS_INIT_TIMER(NULL, &power_on_timer, power_led_blink, NULL);
	OS_SET_TIMER(&power_on_timer, POWER_LED_BLINK_INTERVAL);
#endif

	return 0;
}
static int gpio_simple_config_led_write(struct file *file, const char *buf, unsigned long count, void *data) { u_int32_t val; if (sscanf(buf, "%d", &val) != 1) { printk("\n val wrong %d\n", val); return -EINVAL; } printk("\n config_led_write %d \n", val); if (val == SIMPLE_CONFIG_ON) { printk("\nWPS SIMPLE_CONFIG_ON\n"); /* WPS Success */ simple_config_led_state = SIMPLE_CONFIG_ON; OS_CANCEL_TIMER(&os_timer_t); WPSled=WPS_LED_ON; gpio_wps_other_led_off(); ath_gpio_out_val(wps_led_gpio, WPSled); wps_success_func(); OS_INIT_TIMER(NULL, &os_timer_t, wps_led_on, &os_timer_t); OS_SET_TIMER(&os_timer_t, 100); } else if (val == SIMPLE_CONFIG_OFF) { /* WPS failed */ simple_config_led_state = SIMPLE_CONFIG_OFF; OS_CANCEL_TIMER(&os_timer_t); WPSled=WPS_LED_OFF; gpio_wps_other_led_off(); printk("\nWPS SIMPLE_CONFIG_OFF\n"); ath_gpio_out_val(wps_led_gpio, WPSled); } /* START ADD: c00217102 2012-8-12 FOR WS323 */ else if (val == SIMPLE_CONFIG_OVERLAP) { printk("\nWPS SIMPLE_CONFIG_OVERLAP\n"); simple_config_led_state = SIMPLE_CONFIG_OVERLAP; OS_CANCEL_TIMER(&os_timer_t); WPSled=WPS_LED_ON; gpio_wps_other_led_off(); ath_gpio_out_val(wps_led_gpio, WPSled); OS_INIT_TIMER(NULL, &os_timer_t, wps_led_overlap, &os_timer_t); OS_SET_TIMER(&os_timer_t, 100); }else if (val == SIMPLE_CONFIG_INGRESS_ERROR) { simple_config_led_state = SIMPLE_CONFIG_INGRESS_ERROR; OS_CANCEL_TIMER(&os_timer_t); WPSled=WPS_LED_ON; gpio_wps_other_led_off(); ath_gpio_out_val(wps_led_gpio, WPSled); printk("\nWPS SIMPLE_CONFIG_INGRESS_ERROR\n"); OS_INIT_TIMER(NULL, &os_timer_t, wps_led_error, &os_timer_t); OS_SET_TIMER(&os_timer_t, 100); } else if (val == SIMPLE_CONFIG_INGRESS) { simple_config_led_state = SIMPLE_CONFIG_INGRESS; OS_CANCEL_TIMER(&os_timer_t); WPSled=WPS_LED_ON; gpio_wps_other_led_off(); ath_gpio_out_val(wps_led_gpio, WPSled); printk("\nWPS SIMPLE_CONFIG_INGRESS\n"); OS_INIT_TIMER(NULL, &os_timer_t, wps_led_ingress, &os_timer_t); OS_SET_TIMER(&os_timer_t, 100); } /* END ADD: c00217102 2012-8-12 FOR 
WS323 */ return count; }
/*
 * dfs_control - DFS ioctl dispatcher.
 *
 * @ic:      IC owning the DFS state (ic->ic_dfs must be non-NULL).
 * @id:      DFS_* ioctl command.
 * @indata:  input buffer (validated against insize per command).
 * @insize:  size of indata in bytes.
 * @outdata: output buffer (validated against *outsize per command).
 * @outsize: in: capacity of outdata; out: bytes written.
 *
 * Returns 0 on success, -EINVAL on a bad command, short/missing buffer, or
 * a rejected threshold value.
 */
int
dfs_control(struct ieee80211com *ic, u_int id,
	    void *indata, u_int32_t insize,
	    void *outdata, u_int32_t *outsize)
{
	int error = 0;
	struct ath_dfs_phyerr_param peout;
	struct ath_dfs *dfs = (struct ath_dfs *)ic->ic_dfs;
	struct dfs_ioctl_params *dfsparams;
	u_int32_t val = 0;
#ifndef ATH_DFS_RADAR_DETECTION_ONLY
	struct dfsreq_nolinfo *nol;
	u_int32_t *data = NULL;
#endif /* ATH_DFS_RADAR_DETECTION_ONLY */
	int i;

	if (dfs == NULL) {
		error = -EINVAL;
		DFS_DPRINTK(dfs, ATH_DEBUG_DFS1, "%s DFS is null\n", __func__);
		goto bad;
	}

	switch (id) {
	case DFS_SET_THRESH:
		if (insize < sizeof(struct dfs_ioctl_params) || !indata) {
			DFS_DPRINTK(dfs, ATH_DEBUG_DFS1,
			    "%s: insize=%d, expected=%zu bytes, indata=%p\n",
			    __func__, insize,
			    sizeof(struct dfs_ioctl_params), indata);
			error = -EINVAL;
			break;
		}
		dfsparams = (struct dfs_ioctl_params *) indata;
		/* Apply each threshold individually; any rejection yields -EINVAL. */
		if (!dfs_set_thresholds(ic, DFS_PARAM_FIRPWR,
		    dfsparams->dfs_firpwr))
			error = -EINVAL;
		if (!dfs_set_thresholds(ic, DFS_PARAM_RRSSI,
		    dfsparams->dfs_rrssi))
			error = -EINVAL;
		if (!dfs_set_thresholds(ic, DFS_PARAM_HEIGHT,
		    dfsparams->dfs_height))
			error = -EINVAL;
		if (!dfs_set_thresholds(ic, DFS_PARAM_PRSSI,
		    dfsparams->dfs_prssi))
			error = -EINVAL;
		if (!dfs_set_thresholds(ic, DFS_PARAM_INBAND,
		    dfsparams->dfs_inband))
			error = -EINVAL;
		/* 5413 specific */
		if (!dfs_set_thresholds(ic, DFS_PARAM_RELPWR,
		    dfsparams->dfs_relpwr))
			error = -EINVAL;
		if (!dfs_set_thresholds(ic, DFS_PARAM_RELSTEP,
		    dfsparams->dfs_relstep))
			error = -EINVAL;
		if (!dfs_set_thresholds(ic, DFS_PARAM_MAXLEN,
		    dfsparams->dfs_maxlen))
			error = -EINVAL;
		break;
	case DFS_GET_THRESH:
		if (!outdata || !outsize ||
		    *outsize < sizeof(struct dfs_ioctl_params)) {
			error = -EINVAL;
			break;
		}
		*outsize = sizeof(struct dfs_ioctl_params);
		dfsparams = (struct dfs_ioctl_params *) outdata;
		/*
		 * Fetch the DFS thresholds using the internal representation.
		 */
		(void) dfs_get_thresholds(ic, &peout);
		/*
		 * Convert them to the dfs IOCTL representation.
		 */
		ath_dfs_dfsparam_to_ioctlparam(&peout, dfsparams);
		break;
	case DFS_RADARDETECTS:
		if (!outdata || !outsize || *outsize < sizeof(u_int32_t)) {
			error = -EINVAL;
			break;
		}
		*outsize = sizeof (u_int32_t);
		*((u_int32_t *)outdata) = dfs->ath_dfs_stats.num_radar_detects;
		break;
	case DFS_DISABLE_DETECT:
		dfs->dfs_proc_phyerr &= ~DFS_RADAR_EN;
		dfs->ic->ic_dfs_state.ignore_dfs = 1;
		/* BUGFIX: this is the *disable* path; the message previously
		 * said "enable detects" (copy-paste from the case below). */
		DFS_PRINTK("%s disable detects, ignore_dfs %d\n",
		    __func__,
		    dfs->ic->ic_dfs_state.ignore_dfs);
		break;
	case DFS_ENABLE_DETECT:
		dfs->dfs_proc_phyerr |= DFS_RADAR_EN;
		dfs->ic->ic_dfs_state.ignore_dfs = 0;
		DFS_PRINTK("%s enable detects, ignore_dfs %d\n",
		    __func__,
		    dfs->ic->ic_dfs_state.ignore_dfs);
		break;
	case DFS_DISABLE_FFT:
		//UMACDFS: TODO: val = ath_hal_dfs_config_fft(sc->sc_ah, false);
		DFS_PRINTK("%s TODO disable FFT val=0x%x \n", __func__, val);
		break;
	case DFS_ENABLE_FFT:
		//UMACDFS TODO: val = ath_hal_dfs_config_fft(sc->sc_ah, true);
		DFS_PRINTK("%s TODO enable FFT val=0x%x \n", __func__, val);
		break;
	case DFS_SET_DEBUG_LEVEL:
		if (insize < sizeof(u_int32_t) || !indata) {
			error = -EINVAL;
			break;
		}
		dfs->dfs_debug_mask = *(u_int32_t *)indata;
		DFS_PRINTK("%s debug level now = 0x%x \n",
		    __func__, dfs->dfs_debug_mask);
		if (dfs->dfs_debug_mask & ATH_DEBUG_DFS3) {
			/* Enable debug Radar Event */
			dfs->dfs_event_log_on = 1;
		} else {
			dfs->dfs_event_log_on = 0;
		}
		break;
	case DFS_SET_FALSE_RSSI_THRES:
		if (insize < sizeof(u_int32_t) || !indata) {
			error = -EINVAL;
			break;
		}
		dfs->ath_dfs_false_rssi_thres = *(u_int32_t *)indata;
		DFS_PRINTK("%s false RSSI threshold now = 0x%x \n",
		    __func__, dfs->ath_dfs_false_rssi_thres);
		break;
	case DFS_SET_PEAK_MAG:
		if (insize < sizeof(u_int32_t) || !indata) {
			error = -EINVAL;
			break;
		}
		dfs->ath_dfs_peak_mag = *(u_int32_t *)indata;
		DFS_PRINTK("%s peak_mag now = 0x%x \n",
		    __func__, dfs->ath_dfs_peak_mag);
		break;
	case DFS_IGNORE_CAC:
		if (insize < sizeof(u_int32_t) || !indata) {
			error = -EINVAL;
			break;
		}
		if (*(u_int32_t *)indata) {
			dfs->ic->ic_dfs_state.ignore_cac = 1;
		} else {
			dfs->ic->ic_dfs_state.ignore_cac = 0;
		}
		DFS_PRINTK("%s ignore cac = 0x%x \n",
		    __func__, dfs->ic->ic_dfs_state.ignore_cac);
		break;
	case DFS_SET_NOL_TIMEOUT:
		if (insize < sizeof(u_int32_t) || !indata) {
			error = -EINVAL;
			break;
		}
		/* Zero means "restore the default NOL timeout". */
		if (*(int *)indata) {
			dfs->ath_dfs_nol_timeout = *(int *)indata;
		} else {
			dfs->ath_dfs_nol_timeout = DFS_NOL_TIMEOUT_S;
		}
		DFS_PRINTK("%s nol timeout = %d sec \n",
		    __func__, dfs->ath_dfs_nol_timeout);
		break;
#ifndef ATH_DFS_RADAR_DETECTION_ONLY
	case DFS_MUTE_TIME:
		if (insize < sizeof(u_int32_t) || !indata) {
			error = -EINVAL;
			break;
		}
		data = (u_int32_t *) indata;
		dfs->ath_dfstesttime = *data;
		dfs->ath_dfstesttime *= (1000); //convert sec into ms
		break;
	case DFS_GET_USENOL:
		if (!outdata || !outsize || *outsize < sizeof(u_int32_t)) {
			error = -EINVAL;
			break;
		}
		*outsize = sizeof(u_int32_t);
		*((u_int32_t *)outdata) = dfs->dfs_rinfo.rn_use_nol;
		/* Side effect: also drains and resets the phyerr event log. */
		for (i = 0;
		    (i < DFS_EVENT_LOG_SIZE) && (i < dfs->dfs_event_log_count);
		    i++) {
			//DFS_DPRINTK(sc, ATH_DEBUG_DFS,"ts=%llu diff_ts=%u rssi=%u dur=%u\n", dfs->radar_log[i].ts, dfs->radar_log[i].diff_ts, dfs->radar_log[i].rssi, dfs->radar_log[i].dur);
		}
		dfs->dfs_event_log_count = 0;
		dfs->dfs_phyerr_count = 0;
		dfs->dfs_phyerr_reject_count = 0;
		dfs->dfs_phyerr_queued_count = 0;
		dfs->dfs_phyerr_freq_min = 0x7fffffff;
		dfs->dfs_phyerr_freq_max = 0;
		break;
	case DFS_SET_USENOL:
		if (insize < sizeof(u_int32_t) || !indata) {
			error = -EINVAL;
			break;
		}
		dfs->dfs_rinfo.rn_use_nol = *(u_int32_t *)indata;
		/* iwpriv markdfs in linux can do the same thing...
		 */
		break;
	case DFS_GET_NOL:
		if (!outdata || !outsize ||
		    *outsize < sizeof(struct dfsreq_nolinfo)) {
			error = -EINVAL;
			break;
		}
		*outsize = sizeof(struct dfsreq_nolinfo);
		nol = (struct dfsreq_nolinfo *)outdata;
		dfs_get_nol(dfs, (struct dfsreq_nolelem *)nol->dfs_nol,
		    &nol->ic_nchans);
		dfs_print_nol(dfs);
		break;
	case DFS_SET_NOL:
		if (insize < sizeof(struct dfsreq_nolinfo) || !indata) {
			error = -EINVAL;
			break;
		}
		nol = (struct dfsreq_nolinfo *) indata;
		dfs_set_nol(dfs, (struct dfsreq_nolelem *)nol->dfs_nol,
		    nol->ic_nchans);
		break;
	case DFS_SHOW_NOL:
		dfs_print_nol(dfs);
		break;
	case DFS_BANGRADAR:
#if 0 //MERGE_TBD
		if(sc->sc_nostabeacons)
		{
			printk("No radar detection Enabled \n");
			break;
		}
#endif
		/* Inject a fake radar event and kick the DFS task timer. */
		dfs->dfs_bangradar = 1;
		dfs->ath_radar_tasksched = 1;
		OS_SET_TIMER(&dfs->ath_dfs_task_timer, 0);
		break;
#endif /* ATH_DFS_RADAR_DETECTION_ONLY */
	default:
		error = -EINVAL;
	}
bad:
	return error;
}
void dfs_process_phyerr(struct ieee80211com *ic, void *buf, uint16_t datalen, uint8_t r_rssi, uint8_t r_ext_rssi, uint32_t r_rs_tstamp, uint64_t r_fulltsf, bool enable_log) { struct ath_dfs *dfs = (struct ath_dfs *)ic->ic_dfs; struct dfs_ieee80211_channel *chan = ic->ic_curchan; struct dfs_event *event; struct dfs_phy_err e; int empty; if (dfs == NULL) { CDF_TRACE(CDF_MODULE_ID_SAP, CDF_TRACE_LEVEL_ERROR, "%s: sc_dfs is NULL\n", __func__); return; } dfs->dfs_phyerr_count++; dump_phyerr_contents(buf, datalen); /* * XXX The combined_rssi_ok support has been removed. * This was only clear for Owl. * * XXX TODO: re-add this; it requires passing in the ctl/ext * RSSI set from the RX status descriptor. * * XXX TODO TODO: this may be done for us from the legacy * phy error path in ath_dev; please review that code. */ /* * At this time we have a radar pulse that we need to examine and * queue. But if dfs_process_radarevent already detected radar and set * CHANNEL_INTERFERENCE flag then do not queue any more radar data. * When we are in a new channel this flag will be clear and we will * start queueing data for new channel. (EV74162) */ if (dfs->dfs_debug_mask & ATH_DEBUG_DFS_PHYERR_PKT) dump_phyerr_contents(buf, datalen); if (chan == NULL) { CDF_TRACE(CDF_MODULE_ID_SAP, CDF_TRACE_LEVEL_ERROR, "%s: chan is NULL\n", __func__); return; } cdf_spin_lock_bh(&ic->chan_lock); if (IEEE80211_IS_CHAN_RADAR(chan)) { cdf_spin_unlock_bh(&ic->chan_lock); DFS_DPRINTK(dfs, ATH_DEBUG_DFS1, "%s: Radar already found in the channel, " " do not queue radar data\n", __func__); return; } cdf_spin_unlock_bh(&ic->chan_lock); dfs->ath_dfs_stats.total_phy_errors++; DFS_DPRINTK(dfs, ATH_DEBUG_DFS2, "%s[%d] phyerr %d len %d\n", __func__, __LINE__, dfs->ath_dfs_stats.total_phy_errors, datalen); /* * hardware stores this as 8 bit signed value. 
* we will cap it at 0 if it is a negative number */ if (r_rssi & 0x80) r_rssi = 0; if (r_ext_rssi & 0x80) r_ext_rssi = 0; OS_MEMSET(&e, 0, sizeof(e)); /* * This is a bit evil - instead of just passing in * the chip version, the existing code uses a set * of HAL capability bits to determine what is * possible. * * The way I'm decoding it is thus: * * + DFS enhancement? Merlin or later * + DFS extension channel? Sowl or later. (Howl?) * + otherwise, Owl (and legacy.) */ if (dfs->dfs_caps.ath_chip_is_bb_tlv) { if (dfs_process_phyerr_bb_tlv(dfs, buf, datalen, r_rssi, r_ext_rssi, r_rs_tstamp, r_fulltsf, &e, enable_log) == 0) { dfs->dfs_phyerr_reject_count++; return; } else { if (dfs->dfs_phyerr_freq_min > e.freq) dfs->dfs_phyerr_freq_min = e.freq; if (dfs->dfs_phyerr_freq_max < e.freq) dfs->dfs_phyerr_freq_max = e.freq; } } else if (dfs->dfs_caps.ath_dfs_use_enhancement) { if (dfs_process_phyerr_merlin(dfs, buf, datalen, r_rssi, r_ext_rssi, r_rs_tstamp, r_fulltsf, &e) == 0) { return; } } else if (dfs->dfs_caps.ath_dfs_ext_chan_ok) { if (dfs_process_phyerr_sowl(dfs, buf, datalen, r_rssi, r_ext_rssi, r_rs_tstamp, r_fulltsf, &e) == 0) { return; } } else { if (dfs_process_phyerr_owl(dfs, buf, datalen, r_rssi, r_ext_rssi, r_rs_tstamp, r_fulltsf, &e) == 0) { return; } } CDF_TRACE(CDF_MODULE_ID_SAP, CDF_TRACE_LEVEL_INFO, "\n %s: Frequency at which the phyerror was injected = %d", __func__, e.freq); /* * If the hardware supports radar reporting on the extension channel * it will supply FFT data for longer radar pulses. * * TLV chips don't go through this software check - the hardware * check should be enough. If we want to do software checking * later on then someone will have to craft an FFT parser * suitable for the TLV FFT data format. */ if ((!dfs->dfs_caps.ath_chip_is_bb_tlv) && dfs->dfs_caps.ath_dfs_ext_chan_ok) { /* * HW has a known issue with chirping pulses injected at or * around DC in 40MHz mode. 
Such pulses are reported with * much lower durations and SW then discards them because * they do not fit the minimum bin5 pulse duration. * * To work around this issue, if a pulse is within a 10us * range of the bin5 min duration, check if the pulse is * chirping. If the pulse is chirping, bump up the duration * to the minimum bin5 duration. * * This makes sure that a valid chirping pulse will not be * discarded because of incorrect low duration. * * TBD - Is it possible to calculate the 'real' duration of * the pulse using the slope of the FFT data? * * TBD - Use FFT data to differentiate between radar pulses * and false PHY errors. * This will let us reduce the number of false alarms seen. * * BIN 5 chirping pulses are only for FCC or Japan MMK4 domain */ if (((dfs->dfsdomain == DFS_FCC_DOMAIN) || (dfs->dfsdomain == DFS_MKK4_DOMAIN)) && (e.dur >= MAYBE_BIN5_DUR) && (e.dur < MAX_BIN5_DUR)) { int add_dur; int slope = 0, dc_found = 0; /* * Set the event chirping flags; as we're doing * an actual chirp check. */ e.do_check_chirp = 1; e.is_hw_chirp = 0; e.is_sw_chirp = 0; /* * dfs_check_chirping() expects is_pri and is_ext * to be '1' for true and '0' for false for now, * as the function itself uses these values in * constructing things rather than testing them * for 'true' or 'false'. */ add_dur = dfs_check_chirping(dfs, buf, datalen, (e.is_pri ? 1 : 0), (e.is_ext ? 
1 : 0), &slope, &dc_found); if (add_dur) { DFS_DPRINTK(dfs, ATH_DEBUG_DFS_PHYERR, "old dur %d slope =%d\n", e.dur, slope); e.is_sw_chirp = 1; /* bump up to a random bin5 pulse duration */ if (e.dur < MIN_BIN5_DUR) { e.dur = dfs_get_random_bin5_dur(dfs, e.fulltsf); } DFS_DPRINTK(dfs, ATH_DEBUG_DFS_PHYERR, "new dur %d\n", e.dur); } else { /* set the duration so that it is rejected */ e.is_sw_chirp = 0; e.dur = MAX_BIN5_DUR + 100; DFS_DPRINTK(dfs, ATH_DEBUG_DFS_PHYERR, "is_chirping = %d dur=%d\n", add_dur, e.dur); } } else { /* * We have a pulse that is either bigger than * MAX_BIN5_DUR or * less than MAYBE_BIN5_DUR */ if ((dfs->dfsdomain == DFS_FCC_DOMAIN) || (dfs->dfsdomain == DFS_MKK4_DOMAIN)) { /* * XXX Would this result in very large pulses * wrapping around to become short pulses? */ if (e.dur >= MAX_BIN5_DUR) { /* * set the duration so that it is * rejected */ e.dur = MAX_BIN5_DUR + 50; } } } } /* * Add the parsed, checked and filtered entry to the radar pulse * event list. This is then checked by dfs_radar_processevent(). * * XXX TODO: some filtering is still done below this point - fix * XXX this! */ ATH_DFSEVENTQ_LOCK(dfs); empty = STAILQ_EMPTY(&(dfs->dfs_eventq)); ATH_DFSEVENTQ_UNLOCK(dfs); if (empty) { return; } /* * If the channel is a turbo G channel, then the event is * for the adaptive radio (AR) pattern matching rather than * radar detection. 
*/ cdf_spin_lock_bh(&ic->chan_lock); if ((chan->ic_flags & CHANNEL_108G) == CHANNEL_108G) { cdf_spin_unlock_bh(&ic->chan_lock); if (!(dfs->dfs_proc_phyerr & DFS_AR_EN)) { DFS_DPRINTK(dfs, ATH_DEBUG_DFS2, "%s: DFS_AR_EN not enabled\n", __func__); return; } ATH_DFSEVENTQ_LOCK(dfs); event = STAILQ_FIRST(&(dfs->dfs_eventq)); if (event == NULL) { ATH_DFSEVENTQ_UNLOCK(dfs); DFS_DPRINTK(dfs, ATH_DEBUG_DFS, "%s: no more events space left\n", __func__); return; } STAILQ_REMOVE_HEAD(&(dfs->dfs_eventq), re_list); ATH_DFSEVENTQ_UNLOCK(dfs); event->re_rssi = e.rssi; event->re_dur = e.dur; event->re_full_ts = e.fulltsf; event->re_ts = (e.rs_tstamp) & DFS_TSMASK; event->re_chanindex = dfs->dfs_curchan_radindex; event->re_flags = 0; event->sidx = e.sidx; /* * Handle chirp flags. */ if (e.do_check_chirp) { event->re_flags |= DFS_EVENT_CHECKCHIRP; if (e.is_hw_chirp) event->re_flags |= DFS_EVENT_HW_CHIRP; if (e.is_sw_chirp) event->re_flags |= DFS_EVENT_SW_CHIRP; } ATH_ARQ_LOCK(dfs); STAILQ_INSERT_TAIL(&(dfs->dfs_arq), event, re_list); ATH_ARQ_UNLOCK(dfs); } else { if (IEEE80211_IS_CHAN_DFS(chan)) { cdf_spin_unlock_bh(&ic->chan_lock); if (!(dfs->dfs_proc_phyerr & DFS_RADAR_EN)) { DFS_DPRINTK(dfs, ATH_DEBUG_DFS3, "%s: DFS_RADAR_EN not enabled\n", __func__); return; } /* * rssi is not accurate for short pulses, so do * not filter based on that for short duration pulses * * XXX do this filtering above? */ if (dfs->dfs_caps.ath_dfs_ext_chan_ok) { if ((e.rssi < dfs->dfs_rinfo.rn_minrssithresh && (e.dur > 4)) || e.dur > (dfs->dfs_rinfo.rn_maxpulsedur)) { dfs->ath_dfs_stats.rssi_discards++; DFS_DPRINTK(dfs, ATH_DEBUG_DFS1, "Extension channel pulse is " "discarded: dur=%d, " "maxpulsedur=%d, rssi=%d, " "minrssi=%d\n", e.dur, dfs->dfs_rinfo. rn_maxpulsedur, e.rssi, dfs->dfs_rinfo. rn_minrssithresh); return; } } else { if (e.rssi < dfs->dfs_rinfo.rn_minrssithresh || e.dur > dfs->dfs_rinfo.rn_maxpulsedur) { /* XXX TODO add a debug statement? 
*/ dfs->ath_dfs_stats.rssi_discards++; return; } } /* * Add the event to the list, if there's space. */ ATH_DFSEVENTQ_LOCK(dfs); event = STAILQ_FIRST(&(dfs->dfs_eventq)); if (event == NULL) { ATH_DFSEVENTQ_UNLOCK(dfs); DFS_DPRINTK(dfs, ATH_DEBUG_DFS, "%s: no more events space left\n", __func__); return; } STAILQ_REMOVE_HEAD(&(dfs->dfs_eventq), re_list); ATH_DFSEVENTQ_UNLOCK(dfs); dfs->dfs_phyerr_queued_count++; dfs->dfs_phyerr_w53_counter++; event->re_dur = e.dur; event->re_full_ts = e.fulltsf; event->re_ts = (e.rs_tstamp) & DFS_TSMASK; event->re_rssi = e.rssi; event->sidx = e.sidx; /* * Handle chirp flags. */ if (e.do_check_chirp) { event->re_flags |= DFS_EVENT_CHECKCHIRP; if (e.is_hw_chirp) event->re_flags |= DFS_EVENT_HW_CHIRP; if (e.is_sw_chirp) event->re_flags |= DFS_EVENT_SW_CHIRP; } /* * Correctly set which channel is being reported on */ if (e.is_pri) { event->re_chanindex = dfs->dfs_curchan_radindex; } else { if (dfs->dfs_extchan_radindex == -1) { DFS_DPRINTK(dfs, ATH_DEBUG_DFS_PHYERR, "%s - phyerr on ext channel\n", __func__); } event->re_chanindex = dfs->dfs_extchan_radindex; DFS_DPRINTK(dfs, ATH_DEBUG_DFS_PHYERR, "%s New extension channel event is added " "to queue\n", __func__); } ATH_DFSQ_LOCK(dfs); STAILQ_INSERT_TAIL(&(dfs->dfs_radarq), event, re_list); ATH_DFSQ_UNLOCK(dfs); } else { cdf_spin_unlock_bh(&ic->chan_lock); } } /* * Schedule the radar/AR task as appropriate. * * XXX isn't a lock needed for ath_radar_tasksched? */ /* * Commenting out the dfs_process_ar_event() since the function is never * called at run time as dfs_arq will be empty and the function * dfs_process_ar_event is obsolete and function definition is removed * as part of dfs_ar.c file * * if (!STAILQ_EMPTY(&dfs->dfs_arq)) * // XXX shouldn't this be a task/timer too? 
* dfs_process_ar_event(dfs, ic->ic_curchan); */ if (!STAILQ_EMPTY(&dfs->dfs_radarq) && !dfs->ath_radar_tasksched) { dfs->ath_radar_tasksched = 1; OS_SET_TIMER(&dfs->ath_dfs_task_timer, 0); } #undef EXT_CH_RADAR_FOUND #undef PRI_CH_RADAR_FOUND #undef EXT_CH_RADAR_EARLY_FOUND }
/* * Start this IC */ void ieee80211_start_running(struct ieee80211com *ic) { OS_SET_TIMER(&ic->ic_inact_timer, IEEE80211_INACT_WAIT*1000); }