/*
 * ieee80211com-based variant of dfs_attach(): allocate and zero the
 * per-device ath_dfs state, set up the radar/AR event queues and the
 * pulse line, allocate the per-type radar filters and the 256-entry
 * radar lookup table, and seed the radar-info defaults.
 * Returns 0 on success, non-zero on failure.
 */
int
dfs_attach(struct ieee80211com *ic)
{
    int i, n;
    struct ath_dfs *dfs = (struct ath_dfs *)ic->ic_dfs;
    struct ath_dfs_radar_tab_info radar_info;
#define N(a) (sizeof(a)/sizeof(a[0]))

    if (dfs != NULL) {
        DFS_DPRINTK(dfs, ATH_DEBUG_DFS1,
            "%s: ic_dfs was not NULL\n", __func__);
        return 1;
    }

    if (ic->ic_dfs_state.ignore_dfs) {
        DFS_DPRINTK(dfs, ATH_DEBUG_DFS1,
            "%s: ignoring dfs\n", __func__);
        return 0;
    }

    dfs = (struct ath_dfs *)OS_MALLOC(ic->ic_osdev,
        sizeof(struct ath_dfs), GFP_ATOMIC);
    if (dfs == NULL) {
        DFS_DPRINTK(dfs, ATH_DEBUG_DFS1,
            "%s: ath_dfs allocation failed\n", __func__);
        return 1;
    }

    OS_MEMZERO(dfs, sizeof(struct ath_dfs));
    ic->ic_dfs = (void *)dfs;
    dfs->ic = ic;
    ic->ic_dfs_debug = dfs_get_debug_info;
#ifndef ATH_DFS_RADAR_DETECTION_ONLY
    dfs->dfs_nol = NULL;
#endif

    /*
     * Zero out radar_info. It's possible that the attach function
     * won't fetch an initial regulatory configuration; you really
     * do want to ensure that the contents indicate there aren't
     * any filters.
     */
    OS_MEMZERO(&radar_info, sizeof(radar_info));
    ic->ic_dfs_attach(ic, &dfs->dfs_caps, &radar_info);
    dfs_clear_stats(ic);
    dfs->dfs_event_log_on = 0;
    OS_INIT_TIMER(ic->ic_osdev, &(dfs->ath_dfs_task_timer),
        dfs_task, (void *)(ic));
#ifndef ATH_DFS_RADAR_DETECTION_ONLY
    OS_INIT_TIMER(ic->ic_osdev, &(dfs->ath_dfstesttimer),
        dfs_testtimer_task, (void *)ic);
    dfs->ath_dfs_cac_time = ATH_DFS_WAIT_MS;
    dfs->ath_dfstesttime = ATH_DFS_TEST_RETURN_PERIOD_MS;
#endif
    ATH_DFSQ_LOCK_INIT(dfs);
    STAILQ_INIT(&dfs->dfs_radarq);
    ATH_ARQ_LOCK_INIT(dfs);
    STAILQ_INIT(&dfs->dfs_arq);
    STAILQ_INIT(&(dfs->dfs_eventq));
    ATH_DFSEVENTQ_LOCK_INIT(dfs);

    dfs->events = (struct dfs_event *)OS_MALLOC(ic->ic_osdev,
        sizeof(struct dfs_event) * DFS_MAX_EVENTS, GFP_ATOMIC);
    if (dfs->events == NULL) {
        OS_FREE(dfs);
        ic->ic_dfs = NULL;
        DFS_PRINTK("%s: events allocation failed\n", __func__);
        return 1;
    }
    for (i = 0; i < DFS_MAX_EVENTS; i++) {
        STAILQ_INSERT_TAIL(&(dfs->dfs_eventq), &dfs->events[i],
            re_list);
    }

    dfs->pulses = (struct dfs_pulseline *)OS_MALLOC(ic->ic_osdev,
        sizeof(struct dfs_pulseline), GFP_ATOMIC);
    if (dfs->pulses == NULL) {
        OS_FREE(dfs->events);
        dfs->events = NULL;
        OS_FREE(dfs);
        ic->ic_dfs = NULL;
        DFS_PRINTK("%s: pulse buffer allocation failed\n", __func__);
        return 1;
    }
    dfs->pulses->pl_lastelem = DFS_MAX_PULSE_BUFFER_MASK;

    /* Allocate memory for radar filters */
    for (n = 0; n < DFS_MAX_RADAR_TYPES; n++) {
        dfs->dfs_radarf[n] = (struct dfs_filtertype *)OS_MALLOC(
            ic->ic_osdev, sizeof(struct dfs_filtertype), GFP_ATOMIC);
        if (dfs->dfs_radarf[n] == NULL) {
            DFS_PRINTK(
                "%s: cannot allocate memory for radar filter types\n",
                __func__);
            goto bad1;
        }
        OS_MEMZERO(dfs->dfs_radarf[n], sizeof(struct dfs_filtertype));
    }

    /* Allocate memory for radar table */
    dfs->dfs_radartable = (int8_t **)OS_MALLOC(ic->ic_osdev,
        256 * sizeof(int8_t *), GFP_ATOMIC);
    if (dfs->dfs_radartable == NULL) {
        DFS_PRINTK("%s: cannot allocate memory for radar table\n",
            __func__);
        goto bad1;
    }
    for (n = 0; n < 256; n++) {
        dfs->dfs_radartable[n] = OS_MALLOC(ic->ic_osdev,
            DFS_MAX_RADAR_OVERLAP * sizeof(int8_t), GFP_ATOMIC);
        if (dfs->dfs_radartable[n] == NULL) {
            DFS_PRINTK(
                "%s: cannot allocate memory for radar table entry\n",
                __func__);
            goto bad2;
        }
    }

    if (usenol == 0)
        DFS_PRINTK("%s: NOL disabled\n", __func__);
    else if (usenol == 2)
        DFS_PRINTK("%s: NOL disabled; no CSA\n", __func__);
    dfs->dfs_rinfo.rn_use_nol = usenol;

    /* Init the cached extension channel busy for false alarm reduction */
    dfs->dfs_rinfo.ext_chan_busy_ts = ic->ic_get_TSF64(ic);
    dfs->dfs_rinfo.dfs_ext_chan_busy = 0;
    /* Init the Bin5 chirping related data */
    dfs->dfs_rinfo.dfs_bin5_chirp_ts = dfs->dfs_rinfo.ext_chan_busy_ts;
    dfs->dfs_rinfo.dfs_last_bin5_dur = MAX_BIN5_DUR;
    dfs->dfs_b5radars = NULL;

    /*
     * If dfs_init_radar_filters() fails, we can abort here and
     * reconfigure when the first valid channel + radar config
     * is available.
     */
    if (dfs_init_radar_filters(ic, &radar_info)) {
        DFS_PRINTK("%s: radar filter initialization failed\n",
            __func__);
        return 1;
    }

    dfs->ath_dfs_false_rssi_thres = RSSI_POSSIBLY_FALSE;
    dfs->ath_dfs_peak_mag = SEARCH_FFT_REPORT_PEAK_MAG_THRSH;
    dfs->dfs_phyerr_freq_min = 0x7fffffff;
    dfs->dfs_phyerr_freq_max = 0;
    dfs->dfs_phyerr_queued_count = 0;
    dfs->dfs_phyerr_w53_counter = 0;
    dfs->dfs_pri_multiplier = 2;
    dfs->ath_dfs_nol_timeout = DFS_NOL_TIMEOUT_S;
    return 0;

bad2:
    OS_FREE(dfs->dfs_radartable);
    dfs->dfs_radartable = NULL;
bad1:
    for (n = 0; n < DFS_MAX_RADAR_TYPES; n++) {
        if (dfs->dfs_radarf[n] != NULL) {
            OS_FREE(dfs->dfs_radarf[n]);
            dfs->dfs_radarf[n] = NULL;
        }
    }
    if (dfs->pulses) {
        OS_FREE(dfs->pulses);
        dfs->pulses = NULL;
    }
    if (dfs->events) {
        OS_FREE(dfs->events);
        dfs->events = NULL;
    }
    if (ic->ic_dfs) {
        OS_FREE(ic->ic_dfs);
        ic->ic_dfs = NULL;
    }
    return 1;
#undef N
}
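/*
 * For reference, a minimal teardown sketch that mirrors the
 * allocations above.  This is an illustration only: the name
 * dfs_detach_sketch is an assumption, not the driver's actual
 * detach routine, and a real detach path would also cancel the
 * two timers and release any NOL entries before freeing state.
 */
static void
dfs_detach_sketch(struct ieee80211com *ic)
{
    struct ath_dfs *dfs = (struct ath_dfs *)ic->ic_dfs;
    int n;

    if (dfs == NULL)
        return;

    if (dfs->dfs_radartable != NULL) {
        /* Free the 256 per-entry rows, then the table of pointers. */
        for (n = 0; n < 256; n++) {
            if (dfs->dfs_radartable[n] != NULL)
                OS_FREE(dfs->dfs_radartable[n]);
        }
        OS_FREE(dfs->dfs_radartable);
    }
    for (n = 0; n < DFS_MAX_RADAR_TYPES; n++) {
        if (dfs->dfs_radarf[n] != NULL)
            OS_FREE(dfs->dfs_radarf[n]);
    }
    if (dfs->pulses != NULL)
        OS_FREE(dfs->pulses);
    if (dfs->events != NULL)
        OS_FREE(dfs->events);
    OS_FREE(dfs);
    ic->ic_dfs = NULL;
}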
/*
 * Older ath_softc-based variant of dfs_attach(): the same setup as
 * above, but capabilities (combined radar RSSI, extension-channel
 * DFS, enhanced DFS) are queried directly from the HAL, and the
 * radar filter and table allocations happen only when the HAL
 * reports radar PHY-error support.
 */
int
dfs_attach(struct ath_softc *sc)
{
    int i, n;
    struct ath_dfs *dfs = sc->sc_dfs;
#define N(a) (sizeof(a)/sizeof(a[0]))

    if (dfs != NULL) {
        DFS_DPRINTK(sc, ATH_DEBUG_DFS,
            "%s: sc_dfs was not NULL\n", __func__);
        return 1;
    }

    dfs = (struct ath_dfs *)OS_MALLOC(sc->sc_osdev,
        sizeof(struct ath_dfs), GFP_KERNEL);
    if (dfs == NULL) {
        DFS_DPRINTK(sc, ATH_DEBUG_DFS,
            "%s: ath_dfs allocation failed\n", __func__);
        return 1;
    }

    OS_MEMZERO(dfs, sizeof(struct ath_dfs));
    sc->sc_dfs = dfs;
    dfs->dfs_nol = NULL;
    dfs_clear_stats(sc);

    /*
     * Get capability information: can extension-channel radar be
     * detected, and should we use combined radar RSSI or not?
     */
    if (ath_hal_getcapability(sc->sc_ah, HAL_CAP_COMBINED_RADAR_RSSI,
        0, 0) == HAL_OK) {
        sc->sc_dfs->sc_dfs_combined_rssi_ok = 1;
    } else {
        sc->sc_dfs->sc_dfs_combined_rssi_ok = 0;
    }
    if (ath_hal_getcapability(sc->sc_ah, HAL_CAP_EXT_CHAN_DFS,
        0, 0) == HAL_OK) {
        sc->sc_dfs->sc_dfs_ext_chan_ok = 1;
    } else {
        sc->sc_dfs->sc_dfs_ext_chan_ok = 0;
    }
    if (ath_hal_hasenhanceddfssupport(sc->sc_ah)) {
        sc->sc_dfs->sc_dfs_use_enhancement = 1;
        DFS_DPRINTK(sc, ATH_DEBUG_DFS,
            "%s: use DFS enhancements\n", __func__);
    } else {
        sc->sc_dfs->sc_dfs_use_enhancement = 0;
    }

    sc->sc_dfs->sc_dfs_cac_time = ATH_DFS_WAIT_MS;
    sc->sc_dfs->sc_dfstesttime = ATH_DFS_TEST_RETURN_PERIOD_MS;

    ATH_DFSQ_LOCK_INIT(dfs);
    STAILQ_INIT(&dfs->dfs_radarq);
    ATH_ARQ_LOCK_INIT(dfs);
    STAILQ_INIT(&dfs->dfs_arq);
    STAILQ_INIT(&(dfs->dfs_eventq));
    ATH_DFSEVENTQ_LOCK_INIT(dfs);

    dfs->events = (struct dfs_event *)OS_MALLOC(sc->sc_osdev,
        sizeof(struct dfs_event) * DFS_MAX_EVENTS, GFP_KERNEL);
    if (dfs->events == NULL) {
        OS_FREE(dfs);
        sc->sc_dfs = NULL;
        DFS_DPRINTK(sc, ATH_DEBUG_DFS,
            "%s: events allocation failed\n", __func__);
        return 1;
    }
    for (i = 0; i < DFS_MAX_EVENTS; i++) {
        STAILQ_INSERT_TAIL(&(dfs->dfs_eventq), &dfs->events[i],
            re_list);
    }

    dfs->pulses = (struct dfs_pulseline *)OS_MALLOC(sc->sc_osdev,
        sizeof(struct dfs_pulseline), GFP_KERNEL);
    if (dfs->pulses == NULL) {
        OS_FREE(dfs->events);
        dfs->events = NULL;
        OS_FREE(dfs);
        sc->sc_dfs = NULL;
        DFS_DPRINTK(sc, ATH_DEBUG_DFS,
            "%s: pulse buffer allocation failed\n", __func__);
        return 1;
    }
    dfs->pulses->pl_lastelem = DFS_MAX_PULSE_BUFFER_MASK;

#ifdef ATH_ENABLE_AR
    if (ath_hal_getcapability(sc->sc_ah, HAL_CAP_PHYDIAG,
        HAL_CAP_AR, NULL) == HAL_OK) {
        dfs_reset_ar(sc);
        dfs_reset_arq(sc);
        dfs->dfs_proc_phyerr |= DFS_AR_EN;
    }
#endif

    if (ath_hal_getcapability(sc->sc_ah, HAL_CAP_PHYDIAG,
        HAL_CAP_RADAR, NULL) == HAL_OK) {
        u_int32_t val;

        /*
         * If we have fast diversity capability, read off the
         * strong-signal fast diversity count set in the ini file
         * and store it, so we can restore the value when radar
         * is disabled.
         */
        if (ath_hal_getcapability(sc->sc_ah, HAL_CAP_DIVERSITY,
            HAL_CAP_STRONG_DIV, &val) == HAL_OK) {
            dfs->dfs_rinfo.rn_fastdivGCval = val;
        }
        dfs->dfs_proc_phyerr |= DFS_RADAR_EN;

        /* Allocate memory for radar filters */
        for (n = 0; n < DFS_MAX_RADAR_TYPES; n++) {
            dfs->dfs_radarf[n] = (struct dfs_filtertype *)OS_MALLOC(
                sc->sc_osdev, sizeof(struct dfs_filtertype),
                GFP_KERNEL);
            if (dfs->dfs_radarf[n] == NULL) {
                DFS_DPRINTK(sc, ATH_DEBUG_DFS,
                    "%s: cannot allocate memory for radar filter types\n",
                    __func__);
                goto bad1;
            }
            OS_MEMZERO(dfs->dfs_radarf[n],
                sizeof(struct dfs_filtertype));
        }

        /* Allocate memory for radar table */
        dfs->dfs_radartable = (int8_t **)OS_MALLOC(sc->sc_osdev,
            256 * sizeof(int8_t *), GFP_KERNEL);
        if (dfs->dfs_radartable == NULL) {
            DFS_DPRINTK(sc, ATH_DEBUG_DFS,
                "%s: cannot allocate memory for radar table\n",
                __func__);
            goto bad1;
        }
        for (n = 0; n < 256; n++) {
            dfs->dfs_radartable[n] = OS_MALLOC(sc->sc_osdev,
                DFS_MAX_RADAR_OVERLAP * sizeof(int8_t), GFP_KERNEL);
            if (dfs->dfs_radartable[n] == NULL) {
                DFS_DPRINTK(sc, ATH_DEBUG_DFS,
                    "%s: cannot allocate memory for radar table entry\n",
                    __func__);
                goto bad2;
            }
        }

        if (usenol != 1) {
            DFS_DPRINTK(sc, ATH_DEBUG_DFS,
                "%s: disabling channel NOL\n", __func__);
        }
        dfs->dfs_rinfo.rn_use_nol = usenol;

        /* Init the cached extension channel busy for false alarm reduction */
        dfs->dfs_rinfo.ext_chan_busy_ts = ath_hal_gettsf64(sc->sc_ah);
        dfs->dfs_rinfo.dfs_ext_chan_busy = 0;
        /* Init the Bin5 chirping related data */
        dfs->dfs_rinfo.dfs_bin5_chirp_ts =
            dfs->dfs_rinfo.ext_chan_busy_ts;
        dfs->dfs_rinfo.dfs_last_bin5_dur = MAX_BIN5_DUR;
        dfs->dfs_b5radars = NULL;

        if (dfs_init_radar_filters(sc)) {
            DFS_DPRINTK(sc, ATH_DEBUG_DFS,
                "%s: radar filter initialization failed\n",
                __func__);
            return 1;
        }
    }
    return 0;

bad2:
    OS_FREE(dfs->dfs_radartable);
    dfs->dfs_radartable = NULL;
bad1:
    for (n = 0; n < DFS_MAX_RADAR_TYPES; n++) {
        if (dfs->dfs_radarf[n] != NULL) {
            OS_FREE(dfs->dfs_radarf[n]);
            dfs->dfs_radarf[n] = NULL;
        }
    }
    if (dfs->pulses) {
        OS_FREE(dfs->pulses);
        dfs->pulses = NULL;
    }
    if (dfs->events) {
        OS_FREE(dfs->events);
        dfs->events = NULL;
    }
    if (sc->sc_dfs) {
        OS_FREE(sc->sc_dfs);
        sc->sc_dfs = NULL;
    }
    return 1;
#undef N
}
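/*
 * Hypothetical call site, shown for illustration only: the wrapper
 * name ath_dfs_init_sketch and its error handling are assumptions,
 * not the driver's actual attach path.  Both variants of
 * dfs_attach() above return 0 on success and 1 on failure, so a
 * caller only needs to test the return value.
 */
static int
ath_dfs_init_sketch(struct ath_softc *sc)
{
    if (dfs_attach(sc)) {
        /* DFS state could not be allocated; run without radar detection. */
        return -1;
    }
    return 0;
}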