示例#1
0
/*
 * Workqueue callback: check whether the baseband has hung and, after
 * repeated positive detections, reset the hardware.
 *
 * Queued (see ath9k_tasklet) when beacons are pending but the hardware
 * no longer reports alive.  A single busy reading is not trusted:
 * sc->hw_busy_count must reach 3 before a reset is issued.
 */
void ath_hw_check(struct work_struct *work)
{
	struct ath_softc *sc = container_of(work, struct ath_softc, hw_check_work);
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	unsigned long flags;
	int busy;

	/* Keep the chip powered while we probe it. */
	ath9k_ps_wakeup(sc);
	if (ath9k_hw_check_alive(sc->sc_ah))
		goto out;	/* hardware responds -- no hang, nothing to do */

	/* cc_lock protects the cycle counters read by the survey code. */
	spin_lock_irqsave(&common->cc_lock, flags);
	busy = ath_update_survey_stats(sc);
	spin_unlock_irqrestore(&common->cc_lock, flags);

	ath_dbg(common, ATH_DBG_RESET, "Possible baseband hang, "
		"busy=%d (try %d)\n", busy, sc->hw_busy_count + 1);
	/* busy >= 99: channel reported (almost) fully busy -- likely hung. */
	if (busy >= 99) {
		if (++sc->hw_busy_count >= 3) {
			/* Third consecutive detection: reset under the PCU lock. */
			spin_lock_bh(&sc->sc_pcu_lock);
			ath_reset(sc, true);
			spin_unlock_bh(&sc->sc_pcu_lock);
		}
	} else if (busy >= 0)
		/* Valid, non-saturated reading: clear the strike counter. */
		sc->hw_busy_count = 0;

out:
	ath9k_ps_restore(sc);
}
示例#2
0
/*
 * Handle a "stuck beacon" condition: too many consecutive beacon misses
 * (sc->beacon.bmisscnt) indicate the hardware beacon engine has wedged,
 * so log the event and reset the chip to recover.
 */
static void ath_bstuck_process(struct ath_softc *sc)
{
	DPRINTF(sc, ATH_DBG_BEACON,
		"stuck beacon; resetting (bmiss count %u)\n",
		sc->beacon.bmisscnt);
	/* false: no need to flush/retry pending frames for this reset */
	ath_reset(sc, false);
}
示例#3
0
static int
ath_sysctl_intmit(SYSCTL_HANDLER_ARGS)
{
	struct ath_softc *sc = arg1;
	int intmit, error;

	intmit = ath_hal_getintmit(sc->sc_ah);
	error = sysctl_handle_int(oidp, &intmit, 0, req);
	if (error || !req->newptr)
		return error;

	/* reusing error; 1 here means "good"; 0 means "fail" */
	error = ath_hal_setintmit(sc->sc_ah, intmit);
	if (! error)
		return EINVAL;

	/*
	 * Reset the hardware here - disabling ANI in the HAL
	 * doesn't reset ANI related registers, so it'll leave
	 * things in an inconsistent state.
	 */
	if (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING)
		ath_reset(sc->sc_ifp);

	return 0;
}
示例#4
0
File: tx99.c  Project: AK101111/linux
/*
 * Tear down TX99 (continuous-transmit test) mode: reset the chip back to
 * normal operation first, then stop the TX99 machinery.
 *
 * NOTE(review): ath_reset() is deliberately called before the wakeup/stop
 * pair -- it re-initializes the hardware state that TX99 disturbed.
 */
static void ath9k_tx99_deinit(struct ath_softc *sc)
{
	ath_reset(sc, NULL);

	/* Hold power-save off while stopping TX99, then restore it. */
	ath9k_ps_wakeup(sc);
	ath9k_tx99_stop(sc);
	ath9k_ps_restore(sc);
}
示例#5
0
File: tx99.c  Project: 020gzh/linux
/*
 * Enter TX99 (continuous-transmit test) mode.
 *
 * Builds the test frame, resets the chip, masks interrupts, drains all
 * TX queues and stops RX, then starts transmitting the frame at
 * sc->tx99_power (clamped to MAX_RATE_POWER).
 *
 * Returns 0 on success, -EINVAL if the driver is in an invalid state,
 * -ENOMEM if the test skb cannot be built, or the ath9k_tx99_send()
 * error code if the transmit fails.
 */
static int ath9k_tx99_init(struct ath_softc *sc)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_tx_control txctl;
	int r;

	if (test_bit(ATH_OP_INVALID, &common->op_flags)) {
		ath_err(common,
			"driver is in invalid state unable to use TX99");
		return -EINVAL;
	}

	sc->tx99_skb = ath9k_build_tx99_skb(sc);
	if (!sc->tx99_skb)
		return -ENOMEM;

	memset(&txctl, 0, sizeof(txctl));
	/* Send the test frame on the voice (highest-priority) queue. */
	txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO];

	ath_reset(sc, NULL);

	ath9k_ps_wakeup(sc);

	/*
	 * Quiesce the chip: no interrupts, no pending TX, no RX --
	 * TX99 wants the hardware entirely to itself.
	 */
	ath9k_hw_disable_interrupts(ah);
	atomic_set(&ah->intr_ref_cnt, -1);
	ath_drain_all_txq(sc);
	ath_stoprecv(sc);

	sc->tx99_state = true;

	/* Keep mac80211 from queueing normal traffic while testing. */
	ieee80211_stop_queues(hw);

	/* MAX_RATE_POWER + 1 is the "unset" sentinel; clamp it. */
	if (sc->tx99_power == MAX_RATE_POWER + 1)
		sc->tx99_power = MAX_RATE_POWER;

	ath9k_hw_tx99_set_txpower(ah, sc->tx99_power);
	r = ath9k_tx99_send(sc, sc->tx99_skb, &txctl);
	if (r) {
		ath_dbg(common, XMIT, "Failed to xmit TX99 skb\n");
		return r;
	}

	/* tx99_power is in half-dB units, hence the /2 for dBm. */
	ath_dbg(common, XMIT, "TX99 xmit started using %d ( %ddBm)\n",
		sc->tx99_power,
		sc->tx99_power / 2);

	/* We leave the hardware awake as it will be chugging on */

	return 0;
}
示例#6
0
/*
 * Sysctl handler for the transmit-power scale factor.
 *
 * Exposes the HAL's current tpscale value; when a new value is written,
 * pushes it into the HAL and resets the interface if it is running so
 * the new scale takes effect.  Returns 0 on success, EINVAL if the HAL
 * rejects the value, or a sysctl/reset errno.
 */
static int
ath_sysctl_tpscale(SYSCTL_HANDLER_ARGS)
{
	struct ath_softc *sc = arg1;
	struct ifnet *ifp = sc->sc_ifp;
	u_int32_t scale;
	int error;

	(void) ath_hal_gettpscale(sc->sc_ah, &scale);
	error = sysctl_handle_int(oidp, &scale, 0, req);
	if (error || !req->newptr)
		return error;	/* read-only access or sysctl failure */

	if (!ath_hal_settpscale(sc->sc_ah, scale))
		return EINVAL;	/* HAL rejected the new scale */

	/* Only a running interface needs the reset to apply the change. */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return ath_reset(ifp);

	return 0;
}
示例#7
0
static int
ath_sysctl_rfkill(SYSCTL_HANDLER_ARGS)
{
	struct ath_softc *sc = arg1;
	struct ifnet *ifp = sc->sc_ifp;
	struct ath_hal *ah = sc->sc_ah;
	u_int rfkill = ath_hal_getrfkill(ah);
	int error;

	error = sysctl_handle_int(oidp, &rfkill, 0, req);
	if (error || !req->newptr)
		return error;
	if (rfkill == ath_hal_getrfkill(ah))	/* unchanged */
		return 0;
	if (!ath_hal_setrfkill(ah, rfkill))
		return EINVAL;
	return (ifp->if_drv_flags & IFF_DRV_RUNNING) ? ath_reset(ifp) : 0;
}
示例#8
0
/*
 * Detect a hung RX PLL from its square-sum reading and recover by reset.
 *
 * A reading of 0x40000 or more counts as one strike; three consecutive
 * strikes (roughly 500 ms of hang) trigger a chip reset under the PCU
 * lock.  Any healthy reading clears the strike counter.  The counter is
 * function-static, shared across calls.
 */
static void ath_hw_pll_rx_hang_check(struct ath_softc *sc, u32 pll_sqsum)
{
	static int count;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	if (pll_sqsum < 0x40000) {
		/* Healthy reading: start counting from scratch. */
		count = 0;
		return;
	}

	if (++count != 3)
		return;	/* not enough consecutive strikes yet */

	/* Rx is hung for more than 500ms. Reset it */
	ath_dbg(common, ATH_DBG_RESET,
		"Possible RX hang, resetting");
	spin_lock_bh(&sc->sc_pcu_lock);
	ath_reset(sc, true);
	spin_unlock_bh(&sc->sc_pcu_lock);
	count = 0;
}
示例#9
0
/*
 * XXX: temporary export used to exercise the reset path.
 *
 * Perform a full internal reset of the device.  For ATH_RESET_NOLOSS
 * resets, frames already queued in software and hardware are preserved
 * (RESET_RETRY_TXQ) and IBSS radar detection is re-enabled afterwards if
 * it was on; otherwise a plain stop/reset/start sequence is used.
 * With flow control enabled, the OS transmit queue is stopped for the
 * duration of the reset and woken again at the end.
 */
void ath_internal_reset(struct ath_softc *sc)
{
    struct ieee80211com *ic = (struct ieee80211com *)(sc->sc_ieee);
    int enable_radar = 0;
#if ATH_SUPPORT_FLOWMAC_MODULE
    if (sc->sc_osnetif_flowcntrl) {
        ath_netif_stop_queue(sc);
    }
#endif
    if ( (sc->sc_reset_type ==  ATH_RESET_NOLOSS) ){
        /* Remember whether radar detection was active so we can restore it. */
        if (ath_hal_getrxfilter(sc->sc_ah) & HAL_RX_FILTER_PHYRADAR) {
            if (sc->sc_opmode == HAL_M_IBSS) {
                printk("%s: radar was enabled before reset\n", __func__);
                enable_radar = 1;
            }
        }
        /* setting the no-flush, to preserve the packets already 
         * there in SW and HW queues. */
        ath_reset_start(sc, RESET_RETRY_TXQ, 0, 0);
        /*
         * when we pass RESET_RETRY_TXQ flag to ath_reset_start, 
         * ath_reset() will be called inside ath_reset_start().
         * So ath_reset() is commented out here for now. 
         */
        //ath_reset(sc);
        ath_reset_end(sc, RESET_RETRY_TXQ);
        if (enable_radar) {
            printk("%s: enabling radar after reset\n", __func__);
            ic->ic_enable_radar(ic, 1);
        }
    } else {
        /* Lossy path: plain stop / reset / restart sequence. */
        ath_reset_start(sc, 0, 0, 0);
        ath_reset(sc);
        ath_reset_end(sc, 0);
    }

#if ATH_SUPPORT_FLOWMAC_MODULE
    if (sc->sc_osnetif_flowcntrl) {
        ath_netif_wake_queue(sc);
    }
#endif
}
示例#10
0
/*
 * Interrupt bottom half: dispatch the interrupt status latched by the
 * ISR in sc->intrstatus.
 *
 * Fatal / baseband-watchdog interrupts short-circuit into a full chip
 * reset.  Otherwise this handles RX (legacy or EDMA high/low priority),
 * TX completions, TSF-out-of-range power-save resync, and generic
 * timers, then re-enables hardware interrupts.  All normal processing
 * runs with the chip awake and sc_pcu_lock held.
 */
void ath9k_tasklet(unsigned long data)
{
	struct ath_softc *sc = (struct ath_softc *)data;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	u32 status = sc->intrstatus;
	u32 rxmask;

	/* Fatal conditions: reset immediately, skip all other handling. */
	if ((status & ATH9K_INT_FATAL) ||
	    (status & ATH9K_INT_BB_WATCHDOG)) {
		spin_lock(&sc->sc_pcu_lock);
		ath_reset(sc, true);
		spin_unlock(&sc->sc_pcu_lock);
		return;
	}

	ath9k_ps_wakeup(sc);
	spin_lock(&sc->sc_pcu_lock);

	/*
	 * Only run the baseband hang check if beacons stop working in AP or
	 * IBSS mode, because it has a high false positive rate. For station
	 * mode it should not be necessary, since the upper layers will detect
	 * this through a beacon miss automatically and the following channel
	 * change will trigger a hardware reset anyway
	 */
	if (ath9k_hw_numtxpending(ah, sc->beacon.beaconq) != 0 &&
	    !ath9k_hw_check_alive(ah))
		ieee80211_queue_work(sc->hw, &sc->hw_check_work);

	if ((status & ATH9K_INT_TSFOOR) && sc->ps_enabled) {
		/*
		 * TSF sync does not look correct; remain awake to sync with
		 * the next Beacon.
		 */
		ath_dbg(common, ATH_DBG_PS,
			"TSFOOR - Sync with next Beacon\n");
		sc->ps_flags |= PS_WAIT_FOR_BEACON | PS_BEACON_SYNC |
				PS_TSFOOR_SYNC;
	}

	/* EDMA chips signal RX via split high/low priority interrupts. */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		rxmask = (ATH9K_INT_RXHP | ATH9K_INT_RXLP | ATH9K_INT_RXEOL |
			  ATH9K_INT_RXORN);
	else
		rxmask = (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN);

	if (status & rxmask) {
		/* Check for high priority Rx first */
		if ((ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
		    (status & ATH9K_INT_RXHP))
			ath_rx_tasklet(sc, 0, true);

		ath_rx_tasklet(sc, 0, false);
	}

	if (status & ATH9K_INT_TX) {
		if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
			ath_tx_edma_tasklet(sc);
		else
			ath_tx_tasklet(sc);
	}

	/* Generic timers are only used by 3-wire bluetooth coexistence. */
	if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
		if (status & ATH9K_INT_GENTIMER)
			ath_gen_timer_isr(sc->sc_ah);

	/* re-enable hardware interrupt */
	ath9k_hw_enable_interrupts(ah);

	spin_unlock(&sc->sc_pcu_lock);
	ath9k_ps_restore(sc);
}
示例#11
0
/*
 * Pause the transmit traffic of a specified vap and requeue the traffic
 * back onto the corresponding nodes (the tid queues of those nodes).
 * If vap is NULL, pause all vaps.  Handles both aggregate and
 * non-aggregate frames; management frames are dropped (completed with
 * error).  The caller must have paused all nodes (all tids of the
 * nodes) before pausing the vap.
 *
 * If the hardware TxDMA cannot be stopped, the HAL is reset once and
 * the whole pause sequence is retried (the "restart" label); a second
 * failure is logged and ignored.
 */
static void ath_tx_vap_pause_txqs(struct ath_softc *sc,  struct ath_vap *avp )
{
    u_int32_t i;
    int npend = 0;
    struct ath_hal *ah = sc->sc_ah;
    u_int8_t q_needs_pause[HAL_NUM_TX_QUEUES];
    int restart_after_reset=0;

    spin_lock(&(sc)->sc_vap_pause_lock);// This lock is only used for escalate irql
restart:
    npend = 0;

    ath_vap_pause_set_in_progress(sc);

    OS_MEMZERO(q_needs_pause, sizeof(q_needs_pause));
    /* 
     * stop all the HAL data queues
     */
    if (!sc->sc_invalid) {
#ifndef ATH_SUPPORT_HTC
        struct ath_txq *txq=NULL;
        if (avp == NULL && sc->sc_fastabortenabled) {
           /* Pausing everything: abort DMA on all queues at once. */
           (void) ath_hal_aborttxdma(ah);
            for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
                if (ATH_TXQ_SETUP(sc, i)) {
                    int n_q_pending=0;
                    txq = &sc->sc_txq[i];
                    /* The TxDMA may not really be stopped.
                     * Double check the hal tx pending count
                     */
                     n_q_pending = ath_hal_numtxpending(ah, sc->sc_txq[i].axq_qnum);
                     if (n_q_pending) {
                        (void) ath_hal_stoptxdma(ah, txq->axq_qnum, 0);
                        npend += ath_hal_numtxpending(ah, sc->sc_txq[i].axq_qnum);
                     }
                }
                /*
                 * at this point all Data queues are paused
                 * all the queues need to processed and restarted.
                 */
                q_needs_pause[i] = AH_TRUE; 
            }
        } else {
            /* Selective pause: decide per queue before aborting DMA. */
            for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
                if (ATH_TXQ_SETUP(sc, i)) {
                    txq = &sc->sc_txq[i];
                    /* check if the queue needs to be paused */
                    q_needs_pause[i] = ath_tx_vap_check_txq_needs_pause(sc,txq,avp);
                }
            }

            (void) ath_hal_aborttxdma(ah);

            /* Count frames the hardware failed to drop. */
            for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
                if (ATH_TXQ_SETUP(sc, i)) {
                    int n_q_pending=0;
                    txq = &sc->sc_txq[i];
                     n_q_pending = ath_hal_numtxpending(ah, sc->sc_txq[i].axq_qnum);
                     if (n_q_pending) {
                        npend += ath_hal_numtxpending(ah, sc->sc_txq[i].axq_qnum);
                     }
                }
            }
        }
#endif
    }

    /* First DMA-stop failure: reset the HAL and retry the pause once. */
    if (npend && !restart_after_reset) {
        ath_vap_pause_clear_in_progress(sc);
        spin_unlock(&(sc)->sc_vap_pause_lock);
#ifdef AR_DEBUG		
       ath_dump_descriptors(sc);
#endif
        /* TxDMA not stopped, reset the hal */
        DPRINTF(sc, ATH_DEBUG_RESET, "%s: Unable to stop TxDMA. Reset HAL!\n", __func__);

        ath_reset_start(sc, 0, 0, 0);
        ath_reset(sc);
        ath_reset_end(sc, 0);
        restart_after_reset=1;
        spin_lock(&(sc)->sc_vap_pause_lock);
        goto restart;
    }

    if (npend && restart_after_reset) {
        /* TxDMA not stopped, reset the hal */
        DPRINTF(sc, ATH_DEBUG_RESET, "%s: Unable to stop TxDMA Even after Reset, ignore and continue \n", __func__);
    }
    /* TO-DO need to handle cab queue */

    /* at this point the HW xmit should have been completely stopped. */
    if (sc->sc_enhanceddmasupport) {
        ath_tx_edma_process(sc);
    }
    /* Drain completions and requeue traffic for every paused queue. */
    for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
        if (q_needs_pause[i]) {
#ifdef ATH_TX_BUF_FLOW_CNTL
            struct ath_txq *txq = &sc->sc_txq[i];
#if ATH_DEBUG == 0
            /* Fix the compile error when ATH_DEBUG = 0 */
            txq = txq;
#endif 
            DPRINTF(sc, ATH_DEBUG_ANY,"#####%s : %d  qnum %d buf_used %d \n",
                            __func__,__LINE__,txq->axq_qnum, txq->axq_num_buf_used );
#endif
            if (!sc->sc_enhanceddmasupport) {
                ath_tx_processq(sc, &sc->sc_txq[i]); /* process any frames that are completed */
            }
            ath_tx_vap_pause_txq(sc, &sc->sc_txq[i], avp);
#ifdef ATH_TX_BUF_FLOW_CNTL
            DPRINTF(sc, ATH_DEBUG_ANY,"#####%s : %d  qnum %d buf_used %d  \n",
                            __func__,__LINE__,txq->axq_qnum, txq->axq_num_buf_used);
#endif
        }
    }
    ath_vap_pause_clear_in_progress(sc);
    spin_unlock(&(sc)->sc_vap_pause_lock);
}