Example #1
/* The firmware does not support setting the coverage class. Instead this
 * function monitors and modifies the corresponding MAC registers.
 */
static void ath10k_hw_qca988x_set_coverage_class(struct ath10k *ar,
						 s16 value)
{
	u32 slottime_reg;
	u32 slottime;
	u32 timeout_reg;
	u32 ack_timeout;
	u32 cts_timeout;
	u32 phyclk_reg;
	u32 phyclk;
	u64 fw_dbglog_mask;
	u32 fw_dbglog_level;

	mutex_lock(&ar->conf_mutex);

	/* Only modify registers if the core is started. */
	if ((ar->state != ATH10K_STATE_ON) &&
	    (ar->state != ATH10K_STATE_RESTARTED)) {
		spin_lock_bh(&ar->data_lock);
		/* Store config value for when radio boots up */
		ar->fw_coverage.coverage_class = value;
		spin_unlock_bh(&ar->data_lock);
		goto unlock;
	}

	/* Retrieve the current values of the two registers that need to be
	 * adjusted.
	 */
	slottime_reg = ath10k_hif_read32(ar, WLAN_MAC_BASE_ADDRESS +
					     WAVE1_PCU_GBL_IFS_SLOT);
	timeout_reg = ath10k_hif_read32(ar, WLAN_MAC_BASE_ADDRESS +
					    WAVE1_PCU_ACK_CTS_TIMEOUT);
	phyclk_reg = ath10k_hif_read32(ar, WLAN_MAC_BASE_ADDRESS +
					   WAVE1_PHYCLK);
	phyclk = MS(phyclk_reg, WAVE1_PHYCLK_USEC) + 1;

	if (value < 0)
		value = ar->fw_coverage.coverage_class;

	/* Break out if the coverage class and registers have the expected
	 * value.
	 */
	if (value == ar->fw_coverage.coverage_class &&
	    slottime_reg == ar->fw_coverage.reg_slottime_conf &&
	    timeout_reg == ar->fw_coverage.reg_ack_cts_timeout_conf &&
	    phyclk_reg == ar->fw_coverage.reg_phyclk)
		goto unlock;

	/* Store new initial register values from the firmware. */
	if (slottime_reg != ar->fw_coverage.reg_slottime_conf)
		ar->fw_coverage.reg_slottime_orig = slottime_reg;
	if (timeout_reg != ar->fw_coverage.reg_ack_cts_timeout_conf)
		ar->fw_coverage.reg_ack_cts_timeout_orig = timeout_reg;
	ar->fw_coverage.reg_phyclk = phyclk_reg;

	/* Calculate new value based on the (original) firmware calculation. */
	slottime_reg = ar->fw_coverage.reg_slottime_orig;
	timeout_reg = ar->fw_coverage.reg_ack_cts_timeout_orig;

	/* Do some sanity checks on the slottime register. */
	if (slottime_reg % phyclk) {
		ath10k_warn(ar,
			    "failed to set coverage class: expected integer microsecond value in register\n");

		goto store_regs;
	}

	slottime = MS(slottime_reg, WAVE1_PCU_GBL_IFS_SLOT);
	slottime = slottime / phyclk;
	if (slottime != 9 && slottime != 20) {
		ath10k_warn(ar,
			    "failed to set coverage class: expected slot time of 9 or 20us in HW register. It is %uus.\n",
			    slottime);

		goto store_regs;
	}

	/* Recalculate the register values by adding the additional propagation
	 * delay (3us per coverage class).
	 */

	slottime = MS(slottime_reg, WAVE1_PCU_GBL_IFS_SLOT);
	slottime += value * 3 * phyclk;
	slottime = min_t(u32, slottime, WAVE1_PCU_GBL_IFS_SLOT_MAX);
	slottime = SM(slottime, WAVE1_PCU_GBL_IFS_SLOT);
	slottime_reg = (slottime_reg & ~WAVE1_PCU_GBL_IFS_SLOT_MASK) | slottime;

	/* Update ack timeout (lower halfword). */
	ack_timeout = MS(timeout_reg, WAVE1_PCU_ACK_CTS_TIMEOUT_ACK);
	ack_timeout += 3 * value * phyclk;
	ack_timeout = min_t(u32, ack_timeout, WAVE1_PCU_ACK_CTS_TIMEOUT_MAX);
	ack_timeout = SM(ack_timeout, WAVE1_PCU_ACK_CTS_TIMEOUT_ACK);

	/* Update cts timeout (upper halfword). */
	cts_timeout = MS(timeout_reg, WAVE1_PCU_ACK_CTS_TIMEOUT_CTS);
	cts_timeout += 3 * value * phyclk;
	cts_timeout = min_t(u32, cts_timeout, WAVE1_PCU_ACK_CTS_TIMEOUT_MAX);
	cts_timeout = SM(cts_timeout, WAVE1_PCU_ACK_CTS_TIMEOUT_CTS);

	timeout_reg = ack_timeout | cts_timeout;

	ath10k_hif_write32(ar,
			   WLAN_MAC_BASE_ADDRESS + WAVE1_PCU_GBL_IFS_SLOT,
			   slottime_reg);
	ath10k_hif_write32(ar,
			   WLAN_MAC_BASE_ADDRESS + WAVE1_PCU_ACK_CTS_TIMEOUT,
			   timeout_reg);

	/* Ensure we have a debug level of WARN set for the case that the
	 * coverage class is larger than 0. This is important as we need to
	 * set the registers again if the firmware does an internal reset and
	 * this way we will be notified of the event.
	 */
	fw_dbglog_mask = ath10k_debug_get_fw_dbglog_mask(ar);
	fw_dbglog_level = ath10k_debug_get_fw_dbglog_level(ar);

	if (value > 0) {
		if (fw_dbglog_level > ATH10K_DBGLOG_LEVEL_WARN)
			fw_dbglog_level = ATH10K_DBGLOG_LEVEL_WARN;
		fw_dbglog_mask = ~0;
	}

	ath10k_wmi_dbglog_cfg(ar, fw_dbglog_mask, fw_dbglog_level);

store_regs:
	/* After an error we will not retry setting the coverage class. */
	spin_lock_bh(&ar->data_lock);
	ar->fw_coverage.coverage_class = value;
	spin_unlock_bh(&ar->data_lock);

	ar->fw_coverage.reg_slottime_conf = slottime_reg;
	ar->fw_coverage.reg_ack_cts_timeout_conf = timeout_reg;

unlock:
	mutex_unlock(&ar->conf_mutex);
}
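
The slot-time and ACK/CTS timeout adjustment above boils down to: extract a field that holds microseconds in PHY-clock ticks, add 3 us per coverage class, clamp to the field width, and write it back. The standalone sketch below illustrates that arithmetic with a made-up field layout (the SLOT_* constants and helpers are illustrative, not the driver's WAVE1_* definitions or its MS()/SM() macros):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical field layout: bits [15:0] hold the slot time in PHY clock
 * ticks.  The real WAVE1_* masks and the driver's MS()/SM() macros differ.
 */
#define SLOT_MASK	0x0000ffffu
#define SLOT_LSB	0
#define SLOT_MAX	0xffffu

static uint32_t field_get(uint32_t reg)
{
	return (reg & SLOT_MASK) >> SLOT_LSB;
}

static uint32_t field_set(uint32_t val)
{
	return (val << SLOT_LSB) & SLOT_MASK;
}

/* Add 3 us of propagation delay per coverage class, keeping the value in
 * clock ticks (phyclk = ticks per microsecond) and clamping to the field
 * width -- the same extract/adjust/clamp/insert steps as above.
 */
static uint32_t adjust_slottime(uint32_t reg, int coverage_class, uint32_t phyclk)
{
	uint32_t ticks = field_get(reg) + 3u * coverage_class * phyclk;

	if (ticks > SLOT_MAX)
		ticks = SLOT_MAX;
	return (reg & ~SLOT_MASK) | field_set(ticks);
}

int main(void)
{
	/* 9 us slot time at a 44 ticks/us PHY clock, coverage class 2. */
	uint32_t reg = field_set(9 * 44);

	printf("0x%08x -> 0x%08x\n", (unsigned)reg,
	       (unsigned)adjust_slottime(reg, 2, 44));
	return 0;
}
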
/*
 * Set all the beacon related bits on the h/w for stations
 * i.e. initializes the corresponding h/w timers;
 * also tells the h/w whether to anticipate PCF beacons
 */
void
ar5212SetStaBeaconTimers(struct ath_hal *ah, const HAL_BEACON_STATE *bs)
{
	struct ath_hal_5212 *ahp = AH5212(ah);
	uint32_t nextTbtt, nextdtim, beaconintval, dtimperiod;

	HALASSERT(bs->bs_intval != 0);
	/* if the AP will do PCF */
	if (bs->bs_cfpmaxduration != 0) {
		/* tell the h/w that the associated AP is PCF capable */
		OS_REG_WRITE(ah, AR_STA_ID1,
			OS_REG_READ(ah, AR_STA_ID1) | AR_STA_ID1_PCF);

		/* set CFP_PERIOD(1.024ms) register */
		OS_REG_WRITE(ah, AR_CFP_PERIOD, bs->bs_cfpperiod);

		/* set CFP_DUR(1.024ms) register to max cfp duration */
		OS_REG_WRITE(ah, AR_CFP_DUR, bs->bs_cfpmaxduration);

		/* set TIMER2(128us) to anticipated time of next CFP */
		OS_REG_WRITE(ah, AR_TIMER2, bs->bs_cfpnext << 3);
	} else {
		/* tell the h/w that the associated AP is not PCF capable */
		OS_REG_WRITE(ah, AR_STA_ID1,
			OS_REG_READ(ah, AR_STA_ID1) &~ AR_STA_ID1_PCF);
	}

	/*
	 * Set TIMER0(1.024ms) to the anticipated time of the next beacon.
	 */
	OS_REG_WRITE(ah, AR_TIMER0, bs->bs_nexttbtt);

	/*
	 * Start the beacon timers by setting the BEACON register
	 * to the beacon interval; also write the tim offset which
	 * we should know by now.  The code, in ar5211WriteAssocid,
	 * also sets the tim offset once the AID is known which can
	 * be left as such for now.
	 */
	OS_REG_WRITE(ah, AR_BEACON, 
		(OS_REG_READ(ah, AR_BEACON) &~ (AR_BEACON_PERIOD|AR_BEACON_TIM))
		| SM(bs->bs_intval, AR_BEACON_PERIOD)
		| SM(bs->bs_timoffset ? bs->bs_timoffset + 4 : 0, AR_BEACON_TIM)
	);

	/*
	 * Configure the BMISS interrupt.  Note that we
	 * assume the caller blocks interrupts while enabling
	 * the threshold.
	 */
	HALASSERT(bs->bs_bmissthreshold <= MS(0xffffffff, AR_RSSI_THR_BM_THR));
	ahp->ah_rssiThr = (ahp->ah_rssiThr &~ AR_RSSI_THR_BM_THR)
			| SM(bs->bs_bmissthreshold, AR_RSSI_THR_BM_THR);
	OS_REG_WRITE(ah, AR_RSSI_THR, ahp->ah_rssiThr);

	/*
	 * Program the sleep registers to correlate with the beacon setup.
	 */

	/*
	 * On Oahu the station beacon timers were used for power
	 * save operation (waking up in anticipation of a beacon)
	 * and any CFP function; Venice does sleep/power-save timers
	 * differently - so this is the right place to set them up;
	 * the beacon timers do not appear to serve any useful purpose
	 * on Venice station hardware anymore.
	 * Set up Venice's sleep-related timers here.
	 * The current implementation assumes sw processing of beacons -
	 *   an interrupt is generated for every beacon, which keeps
	 *   the hardware awake until the sw tells it to go back to
	 *   sleep; the beacon timeout allows for beacon jitter; the
	 *   cab timeout is the max time to wait for CAB frames after
	 *   seeing the last DTIM or MORE CAB bit
	 */
#define CAB_TIMEOUT_VAL     10 /* in TU */
#define BEACON_TIMEOUT_VAL  10 /* in TU */
#define SLEEP_SLOP          3  /* in TU */

	/*
	 * For max powersave mode we may want to sleep for longer than a
	 * beacon period and not want to receive all beacons; modify the
	 * timers accordingly; make sure to align the next TIM to the
	 * next DTIM if we decide to wake for DTIMs only
	 */
	beaconintval = bs->bs_intval & HAL_BEACON_PERIOD;
	HALASSERT(beaconintval != 0);
	if (bs->bs_sleepduration > beaconintval) {
		HALASSERT(roundup(bs->bs_sleepduration, beaconintval) ==
				bs->bs_sleepduration);
		beaconintval = bs->bs_sleepduration;
	}
	dtimperiod = bs->bs_dtimperiod;
	if (bs->bs_sleepduration > dtimperiod) {
		HALASSERT(dtimperiod == 0 ||
			roundup(bs->bs_sleepduration, dtimperiod) ==
				bs->bs_sleepduration);
		dtimperiod = bs->bs_sleepduration;
	}
	HALASSERT(beaconintval <= dtimperiod);
	if (beaconintval == dtimperiod)
		nextTbtt = bs->bs_nextdtim;
	else
		nextTbtt = bs->bs_nexttbtt;
	nextdtim = bs->bs_nextdtim;

	OS_REG_WRITE(ah, AR_SLEEP1,
		  SM((nextdtim - SLEEP_SLOP) << 3, AR_SLEEP1_NEXT_DTIM)
		| SM(CAB_TIMEOUT_VAL, AR_SLEEP1_CAB_TIMEOUT)
		| AR_SLEEP1_ASSUME_DTIM
		| AR_SLEEP1_ENH_SLEEP_ENA
	);
	OS_REG_WRITE(ah, AR_SLEEP2,
		  SM((nextTbtt - SLEEP_SLOP) << 3, AR_SLEEP2_NEXT_TIM)
		| SM(BEACON_TIMEOUT_VAL, AR_SLEEP2_BEACON_TIMEOUT)
	);
	OS_REG_WRITE(ah, AR_SLEEP3,
		  SM(beaconintval, AR_SLEEP3_TIM_PERIOD)
		| SM(dtimperiod, AR_SLEEP3_DTIM_PERIOD)
	);
	HALDEBUG(ah, HAL_DEBUG_BEACON, "%s: next DTIM %d\n",
	    __func__, bs->bs_nextdtim);
	HALDEBUG(ah, HAL_DEBUG_BEACON, "%s: next beacon %d\n",
	    __func__, nextTbtt);
	HALDEBUG(ah, HAL_DEBUG_BEACON, "%s: beacon period %d\n",
	    __func__, beaconintval);
	HALDEBUG(ah, HAL_DEBUG_BEACON, "%s: DTIM period %d\n",
	    __func__, dtimperiod);
#undef CAB_TIMEOUT_VAL
#undef BEACON_TIMEOUT_VAL
#undef SLEEP_SLOP
}
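
The << 3 shifts used for AR_TIMER2 and the sleep registers above convert TU (1024 us) values into the 128 us units those timers count in, since one TU is exactly eight 128 us ticks. A minimal sketch of that conversion (constants and helper name are illustrative, not HAL definitions):

#include <assert.h>
#include <stdint.h>

#define USEC_PER_TU		1024u
#define USEC_PER_TIMER_TICK	128u	/* TIMER2 / AR_SLEEP1 / AR_SLEEP2 resolution */

/* 1 TU = 1024 us = 8 * 128 us, so converting TU to 128 us ticks is a left
 * shift by 3 -- the same "<< 3" applied to bs_cfpnext, nextdtim and nextTbtt.
 */
static uint32_t tu_to_timer_ticks(uint32_t tu)
{
	return tu << 3;
}

int main(void)
{
	assert(tu_to_timer_ticks(1) * USEC_PER_TIMER_TICK == USEC_PER_TU);
	assert(tu_to_timer_ticks(100) == 800);
	return 0;
}
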
static void
ar9280AniSetup(struct ath_hal *ah)
{
	/*
	 * These are the parameters from the AR5416 ANI code;
	 * they likely need quite a bit of adjustment for the
	 * AR9280.
	 */
        static const struct ar5212AniParams aniparams = {
                .maxNoiseImmunityLevel  = 4,    /* levels 0..4 */
                .totalSizeDesired       = { -55, -55, -55, -55, -62 },
                .coarseHigh             = { -14, -14, -14, -14, -12 },
                .coarseLow              = { -64, -64, -64, -64, -70 },
                .firpwr                 = { -78, -78, -78, -78, -80 },
                .maxSpurImmunityLevel   = 7,
                .cycPwrThr1             = { 2, 4, 6, 8, 10, 12, 14, 16 },
                .maxFirstepLevel        = 2,    /* levels 0..2 */
                .firstep                = { 0, 4, 8 },
                .ofdmTrigHigh           = 500,
                .ofdmTrigLow            = 200,
                .cckTrigHigh            = 200,
                .cckTrigLow             = 100,
                .rssiThrHigh            = 40,
                .rssiThrLow             = 7,
                .period                 = 100,
        };
	/* NB: disable ANI noise immunity for reliable RIFS rx */
	AH5416(ah)->ah_ani_function &= ~(1 << HAL_ANI_NOISE_IMMUNITY_LEVEL);

        /* NB: ANI is not enabled yet */
        ar5416AniAttach(ah, &aniparams, &aniparams, AH_TRUE);
}

void
ar9280InitPLL(struct ath_hal *ah, const struct ieee80211_channel *chan)
{
	uint32_t pll = SM(0x5, AR_RTC_SOWL_PLL_REFDIV);

	if (AR_SREV_MERLIN_20(ah) &&
	    chan != AH_NULL && IEEE80211_IS_CHAN_5GHZ(chan)) {
		/*
		 * PLL WAR for Merlin 2.0/2.1
		 * When doing fast clock, set PLL to 0x142c
		 * Else, set PLL to 0x2850 to prevent reset-to-reset variation 
		 */
		pll = IS_5GHZ_FAST_CLOCK_EN(ah, chan) ? 0x142c : 0x2850;
		if (IEEE80211_IS_CHAN_HALF(chan))
			pll |= SM(0x1, AR_RTC_SOWL_PLL_CLKSEL);
		else if (IEEE80211_IS_CHAN_QUARTER(chan))
			pll |= SM(0x2, AR_RTC_SOWL_PLL_CLKSEL);
	} else if (AR_SREV_MERLIN_10_OR_LATER(ah)) {
		pll = SM(0x5, AR_RTC_SOWL_PLL_REFDIV);
		if (chan != AH_NULL) {
			if (IEEE80211_IS_CHAN_HALF(chan))
				pll |= SM(0x1, AR_RTC_SOWL_PLL_CLKSEL);
			else if (IEEE80211_IS_CHAN_QUARTER(chan))
				pll |= SM(0x2, AR_RTC_SOWL_PLL_CLKSEL);
			if (IEEE80211_IS_CHAN_5GHZ(chan))
				pll |= SM(0x28, AR_RTC_SOWL_PLL_DIV);
			else
				pll |= SM(0x2c, AR_RTC_SOWL_PLL_DIV);
		} else
			pll |= SM(0x2c, AR_RTC_SOWL_PLL_DIV);
	}

	OS_REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll);
	OS_DELAY(RTC_PLL_SETTLE_DELAY);
	OS_REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_SLEEP_DERIVED_CLK);
}
Example #4
File: htc.c Project: muonn/ath
int ath10k_htc_connect_service(struct ath10k_htc *htc,
			       struct ath10k_htc_svc_conn_req *conn_req,
			       struct ath10k_htc_svc_conn_resp *conn_resp)
{
	struct ath10k_htc_msg *msg;
	struct ath10k_htc_conn_svc *req_msg;
	struct ath10k_htc_conn_svc_response resp_msg_dummy;
	struct ath10k_htc_conn_svc_response *resp_msg = &resp_msg_dummy;
	enum ath10k_htc_ep_id assigned_eid = ATH10K_HTC_EP_COUNT;
	struct ath10k_htc_ep *ep;
	struct sk_buff *skb;
	unsigned int max_msg_size = 0;
	int length, status;
	bool disable_credit_flow_ctrl = false;
	u16 message_id, service_id, flags = 0;
	u8 tx_alloc = 0;

	/* special case for HTC pseudo control service */
	if (conn_req->service_id == ATH10K_HTC_SVC_ID_RSVD_CTRL) {
		disable_credit_flow_ctrl = true;
		assigned_eid = ATH10K_HTC_EP_0;
		max_msg_size = ATH10K_HTC_MAX_CTRL_MSG_LEN;
		memset(&resp_msg_dummy, 0, sizeof(resp_msg_dummy));
		goto setup;
	}

	tx_alloc = ath10k_htc_get_credit_allocation(htc,
						    conn_req->service_id);
	if (!tx_alloc)
		ath10k_dbg(ATH10K_DBG_BOOT,
			   "boot htc service %s does not allocate target credits\n",
			   htc_service_name(conn_req->service_id));

	skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
	if (!skb) {
		ath10k_err("Failed to allocate HTC packet\n");
		return -ENOMEM;
	}

	length = sizeof(msg->hdr) + sizeof(msg->connect_service);
	skb_put(skb, length);
	memset(skb->data, 0, length);

	msg = (struct ath10k_htc_msg *)skb->data;
	msg->hdr.message_id =
		__cpu_to_le16(ATH10K_HTC_MSG_CONNECT_SERVICE_ID);

	flags |= SM(tx_alloc, ATH10K_HTC_CONN_FLAGS_RECV_ALLOC);

	/* Only enable credit flow control for WMI ctrl service */
	if (conn_req->service_id != ATH10K_HTC_SVC_ID_WMI_CONTROL) {
		flags |= ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
		disable_credit_flow_ctrl = true;
	}

	req_msg = &msg->connect_service;
	req_msg->flags = __cpu_to_le16(flags);
	req_msg->service_id = __cpu_to_le16(conn_req->service_id);

	reinit_completion(&htc->ctl_resp);

	status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
	if (status) {
		kfree_skb(skb);
		return status;
	}

	/* wait for response */
	status = wait_for_completion_timeout(&htc->ctl_resp,
					     ATH10K_HTC_CONN_SVC_TIMEOUT_HZ);
	if (status <= 0) {
		if (status == 0)
			status = -ETIMEDOUT;
		ath10k_err("Service connect timeout: %d\n", status);
		return status;
	}

	/* we controlled the buffer creation, it's aligned */
	msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
	resp_msg = &msg->connect_service_response;
	message_id = __le16_to_cpu(msg->hdr.message_id);
	service_id = __le16_to_cpu(resp_msg->service_id);

	if ((message_id != ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID) ||
	    (htc->control_resp_len < sizeof(msg->hdr) +
	     sizeof(msg->connect_service_response))) {
		ath10k_err("Invalid resp message ID 0x%x", message_id);
		return -EPROTO;
	}

	ath10k_dbg(ATH10K_DBG_HTC,
		   "HTC Service %s connect response: status: 0x%x, assigned ep: 0x%x\n",
		   htc_service_name(service_id),
		   resp_msg->status, resp_msg->eid);

	conn_resp->connect_resp_code = resp_msg->status;

	/* check response status */
	if (resp_msg->status != ATH10K_HTC_CONN_SVC_STATUS_SUCCESS) {
		ath10k_err("HTC Service %s connect request failed: 0x%x\n",
			   htc_service_name(service_id),
			   resp_msg->status);
		return -EPROTO;
	}

	assigned_eid = (enum ath10k_htc_ep_id)resp_msg->eid;
	max_msg_size = __le16_to_cpu(resp_msg->max_msg_size);

setup:

	if (assigned_eid >= ATH10K_HTC_EP_COUNT)
		return -EPROTO;

	if (max_msg_size == 0)
		return -EPROTO;

	ep = &htc->endpoint[assigned_eid];
	ep->eid = assigned_eid;

	if (ep->service_id != ATH10K_HTC_SVC_ID_UNUSED)
		return -EPROTO;

	/* return assigned endpoint to caller */
	conn_resp->eid = assigned_eid;
	conn_resp->max_msg_len = __le16_to_cpu(resp_msg->max_msg_size);

	/* setup the endpoint */
	ep->service_id = conn_req->service_id;
	ep->max_tx_queue_depth = conn_req->max_send_queue_depth;
	ep->max_ep_message_len = __le16_to_cpu(resp_msg->max_msg_size);
	ep->tx_credits = tx_alloc;
	ep->tx_credit_size = htc->target_credit_size;
	ep->tx_credits_per_max_message = ep->max_ep_message_len /
					 htc->target_credit_size;

	if (ep->max_ep_message_len % htc->target_credit_size)
		ep->tx_credits_per_max_message++;

	/* copy all the callbacks */
	ep->ep_ops = conn_req->ep_ops;

	status = ath10k_hif_map_service_to_pipe(htc->ar,
						ep->service_id,
						&ep->ul_pipe_id,
						&ep->dl_pipe_id,
						&ep->ul_is_polled,
						&ep->dl_is_polled);
	if (status)
		return status;

	ath10k_dbg(ATH10K_DBG_BOOT,
		   "boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
		   htc_service_name(ep->service_id), ep->ul_pipe_id,
		   ep->dl_pipe_id, ep->eid);

	ath10k_dbg(ATH10K_DBG_BOOT,
		   "boot htc ep %d ul polled %d dl polled %d\n",
		   ep->eid, ep->ul_is_polled, ep->dl_is_polled);

	if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
		ep->tx_credit_flow_enabled = false;
		ath10k_dbg(ATH10K_DBG_BOOT,
			   "boot htc service '%s' eid %d TX flow control disabled\n",
			   htc_service_name(ep->service_id), assigned_eid);
	}

	return status;
}
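
The credit bookkeeping at the end of the function is a ceiling division: a maximum-sized message consumes max_ep_message_len / target_credit_size credits, rounded up when there is a remainder. A self-contained sketch of the same computation (in-kernel code would typically reach for DIV_ROUND_UP):

#include <assert.h>

/* Credits needed for one message of msg_len bytes when every credit covers
 * credit_size bytes: divide and round up, exactly what the division plus
 * the "++ on remainder" above computes.
 */
static unsigned int credits_per_message(unsigned int msg_len,
					unsigned int credit_size)
{
	return (msg_len + credit_size - 1) / credit_size;
}

int main(void)
{
	assert(credits_per_message(1792, 1792) == 1);
	assert(credits_per_message(1793, 1792) == 2);
	assert(credits_per_message(4096, 1792) == 3);
	return 0;
}
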
static void
ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	u32 ctl1, ctl6;

	ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
	ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
	ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
	ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
	ads->ds_txstatus8 = ads->ds_txstatus9 = 0;

	ACCESS_ONCE(ads->ds_link) = i->link;
	ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];

	ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
	ctl6 = SM(i->keytype, AR_EncrType);

	if (AR_SREV_9285(ah)) {
		ads->ds_ctl8 = 0;
		ads->ds_ctl9 = 0;
		ads->ds_ctl10 = 0;
		ads->ds_ctl11 = 0;
	}

	if ((i->is_first || i->is_last) &&
	    i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
		ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
			| set11nTries(i->rates, 1)
			| set11nTries(i->rates, 2)
			| set11nTries(i->rates, 3)
			| (i->dur_update ? AR_DurUpdateEna : 0)
			| SM(0, AR_BurstDur);

		ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
			| set11nRate(i->rates, 1)
			| set11nRate(i->rates, 2)
			| set11nRate(i->rates, 3);
	} else {
		ACCESS_ONCE(ads->ds_ctl2) = 0;
		ACCESS_ONCE(ads->ds_ctl3) = 0;
	}

	if (!i->is_first) {
		ACCESS_ONCE(ads->ds_ctl0) = 0;
		ACCESS_ONCE(ads->ds_ctl1) = ctl1;
		ACCESS_ONCE(ads->ds_ctl6) = ctl6;
		return;
	}

	ctl1 |= (i->keyix != ATH9K_TXKEYIX_INVALID ? SM(i->keyix, AR_DestIdx) : 0)
		| SM(i->type, AR_FrameType)
		| (i->flags & ATH9K_TXDESC_NOACK ? AR_NoAck : 0)
		| (i->flags & ATH9K_TXDESC_EXT_ONLY ? AR_ExtOnly : 0)
		| (i->flags & ATH9K_TXDESC_EXT_AND_CTL ? AR_ExtAndCtl : 0);

	switch (i->aggr) {
	case AGGR_BUF_FIRST:
		ctl6 |= SM(i->aggr_len, AR_AggrLen);
		/* fall through */
	case AGGR_BUF_MIDDLE:
		ctl1 |= AR_IsAggr | AR_MoreAggr;
		ctl6 |= SM(i->ndelim, AR_PadDelim);
		break;
	case AGGR_BUF_LAST:
		ctl1 |= AR_IsAggr;
		break;
	case AGGR_BUF_NONE:
		break;
	}

	ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
		| (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
		| SM(i->txpower, AR_XmitPower)
		| (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
		| (i->flags & ATH9K_TXDESC_INTREQ ? AR_TxIntrReq : 0)
		| (i->keyix != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0)
		| (i->flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
		| (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
		   (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));

	ACCESS_ONCE(ads->ds_ctl1) = ctl1;
	ACCESS_ONCE(ads->ds_ctl6) = ctl6;

	if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
		return;

	ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
		| set11nPktDurRTSCTS(i->rates, 1);

	ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
		| set11nPktDurRTSCTS(i->rates, 3);

	ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
		| set11nRateFlags(i->rates, 1)
		| set11nRateFlags(i->rates, 2)
		| set11nRateFlags(i->rates, 3)
		| SM(i->rtscts_rate, AR_RTSCTSRate);
}
Example #6
bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info *qi;
	u32 cwMin, chanCwMin, value;

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_dbg(common, QUEUE, "Reset TXQ, inactive queue: %u\n", q);
		return true;
	}

	ath_dbg(common, QUEUE, "Reset TX queue: %u\n", q);

	if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
		chanCwMin = INIT_CWMIN;

		for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1);
	} else
		cwMin = qi->tqi_cwmin;

	ENABLE_REGWRITE_BUFFER(ah);

	REG_WRITE(ah, AR_DLCL_IFS(q),
		  SM(cwMin, AR_D_LCL_IFS_CWMIN) |
		  SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX) |
		  SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));

	REG_WRITE(ah, AR_DRETRY_LIMIT(q),
		  SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH) |
		  SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG) |
		  SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH));

	REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ);

	if (AR_SREV_9340(ah) && !AR_SREV_9340_13_OR_LATER(ah))
		REG_WRITE(ah, AR_DMISC(q),
			  AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x1);
	else
		REG_WRITE(ah, AR_DMISC(q),
			  AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2);

	if (qi->tqi_cbrPeriod) {
		REG_WRITE(ah, AR_QCBRCFG(q),
			  SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) |
			  SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_OVF_THRESH));
		REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_FSP_CBR |
			    (qi->tqi_cbrOverflowLimit ?
			     AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN : 0));
	}
	if (qi->tqi_readyTime && (qi->tqi_type != ATH9K_TX_QUEUE_CAB)) {
		REG_WRITE(ah, AR_QRDYTIMECFG(q),
			  SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_DURATION) |
			  AR_Q_RDYTIMECFG_EN);
	}

	REG_WRITE(ah, AR_DCHNTIME(q),
		  SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) |
		  (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));

	if (qi->tqi_burstTime
	    && (qi->tqi_qflags & TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE))
		REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_RDYTIME_EXP_POLICY);

	if (qi->tqi_qflags & TXQ_FLAG_BACKOFF_DISABLE)
		REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_POST_FR_BKOFF_DIS);

	REGWRITE_BUFFER_FLUSH(ah);

	if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE)
		REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_FRAG_BKOFF_EN);

	switch (qi->tqi_type) {
	case ATH9K_TX_QUEUE_BEACON:
		ENABLE_REGWRITE_BUFFER(ah);

		REG_SET_BIT(ah, AR_QMISC(q),
			    AR_Q_MISC_FSP_DBA_GATED
			    | AR_Q_MISC_BEACON_USE
			    | AR_Q_MISC_CBR_INCR_DIS1);

		REG_SET_BIT(ah, AR_DMISC(q),
			    (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
			     AR_D_MISC_ARB_LOCKOUT_CNTRL_S)
			    | AR_D_MISC_BEACON_USE
			    | AR_D_MISC_POST_FR_BKOFF_DIS);

		REGWRITE_BUFFER_FLUSH(ah);

		/*
		 * cwmin and cwmax should be 0 for beacon queue
		 * but not for IBSS as we would create an imbalance
		 * on beaconing fairness for participating nodes.
		 */
		if (AR_SREV_9300_20_OR_LATER(ah) &&
		    ah->opmode != NL80211_IFTYPE_ADHOC) {
			REG_WRITE(ah, AR_DLCL_IFS(q), SM(0, AR_D_LCL_IFS_CWMIN)
				  | SM(0, AR_D_LCL_IFS_CWMAX)
				  | SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));
		}
		break;
	case ATH9K_TX_QUEUE_CAB:
		ENABLE_REGWRITE_BUFFER(ah);

		REG_SET_BIT(ah, AR_QMISC(q),
			    AR_Q_MISC_FSP_DBA_GATED
			    | AR_Q_MISC_CBR_INCR_DIS1
			    | AR_Q_MISC_CBR_INCR_DIS0);
		value = (qi->tqi_readyTime -
			 (ah->config.sw_beacon_response_time -
			  ah->config.dma_beacon_response_time)) * 1024;
		REG_WRITE(ah, AR_QRDYTIMECFG(q),
			  value | AR_Q_RDYTIMECFG_EN);
		REG_SET_BIT(ah, AR_DMISC(q),
			    (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
			     AR_D_MISC_ARB_LOCKOUT_CNTRL_S));

		REGWRITE_BUFFER_FLUSH(ah);

		break;
	case ATH9K_TX_QUEUE_PSPOLL:
		REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_CBR_INCR_DIS1);
		break;
	case ATH9K_TX_QUEUE_UAPSD:
		REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_POST_FR_BKOFF_DIS);
		break;
	default:
		break;
	}

	if (qi->tqi_intFlags & ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS) {
		REG_SET_BIT(ah, AR_DMISC(q),
			    SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
			       AR_D_MISC_ARB_LOCKOUT_CNTRL) |
			    AR_D_MISC_POST_FR_BKOFF_DIS);
	}

	if (AR_SREV_9300_20_OR_LATER(ah))
		REG_WRITE(ah, AR_Q_DESC_CRCCHK, AR_Q_DESC_CRCCHK_EN);

	ath9k_hw_clear_queue_interrupts(ah, q);
	if (qi->tqi_qflags & TXQ_FLAG_TXINT_ENABLE) {
		ah->txok_interrupt_mask |= 1 << q;
		ah->txerr_interrupt_mask |= 1 << q;
	}
	if (qi->tqi_qflags & TXQ_FLAG_TXDESCINT_ENABLE)
		ah->txdesc_interrupt_mask |= 1 << q;
	if (qi->tqi_qflags & TXQ_FLAG_TXEOLINT_ENABLE)
		ah->txeol_interrupt_mask |= 1 << q;
	if (qi->tqi_qflags & TXQ_FLAG_TXURNINT_ENABLE)
		ah->txurn_interrupt_mask |= 1 << q;
	ath9k_hw_set_txq_interrupts(ah, qi);

	return true;
}
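
The default-CWmin loop above grows 1, 3, 7, 15, ... until it reaches INIT_CWMIN, i.e. it selects the smallest all-ones value (2^n - 1) that is at least the requested contention window. The same idiom in isolation (helper name is illustrative):

#include <assert.h>
#include <stdint.h>

/* Smallest all-ones value (2^n - 1) that is >= want; this is what the
 * "(cwMin << 1) | 1" loop computes for the default CWmin.
 */
static uint32_t cw_round_up(uint32_t want)
{
	uint32_t cw;

	for (cw = 1; cw < want; cw = (cw << 1) | 1)
		;
	return cw;
}

int main(void)
{
	assert(cw_round_up(1) == 1);
	assert(cw_round_up(15) == 15);
	assert(cw_round_up(16) == 31);	/* e.g. a requested CWmin of 16 becomes 31 */
	return 0;
}
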
HAL_BOOL ar5416SetupTxDesc_20(struct ath_hal *ah, struct ath_tx_desc *ds,
			      a_uint32_t pktLen,
			      a_uint32_t hdrLen,
			      HAL_PKT_TYPE type,
			      a_uint32_t txPower,
			      a_uint32_t txRate0, a_uint32_t txTries0,
			      a_uint32_t keyIx,
			      a_uint32_t antMode,
			      a_uint32_t flags,
			      a_uint32_t rtsctsRate,
			      a_uint32_t rtsctsDuration,
			      a_uint32_t compicvLen,
			      a_uint32_t compivLen,
			      a_uint32_t comp)
{
#define RTSCTS  (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)

        struct ar5416_desc *ads = AR5416DESC(ds);
        (void) hdrLen;

        ads->ds_txstatus9 &= ~AR_TxDone;

        HALASSERT(txTries0 != 0);
        HALASSERT(isValidPktType(type));
        HALASSERT(isValidTxRate(txRate0));
        HALASSERT((flags & RTSCTS) != RTSCTS);

        if (txPower > 63)
                txPower = 63;

        ads->ds_ctl0 = (pktLen & AR_FrameLen)
		| (txPower << AR_XmitPower_S)
		| (flags & HAL_TXDESC_VEOL ? AR_VEOL : 0)
		| (flags & HAL_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
		| (flags & HAL_TXDESC_INTREQ ? AR_TxIntrReq : 0);

        ads->ds_ctl1 = (type << AR_FrameType_S)
		| (flags & HAL_TXDESC_NOACK ? AR_NoAck : 0);
        ads->ds_ctl2 = SM(txTries0, AR_XmitDataTries0);
        ads->ds_ctl3 = (txRate0 << AR_XmitRate0_S);

        ads->ds_ctl7 = SM(AR5416_LEGACY_CHAINMASK, AR_ChainSel0) 
		| SM(AR5416_LEGACY_CHAINMASK, AR_ChainSel1)
		| SM(AR5416_LEGACY_CHAINMASK, AR_ChainSel2) 
		| SM(AR5416_LEGACY_CHAINMASK, AR_ChainSel3);

        if (keyIx != HAL_TXKEYIX_INVALID) {
                /* XXX validate key index */
                ads->ds_ctl1 |= SM(keyIx, AR_DestIdx);
                ads->ds_ctl0 |= AR_DestIdxValid;
        }

        if (flags & RTSCTS) {
                if (!isValidTxRate(rtsctsRate)) {
                        return AH_FALSE;
                }
                /* XXX validate rtsctsDuration */
                ads->ds_ctl0 |= (flags & HAL_TXDESC_CTSENA ? AR_CTSEnable : 0)
			| (flags & HAL_TXDESC_RTSENA ? AR_RTSEnable : 0);
                ads->ds_ctl2 |= SM(rtsctsDuration, AR_BurstDur);
                ads->ds_ctl3 |= (rtsctsRate << AR_RTSCTSRate_S);
        }
        return AH_TRUE;

#undef RTSCTS
}
Example #8
static HAL_BOOL
ar2317SetPowerTable(struct ath_hal *ah,
	int16_t *minPower, int16_t *maxPower,
	const struct ieee80211_channel *chan, 
	uint16_t *rfXpdGain)
{
	struct ath_hal_5212 *ahp = AH5212(ah);
	const HAL_EEPROM *ee = AH_PRIVATE(ah)->ah_eeprom;
	const RAW_DATA_STRUCT_2317 *pRawDataset = AH_NULL;
	uint16_t pdGainOverlap_t2;
	int16_t minCalPower2317_t2;
	uint16_t *pdadcValues = ahp->ah_pcdacTable;
	uint16_t gainBoundaries[4];
	uint32_t reg32, regoffset;
	int i, numPdGainsUsed;
#ifndef AH_USE_INIPDGAIN
	uint32_t tpcrg1;
#endif

	HALDEBUG(ah, HAL_DEBUG_RFPARAM, "%s: chan 0x%x flag 0x%x\n",
	    __func__, chan->ic_freq, chan->ic_flags);

	if (IEEE80211_IS_CHAN_G(chan) || IEEE80211_IS_CHAN_108G(chan))
		pRawDataset = &ee->ee_rawDataset2413[headerInfo11G];
	else if (IEEE80211_IS_CHAN_B(chan))
		pRawDataset = &ee->ee_rawDataset2413[headerInfo11B];
	else {
		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: illegal mode\n", __func__);
		return AH_FALSE;
	}

	pdGainOverlap_t2 = (uint16_t) SM(OS_REG_READ(ah, AR_PHY_TPCRG5),
					  AR_PHY_TPCRG5_PD_GAIN_OVERLAP);
    
	numPdGainsUsed = ar2317getGainBoundariesAndPdadcsForPowers(ah,
		chan->channel, pRawDataset, pdGainOverlap_t2,
		&minCalPower2317_t2,gainBoundaries, rfXpdGain, pdadcValues);
	HALASSERT(1 <= numPdGainsUsed && numPdGainsUsed <= 3);

#ifdef AH_USE_INIPDGAIN
	/*
	 * Use pd_gains curve from eeprom; Atheros always uses
	 * the default curve from the ini file but some vendors
	 * (e.g. Zcomax) want to override this curve and not
	 * honoring their settings results in tx power 5dBm low.
	 */
	OS_REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_NUM_PD_GAIN, 
			 (pRawDataset->pDataPerChannel[0].numPdGains - 1));
#else
	tpcrg1 = OS_REG_READ(ah, AR_PHY_TPCRG1);
	tpcrg1 = (tpcrg1 &~ AR_PHY_TPCRG1_NUM_PD_GAIN)
		  | SM(numPdGainsUsed-1, AR_PHY_TPCRG1_NUM_PD_GAIN);
	switch (numPdGainsUsed) {
	case 3:
		tpcrg1 &= ~AR_PHY_TPCRG1_PDGAIN_SETTING3;
		tpcrg1 |= SM(rfXpdGain[2], AR_PHY_TPCRG1_PDGAIN_SETTING3);
		/* fall thru... */
	case 2:
		tpcrg1 &= ~AR_PHY_TPCRG1_PDGAIN_SETTING2;
		tpcrg1 |= SM(rfXpdGain[1], AR_PHY_TPCRG1_PDGAIN_SETTING2);
		/* fall thru... */
	case 1:
		tpcrg1 &= ~AR_PHY_TPCRG1_PDGAIN_SETTING1;
		tpcrg1 |= SM(rfXpdGain[0], AR_PHY_TPCRG1_PDGAIN_SETTING1);
		break;
	}
#ifdef AH_DEBUG
	if (tpcrg1 != OS_REG_READ(ah, AR_PHY_TPCRG1))
		HALDEBUG(ah, HAL_DEBUG_RFPARAM, "%s: using non-default "
		    "pd_gains (default 0x%x, calculated 0x%x)\n",
		    __func__, OS_REG_READ(ah, AR_PHY_TPCRG1), tpcrg1);
#endif
	OS_REG_WRITE(ah, AR_PHY_TPCRG1, tpcrg1);
#endif

	/*
	 * Note the pdadc table may not start at 0 dBm power, could be
	 * negative or greater than 0.  Need to offset the power
	 * values by the amount of minPower for griffin
	 */
	if (minCalPower2317_t2 != 0)
		ahp->ah_txPowerIndexOffset = (int16_t)(0 - minCalPower2317_t2);
	else
		ahp->ah_txPowerIndexOffset = 0;

	/* Finally, write the power values into the baseband power table */
	regoffset = 0x9800 + (672 << 2); /* beginning of pdadc table in griffin */
	for (i = 0; i < 32; i++) {
		reg32 = ((pdadcValues[4*i + 0] & 0xFF) << 0)  | 
			((pdadcValues[4*i + 1] & 0xFF) << 8)  |
			((pdadcValues[4*i + 2] & 0xFF) << 16) |
			((pdadcValues[4*i + 3] & 0xFF) << 24) ;        
		OS_REG_WRITE(ah, regoffset, reg32);
		regoffset += 4;
	}

	OS_REG_WRITE(ah, AR_PHY_TPCRG5, 
		     SM(pdGainOverlap_t2, AR_PHY_TPCRG5_PD_GAIN_OVERLAP) | 
		     SM(gainBoundaries[0], AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1) |
		     SM(gainBoundaries[1], AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2) |
		     SM(gainBoundaries[2], AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3) |
		     SM(gainBoundaries[3], AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4));

	return AH_TRUE;
}
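
The final write loop packs the 8-bit pdadc entries four at a time into 32-bit words, lowest-index entry in the least significant byte, and steps the register offset by 4 per write. The packing step on its own (plain C, no HAL dependencies; names are illustrative):

#include <assert.h>
#include <stdint.h>

/* Pack four consecutive 8-bit table entries into one 32-bit word, entry
 * [4*i + 0] in bits 7:0 and entry [4*i + 3] in bits 31:24 -- the same
 * layout the pdadc loop above builds before each OS_REG_WRITE().
 */
static uint32_t pack_pdadc_word(const uint16_t *tbl, int i)
{
	return ((uint32_t)(tbl[4 * i + 0] & 0xFF) << 0)  |
	       ((uint32_t)(tbl[4 * i + 1] & 0xFF) << 8)  |
	       ((uint32_t)(tbl[4 * i + 2] & 0xFF) << 16) |
	       ((uint32_t)(tbl[4 * i + 3] & 0xFF) << 24);
}

int main(void)
{
	uint16_t tbl[8] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88 };

	assert(pack_pdadc_word(tbl, 0) == 0x44332211u);
	assert(pack_pdadc_word(tbl, 1) == 0x88776655u);
	return 0;
}
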
int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct device *dev = htt->ar->dev;
	struct htt_cmd *cmd;
	struct htt_data_tx_desc_frag *tx_frags;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct sk_buff *txdesc = NULL;
	bool use_frags;
	u8 vdev_id = ATH10K_SKB_CB(msdu)->vdev_id;
	u8 tid;
	int prefetch_len, desc_len;
	int msdu_id = -1;
	int res;
	u8 flags0;
	u16 flags1;

	res = ath10k_htt_tx_inc_pending(htt);
	if (res)
		goto err;

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt);
	if (res < 0) {
		spin_unlock_bh(&htt->tx_lock);
		goto err_tx_dec;
	}
	msdu_id = res;
	htt->pending_tx[msdu_id] = msdu;
	spin_unlock_bh(&htt->tx_lock);

	prefetch_len = min(htt->prefetch_len, msdu->len);
	prefetch_len = roundup(prefetch_len, 4);

	desc_len = sizeof(cmd->hdr) + sizeof(cmd->data_tx) + prefetch_len;

	txdesc = ath10k_htc_alloc_skb(desc_len);
	if (!txdesc) {
		res = -ENOMEM;
		goto err_free_msdu_id;
	}

	/* Since HTT 3.0 there is no separate mgmt tx command. However, in case
	 * of mgmt tx using TX_FRM there is no tx fragment list. Instead of a tx
	 * fragment list the host driver specifies the frame pointer directly. */
	use_frags = htt->target_version_major < 3 ||
		    !ieee80211_is_mgmt(hdr->frame_control);

	if (!IS_ALIGNED((unsigned long)txdesc->data, 4)) {
		ath10k_warn("htt alignment check failed. dropping packet.\n");
		res = -EIO;
		goto err_free_txdesc;
	}

	if (use_frags) {
		skb_cb->htt.frag_len = sizeof(*tx_frags) * 2;
		skb_cb->htt.pad_len = (unsigned long)msdu->data -
				      round_down((unsigned long)msdu->data, 4);

		skb_push(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
	} else {
		skb_cb->htt.frag_len = 0;
		skb_cb->htt.pad_len = 0;
	}

	res = ath10k_skb_map(dev, msdu);
	if (res)
		goto err_pull_txfrag;

	if (use_frags) {
		dma_sync_single_for_cpu(dev, skb_cb->paddr, msdu->len,
					DMA_TO_DEVICE);

		/* tx fragment list must be terminated with zero-entry */
		tx_frags = (struct htt_data_tx_desc_frag *)msdu->data;
		tx_frags[0].paddr = __cpu_to_le32(skb_cb->paddr +
						  skb_cb->htt.frag_len +
						  skb_cb->htt.pad_len);
		tx_frags[0].len   = __cpu_to_le32(msdu->len -
						  skb_cb->htt.frag_len -
						  skb_cb->htt.pad_len);
		tx_frags[1].paddr = __cpu_to_le32(0);
		tx_frags[1].len   = __cpu_to_le32(0);

		dma_sync_single_for_device(dev, skb_cb->paddr, msdu->len,
					   DMA_TO_DEVICE);
	}

	ath10k_dbg(ATH10K_DBG_HTT, "msdu 0x%llx\n",
		   (unsigned long long) ATH10K_SKB_CB(msdu)->paddr);
	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "msdu: ",
			msdu->data, msdu->len);

	skb_put(txdesc, desc_len);
	cmd = (struct htt_cmd *)txdesc->data;

	tid = ATH10K_SKB_CB(msdu)->htt.tid;

	ath10k_dbg(ATH10K_DBG_HTT, "htt data tx using tid %hhu\n", tid);

	flags0  = 0;
	if (!ieee80211_has_protected(hdr->frame_control))
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
	flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

	if (use_frags)
		flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
	else
		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);

	flags1  = 0;
	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;

	cmd->hdr.msg_type        = HTT_H2T_MSG_TYPE_TX_FRM;
	cmd->data_tx.flags0      = flags0;
	cmd->data_tx.flags1      = __cpu_to_le16(flags1);
	cmd->data_tx.len         = __cpu_to_le16(msdu->len -
						 skb_cb->htt.frag_len -
						 skb_cb->htt.pad_len);
	cmd->data_tx.id          = __cpu_to_le16(msdu_id);
	cmd->data_tx.frags_paddr = __cpu_to_le32(skb_cb->paddr);
	cmd->data_tx.peerid      = __cpu_to_le32(HTT_INVALID_PEERID);

	memcpy(cmd->data_tx.prefetch, hdr, prefetch_len);

	res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	ath10k_skb_unmap(dev, msdu);
err_pull_txfrag:
	skb_pull(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
err_free_txdesc:
	dev_kfree_skb_any(txdesc);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	htt->pending_tx[msdu_id] = NULL;
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
	ath10k_htt_tx_dec_pending(htt);
err:
	return res;
}
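
Two details carry the fragment-list setup above: pad_len is the distance of msdu->data from the previous 4-byte boundary (so the pushed headroom ends up 4-byte aligned), and the list is terminated by an all-zero entry. The alignment arithmetic in isolation (hypothetical helper, not a driver function):

#include <assert.h>

/* Distance of addr from the previous 4-byte boundary; the same quantity as
 * "addr - round_down(addr, 4)" used for skb_cb->htt.pad_len above.  Pushing
 * (frag_len + pad_len) bytes in front of such an address leaves the pushed
 * region 4-byte aligned.
 */
static unsigned long pad_to_prev_align4(unsigned long addr)
{
	return addr - (addr & ~0x3UL);
}

int main(void)
{
	assert(pad_to_prev_align4(0x1000) == 0);
	assert(pad_to_prev_align4(0x1001) == 1);
	assert(pad_to_prev_align4(0x1003) == 3);
	return 0;
}
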
Example #10
/*
 * Enable radar detection and set the radar parameters per the
 * values in pe
 */
void
ar9300_enable_dfs(struct ath_hal *ah, HAL_PHYERR_PARAM *pe)
{
    u_int32_t val;
    struct ath_hal_private  *ahp = AH_PRIVATE(ah);
    HAL_CHANNEL_INTERNAL *ichan = ahp->ah_curchan;
    struct ath_hal_9300 *ah9300 = AH9300(ah);
    bool asleep = ah9300->ah_chip_full_sleep;
    int reg_writes = 0;

    if ((AR_SREV_WASP(ah) || AR_SREV_SCORPION(ah)) && asleep) {
        ar9300_set_power_mode(ah, HAL_PM_AWAKE, true);
    }

    val = OS_REG_READ(ah, AR_PHY_RADAR_0);
    val |= AR_PHY_RADAR_0_FFT_ENA | AR_PHY_RADAR_0_ENA;
    if (pe->pe_firpwr != HAL_PHYERR_PARAM_NOVAL) {
        val &= ~AR_PHY_RADAR_0_FIRPWR;
        val |= SM(pe->pe_firpwr, AR_PHY_RADAR_0_FIRPWR);
    }
    if (pe->pe_rrssi != HAL_PHYERR_PARAM_NOVAL) {
        val &= ~AR_PHY_RADAR_0_RRSSI;
        val |= SM(pe->pe_rrssi, AR_PHY_RADAR_0_RRSSI);
    }
    if (pe->pe_height != HAL_PHYERR_PARAM_NOVAL) {
        val &= ~AR_PHY_RADAR_0_HEIGHT;
        val |= SM(pe->pe_height, AR_PHY_RADAR_0_HEIGHT);
    }
    if (pe->pe_prssi != HAL_PHYERR_PARAM_NOVAL) {
        val &= ~AR_PHY_RADAR_0_PRSSI;
        if (AR_SREV_AR9580(ah) || AR_SREV_WASP(ah) || AR_SREV_SCORPION(ah)) {
            if (ah->ah_use_cac_prssi) {
                val |= SM(AR9300_DFS_PRSSI_CAC, AR_PHY_RADAR_0_PRSSI);
            } else {
                val |= SM(pe->pe_prssi, AR_PHY_RADAR_0_PRSSI);
            }
        } else {
            val |= SM(pe->pe_prssi, AR_PHY_RADAR_0_PRSSI);
        }
    }
    if (pe->pe_inband != HAL_PHYERR_PARAM_NOVAL) {
        val &= ~AR_PHY_RADAR_0_INBAND;
        val |= SM(pe->pe_inband, AR_PHY_RADAR_0_INBAND);
    }
    OS_REG_WRITE(ah, AR_PHY_RADAR_0, val);

    val = OS_REG_READ(ah, AR_PHY_RADAR_1);
    val |= AR_PHY_RADAR_1_MAX_RRSSI | AR_PHY_RADAR_1_BLOCK_CHECK;
    if (pe->pe_maxlen != HAL_PHYERR_PARAM_NOVAL) {
        val &= ~AR_PHY_RADAR_1_MAXLEN;
        val |= SM(pe->pe_maxlen, AR_PHY_RADAR_1_MAXLEN);
    }
    if (pe->pe_relstep != HAL_PHYERR_PARAM_NOVAL) {
        val &= ~AR_PHY_RADAR_1_RELSTEP_THRESH;
        val |= SM(pe->pe_relstep, AR_PHY_RADAR_1_RELSTEP_THRESH);
    }
    if (pe->pe_relpwr != HAL_PHYERR_PARAM_NOVAL) {
        val &= ~AR_PHY_RADAR_1_RELPWR_THRESH;
        val |= SM(pe->pe_relpwr, AR_PHY_RADAR_1_RELPWR_THRESH);
    }
    OS_REG_WRITE(ah, AR_PHY_RADAR_1, val);

    if (ath_hal_getcapability(ah, HAL_CAP_EXT_CHAN_DFS, 0, 0) == HAL_OK) {
        val = OS_REG_READ(ah, AR_PHY_RADAR_EXT);
        if (IS_CHAN_HT40(ichan)) {
            /* Enable extension channel radar detection */
            OS_REG_WRITE(ah, AR_PHY_RADAR_EXT, val | AR_PHY_RADAR_EXT_ENA);
        } else {
            /* HT20 mode, disable extension channel radar detect */
            OS_REG_WRITE(ah, AR_PHY_RADAR_EXT, val & ~AR_PHY_RADAR_EXT_ENA);
        }
    }
    /*
        apply DFS postamble array from INI
        column 0 is register ID, column 1 is HT20 value, column 2 is HT40 value
    */

    if (AR_SREV_AR9580(ah) || AR_SREV_WASP(ah) || AR_SREV_OSPREY_22(ah) || AR_SREV_SCORPION(ah)) {
        REG_WRITE_ARRAY(&ah9300->ah_ini_dfs, IS_CHAN_HT40(ichan) ? 2 : 1, reg_writes);
    }
#ifdef ATH_HAL_DFS_CHIRPING_FIX_APH128
    HDPRINTF(ah, HAL_DBG_DFS, "DFS change the timing value\n");
    if (AR_SREV_AR9580(ah) && IS_CHAN_HT40(ichan)) {
        OS_REG_WRITE(ah, AR_PHY_TIMING6, 0x3140c00a);	
    }
#endif
    if ((AR_SREV_WASP(ah) || AR_SREV_SCORPION(ah)) && asleep) {
        ar9300_set_power_mode(ah, HAL_PM_FULL_SLEEP, true);
    }
}
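
Every radar parameter above follows the same guarded read-modify-write: skip the field when the parameter is HAL_PHYERR_PARAM_NOVAL, otherwise clear the field's mask and OR in the shifted value. A minimal generic version of that step (mask, shift and the NOVAL marker are made up for the example):

#include <assert.h>
#include <stdint.h>

#define PARAM_NOVAL	0xffff		/* "leave this field alone" marker */

/* Guarded read-modify-write of one field: do nothing for NOVAL, otherwise
 * clear the masked bits and insert the shifted value -- the
 * "val &= ~MASK; val |= SM(...)" sequence repeated above.
 */
static uint32_t update_field(uint32_t reg, int param,
			     uint32_t mask, unsigned int shift)
{
	if (param == PARAM_NOVAL)
		return reg;		/* caller supplied no value */
	reg &= ~mask;
	reg |= ((uint32_t)param << shift) & mask;
	return reg;
}

int main(void)
{
	uint32_t reg = 0x12345678;

	/* NOVAL leaves the register untouched. */
	assert(update_field(reg, PARAM_NOVAL, 0x00ff0000, 16) == reg);
	/* A real value replaces only bits 23:16. */
	assert(update_field(reg, 0xAB, 0x00ff0000, 16) == 0x12AB5678);
	return 0;
}
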
Example #11
	{ "index=[int]", "screenline=[int]" };
static char *T_scrollVertical[] =
        { "direction={forwards,backwards,goto}",
	  "unit={file,page,line}", "amount=int" };

/* Instance Variables */

static vardecl var_view[] =
{ IV(NAME_editor, "editor", IV_GET,
     NAME_delegate, "Editor displayed")
};

/* Send Methods */

static senddecl send_view[] =
{ SM(NAME_editor, 1, "editor", editorView,
     DEFAULT, "Associate editor with view"),
  SM(NAME_initialise, 4, T_initialise, initialiseView,
     DEFAULT, "Create from label, size, display and editor"),
  SM(NAME_requestGeometry, 4, T_requestGeometry, requestGeometryView,
     DEFAULT, "Map size to character units"),
  SM(NAME_unlink, 0, NULL, unlinkView,
     DEFAULT, "Unlink the editor"),
  SM(NAME_clear, 0, NULL, clearView,
     NAME_delete, "Overrule window behaviour"),
  SM(NAME_format, 2, T_format, formatView,
     NAME_format, "Formatted insert (see `string->format')"),
  SM(NAME_normalise, 2, T_fromAint_toAint, normaliseView,
     NAME_scroll, "Overrule window behaviour"),
  SM(NAME_scrollTo, 2, T_scrollTo, scrollToView,
     NAME_scroll, "Overrule window behaviour"),
  SM(NAME_scrollVertical, 3, T_scrollVertical, scrollVerticalView,
Example #12
void
ar9300EnableDfs(struct ath_hal *ah, HAL_PHYERR_PARAM *pe)
{
    u_int32_t val;
    struct ath_hal_private  *ahp = AH_PRIVATE(ah);
    HAL_CHANNEL_INTERNAL *ichan = ahp->ah_curchan;

    val = OS_REG_READ(ah, AR_PHY_RADAR_0);
    if (pe->pe_firpwr != HAL_PHYERR_PARAM_NOVAL) {
        val &= ~AR_PHY_RADAR_0_FIRPWR;
        val |= SM(pe->pe_firpwr, AR_PHY_RADAR_0_FIRPWR);
    }
    if (pe->pe_rrssi != HAL_PHYERR_PARAM_NOVAL) {
        val &= ~AR_PHY_RADAR_0_RRSSI;
        val |= SM(pe->pe_rrssi, AR_PHY_RADAR_0_RRSSI);
    }
    if (pe->pe_height != HAL_PHYERR_PARAM_NOVAL) {
        val &= ~AR_PHY_RADAR_0_HEIGHT;
        val |= SM(pe->pe_height, AR_PHY_RADAR_0_HEIGHT);
    }
    if (pe->pe_prssi != HAL_PHYERR_PARAM_NOVAL) {
        val &= ~AR_PHY_RADAR_0_PRSSI;

        if (AR_SREV_AR9580(ah)) {
            if (ah->ah_use_cac_prssi) {
                val |= SM(AR9300_DFS_PRSSI_CAC, AR_PHY_RADAR_0_PRSSI);
            } else {
                val |= SM(pe->pe_prssi, AR_PHY_RADAR_0_PRSSI);
            }
        } else {
            val |= SM(pe->pe_prssi, AR_PHY_RADAR_0_PRSSI);
        }
    }
    if (pe->pe_inband != HAL_PHYERR_PARAM_NOVAL) {
        val &= ~AR_PHY_RADAR_0_INBAND;
        val |= SM(pe->pe_inband, AR_PHY_RADAR_0_INBAND);
    }

    /*Enable FFT data*/
    val |= AR_PHY_RADAR_0_FFT_ENA;

    OS_REG_WRITE(ah, AR_PHY_RADAR_0, val | AR_PHY_RADAR_0_ENA);

    val = OS_REG_READ(ah, AR_PHY_RADAR_1);
    val |= (AR_PHY_RADAR_1_MAX_RRSSI | AR_PHY_RADAR_1_BLOCK_CHECK);

    if (pe->pe_maxlen != HAL_PHYERR_PARAM_NOVAL) {
        val &= ~AR_PHY_RADAR_1_MAXLEN;
        val |= SM(pe->pe_maxlen, AR_PHY_RADAR_1_MAXLEN);
    }

    OS_REG_WRITE(ah, AR_PHY_RADAR_1, val);

    if (ath_hal_getcapability(ah, HAL_CAP_EXT_CHAN_DFS, 0, 0) == HAL_OK) {
        if (IS_CHAN_HT40(ichan)) {
            /*Enable extension channel radar detection*/
            val = OS_REG_READ(ah, AR_PHY_RADAR_EXT);
            OS_REG_WRITE(ah, AR_PHY_RADAR_EXT, val | AR_PHY_RADAR_EXT_ENA);
        } else {
            /*HT20 mode, disable extension channel radar detect*/
            val = OS_REG_READ(ah, AR_PHY_RADAR_EXT);
            OS_REG_WRITE(ah, AR_PHY_RADAR_EXT, val & ~AR_PHY_RADAR_EXT_ENA);
        }
    }

    if (pe->pe_relstep != HAL_PHYERR_PARAM_NOVAL) {
        val = OS_REG_READ(ah, AR_PHY_RADAR_1);
        val &= ~AR_PHY_RADAR_1_RELSTEP_THRESH;
        val |= SM(pe->pe_relstep, AR_PHY_RADAR_1_RELSTEP_THRESH);
        OS_REG_WRITE(ah, AR_PHY_RADAR_1, val);
    }
    if (pe->pe_relpwr != HAL_PHYERR_PARAM_NOVAL) {
        val = OS_REG_READ(ah, AR_PHY_RADAR_1);
        val &= ~AR_PHY_RADAR_1_RELPWR_THRESH;
        val |= SM(pe->pe_relpwr, AR_PHY_RADAR_1_RELPWR_THRESH);
        OS_REG_WRITE(ah, AR_PHY_RADAR_1, val);
    }
}
Example #13
/* Instance Variables */

static vardecl var_assign[] =
{ IV(NAME_var, "var", IV_BOTH,
     NAME_storage, "Variable to be bound"),
  IV(NAME_value, "any|function", IV_BOTH,
     NAME_storage, "Value to give to the assignment"),
  IV(NAME_scope, "{local,outer,global}", IV_BOTH,
     NAME_scope, "Scope of assignment")
};

/* Send Methods */

static senddecl send_assign[] =
{ SM(NAME_Execute, 0, NULL, ExecuteAssignment,
     DEFAULT, "Bind the variable"),
  SM(NAME_initialise, 3, T_initialise, initialiseAssignment,
     DEFAULT, "Create assignment from var and value")
};

/* Get Methods */

#define get_assign NULL
/*
static getdecl get_assign[] =
{
};
*/

/* Resources */
Example #14
/**
 * ath10k_hw_qca6174_enable_pll_clock() - enable the qca6174 hw pll clock
 * @ar: the ath10k blob
 *
 * This function is very hardware specific; the clock initialization
 * steps are very sensitive and mistakes could lead to unknown crashes,
 * so they should be done in sequence.
 *
 * *** Be aware if you planned to refactor them. ***
 *
 * Return: 0 if successfully enable the pll, otherwise EINVAL
 */
static int ath10k_hw_qca6174_enable_pll_clock(struct ath10k *ar)
{
	int ret, wait_limit;
	u32 clk_div_addr, pll_init_addr, speed_addr;
	u32 addr, reg_val, mem_val;
	struct ath10k_hw_params *hw;
	const struct ath10k_hw_clk_params *hw_clk;

	hw = &ar->hw_params;

	if (ar->regs->core_clk_div_address == 0 ||
	    ar->regs->cpu_pll_init_address == 0 ||
	    ar->regs->cpu_speed_address == 0)
		return -EINVAL;

	clk_div_addr = ar->regs->core_clk_div_address;
	pll_init_addr = ar->regs->cpu_pll_init_address;
	speed_addr = ar->regs->cpu_speed_address;

	/* Read efuse register to find out the right hw clock configuration */
	addr = (RTC_SOC_BASE_ADDRESS | EFUSE_OFFSET);
	ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
	if (ret)
		return -EINVAL;

	/* Bail out if the hw refclk index is out of bounds */
	if (MS(reg_val, EFUSE_XTAL_SEL) > ATH10K_HW_REFCLK_COUNT)
		return -EINVAL;

	hw_clk = &hw->hw_clk[MS(reg_val, EFUSE_XTAL_SEL)];

	/* Set the rnfrac and outdiv params to bb_pll register */
	addr = (RTC_SOC_BASE_ADDRESS | BB_PLL_CONFIG_OFFSET);
	ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
	if (ret)
		return -EINVAL;

	reg_val &= ~(BB_PLL_CONFIG_FRAC_MASK | BB_PLL_CONFIG_OUTDIV_MASK);
	reg_val |= (SM(hw_clk->rnfrac, BB_PLL_CONFIG_FRAC) |
		    SM(hw_clk->outdiv, BB_PLL_CONFIG_OUTDIV));
	ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
	if (ret)
		return -EINVAL;

	/* Set the correct settle time value to pll_settle register */
	addr = (RTC_WMAC_BASE_ADDRESS | WLAN_PLL_SETTLE_OFFSET);
	ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
	if (ret)
		return -EINVAL;

	reg_val &= ~WLAN_PLL_SETTLE_TIME_MASK;
	reg_val |= SM(hw_clk->settle_time, WLAN_PLL_SETTLE_TIME);
	ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
	if (ret)
		return -EINVAL;

	/* Set the clock_ctrl div to core_clk_ctrl register */
	addr = (RTC_SOC_BASE_ADDRESS | SOC_CORE_CLK_CTRL_OFFSET);
	ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
	if (ret)
		return -EINVAL;

	reg_val &= ~SOC_CORE_CLK_CTRL_DIV_MASK;
	reg_val |= SM(1, SOC_CORE_CLK_CTRL_DIV);
	ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
	if (ret)
		return -EINVAL;

	/* Set the clock_div register */
	mem_val = 1;
	ret = ath10k_bmi_write_memory(ar, clk_div_addr, &mem_val,
				      sizeof(mem_val));
	if (ret)
		return -EINVAL;

	/* Configure the pll_control register */
	addr = (RTC_WMAC_BASE_ADDRESS | WLAN_PLL_CONTROL_OFFSET);
	ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
	if (ret)
		return -EINVAL;

	reg_val |= (SM(hw_clk->refdiv, WLAN_PLL_CONTROL_REFDIV) |
		    SM(hw_clk->div, WLAN_PLL_CONTROL_DIV) |
		    SM(1, WLAN_PLL_CONTROL_NOPWD));
	ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
	if (ret)
		return -EINVAL;

	/* busy wait (max 1s) until the rtc_sync status register indicates ready */
	wait_limit = 100000;
	addr = (RTC_WMAC_BASE_ADDRESS | RTC_SYNC_STATUS_OFFSET);
	do {
		ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
		if (ret)
			return -EINVAL;

		if (!MS(reg_val, RTC_SYNC_STATUS_PLL_CHANGING))
			break;

		wait_limit--;
		udelay(10);

	} while (wait_limit > 0);

	if (MS(reg_val, RTC_SYNC_STATUS_PLL_CHANGING))
		return -EINVAL;

	/* Unset the pll_bypass in pll_control register */
	addr = (RTC_WMAC_BASE_ADDRESS | WLAN_PLL_CONTROL_OFFSET);
	ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
	if (ret)
		return -EINVAL;

	reg_val &= ~WLAN_PLL_CONTROL_BYPASS_MASK;
	reg_val |= SM(0, WLAN_PLL_CONTROL_BYPASS);
	ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
	if (ret)
		return -EINVAL;

	/* busy wait (max 1s) until the rtc_sync status register indicates ready */
	wait_limit = 100000;
	addr = (RTC_WMAC_BASE_ADDRESS | RTC_SYNC_STATUS_OFFSET);
	do {
		ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
		if (ret)
			return -EINVAL;

		if (!MS(reg_val, RTC_SYNC_STATUS_PLL_CHANGING))
			break;

		wait_limit--;
		udelay(10);

	} while (wait_limit > 0);

	if (MS(reg_val, RTC_SYNC_STATUS_PLL_CHANGING))
		return -EINVAL;

	/* Enable the hardware cpu clock register */
	addr = (RTC_SOC_BASE_ADDRESS | SOC_CPU_CLOCK_OFFSET);
	ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
	if (ret)
		return -EINVAL;

	reg_val &= ~SOC_CPU_CLOCK_STANDARD_MASK;
	reg_val |= SM(1, SOC_CPU_CLOCK_STANDARD);
	ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
	if (ret)
		return -EINVAL;

	/* unset the nopwd from pll_control register */
	addr = (RTC_WMAC_BASE_ADDRESS | WLAN_PLL_CONTROL_OFFSET);
	ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
	if (ret)
		return -EINVAL;

	reg_val &= ~WLAN_PLL_CONTROL_NOPWD_MASK;
	ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
	if (ret)
		return -EINVAL;

	/* enable the pll_init register */
	mem_val = 1;
	ret = ath10k_bmi_write_memory(ar, pll_init_addr, &mem_val,
				      sizeof(mem_val));
	if (ret)
		return -EINVAL;

	/* set the target clock frequency to speed register */
	ret = ath10k_bmi_write_memory(ar, speed_addr, &hw->target_cpu_freq,
				      sizeof(hw->target_cpu_freq));
	if (ret)
		return -EINVAL;

	return 0;
}
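
Both rtc_sync waits above share one bounded busy-poll shape: read the status, stop once the PLL_CHANGING bit clears, otherwise delay 10 us and retry up to 100000 times (roughly one second), and fail if the bit never clears. A userspace-style sketch of that loop with a simulated status register standing in for the BMI read and udelay():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BUSY_BIT	0x1u

/* Simulated status register that reports busy for the first few reads;
 * in the driver this is the BMI read of RTC_SYNC_STATUS.
 */
static int reads_left = 3;

static uint32_t read_status(void)
{
	return reads_left-- > 0 ? BUSY_BIT : 0;
}

/* Bounded busy-poll with the same shape as the two wait_limit/udelay(10)
 * loops above: re-read while the busy bit is set, give up after max_tries
 * attempts, and report whether the bit finally cleared.
 */
static int poll_until_ready(int max_tries)
{
	uint32_t val = read_status();

	while ((val & BUSY_BIT) && max_tries-- > 0) {
		/* udelay(10) would go here */
		val = read_status();
	}
	return (val & BUSY_BIT) ? -1 : 0;
}

int main(void)
{
	printf("poll result: %d\n", poll_until_ready(100000));
	return 0;
}
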
Example #15
void
ar5416ConfigureSpectralScan(struct ath_hal *ah, HAL_SPECTRAL_PARAM *ss)
{
	uint32_t val;

	ar5416PrepSpectralScan(ah);

	val = OS_REG_READ(ah, AR_PHY_SPECTRAL_SCAN);

	if (ss->ss_fft_period != HAL_SPECTRAL_PARAM_NOVAL) {
		val &= ~AR_PHY_SPECTRAL_SCAN_FFT_PERIOD;
		val |= SM(ss->ss_fft_period, AR_PHY_SPECTRAL_SCAN_FFT_PERIOD);
	}

	if (ss->ss_period != HAL_SPECTRAL_PARAM_NOVAL) {
		val &= ~AR_PHY_SPECTRAL_SCAN_PERIOD;
		val |= SM(ss->ss_period, AR_PHY_SPECTRAL_SCAN_PERIOD);
	}

	/* This section is different for Kiwi and Merlin */
	if (AR_SREV_MERLIN(ah) ) {
		if (ss->ss_count != HAL_SPECTRAL_PARAM_NOVAL) {
			val &= ~AR_PHY_SPECTRAL_SCAN_COUNT;
			val |= SM(ss->ss_count, AR_PHY_SPECTRAL_SCAN_COUNT);
		}

		if (ss->ss_short_report == AH_TRUE) {
			val |= AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT;
		} else if (ss->ss_short_report != HAL_SPECTRAL_PARAM_NOVAL) {
			val &= ~AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT;
		}
	} else {
		if (ss->ss_count != HAL_SPECTRAL_PARAM_NOVAL) {
			/*
			 * In Merlin, for continuous scan, scan_count = 128.
			 * In case of Kiwi, this value should be 0.
			 */
			if (ss->ss_count == 128)
				ss->ss_count = 0;
			val &= ~AR_PHY_SPECTRAL_SCAN_COUNT_KIWI;
			val |= SM(ss->ss_count, AR_PHY_SPECTRAL_SCAN_COUNT_KIWI);
		}

		if (ss->ss_short_report == AH_TRUE) {
			val |= AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT_KIWI;
		} else if (ss->ss_short_report != HAL_SPECTRAL_PARAM_NOVAL) {
			val &= ~AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT_KIWI;
		}

		/* Select the mask to be the same as before */
		val |= AR_PHY_SPECTRAL_SCAN_PHYERR_MASK_SELECT_KIWI;
	}
	/* Enable spectral scan */
	OS_REG_WRITE(ah, AR_PHY_SPECTRAL_SCAN, val | AR_PHY_SPECTRAL_SCAN_ENA);

	ar5416GetSpectralParams(ah, ss);
}
void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
		      bool is_full_sleep)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
	u32 regval;

	ath_dbg(common, MCI, "MCI Reset (full_sleep = %d, is_2g = %d)\n",
		is_full_sleep, is_2g);

	if (!mci->gpm_addr && !mci->sched_addr) {
		ath_dbg(common, MCI,
			"MCI GPM and schedule buffers are not allocated\n");
		return;
	}

	if (REG_READ(ah, AR_BTCOEX_CTRL) == 0xdeadbeef) {
		ath_dbg(common, MCI, "BTCOEX control register is dead\n");
		return;
	}

	
	REG_WRITE(ah, AR_MCI_GPM_0, mci->gpm_addr);
	REG_WRITE(ah, AR_MCI_GPM_1, mci->gpm_len);
	REG_WRITE(ah, AR_MCI_SCHD_TABLE_0, mci->sched_addr);


	regval = SM(1, AR_BTCOEX_CTRL_AR9462_MODE) |
		 SM(1, AR_BTCOEX_CTRL_WBTIMER_EN) |
		 SM(1, AR_BTCOEX_CTRL_PA_SHARED) |
		 SM(1, AR_BTCOEX_CTRL_LNA_SHARED) |
		 SM(2, AR_BTCOEX_CTRL_NUM_ANTENNAS) |
		 SM(3, AR_BTCOEX_CTRL_RX_CHAIN_MASK) |
		 SM(0, AR_BTCOEX_CTRL_1_CHAIN_ACK) |
		 SM(0, AR_BTCOEX_CTRL_1_CHAIN_BCN) |
		 SM(0, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);

	REG_WRITE(ah, AR_BTCOEX_CTRL, regval);

	if (is_2g && !(mci->config & ATH_MCI_CONFIG_DISABLE_OSLA))
		ar9003_mci_osla_setup(ah, true);
	else
		ar9003_mci_osla_setup(ah, false);

	REG_SET_BIT(ah, AR_PHY_GLB_CONTROL,
		    AR_BTCOEX_CTRL_SPDT_ENABLE);
	REG_RMW_FIELD(ah, AR_BTCOEX_CTRL3,
		      AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT, 20);

	REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_RX_DEWEIGHT, 1);
	REG_RMW_FIELD(ah, AR_PCU_MISC, AR_PCU_BT_ANT_PREVENT_RX, 0);

	regval = MS(mci->config, ATH_MCI_CONFIG_CLK_DIV);
	REG_RMW_FIELD(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_CLK_DIV, regval);
	REG_SET_BIT(ah, AR_BTCOEX_CTRL, AR_BTCOEX_CTRL_MCI_MODE_EN);

	
	regval = REG_READ(ah, AR_MCI_COMMAND2);
	regval |= SM(1, AR_MCI_COMMAND2_RESET_TX);
	REG_WRITE(ah, AR_MCI_COMMAND2, regval);

	udelay(1);

	regval &= ~SM(1, AR_MCI_COMMAND2_RESET_TX);
	REG_WRITE(ah, AR_MCI_COMMAND2, regval);

	if (is_full_sleep) {
		ar9003_mci_mute_bt(ah);
		udelay(100);
	}

	regval |= SM(1, AR_MCI_COMMAND2_RESET_RX);
	REG_WRITE(ah, AR_MCI_COMMAND2, regval);
	udelay(1);
	regval &= ~SM(1, AR_MCI_COMMAND2_RESET_RX);
	REG_WRITE(ah, AR_MCI_COMMAND2, regval);

	ar9003_mci_state(ah, MCI_STATE_INIT_GPM_OFFSET, NULL);

	REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE,
		  (SM(0xe801, AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR) |
		   SM(0x0000, AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM)));

	REG_CLR_BIT(ah, AR_MCI_TX_CTRL,
		    AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);

	ar9003_mci_observation_set_up(ah);

	mci->ready = true;
	ar9003_mci_prep_interface(ah);

	if (en_int)
		ar9003_mci_enable_interrupt(ah);
}
Example #17
/*
 * Set the retry, aifs, cwmin/max, readyTime regs for specified queue
 */
HAL_BOOL
ar5211ResetTxQueue(struct ath_hal *ah, u_int q)
{
	struct ath_hal_5211 *ahp = AH5211(ah);
	const struct ieee80211_channel *chan = AH_PRIVATE(ah)->ah_curchan;
	HAL_TX_QUEUE_INFO *qi;
	uint32_t cwMin, chanCwMin, value;

	if (q >= HAL_NUM_TX_QUEUES) {
		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: invalid queue num %u\n",
		    __func__, q);
		return AH_FALSE;
	}
	qi = &ahp->ah_txq[q];
	if (qi->tqi_type == HAL_TX_QUEUE_INACTIVE) {
		HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: inactive queue %u\n",
		    __func__, q);
		return AH_TRUE;		/* XXX??? */
	}

	if (qi->tqi_cwmin == HAL_TXQ_USEDEFAULT) {
		/*
		 * Select cwmin according to channel type.
		 * NB: chan can be NULL during attach
		 */
		if (chan && IEEE80211_IS_CHAN_B(chan))
			chanCwMin = INIT_CWMIN_11B;
		else
			chanCwMin = INIT_CWMIN;
		/* make sure that the CWmin is of the form (2^n - 1) */
		for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1)
			;
	} else
		cwMin = qi->tqi_cwmin;

	/* set cwMin/Max and AIFS values */
	OS_REG_WRITE(ah, AR_DLCL_IFS(q),
		  SM(cwMin, AR_D_LCL_IFS_CWMIN)
		| SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX)
		| SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));

	/* Set retry limit values */
	OS_REG_WRITE(ah, AR_DRETRY_LIMIT(q), 
		   SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH)
		 | SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG)
		 | SM(qi->tqi_lgretry, AR_D_RETRY_LIMIT_FR_LG)
		 | SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH)
	);

	/* enable early termination on the QCU */
	OS_REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ);

	if (AH_PRIVATE(ah)->ah_macVersion < AR_SREV_VERSION_OAHU) {
		/* Configure DCU to use the global sequence count */
		OS_REG_WRITE(ah, AR_DMISC(q), AR5311_D_MISC_SEQ_NUM_CONTROL);
	}
	/* multiqueue support */
	if (qi->tqi_cbrPeriod) {
		OS_REG_WRITE(ah, AR_QCBRCFG(q), 
			  SM(qi->tqi_cbrPeriod,AR_Q_CBRCFG_CBR_INTERVAL)
			| SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_CBR_OVF_THRESH));
		OS_REG_WRITE(ah, AR_QMISC(q),
			OS_REG_READ(ah, AR_QMISC(q)) |
			AR_Q_MISC_FSP_CBR |
			(qi->tqi_cbrOverflowLimit ?
				AR_Q_MISC_CBR_EXP_CNTR_LIMIT : 0));
	}
	if (qi->tqi_readyTime) {
		OS_REG_WRITE(ah, AR_QRDYTIMECFG(q),
			SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_INT) | 
			AR_Q_RDYTIMECFG_EN);
	}
	if (qi->tqi_burstTime) {
		OS_REG_WRITE(ah, AR_DCHNTIME(q),
			SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) |
			AR_D_CHNTIME_EN);
		if (qi->tqi_qflags & HAL_TXQ_RDYTIME_EXP_POLICY_ENABLE) {
			OS_REG_WRITE(ah, AR_QMISC(q),
			     OS_REG_READ(ah, AR_QMISC(q)) |
			     AR_Q_MISC_RDYTIME_EXP_POLICY);
		}
	}

	if (qi->tqi_qflags & HAL_TXQ_BACKOFF_DISABLE) {
		OS_REG_WRITE(ah, AR_DMISC(q),
			OS_REG_READ(ah, AR_DMISC(q)) |
			AR_D_MISC_POST_FR_BKOFF_DIS);
	}
	if (qi->tqi_qflags & HAL_TXQ_FRAG_BURST_BACKOFF_ENABLE) {
		OS_REG_WRITE(ah, AR_DMISC(q),
			OS_REG_READ(ah, AR_DMISC(q)) |
			AR_D_MISC_FRAG_BKOFF_EN);
	}
	switch (qi->tqi_type) {
	case HAL_TX_QUEUE_BEACON:
		/* Configure QCU for beacons */
		OS_REG_WRITE(ah, AR_QMISC(q),
			OS_REG_READ(ah, AR_QMISC(q))
			| AR_Q_MISC_FSP_DBA_GATED
			| AR_Q_MISC_BEACON_USE
			| AR_Q_MISC_CBR_INCR_DIS1);
		/* Configure DCU for beacons */
		value = (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL << AR_D_MISC_ARB_LOCKOUT_CNTRL_S)
			| AR_D_MISC_BEACON_USE | AR_D_MISC_POST_FR_BKOFF_DIS;
		if (AH_PRIVATE(ah)->ah_macVersion < AR_SREV_VERSION_OAHU)
			value |= AR5311_D_MISC_SEQ_NUM_CONTROL;
		OS_REG_WRITE(ah, AR_DMISC(q), value);
		break;
	case HAL_TX_QUEUE_CAB:
		/* Configure QCU for CAB (Crap After Beacon) frames */
		OS_REG_WRITE(ah, AR_QMISC(q),
			OS_REG_READ(ah, AR_QMISC(q))
			| AR_Q_MISC_FSP_DBA_GATED | AR_Q_MISC_CBR_INCR_DIS1
			| AR_Q_MISC_CBR_INCR_DIS0 | AR_Q_MISC_RDYTIME_EXP_POLICY);

		value = (ahp->ah_beaconInterval
			- (ah->ah_config.ah_sw_beacon_response_time
			        - ah->ah_config.ah_dma_beacon_response_time)
			- ah->ah_config.ah_additional_swba_backoff) * 1024;
		OS_REG_WRITE(ah, AR_QRDYTIMECFG(q), value | AR_Q_RDYTIMECFG_EN);

		/* Configure DCU for CAB */
		value = (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL << AR_D_MISC_ARB_LOCKOUT_CNTRL_S);
		if (AH_PRIVATE(ah)->ah_macVersion < AR_SREV_VERSION_OAHU)
			value |= AR5311_D_MISC_SEQ_NUM_CONTROL;
		OS_REG_WRITE(ah, AR_DMISC(q), value);
		break;
	default:
		/* NB: silence compiler */
		break;
	}

	/*
	 * Always update the secondary interrupt mask registers - this
	 * could be a new queue getting enabled in a running system or
	 * hw getting re-initialized during a reset!
	 *
	 * Since we don't differentiate between tx interrupts corresponding
	 * to individual queues, the secondary tx mask regs are always unmasked;
	 * tx interrupts are enabled/disabled for all queues collectively
	 * using the primary mask reg.
	 */
	if (qi->tqi_qflags & HAL_TXQ_TXOKINT_ENABLE)
		ahp->ah_txOkInterruptMask |= 1 << q;
	else
		ahp->ah_txOkInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & HAL_TXQ_TXERRINT_ENABLE)
		ahp->ah_txErrInterruptMask |= 1 << q;
	else
		ahp->ah_txErrInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & HAL_TXQ_TXDESCINT_ENABLE)
		ahp->ah_txDescInterruptMask |= 1 << q;
	else
		ahp->ah_txDescInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & HAL_TXQ_TXEOLINT_ENABLE)
		ahp->ah_txEolInterruptMask |= 1 << q;
	else
		ahp->ah_txEolInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & HAL_TXQ_TXURNINT_ENABLE)
		ahp->ah_txUrnInterruptMask |= 1 << q;
	else
		ahp->ah_txUrnInterruptMask &= ~(1 << q);
	setTxQInterrupts(ah, qi);

	return AH_TRUE;
}
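
The CWmin handling above rounds the per-channel default up to the nearest value of the form 2^n - 1 and then packs CWmin/CWmax/AIFS into one register word with SM(). A minimal standalone sketch of that rounding and packing, using hypothetical field masks and shifts (the real AR_D_LCL_IFS_* definitions live in the HAL headers and will differ):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the HAL's field definitions and SM() macro. */
#define LCL_IFS_CWMIN_MASK	0x000003ff
#define LCL_IFS_CWMIN_S		0
#define LCL_IFS_CWMAX_MASK	0x000ffc00
#define LCL_IFS_CWMAX_S		10
#define LCL_IFS_AIFS_MASK	0x0ff00000
#define LCL_IFS_AIFS_S		20
#define SM(v, f)		(((uint32_t)(v) << f##_S) & f##_MASK)

/* Round cwMin up to the next value of the form 2^n - 1, as the HAL does. */
static uint32_t
round_cwmin(uint32_t chanCwMin)
{
	uint32_t cwMin;

	for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1)
		;
	return cwMin;
}

int
main(void)
{
	uint32_t cwMin = round_cwmin(10);	/* 10 -> 15 (2^4 - 1) */
	uint32_t reg = SM(cwMin, LCL_IFS_CWMIN)
		     | SM(1023, LCL_IFS_CWMAX)	/* example CWmax */
		     | SM(2, LCL_IFS_AIFS);	/* example AIFS  */

	printf("cwMin=%u lcl_ifs=0x%08x\n", (unsigned)cwMin, (unsigned)reg);
	return 0;
}
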
bool ar9003_mci_send_message(struct ath_hw *ah, u8 header, u32 flag,
			     u32 *payload, u8 len, bool wait_done,
			     bool check_bt)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
	bool msg_sent = false;
	u32 regval;
	u32 saved_mci_int_en;
	int i;

	saved_mci_int_en = REG_READ(ah, AR_MCI_INTERRUPT_EN);
	regval = REG_READ(ah, AR_BTCOEX_CTRL);

	if ((regval == 0xdeadbeef) || !(regval & AR_BTCOEX_CTRL_MCI_MODE_EN)) {
		ath_dbg(common, MCI,
			"MCI Not sending 0x%x. MCI is not enabled. full_sleep = %d\n",
			header, (ah->power_mode == ATH9K_PM_FULL_SLEEP) ? 1 : 0);
		ar9003_mci_queue_unsent_gpm(ah, header, payload, true);
		return false;
	} else if (check_bt && (mci->bt_state == MCI_BT_SLEEP)) {
		ath_dbg(common, MCI,
			"MCI Don't send message 0x%x. BT is in sleep state\n",
			header);
		ar9003_mci_queue_unsent_gpm(ah, header, payload, true);
		return false;
	}

	if (wait_done)
		REG_WRITE(ah, AR_MCI_INTERRUPT_EN, 0);

	REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
		  (AR_MCI_INTERRUPT_SW_MSG_DONE |
		   AR_MCI_INTERRUPT_MSG_FAIL_MASK));

	if (payload) {
		for (i = 0; (i * 4) < len; i++)
			REG_WRITE(ah, (AR_MCI_TX_PAYLOAD0 + i * 4),
				  *(payload + i));
	}

	REG_WRITE(ah, AR_MCI_COMMAND0,
		  (SM((flag & MCI_FLAG_DISABLE_TIMESTAMP),
		      AR_MCI_COMMAND0_DISABLE_TIMESTAMP) |
		   SM(len, AR_MCI_COMMAND0_LEN) |
		   SM(header, AR_MCI_COMMAND0_HEADER)));

	if (wait_done &&
	    !(ar9003_mci_wait_for_interrupt(ah, AR_MCI_INTERRUPT_RAW,
					    AR_MCI_INTERRUPT_SW_MSG_DONE, 500)))
		ar9003_mci_queue_unsent_gpm(ah, header, payload, true);
	else {
		ar9003_mci_queue_unsent_gpm(ah, header, payload, false);
		msg_sent = true;
	}

	if (wait_done)
		REG_WRITE(ah, AR_MCI_INTERRUPT_EN, saved_mci_int_en);

	return msg_sent;
}
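
ar9003_mci_send_message() above copies the payload out one 32-bit word at a time and folds the header, length and timestamp-disable flag into a single command register via SM(). A rough, self-contained sketch of that packing; the field masks and shifts below are invented for illustration, not the real AR_MCI_COMMAND0_* layout:

#include <stdint.h>
#include <stdio.h>

/* Invented field layout, for illustration only. */
#define CMD0_DISABLE_TS_MASK	0x00000001
#define CMD0_DISABLE_TS_S	0
#define CMD0_LEN_MASK		0x00001f00
#define CMD0_LEN_S		8
#define CMD0_HEADER_MASK	0x00ff0000
#define CMD0_HEADER_S		16
#define SM(v, f)		(((uint32_t)(v) << f##_S) & f##_MASK)

int
main(void)
{
	uint32_t payload[4] = { 0x11111111, 0x22222222, 0x33333333, 0x44444444 };
	uint8_t header = 0x20, len = 16;
	int i;

	/* Payload goes out one 32-bit register word at a time. */
	for (i = 0; (i * 4) < len; i++)
		printf("TX_PAYLOAD%d <- 0x%08x\n", i, (unsigned)payload[i]);

	/* Flag, length and header are combined into one command word. */
	printf("COMMAND0    <- 0x%08x\n",
	       (unsigned)(SM(1, CMD0_DISABLE_TS) |
			  SM(len, CMD0_LEN) |
			  SM(header, CMD0_HEADER)));
	return 0;
}
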
Exemple #19
0
	};

/* Instance Variables */

static vardecl var_grbox[] =
{ IV(NAME_graphical, "graphical", IV_GET,
     NAME_content, "Represented graphical object"),
  SV(NAME_alignment, "{top,center,bottom,left,right}", IV_GET|IV_STORE,
     alignmentGrBox,
     NAME_layout, "Alignment in paragraph")
};

/* Send Methods */

static senddecl send_grbox[] =
{ SM(NAME_initialise, 3, T_initialise, initialiseGrBox,
     DEFAULT, "Create grbox from graphical, alignment and rubber"),
  SM(NAME_compute, 0, NULL, computeGrBox,
     NAME_update, "Compute <-graphical and update dimensions")
};

/* Get Methods */

#define get_grbox NULL
/*
static getdecl get_grbox[] =
{
};
*/

/* Resources */
Exemple #20
0
static vardecl var_hyper[] =
{ IV(NAME_from, "object", IV_GET,
     NAME_client, "From side of hyper link"),
  IV(NAME_to, "object", IV_GET,
     NAME_client, "To side of hyper link"),
  IV(NAME_forwardName, "name", IV_BOTH,
     NAME_name, "Name as visible from <-from"),
  IV(NAME_backwardName, "name", IV_BOTH,
     NAME_name, "Name as visible from <-to")
};

/* Send Methods */

static senddecl send_hyper[] =
{ SM(NAME_initialise, 4, T_initialise, initialiseHyper,
     DEFAULT, "Create named link between objects"),
  SM(NAME_unlink, 0, NULL, unlinkHyper,
     DEFAULT, "Unlink hyper from objects"),
  SM(NAME_SaveRelation, 1, "file", SaveRelationHyper,
     NAME_file, "Consider saving relation (->save_in_file)"),
  SM(NAME_unlinkFrom, 0, NULL, unlinkFromHyper,
     NAME_internal, "<-from side is being unlinked"),
  SM(NAME_unlinkTo, 0, NULL, unlinkToHyper,
     NAME_internal, "<-to side is being unlinked")
};

/* Get Methods */

#define get_hyper NULL
/*
static getdecl get_hyper[] =
{
};
*/
Exemple #21
0
void ar5416Set11nRateScenario_20(struct ath_hal *ah, struct ath_tx_desc *ds,
				 a_uint32_t durUpdateEn, a_uint32_t rtsctsRate,
				 a_uint32_t rtsctsDuration,
				 HAL_11N_RATE_SERIES series[], a_uint32_t nseries,
				 a_uint32_t flags)
{
        struct ar5416_desc *ads = AR5416DESC(ds);
        a_uint32_t ds_ctl0;

        HALASSERT(nseries == 4);
        (void)nseries;

        // gnychis
//        series[0].Rate = 0x87;
        //a_uint32_t offset = 70;
        //char *data_ptr = ds->ds_data;
        //data_ptr[offset+0] = 0xff;
        //data_ptr[offset+1] = 0xff;
        //data_ptr[offset+2] = 0xff;
        //data_ptr[offset+3] = 0xff;
        //data_ptr[offset+4] = (char) series[0].Rate;
        //data_ptr[offset+5] = (char) series[0].Tries;
        //data_ptr[offset+6] = (char) series[0].RateFlags;
        //data_ptr[offset+7] = (char) series[0].RateIndex;

        //series[0] = 0x1A;
        //ar5416_writeDebug(ah, series[0].PktDuration>>8);

        /*
         * Rate control settings override
         */
	ds_ctl0 = ads->ds_ctl0;

        if (flags & (HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA)) {
		if (flags & HAL_TXDESC_RTSENA) {
			ds_ctl0 &= ~AR_CTSEnable;
			ds_ctl0 |= AR_RTSEnable;
		} else {
			ds_ctl0 &= ~AR_RTSEnable;
			ds_ctl0 |= AR_CTSEnable;
		}
        } else {
		ds_ctl0 = (ds_ctl0 & ~(AR_RTSEnable | AR_CTSEnable));
        }

	ads->ds_ctl0 = ds_ctl0;

        ads->ds_ctl2 = set11nTries(series, 0)
		|  set11nTries(series, 1)
		|  set11nTries(series, 2)
		|  set11nTries(series, 3)
		|  (durUpdateEn ? AR_DurUpdateEn : 0);

        ads->ds_ctl3 = set11nRate(series, 0)
		|  set11nRate(series, 1)
		|  set11nRate(series, 2)
		|  set11nRate(series, 3);

        ads->ds_ctl4 = set11nPktDurRTSCTS(series, 0)
		|  set11nPktDurRTSCTS(series, 1);

        ads->ds_ctl5 = set11nPktDurRTSCTS(series, 2)
		|  set11nPktDurRTSCTS(series, 3);

        ads->ds_ctl7 = set11nRateFlags(series, 0)
		|  set11nRateFlags(series, 1)
		|  set11nRateFlags(series, 2)
		|  set11nRateFlags(series, 3)
		| SM(rtsctsRate, AR_RTSCTSRate);
}
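
The ds_ctl2/ds_ctl3 writes above OR the per-series try counts and rates into fixed bit positions of single descriptor words via set11nTries()/set11nRate(). A toy version of that packing, assuming 4-bit fields with series 0 in the low bits (the real descriptor layout differs):

#include <stdint.h>
#include <stdio.h>

struct rate_series {
	uint32_t Tries;
	uint32_t Rate;
};

/* Assumed layout: 4 bits per series, series 0 in the lowest nibble. */
static uint32_t
pack_tries(const struct rate_series *s, int idx)
{
	return (s[idx].Tries & 0xf) << (idx * 4);
}

static uint32_t
pack_rate(const struct rate_series *s, int idx)
{
	return (s[idx].Rate & 0xf) << (idx * 4);
}

int
main(void)
{
	struct rate_series series[4] = {
		{ 4, 7 }, { 4, 5 }, { 4, 3 }, { 1, 0 },
	};
	uint32_t ctl2 = pack_tries(series, 0) | pack_tries(series, 1)
		      | pack_tries(series, 2) | pack_tries(series, 3);
	uint32_t ctl3 = pack_rate(series, 0) | pack_rate(series, 1)
		      | pack_rate(series, 2) | pack_rate(series, 3);

	printf("tries word 0x%08x  rate word 0x%08x\n",
	       (unsigned)ctl2, (unsigned)ctl3);
	return 0;
}
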
Exemple #22
0
static vardecl var_popupGesture[] =
{ IV(NAME_popup, "popup|function*", IV_BOTH,
     NAME_popup, "Popup displayed"),
  IV(NAME_current, "popup*", IV_NONE,
     NAME_popup, "Currently visible popup"),
  IV(NAME_context, "any", IV_BOTH,
     NAME_context, "Context to be send with the ->execute"),
  IV(NAME_maxDragDistance, "int*", IV_BOTH,
     NAME_cancel, "Cancel after dragging this far")
};

/* Send Methods */

static senddecl send_popupGesture[] =
{ SM(NAME_drag, 1, "event", dragPopupGesture,
     DEFAULT, "Pass drag events to popup"),
  SM(NAME_initialise, 3, T_initialise, initialisePopupGesture,
     DEFAULT, "Create from popup, button and modifier"),
  SM(NAME_initiate, 1, "event", initiatePopupGesture,
     DEFAULT, "Show popup"),
  SM(NAME_terminate, 1, "event", terminatePopupGesture,
     DEFAULT, "Unshow popup and execute selected item"),
  SM(NAME_verify, 1, "event", verifyPopupGesture,
     DEFAULT, "Verify popup can be activated"),
  SM(NAME_cancel, 1, "event", cancelPopupGesture,
     NAME_cancel, "Cancel this gesture and try the next"),
  SM(NAME_event, 1, "event", eventPopupGesture,
     NAME_accelerator, "Handle accelerators")
};

/* Get Methods */
Exemple #23
0
int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct ath10k_hif_sg_item sg_items[2];
	struct htt_data_tx_desc_frag *frags;
	u8 vdev_id = skb_cb->vdev_id;
	u8 tid = skb_cb->htt.tid;
	int prefetch_len;
	int res;
	u8 flags0 = 0;
	u16 msdu_id, flags1 = 0;
	dma_addr_t paddr = 0;
	u32 frags_paddr = 0;
	struct htt_msdu_ext_desc *ext_desc = NULL;
	bool limit_mgmt_desc = false;
	bool is_probe_resp = false;

	if (unlikely(ieee80211_is_mgmt(hdr->frame_control)) &&
	    ar->hw_params.max_probe_resp_desc_thres) {
		limit_mgmt_desc = true;

		if (ieee80211_is_probe_resp(hdr->frame_control))
			is_probe_resp = true;
	}

	res = ath10k_htt_tx_inc_pending(htt, limit_mgmt_desc, is_probe_resp);
	if (res)
		goto err;

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	spin_unlock_bh(&htt->tx_lock);
	if (res < 0)
		goto err_tx_dec;
	msdu_id = res;

	prefetch_len = min(htt->prefetch_len, msdu->len);
	prefetch_len = roundup(prefetch_len, 4);

	skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC,
					   &paddr);
	if (!skb_cb->htt.txbuf) {
		res = -ENOMEM;
		goto err_free_msdu_id;
	}
	skb_cb->htt.txbuf_paddr = paddr;

	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	     ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	} else if (!skb_cb->htt.nohwcrypt &&
		   skb_cb->txmode == ATH10K_HW_TXRX_RAW &&
		   ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res) {
		res = -EIO;
		goto err_free_txbuf;
	}

	switch (skb_cb->txmode) {
	case ATH10K_HW_TXRX_RAW:
	case ATH10K_HW_TXRX_NATIVE_WIFI:
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
		/* pass through */
	case ATH10K_HW_TXRX_ETHERNET:
		if (ar->hw_params.continuous_frag_desc) {
			memset(&htt->frag_desc.vaddr[msdu_id], 0,
			       sizeof(struct htt_msdu_ext_desc));
			frags = (struct htt_data_tx_desc_frag *)
				&htt->frag_desc.vaddr[msdu_id].frags;
			ext_desc = &htt->frag_desc.vaddr[msdu_id];
			frags[0].tword_addr.paddr_lo =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].tword_addr.paddr_hi = 0;
			frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);

			frags_paddr =  htt->frag_desc.paddr +
				(sizeof(struct htt_msdu_ext_desc) * msdu_id);
		} else {
			frags = skb_cb->htt.txbuf->frags;
			frags[0].dword_addr.paddr =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].dword_addr.len = __cpu_to_le32(msdu->len);
			frags[1].dword_addr.paddr = 0;
			frags[1].dword_addr.len = 0;

			frags_paddr = skb_cb->htt.txbuf_paddr;
		}
		flags0 |= SM(skb_cb->txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		break;
	case ATH10K_HW_TXRX_MGMT:
		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

		frags_paddr = skb_cb->paddr;
		break;
	}

	/* Normally all commands go through HTC which manages tx credits for
	 * each endpoint and notifies when tx is completed.
	 *
	 * HTT endpoint is creditless so there's no need to care about HTC
	 * flags. In that case it is trivial to fill the HTC header here.
	 *
	 * MSDU transmission is considered completed upon HTT event. This
	 * implies no relevant resources can be freed until after the event is
	 * received. That's why the HTC tx completion handler itself is ignored
	 * by setting transfer_context to NULL for all sg items.
	 *
	 * There is simply no point in pushing HTT TX_FRM through HTC tx path
	 * as it's a waste of resources. By bypassing HTC it is possible to
	 * avoid extra memory allocations, compress data structures and thus
	 * improve performance. */

	skb_cb->htt.txbuf->htc_hdr.eid = htt->eid;
	skb_cb->htt.txbuf->htc_hdr.len = __cpu_to_le16(
			sizeof(skb_cb->htt.txbuf->cmd_hdr) +
			sizeof(skb_cb->htt.txbuf->cmd_tx) +
			prefetch_len);
	skb_cb->htt.txbuf->htc_hdr.flags = 0;

	if (skb_cb->htt.nohwcrypt)
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	if (!skb_cb->is_protected)
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
	if (msdu->ip_summed == CHECKSUM_PARTIAL &&
	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
		if (ar->hw_params.continuous_frag_desc)
			ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
	}

	/* Prevent firmware from sending up tx inspection requests. There's
	 * nothing ath10k can do with frames requested for inspection so force
	 * it to simply rely on a regular tx completion with discard status.
	 */
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;

	skb_cb->htt.txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
	skb_cb->htt.txbuf->cmd_tx.flags0 = flags0;
	skb_cb->htt.txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
	skb_cb->htt.txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
	skb_cb->htt.txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
	skb_cb->htt.txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
	skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le16(HTT_INVALID_PEERID);
	skb_cb->htt.txbuf->cmd_tx.freq = __cpu_to_le16(skb_cb->htt.freq);

	trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n",
		   flags0, flags1, msdu->len, msdu_id, frags_paddr,
		   (u32)skb_cb->paddr, vdev_id, tid, skb_cb->htt.freq);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
			msdu->data, msdu->len);
	trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
	trace_ath10k_tx_payload(ar, msdu->data, msdu->len);

	sg_items[0].transfer_id = 0;
	sg_items[0].transfer_context = NULL;
	sg_items[0].vaddr = &skb_cb->htt.txbuf->htc_hdr;
	sg_items[0].paddr = skb_cb->htt.txbuf_paddr +
			    sizeof(skb_cb->htt.txbuf->frags);
	sg_items[0].len = sizeof(skb_cb->htt.txbuf->htc_hdr) +
			  sizeof(skb_cb->htt.txbuf->cmd_hdr) +
			  sizeof(skb_cb->htt.txbuf->cmd_tx);

	sg_items[1].transfer_id = 0;
	sg_items[1].transfer_context = NULL;
	sg_items[1].vaddr = msdu->data;
	sg_items[1].paddr = skb_cb->paddr;
	sg_items[1].len = prefetch_len;

	res = ath10k_hif_tx_sg(htt->ar,
			       htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
			       sg_items, ARRAY_SIZE(sg_items));
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txbuf:
	dma_pool_free(htt->tx_pool,
		      skb_cb->htt.txbuf,
		      skb_cb->htt.txbuf_paddr);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
	ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
err:
	return res;
}
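
ath10k_htt_tx() assembles flags0/flags1 by OR-ing SM()-shifted fields (packet type, vdev id, TID) together with standalone flag bits. A compact sketch of that descriptor-flag assembly; the masks below are invented and do not match the real HTT_DATA_TX_DESC_* values:

#include <stdint.h>
#include <stdio.h>

/* Invented field layout, for illustration only. */
#define F0_PKT_TYPE_MASK	0x00000007
#define F0_PKT_TYPE_S		0
#define F0_MAC_HDR_PRESENT	0x00000008
#define F1_EXT_TID_MASK		0x0000001f
#define F1_EXT_TID_S		0
#define F1_VDEV_ID_MASK		0x00003fc0
#define F1_VDEV_ID_S		6
#define SM(v, f)		(((uint32_t)(v) << f##_S) & f##_MASK)

int
main(void)
{
	uint8_t txmode = 2;	/* e.g. "native wifi" in the real driver */
	uint8_t vdev_id = 1, tid = 5;
	uint16_t flags1 = 0;
	uint8_t flags0 = 0;

	flags0 |= F0_MAC_HDR_PRESENT;
	flags0 |= SM(txmode, F0_PKT_TYPE);

	flags1 |= SM(vdev_id, F1_VDEV_ID);
	flags1 |= SM(tid, F1_EXT_TID);

	printf("flags0=0x%02x flags1=0x%04x\n", flags0, flags1);
	return 0;
}
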
Exemple #24
0
void
ar9287_init_from_rom(struct athn_softc *sc, struct ieee80211_channel *c,
    struct ieee80211_channel *extc)
{
	const struct ar9287_eeprom *eep = sc->eep;
	const struct ar9287_modal_eep_header *modal = &eep->modalHeader;
	uint32_t reg, offset;
	int i;

	AR_WRITE(sc, AR_PHY_SWITCH_COM, modal->antCtrlCommon);

	for (i = 0; i < AR9287_MAX_CHAINS; i++) {
		offset = i * 0x1000;

		AR_WRITE(sc, AR_PHY_SWITCH_CHAIN_0 + offset,
		    modal->antCtrlChain[i]);

		reg = AR_READ(sc, AR_PHY_TIMING_CTRL4_0 + offset);
		reg = RW(reg, AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF,
		    modal->iqCalICh[i]);
		reg = RW(reg, AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF,
		    modal->iqCalQCh[i]);
		AR_WRITE(sc, AR_PHY_TIMING_CTRL4_0 + offset, reg);

		reg = AR_READ(sc, AR_PHY_GAIN_2GHZ + offset);
		reg = RW(reg, AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN,
		    modal->bswMargin[i]);
		reg = RW(reg, AR_PHY_GAIN_2GHZ_XATTEN1_DB,
		    modal->bswAtten[i]);
		AR_WRITE(sc, AR_PHY_GAIN_2GHZ + offset, reg);

		reg = AR_READ(sc, AR_PHY_RXGAIN + offset);
		reg = RW(reg, AR9280_PHY_RXGAIN_TXRX_MARGIN,
		    modal->rxTxMarginCh[i]);
		reg = RW(reg, AR9280_PHY_RXGAIN_TXRX_ATTEN,
		    modal->txRxAttenCh[i]);
		AR_WRITE(sc, AR_PHY_RXGAIN + offset, reg);
	}

	reg = AR_READ(sc, AR_PHY_SETTLING);
#ifndef IEEE80211_NO_HT
	if (extc != NULL)
		reg = RW(reg, AR_PHY_SETTLING_SWITCH, modal->swSettleHt40);
	else
#endif
		reg = RW(reg, AR_PHY_SETTLING_SWITCH, modal->switchSettling);
	AR_WRITE(sc, AR_PHY_SETTLING, reg);

	reg = AR_READ(sc, AR_PHY_DESIRED_SZ);
	reg = RW(reg, AR_PHY_DESIRED_SZ_ADC, modal->adcDesiredSize);
	AR_WRITE(sc, AR_PHY_DESIRED_SZ, reg);

	reg  = SM(AR_PHY_RF_CTL4_TX_END_XPAA_OFF, modal->txEndToXpaOff);
	reg |= SM(AR_PHY_RF_CTL4_TX_END_XPAB_OFF, modal->txEndToXpaOff);
	reg |= SM(AR_PHY_RF_CTL4_FRAME_XPAA_ON, modal->txFrameToXpaOn);
	reg |= SM(AR_PHY_RF_CTL4_FRAME_XPAB_ON, modal->txFrameToXpaOn);
	AR_WRITE(sc, AR_PHY_RF_CTL4, reg);

	reg = AR_READ(sc, AR_PHY_RF_CTL3);
	reg = RW(reg, AR_PHY_TX_END_TO_A2_RX_ON, modal->txEndToRxOn);
	AR_WRITE(sc, AR_PHY_RF_CTL3, reg);

	reg = AR_READ(sc, AR_PHY_CCA(0));
	reg = RW(reg, AR9280_PHY_CCA_THRESH62, modal->thresh62);
	AR_WRITE(sc, AR_PHY_CCA(0), reg);

	reg = AR_READ(sc, AR_PHY_EXT_CCA0);
	reg = RW(reg, AR_PHY_EXT_CCA0_THRESH62, modal->thresh62);
	AR_WRITE(sc, AR_PHY_EXT_CCA0, reg);

	reg = AR_READ(sc, AR9287_AN_RF2G3_CH0);
	reg = RW(reg, AR9287_AN_RF2G3_DB1, modal->db1);
	reg = RW(reg, AR9287_AN_RF2G3_DB2, modal->db2);
	reg = RW(reg, AR9287_AN_RF2G3_OB_CCK, modal->ob_cck);
	reg = RW(reg, AR9287_AN_RF2G3_OB_PSK, modal->ob_psk);
	reg = RW(reg, AR9287_AN_RF2G3_OB_QAM, modal->ob_qam);
	reg = RW(reg, AR9287_AN_RF2G3_OB_PAL_OFF, modal->ob_pal_off);
	AR_WRITE(sc, AR9287_AN_RF2G3_CH0, reg);
	AR_WRITE_BARRIER(sc);
	DELAY(100);

	reg = AR_READ(sc, AR9287_AN_RF2G3_CH1);
	reg = RW(reg, AR9287_AN_RF2G3_DB1, modal->db1);
	reg = RW(reg, AR9287_AN_RF2G3_DB2, modal->db2);
	reg = RW(reg, AR9287_AN_RF2G3_OB_CCK, modal->ob_cck);
	reg = RW(reg, AR9287_AN_RF2G3_OB_PSK, modal->ob_psk);
	reg = RW(reg, AR9287_AN_RF2G3_OB_QAM, modal->ob_qam);
	reg = RW(reg, AR9287_AN_RF2G3_OB_PAL_OFF, modal->ob_pal_off);
	AR_WRITE(sc, AR9287_AN_RF2G3_CH1, reg);
	AR_WRITE_BARRIER(sc);
	DELAY(100);

	reg = AR_READ(sc, AR_PHY_RF_CTL2);
	reg = RW(reg, AR_PHY_TX_END_DATA_START, modal->txFrameToDataStart);
	reg = RW(reg, AR_PHY_TX_END_PA_ON, modal->txFrameToPaOn);
	AR_WRITE(sc, AR_PHY_RF_CTL2, reg);

	reg = AR_READ(sc, AR9287_AN_TOP2);
	reg = RW(reg, AR9287_AN_TOP2_XPABIAS_LVL, modal->xpaBiasLvl);
	AR_WRITE(sc, AR9287_AN_TOP2, reg);
	AR_WRITE_BARRIER(sc);
	DELAY(100);
}
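
Nearly every step in ar9287_init_from_rom() is the same read-modify-write idiom: read a register, swap one field with RW(), write it back. A self-contained sketch of that idiom with hypothetical MS/SM/RW macros; the argument order and the _MASK/_S naming here are assumptions, and the real athn/HAL trees define their own variants:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical field occupying bits 11:4 of some PHY register. */
#define FIELD_MASK	0x00000ff0
#define FIELD_S		4

#define MS(r, f)	(((r) & f##_MASK) >> f##_S)		/* extract field */
#define SM(v, f)	(((uint32_t)(v) << f##_S) & f##_MASK)	/* build field   */
#define RW(r, f, v)	(((r) & ~f##_MASK) | SM(v, f))		/* replace field */

int
main(void)
{
	uint32_t reg = 0xdeadbeef;	/* pretend this came from AR_READ() */

	printf("old field = 0x%02x\n", (unsigned)MS(reg, FIELD));

	/* Replace just that field, leaving the rest of the register intact. */
	reg = RW(reg, FIELD, 0x3c);

	printf("new reg   = 0x%08x (field now 0x%02x)\n",
	       (unsigned)reg, (unsigned)MS(reg, FIELD));
	return 0;
}
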
Exemple #25
0
static char *T_initialise[] =
        { "left=expression", "right=expression" };

/* Instance Variables */

static vardecl var_binaryCondition[] =
{ IV(NAME_left, "expression", IV_BOTH,
     NAME_operant, "Left-hand side of conditional expression"),
  IV(NAME_right, "expression", IV_BOTH,
     NAME_operant, "Right-hand side of conditional expression")
};

/* Send Methods */

static senddecl send_binaryCondition[] =
{ SM(NAME_initialise, 2, T_initialise, initialiseBinaryCondition,
     DEFAULT, "Initialise from 2 expressions")
};

/* Get Methods */

#define get_binaryCondition NULL
/*
static getdecl get_binaryCondition[] =
{
};
*/

/* Resources */

#define rc_binaryCondition NULL
/*
Exemple #26
0
void
ar9287_set_power_calib(struct athn_softc *sc, struct ieee80211_channel *c)
{
	const struct ar9287_eeprom *eep = sc->eep;
	uint8_t boundaries[AR_PD_GAINS_IN_MASK];
	uint8_t pdadcs[AR_NUM_PDADC_VALUES];
	uint8_t xpdgains[AR9287_NUM_PD_GAINS];
	int8_t txpower;
	uint8_t overlap;
	uint32_t reg, offset;
	int i, j, nxpdgains;

	if (sc->eep_rev < AR_EEP_MINOR_VER_2) {
		overlap = MS(AR_READ(sc, AR_PHY_TPCRG5),
		    AR_PHY_TPCRG5_PD_GAIN_OVERLAP);
	} else
		overlap = eep->modalHeader.pdGainOverlap;

	if (sc->flags & ATHN_FLAG_OLPC) {
		/* XXX not here. */
		sc->pdadc =
		    ((const struct ar_cal_data_per_freq_olpc *)
		     eep->calPierData2G[0])->vpdPdg[0][0];
	}

	nxpdgains = 0;
	memset(xpdgains, 0, sizeof(xpdgains));
	for (i = AR9287_PD_GAINS_IN_MASK - 1; i >= 0; i--) {
		if (nxpdgains >= AR9287_NUM_PD_GAINS)
			break;		/* Can't happen. */
		if (eep->modalHeader.xpdGain & (1 << i))
			xpdgains[nxpdgains++] = i;
	}
	reg = AR_READ(sc, AR_PHY_TPCRG1);
	reg = RW(reg, AR_PHY_TPCRG1_NUM_PD_GAIN, nxpdgains - 1);
	reg = RW(reg, AR_PHY_TPCRG1_PD_GAIN_1, xpdgains[0]);
	reg = RW(reg, AR_PHY_TPCRG1_PD_GAIN_2, xpdgains[1]);
	AR_WRITE(sc, AR_PHY_TPCRG1, reg);
	AR_WRITE_BARRIER(sc);

	for (i = 0; i < AR9287_MAX_CHAINS; i++)	{
		if (!(sc->txchainmask & (1 << i)))
			continue;

		offset = i * 0x1000;

		if (sc->flags & ATHN_FLAG_OLPC) {
			ar9287_olpc_get_pdgain(sc, c, i, &txpower);

			reg = AR_READ(sc, AR_PHY_TX_PWRCTRL6_0);
			reg = RW(reg, AR_PHY_TX_PWRCTRL_ERR_EST_MODE, 3);
			AR_WRITE(sc, AR_PHY_TX_PWRCTRL6_0, reg);

			reg = AR_READ(sc, AR_PHY_TX_PWRCTRL6_1);
			reg = RW(reg, AR_PHY_TX_PWRCTRL_ERR_EST_MODE, 3);
			AR_WRITE(sc, AR_PHY_TX_PWRCTRL6_1, reg);

			/* NB: txpower is in half dB. */
			reg = AR_READ(sc, AR_PHY_CH0_TX_PWRCTRL11 + offset);
			reg = RW(reg, AR_PHY_TX_PWRCTRL_OLPC_PWR, txpower);
			AR_WRITE(sc, AR_PHY_CH0_TX_PWRCTRL11 + offset, reg);

			AR_WRITE_BARRIER(sc);
			continue;	/* That's it for open loop mode. */
		}

		/* Closed loop power control. */
		ar9287_get_pdadcs(sc, c, i, nxpdgains, overlap,
		    boundaries, pdadcs);

		/* Write boundaries. */
		if (i == 0) {
			reg  = SM(AR_PHY_TPCRG5_PD_GAIN_OVERLAP,
			    overlap);
			reg |= SM(AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1,
			    boundaries[0]);
			reg |= SM(AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2,
			    boundaries[1]);
			reg |= SM(AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3,
			    boundaries[2]);
			reg |= SM(AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4,
			    boundaries[3]);
			AR_WRITE(sc, AR_PHY_TPCRG5 + offset, reg);
		}
		/* Write PDADC values. */
		for (j = 0; j < AR_NUM_PDADC_VALUES; j += 4) {
			AR_WRITE(sc, AR_PHY_PDADC_TBL_BASE + offset + j,
			    pdadcs[j + 0] <<  0 |
			    pdadcs[j + 1] <<  8 |
			    pdadcs[j + 2] << 16 |
			    pdadcs[j + 3] << 24);
		}
		AR_WRITE_BARRIER(sc);
	}
}
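
The closed-loop branch above writes the PDADC table four 8-bit entries per 32-bit register word. A tiny sketch of that packing loop with a made-up table size and dummy calibration data:

#include <stdint.h>
#include <stdio.h>

#define NUM_PDADC_VALUES 128	/* assumed table size for the sketch */

int
main(void)
{
	uint8_t pdadcs[NUM_PDADC_VALUES];
	int j;

	for (j = 0; j < NUM_PDADC_VALUES; j++)
		pdadcs[j] = (uint8_t)j;	/* dummy calibration curve */

	/* Four 8-bit PDADC entries go into each 32-bit table word. */
	for (j = 0; j < NUM_PDADC_VALUES; j += 4) {
		uint32_t word = (uint32_t)pdadcs[j + 0]
			      | (uint32_t)pdadcs[j + 1] << 8
			      | (uint32_t)pdadcs[j + 2] << 16
			      | (uint32_t)pdadcs[j + 3] << 24;

		printf("PDADC_TBL[%3d..%3d] <- 0x%08x\n", j, j + 3,
		       (unsigned)word);
	}
	return 0;
}
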
Exemple #27
0
void
ar9280SpurMitigate(struct ath_hal *ah, const struct ieee80211_channel *chan)
{
    static const int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8,
                AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60 };
    static const int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10,
                AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60 };
    static int inc[4] = { 0, 100, 0, 0 };

    int bb_spur = AR_NO_SPUR;
    int freq;
    int bin, cur_bin;
    int bb_spur_off, spur_subchannel_sd;
    int spur_freq_sd;
    int spur_delta_phase;
    int denominator;
    int upper, lower, cur_vit_mask;
    int tmp, newVal;
    int i;
    CHAN_CENTERS centers;

    int8_t mask_m[123];
    int8_t mask_p[123];
    int8_t mask_amt;
    int tmp_mask;
    int cur_bb_spur;
    HAL_BOOL is2GHz = IEEE80211_IS_CHAN_2GHZ(chan);

    OS_MEMZERO(&mask_m, sizeof(int8_t) * 123);
    OS_MEMZERO(&mask_p, sizeof(int8_t) * 123);

    ar5416GetChannelCenters(ah, chan, &centers);
    freq = centers.synth_center;

    /*
     * Need to verify range +/- 9.38 for static ht20 and +/- 18.75 for ht40,
     * otherwise spur is out-of-band and can be ignored.
     */
    for (i = 0; i < AR5416_EEPROM_MODAL_SPURS; i++) {
        cur_bb_spur = ath_hal_getSpurChan(ah, i, is2GHz);
        /* Get actual spur freq in MHz from EEPROM read value */ 
        if (is2GHz) {
            cur_bb_spur =  (cur_bb_spur / 10) + AR_BASE_FREQ_2GHZ;
        } else {
            cur_bb_spur =  (cur_bb_spur / 10) + AR_BASE_FREQ_5GHZ;
        }

        if (AR_NO_SPUR == cur_bb_spur)
            break;
        cur_bb_spur = cur_bb_spur - freq;

        if (IEEE80211_IS_CHAN_HT40(chan)) {
            if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT40) && 
                (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT40)) {
                bb_spur = cur_bb_spur;
                break;
            }
        } else if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT20) &&
                   (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT20)) {
            bb_spur = cur_bb_spur;
            break;
        }
    }

    if (AR_NO_SPUR == bb_spur) {
#if 1
        /*
         * MRC CCK can interfere with beacon detection and cause deaf/mute.
         * Disable MRC CCK for now.
         */
        OS_REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK, AR_PHY_FORCE_CLKEN_CCK_MRC_MUX);
#else
        /* Enable MRC CCK if no spur is found in this channel. */
        OS_REG_SET_BIT(ah, AR_PHY_FORCE_CLKEN_CCK, AR_PHY_FORCE_CLKEN_CCK_MRC_MUX);
#endif
        return;
    } else {
        /* 
         * For Merlin, spur can break CCK MRC algorithm. Disable CCK MRC if spur
         * is found in this channel.
         */
        OS_REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK, AR_PHY_FORCE_CLKEN_CCK_MRC_MUX);
    }

    bin = bb_spur * 320;

    tmp = OS_REG_READ(ah, AR_PHY_TIMING_CTRL4_CHAIN(0));

    newVal = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
        AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
        AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
        AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
    OS_REG_WRITE(ah, AR_PHY_TIMING_CTRL4_CHAIN(0), newVal);

    newVal = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
        AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
        AR_PHY_SPUR_REG_MASK_RATE_SELECT |
        AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
        SM(AR5416_SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
    OS_REG_WRITE(ah, AR_PHY_SPUR_REG, newVal);

    /* Pick control or extn channel to cancel the spur */
    if (IEEE80211_IS_CHAN_HT40(chan)) {
        if (bb_spur < 0) {
            spur_subchannel_sd = 1;
            bb_spur_off = bb_spur + 10;
        } else {
            spur_subchannel_sd = 0;
            bb_spur_off = bb_spur - 10;
        }
    } else {
        spur_subchannel_sd = 0;
        bb_spur_off = bb_spur;
    }

    /*
     * spur_delta_phase = bb_spur/40 * 2**21 for static ht20,
     * /80 for dyn2040.
     */
    if (IEEE80211_IS_CHAN_HT40(chan))
        spur_delta_phase = ((bb_spur * 262144) / 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE;    
    else
        spur_delta_phase = ((bb_spur * 524288) / 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE;

    /*
     * in 11A mode the denominator of spur_freq_sd should be 40 and
     * it should be 44 in 11G
     */
    denominator = IEEE80211_IS_CHAN_2GHZ(chan) ? 44 : 40;
    spur_freq_sd = ((bb_spur_off * 2048) / denominator) & 0x3ff;

    newVal = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
        SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
        SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
    OS_REG_WRITE(ah, AR_PHY_TIMING11, newVal);

    /* Choose to cancel between control and extension channels */
    newVal = spur_subchannel_sd << AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S;
    OS_REG_WRITE(ah, AR_PHY_SFCORR_EXT, newVal);

    /*
     * ============================================
     * Set Pilot and Channel Masks
     *
     * pilot mask 1 [31:0] = +6..-26, no 0 bin
     * pilot mask 2 [19:0] = +26..+7
     *
     * channel mask 1 [31:0] = +6..-26, no 0 bin
     * channel mask 2 [19:0] = +26..+7
     */
    cur_bin = -6000;
    upper = bin + 100;
    lower = bin - 100;

    for (i = 0; i < 4; i++) {
        int pilot_mask = 0;
        int chan_mask  = 0;
        int bp         = 0;
        for (bp = 0; bp < 30; bp++) {
            if ((cur_bin > lower) && (cur_bin < upper)) {
                pilot_mask = pilot_mask | 0x1 << bp;
                chan_mask  = chan_mask | 0x1 << bp;
            }
            cur_bin += 100;
        }
        cur_bin += inc[i];
        OS_REG_WRITE(ah, pilot_mask_reg[i], pilot_mask);
        OS_REG_WRITE(ah, chan_mask_reg[i], chan_mask);
    }

    /* =================================================
     * viterbi mask 1 based on channel magnitude
     * four levels 0-3
     *  - mask (-27 to 27) (reg 64,0x9900 to 67,0x990c)
     *      [1 2 2 1] for -9.6 or [1 2 1] for +16
     *  - enable_mask_ppm, all bins move with freq
     *
     *  - mask_select,    8 bits for rates (reg 67,0x990c)
     *  - mask_rate_cntl, 8 bits for rates (reg 67,0x990c)
     *      choose which mask to use mask or mask2
     */

    /*
     * viterbi mask 2  2nd set for per data rate puncturing
     * four levels 0-3
     *  - mask_select, 8 bits for rates (reg 67)
     *  - mask (-27 to 27) (reg 98,0x9988 to 101,0x9994)
     *      [1 2 2 1] for -9.6 or [1 2 1] for +16
     */
    cur_vit_mask = 6100;
    upper        = bin + 120;
    lower        = bin - 120;

    for (i = 0; i < 123; i++) {
        if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
            if ((abs(cur_vit_mask - bin)) < 75) {
                mask_amt = 1;
            } else {
                mask_amt = 0;
            }
            if (cur_vit_mask < 0) {
                mask_m[abs(cur_vit_mask / 100)] = mask_amt;
            } else {
                mask_p[cur_vit_mask / 100] = mask_amt;
            }
        }
        cur_vit_mask -= 100;
    }

    tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28)
          | (mask_m[48] << 26) | (mask_m[49] << 24)
          | (mask_m[50] << 22) | (mask_m[51] << 20)
          | (mask_m[52] << 18) | (mask_m[53] << 16)
          | (mask_m[54] << 14) | (mask_m[55] << 12)
          | (mask_m[56] << 10) | (mask_m[57] <<  8)
          | (mask_m[58] <<  6) | (mask_m[59] <<  4)
          | (mask_m[60] <<  2) | (mask_m[61] <<  0);
    OS_REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask);
    OS_REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask);

    tmp_mask =             (mask_m[31] << 28)
          | (mask_m[32] << 26) | (mask_m[33] << 24)
          | (mask_m[34] << 22) | (mask_m[35] << 20)
          | (mask_m[36] << 18) | (mask_m[37] << 16)
          | (mask_m[48] << 14) | (mask_m[39] << 12)
          | (mask_m[40] << 10) | (mask_m[41] <<  8)
          | (mask_m[42] <<  6) | (mask_m[43] <<  4)
          | (mask_m[44] <<  2) | (mask_m[45] <<  0);
    OS_REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask);
    OS_REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask);

    tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28)
          | (mask_m[18] << 26) | (mask_m[18] << 24)
          | (mask_m[20] << 22) | (mask_m[20] << 20)
          | (mask_m[22] << 18) | (mask_m[22] << 16)
          | (mask_m[24] << 14) | (mask_m[24] << 12)
          | (mask_m[25] << 10) | (mask_m[26] <<  8)
          | (mask_m[27] <<  6) | (mask_m[28] <<  4)
          | (mask_m[29] <<  2) | (mask_m[30] <<  0);
    OS_REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask);
    OS_REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask);

    tmp_mask = (mask_m[ 0] << 30) | (mask_m[ 1] << 28)
          | (mask_m[ 2] << 26) | (mask_m[ 3] << 24)
          | (mask_m[ 4] << 22) | (mask_m[ 5] << 20)
          | (mask_m[ 6] << 18) | (mask_m[ 7] << 16)
          | (mask_m[ 8] << 14) | (mask_m[ 9] << 12)
          | (mask_m[10] << 10) | (mask_m[11] <<  8)
          | (mask_m[12] <<  6) | (mask_m[13] <<  4)
          | (mask_m[14] <<  2) | (mask_m[15] <<  0);
    OS_REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask);
    OS_REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask);

    tmp_mask =             (mask_p[15] << 28)
          | (mask_p[14] << 26) | (mask_p[13] << 24)
          | (mask_p[12] << 22) | (mask_p[11] << 20)
          | (mask_p[10] << 18) | (mask_p[ 9] << 16)
          | (mask_p[ 8] << 14) | (mask_p[ 7] << 12)
          | (mask_p[ 6] << 10) | (mask_p[ 5] <<  8)
          | (mask_p[ 4] <<  6) | (mask_p[ 3] <<  4)
          | (mask_p[ 2] <<  2) | (mask_p[ 1] <<  0);
    OS_REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask);
    OS_REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask);

    tmp_mask =             (mask_p[30] << 28)
          | (mask_p[29] << 26) | (mask_p[28] << 24)
          | (mask_p[27] << 22) | (mask_p[26] << 20)
          | (mask_p[25] << 18) | (mask_p[24] << 16)
          | (mask_p[23] << 14) | (mask_p[22] << 12)
          | (mask_p[21] << 10) | (mask_p[20] <<  8)
          | (mask_p[19] <<  6) | (mask_p[18] <<  4)
          | (mask_p[17] <<  2) | (mask_p[16] <<  0);
    OS_REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
    OS_REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);

    tmp_mask =             (mask_p[45] << 28)
          | (mask_p[44] << 26) | (mask_p[43] << 24)
          | (mask_p[42] << 22) | (mask_p[41] << 20)
          | (mask_p[40] << 18) | (mask_p[39] << 16)
          | (mask_p[38] << 14) | (mask_p[37] << 12)
          | (mask_p[36] << 10) | (mask_p[35] <<  8)
          | (mask_p[34] <<  6) | (mask_p[33] <<  4)
          | (mask_p[32] <<  2) | (mask_p[31] <<  0);
    OS_REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
    OS_REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);

    tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
          | (mask_p[59] << 26) | (mask_p[58] << 24)
          | (mask_p[57] << 22) | (mask_p[56] << 20)
          | (mask_p[55] << 18) | (mask_p[54] << 16)
          | (mask_p[53] << 14) | (mask_p[52] << 12)
          | (mask_p[51] << 10) | (mask_p[50] <<  8)
          | (mask_p[49] <<  6) | (mask_p[48] <<  4)
          | (mask_p[47] <<  2) | (mask_p[46] <<  0);
    OS_REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
    OS_REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
}
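
The pilot/channel mask loop in ar9280SpurMitigate() walks 100-unit bins from -6000 upward and sets a bit for every bin that lands within +/-100 of the spur bin (bb_spur * 320). A standalone sketch of just that mask computation for an example spur offset; the offset value and the interpretation of the units are assumptions for illustration:

#include <stdio.h>

int
main(void)
{
	int bb_spur = 5;		/* example spur offset, driver units */
	int bin = bb_spur * 320;
	int upper = bin + 100, lower = bin - 100;
	int cur_bin = -6000;
	static const int inc[4] = { 0, 100, 0, 0 };
	int i, bp;

	for (i = 0; i < 4; i++) {
		int pilot_mask = 0, chan_mask = 0;

		/* 30 bins per mask register, 100 units apart. */
		for (bp = 0; bp < 30; bp++) {
			if (cur_bin > lower && cur_bin < upper) {
				pilot_mask |= 1 << bp;
				chan_mask  |= 1 << bp;
			}
			cur_bin += 100;
		}
		cur_bin += inc[i];
		printf("mask reg %d: pilot=0x%08x chan=0x%08x\n",
		       i, pilot_mask, chan_mask);
	}
	return 0;
}
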
Exemple #28
0
static HAL_BOOL
ar2316SetPowerTable(struct ath_hal *ah,
	int16_t *minPower, int16_t *maxPower, HAL_CHANNEL_INTERNAL *chan, 
	u_int16_t *rfXpdGain)
{
	struct ath_hal_5212 *ahp = AH5212(ah);
	RAW_DATA_STRUCT_2316 *pRawDataset = AH_NULL;
	u_int16_t pdGainOverlap_t2;
	int16_t minCalPower2316_t2;
	u_int16_t *pdadcValues = ahp->ah_pcdacTable;
	u_int16_t gainBoundaries[4];
	u_int32_t i, reg32, regoffset;

	HALDEBUG(ah, "%s:chan 0x%x flag 0x%x\n", __func__, chan->channel,chan->channelFlags);

	if (IS_CHAN_G(chan) || IS_CHAN_108G(chan))
		pRawDataset = &ahp->ah_rawDataset2413[headerInfo11G];
	else if (IS_CHAN_B(chan))
		pRawDataset = &ahp->ah_rawDataset2413[headerInfo11B];
	else {
		HALDEBUG(ah, "%s:illegal mode\n", __func__);
		return AH_FALSE;
	}

	pdGainOverlap_t2 = (u_int16_t) SM(OS_REG_READ(ah, AR_PHY_TPCRG5),
					  AR_PHY_TPCRG5_PD_GAIN_OVERLAP);
    
	ar2316getGainBoundariesAndPdadcsForPowers(ah, chan->channel,
		pRawDataset, pdGainOverlap_t2,&minCalPower2316_t2,gainBoundaries,
		rfXpdGain, pdadcValues);

	OS_REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_NUM_PD_GAIN, 
			 (pRawDataset->pDataPerChannel[0].numPdGains - 1));

	/*
	 * Note the pdadc table may not start at 0 dBm power; it could be
	 * negative or greater than 0.  Need to offset the power
	 * values by the amount of minPower for griffin.
	 */
	if (minCalPower2316_t2 != 0)
		ahp->ah_txPowerIndexOffset = (int16_t)(0 - minCalPower2316_t2);
	else
		ahp->ah_txPowerIndexOffset = 0;

	/* Finally, write the power values into the baseband power table */
	regoffset = 0x9800 + (672 <<2); /* beginning of pdadc table in griffin */
	for (i = 0; i < 32; i++) {
		reg32 = ((pdadcValues[4*i + 0] & 0xFF) << 0)  | 
			((pdadcValues[4*i + 1] & 0xFF) << 8)  |
			((pdadcValues[4*i + 2] & 0xFF) << 16) |
			((pdadcValues[4*i + 3] & 0xFF) << 24) ;        
		OS_REG_WRITE(ah, regoffset, reg32);
		regoffset += 4;
	}

	OS_REG_WRITE(ah, AR_PHY_TPCRG5, 
		     SM(pdGainOverlap_t2, AR_PHY_TPCRG5_PD_GAIN_OVERLAP) | 
		     SM(gainBoundaries[0], AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1) |
		     SM(gainBoundaries[1], AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2) |
		     SM(gainBoundaries[2], AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3) |
		     SM(gainBoundaries[3], AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4));

	return AH_TRUE;
}
Exemple #29
0
/* Type declarations */

static char *T_initialise[] =
        { "button=[button_name]", "modifier=[modifier]" };

/* Instance Variables */

static vardecl var_moveOutlineGesture[] =
{ IV(NAME_outline, "box", IV_GET,
     NAME_feedback, "The outline moved")
};

/* Send Methods */

static senddecl send_moveOutlineGesture[] =
{ SM(NAME_drag, 1, "event", dragMoveOutlineGesture,
     DEFAULT, "Drag outline to next position"),
  SM(NAME_initialise, 2, T_initialise, initialiseMoveOutlineGesture,
     DEFAULT, "Create from button and modifier"),
  SM(NAME_initiate, 1, "event", initiateMoveOutlineGesture,
     DEFAULT, "Display outline and change cursor"),
  SM(NAME_terminate, 1, "event", terminateMoveOutlineGesture,
     DEFAULT, "Move object and undisplay outline")
};

/* Get Methods */

#define get_moveOutlineGesture NULL
/*
static getdecl get_moveOutlineGesture[] =
{
};
Exemple #30
0
static vardecl var_area[] =
{ IV(NAME_x, "int", IV_BOTH,
     NAME_position, "Origin's X-value"),
  IV(NAME_y, "int", IV_BOTH,
     NAME_position, "Origin's Y-value"),
  IV(NAME_width, "int", IV_BOTH,
     NAME_dimension, "Width in pixels (may be negative)"),
  IV(NAME_height, "int", IV_BOTH,
     NAME_dimension, "Height in pixels (may be negative)")
};

/* Send Methods */

static senddecl send_area[] =
{ SM(NAME_initialise, 4, T_dxdydwdh, initialiseArea,
     DEFAULT, "Create area from X, Y, W and H"),
  SM(NAME_copy, 1, "area", copyArea,
     NAME_copy, "Copy X, Y, W and H from argument area"),
  SM(NAME_equal, 1, "area", equalArea,
     NAME_equality, "Test if area is equal to the argument"),
  SM(NAME_normalise, 0, NULL, normaliseArea,
     NAME_orientation, "Make top-left corner the origin"),
  SM(NAME_orientation, 1, "{north_west,south_west,north_east,south_east}",
     orientationArea,
     NAME_orientation, "Put origin at indicated corner"),
  SM(NAME_center, 1, "point", centerArea,
     NAME_position, "Move to make point the center"),
  SM(NAME_position, 1, "point", positionArea,
     NAME_position, "Move origin to point"),
  SM(NAME_relativeMove, 1, "point", relativeMoveArea,
     NAME_position, "Move origin relative by point"),