Example #1
int iwl_trans_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
				  u16 ssn_idx, u8 tx_fifo)
{
	if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
	    (IWLAGN_FIRST_AMPDU_QUEUE +
		priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
		IWL_ERR(priv,
			"queue number out of range: %d, must be %d to %d\n",
			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
			IWLAGN_FIRST_AMPDU_QUEUE +
			priv->cfg->base_params->num_of_ampdu_queues - 1);
		return -EINVAL;
	}

	iwlagn_tx_queue_stop_scheduler(priv, txq_id);

	iwl_clear_bits_prph(priv, SCD_AGGR_SEL, (1 << txq_id));

	priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
	/* supposes that ssn_idx is valid (!= 0xFFF) */
	iwl_trans_set_wr_ptrs(priv, txq_id, ssn_idx);

	iwl_clear_bits_prph(priv, SCD_INTERRUPT_MASK, (1 << txq_id));
	iwl_txq_ctx_deactivate(priv, txq_id);
	iwl_trans_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);

	return 0;
}
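
Every example on this page ultimately calls iwl_clear_bits_prph(). As a rough, illustrative sketch (not the verbatim iwlwifi source, whose locking and NIC-access handling vary across kernel versions), the helper performs a read-modify-write on an indirect periphery (PRPH) register, clearing only the bits in the given mask. iwl_read_prph()/iwl_write_prph() appear in Examples 3, 7 and 8:

/* Illustrative sketch of iwl_clear_bits_prph(); the real driver also
 * grabs NIC access / a register lock around this sequence. */
static void sketch_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
{
	/* read the current PRPH register value ... */
	u32 val = iwl_read_prph(trans, ofs);

	/* ... and write it back with the requested bits cleared */
	iwl_write_prph(trans, ofs, val & ~mask);
}

The iwl_set_bits_prph() calls seen in Examples 4, 6 and 7 are presumably the mirror image, writing back val | mask instead.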
Example #2
int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int sta_id, int tid)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u8 txq_id = trans_pcie->agg_txq[sta_id][tid];

	if (WARN_ON_ONCE(!is_agg_txqid_valid(trans, txq_id))) {
		IWL_ERR(trans,
			"queue number out of range: %d, must be %d to %d\n",
			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
			IWLAGN_FIRST_AMPDU_QUEUE +
			hw_params(trans).num_ampdu_queues - 1);
		return -EINVAL;
	}

	iwlagn_tx_queue_stop_scheduler(trans, txq_id);

	iwl_clear_bits_prph(trans, SCD_AGGR_SEL, (1 << txq_id));

	trans_pcie->agg_txq[sta_id][tid] = 0;
	trans_pcie->txq[txq_id].q.read_ptr = 0;
	trans_pcie->txq[txq_id].q.write_ptr = 0;
	/* supposes that ssn_idx is valid (!= 0xFFF) */
	iwl_trans_set_wr_ptrs(trans, txq_id, 0);

	iwl_clear_bits_prph(trans, SCD_INTERRUPT_MASK, (1 << txq_id));
	iwl_txq_ctx_deactivate(trans_pcie, txq_id);
	iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 0, 0);
	return 0;
}
Example #3
static inline void iwl_mvm_restart_early_start(struct iwl_mvm *mvm)
{
	if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000)
		iwl_clear_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100);
	else
		iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 1);
}
Example #4
static int iwl_init_otp_access(struct iwl_trans *trans)
{
	int ret;

	/* Enable 40MHz radio clock */
	iwl_write32(trans, CSR_GP_CNTRL,
		    iwl_read32(trans, CSR_GP_CNTRL) |
		    CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/* wait for clock to be ready */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   25000);
	if (ret < 0) {
		IWL_ERR(trans, "Time out access OTP\n");
	} else {
		iwl_set_bits_prph(trans, APMG_PS_CTRL_REG,
				  APMG_PS_CTRL_VAL_RESET_REQ);
		udelay(5);
		iwl_clear_bits_prph(trans, APMG_PS_CTRL_REG,
				    APMG_PS_CTRL_VAL_RESET_REQ);

		/*
		 * CSR auto clock gate disable bit -
		 * this is only applicable for HW with OTP shadow RAM
		 */
		if (trans->cfg->base_params->shadow_ram_support)
			iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
				    CSR_RESET_LINK_PWR_MGMT_DISABLED);
	}
	return ret;
}
Example #5
void iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
		WARN_ONCE(1, "queue %d not used", txq_id);
		return;
	}

	iwlagn_tx_queue_stop_scheduler(trans, txq_id);

	iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));

	trans_pcie->txq[txq_id].q.read_ptr = 0;
	trans_pcie->txq[txq_id].q.write_ptr = 0;
	iwl_trans_set_wr_ptrs(trans, txq_id, 0);

	iwl_clear_bits_prph(trans, SCD_INTERRUPT_MASK, BIT(txq_id));

	iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
				      0, false);
}
Example #6
static int iwl_init_otp_access(struct iwl_priv *priv)
{
	int ret;

	/* Enable 40MHz radio clock */
	_iwl_write32(priv, CSR_GP_CNTRL,
		     _iwl_read32(priv, CSR_GP_CNTRL) |
		     CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/* wait for clock to be ready */
	ret = iwl_poll_direct_bit(priv, CSR_GP_CNTRL,
				  CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				  25000);
	if (ret < 0) {
		IWL_ERR(priv, "Time out access OTP\n");
	} else {
		iwl_set_bits_prph(priv, APMG_PS_CTRL_REG,
				  APMG_PS_CTRL_VAL_RESET_REQ);
		udelay(5);
		iwl_clear_bits_prph(priv, APMG_PS_CTRL_REG,
				    APMG_PS_CTRL_VAL_RESET_REQ);
	}
	return ret;
}
Example #7
File: tx.c Project: dgarnier/linux
void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
			       int sta_id, int tid, int frame_limit, u16 ssn)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (test_and_set_bit(txq_id, trans_pcie->queue_used))
		WARN_ONCE(1, "queue %d already used - expect issues", txq_id);

	/* Stop this Tx queue before configuring it */
	iwl_txq_set_inactive(trans, txq_id);

	/* Set this queue as a chain-building queue unless it is CMD queue */
	if (txq_id != trans_pcie->cmd_queue)
		iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id));

	/* If this queue is mapped to a certain station: it is an AGG queue */
	if (sta_id != IWL_INVALID_STATION) {
		u16 ra_tid = BUILD_RAxTID(sta_id, tid);

		/* Map receiver-address / traffic-ID to this queue */
		iwl_txq_set_ratid_map(trans, ra_tid, txq_id);

		/* enable aggregations for the queue */
		iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
	} else {
		/*
		 * disable aggregations for the queue, this will also make the
		 * ra_tid mapping configuration irrelevant since it is now a
		 * non-AGG queue.
		 */
		iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
	}

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff);
	trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff);

	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
			   (ssn & 0xff) | (txq_id << 8));
	iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);

	/* Set up Tx window size and frame limit for this queue */
	iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
	iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
			((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
				SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
			((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
		       (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		       (fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
		       (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
		       SCD_QUEUE_STTS_REG_MSK);
	IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d WrPtr: %d\n",
			    txq_id, fifo, ssn & 0xff);
}
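
Examples 5 and 7 are the two halves of one queue lifecycle: iwl_trans_pcie_txq_enable() maps a queue to a FIFO and, for a valid station, turns aggregation on via the SCD_AGGR_SEL bit, while iwl_trans_pcie_tx_agg_disable() clears that same bit again. A hypothetical caller, with every numeric value (queue, FIFO, station, TID, window size) chosen purely for illustration, might pair them like this:

static void sketch_agg_session(struct iwl_trans *trans, u16 ssn)
{
	int txq_id = 10;	/* hypothetical AMPDU-capable queue */

	/* Activate the queue on FIFO 1 for station 0 / TID 0,
	 * with a 64-frame aggregation window starting at ssn (Example 7) */
	iwl_trans_pcie_txq_enable(trans, txq_id, 1, 0, 0, 64, ssn);

	/* ... frames are queued and aggregated here ... */

	/* Tear the session down: stop the scheduler, clear the queue's
	 * SCD_AGGR_SEL bit and deactivate it again (Example 5) */
	iwl_trans_pcie_tx_agg_disable(trans, txq_id);
}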
Example #8
int iwlagn_alive_notify(struct iwl_priv *priv)
{
	const struct queue_to_fifo_ac *queue_to_fifo;
	u32 a;
	unsigned long flags;
	int i, chan;
	u32 reg_val;

	spin_lock_irqsave(&priv->lock, flags);

	priv->scd_base_addr = iwl_read_prph(priv, IWLAGN_SCD_SRAM_BASE_ADDR);
	a = priv->scd_base_addr + IWLAGN_SCD_CONTEXT_DATA_OFFSET;
	for (; a < priv->scd_base_addr + IWLAGN_SCD_TX_STTS_BITMAP_OFFSET;
	     a += 4)
		iwl_write_targ_mem(priv, a, 0);
	for (; a < priv->scd_base_addr + IWLAGN_SCD_TRANSLATE_TBL_OFFSET;
	     a += 4)
		iwl_write_targ_mem(priv, a, 0);
	for (; a < priv->scd_base_addr +
	       IWLAGN_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num);
	     a += 4)
		iwl_write_targ_mem(priv, a, 0);

	iwl_write_prph(priv, IWLAGN_SCD_DRAM_BASE_ADDR,
		       priv->scd_bc_tbls.dma >> 10);

	/* Enable DMA channel */
	for (chan = 0; chan < FH50_TCSR_CHNL_NUM; chan++)
		iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	iwl_write_prph(priv, IWLAGN_SCD_QUEUECHAIN_SEL,
		       IWLAGN_SCD_QUEUECHAIN_SEL_ALL(priv));
	iwl_write_prph(priv, IWLAGN_SCD_AGGR_SEL, 0);

	/* initiate the queues */
	for (i = 0; i < priv->hw_params.max_txq_num; i++) {
		iwl_write_prph(priv, IWLAGN_SCD_QUEUE_RDPTR(i), 0);
		iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
		iwl_write_targ_mem(priv, priv->scd_base_addr +
				   IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(i), 0);
		iwl_write_targ_mem(priv, priv->scd_base_addr +
				   IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(i) +
				   sizeof(u32),
				   ((SCD_WIN_SIZE <<
				     IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
				    IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
				   ((SCD_FRAME_LIMIT <<
				     IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				    IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
	}

	iwl_write_prph(priv, IWLAGN_SCD_INTERRUPT_MASK,
		       IWL_MASK(0, priv->hw_params.max_txq_num));

	/* Activate all Tx DMA/FIFO channels */
	priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7));

	/* map queues to FIFOs */
	if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
		queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
	else
		queue_to_fifo = iwlagn_default_queue_to_tx_fifo;

	iwlagn_set_wr_ptrs(priv, priv->cmd_queue, 0);

	/* make sure all queues are not stopped */
	memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
	for (i = 0; i < 4; i++)
		atomic_set(&priv->queue_stop_count[i], 0);

	/* reset to 0 to enable all the queues first */
	priv->txq_ctx_active_msk = 0;

	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) != 10);
	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) != 10);

	for (i = 0; i < 10; i++) {
		int fifo = queue_to_fifo[i].fifo;
		int ac = queue_to_fifo[i].ac;

		iwl_txq_ctx_activate(priv, i);

		if (fifo == IWL_TX_FIFO_UNUSED)
			continue;

		if (ac != IWL_AC_UNSET)
			iwl_set_swq_id(&priv->txq[i], ac, i);
		iwlagn_tx_queue_set_status(priv, &priv->txq[i], fifo, 0);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Enable L1-Active */
	iwl_clear_bits_prph(priv, APMG_PCIDEV_STT_REG,
			    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

	iwlagn_send_wimax_coex(priv);

	iwlagn_set_Xtal_calib(priv);
	iwl_send_calib_results(priv);

	return 0;
}