Code Example #1: iwl_trans_txq_init()
static int iwl_trans_txq_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
		      int slots_num, u32 txq_id)
{
	int ret;

	txq->need_update = 0;
	memset(txq->meta, 0, sizeof(txq->meta[0]) * slots_num);

	/*
	 * For the default queues 0-3, set up the swq_id
	 * already -- all others need to get one later
	 * (if they need one at all).
	 */
	if (txq_id < 4)
		iwl_set_swq_id(txq, txq_id, txq_id);

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
			txq_id);
	if (ret)
		return ret;

	/*
	 * Tell nic where to find circular buffer of Tx Frame Descriptors for
	 * given Tx queue, and enable the DMA channel used for that queue.
	 * Circular buffer (TFD queue in DRAM) physical base address */
	iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
			     txq->q.dma_addr >> 8);

	return 0;
}
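
All three examples on this page call iwl_set_swq_id() to stamp a software queue id onto a Tx queue. For reference, here is a minimal sketch of that helper, and of the wrap helpers the BUILD_BUG_ON above protects, assuming the 2-bit AC / 5-bit hardware-queue packing used by iwlwifi in this period; exact definitions vary between kernel versions, so check your own tree.

/*
 * Sketch for reference: swq_id packs the AC into the low 2 bits and
 * the hardware queue number into the next 5 bits.
 */
static inline void iwl_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq)
{
	BUG_ON(ac > 3);   /* only have 2 bits for the AC */
	BUG_ON(hwq > 31); /* only have 5 bits for the hw queue */

	txq->swq_id = (hwq << 2) | ac;
}

/*
 * The BUILD_BUG_ON in example #1 exists because these helpers wrap the
 * queue index by masking with (n_bd - 1), which is only correct when
 * n_bd is a power of two.
 */
static inline int iwl_queue_inc_wrap(int index, int n_bd)
{
	return ++index & (n_bd - 1);
}

static inline int iwl_queue_dec_wrap(int index, int n_bd)
{
	return --index & (n_bd - 1);
}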
Code Example #2: iwl_trans_pcie_tx_agg_alloc()
int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
				int sta_id, int tid)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	/* claim the first free hardware Tx queue for this aggregation session */
	txq_id = iwlagn_txq_ctx_activate_free(trans);
	if (txq_id == -1) {
		IWL_ERR(trans, "No free aggregation queue available\n");
		return -ENXIO;
	}

	/* record the sta/tid -> queue mapping and tag the queue's swq_id */
	trans_pcie->agg_txq[sta_id][tid] = txq_id;
	iwl_set_swq_id(&trans_pcie->txq[txq_id], get_ac_from_tid(tid), txq_id);

	return 0;
}
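
Example #2 derives the AC for the software queue id from the aggregation session's TID. Below is a hedged sketch of that lookup, assuming the standard 802.11 UP-to-AC mapping with mac80211's AC numbering (0 = VO highest priority); the helper and table names follow the driver of this era, but the exact values should be checked against your tree.

/*
 * Assumed TID -> AC table (standard 802.11 mapping; the driver's
 * actual table may differ in detail).
 */
static const u8 tid_to_ac[] = {
	2, 3, 3, 2,	/* TIDs 0-3: BE, BK, BK, BE */
	1, 1, 0, 0,	/* TIDs 4-7: VI, VI, VO, VO */
};

static inline int get_ac_from_tid(u16 tid)
{
	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
		return tid_to_ac[tid];

	/* no support for TIDs 8-15 yet */
	return -EINVAL;
}

Note that iwlagn_txq_ctx_activate_free() signals failure with -1 rather than a negative errno, which is why the caller above tests for -1 explicitly before translating the failure into -ENXIO.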
Code Example #3: iwlagn_alive_notify()
File: iwl-agn-ucode.c  Project: AbheekG/XIA-for-Linux
int iwlagn_alive_notify(struct iwl_priv *priv)
{
    const struct queue_to_fifo_ac *queue_to_fifo;
    u32 a;
    unsigned long flags;
    int i, chan;
    u32 reg_val;

    spin_lock_irqsave(&priv->lock, flags);

    priv->scd_base_addr = iwl_read_prph(priv, IWLAGN_SCD_SRAM_BASE_ADDR);

    /* reset scheduler context data memory */
    a = priv->scd_base_addr + IWLAGN_SCD_CONTEXT_DATA_OFFSET;
    for (; a < priv->scd_base_addr + IWLAGN_SCD_TX_STTS_BITMAP_OFFSET;
            a += 4)
        iwl_write_targ_mem(priv, a, 0);
    /* reset tx status memory */
    for (; a < priv->scd_base_addr + IWLAGN_SCD_TRANSLATE_TBL_OFFSET;
            a += 4)
        iwl_write_targ_mem(priv, a, 0);
    /* reset translation table memory */
    for (; a < priv->scd_base_addr +
            IWLAGN_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num);
            a += 4)
        iwl_write_targ_mem(priv, a, 0);

    /* point the scheduler at the byte-count tables in DRAM */
    iwl_write_prph(priv, IWLAGN_SCD_DRAM_BASE_ADDR,
                   priv->scd_bc_tbls.dma >> 10);

    /* Enable DMA channel */
    for (chan = 0; chan < FH50_TCSR_CHNL_NUM; chan++)
        iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
                           FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
                           FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

    /* Update FH chicken bits */
    reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
    iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
                       reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

    /* select chain mode for all queues and disable aggregation initially */
    iwl_write_prph(priv, IWLAGN_SCD_QUEUECHAIN_SEL,
                   IWLAGN_SCD_QUEUECHAIN_SEL_ALL(priv));
    iwl_write_prph(priv, IWLAGN_SCD_AGGR_SEL, 0);

    /* initialize each queue: reset read/write pointers and scheduler context */
    for (i = 0; i < priv->hw_params.max_txq_num; i++) {
        iwl_write_prph(priv, IWLAGN_SCD_QUEUE_RDPTR(i), 0);
        iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
        iwl_write_targ_mem(priv, priv->scd_base_addr +
                           IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(i), 0);
        iwl_write_targ_mem(priv, priv->scd_base_addr +
                           IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(i) +
                           sizeof(u32),
                           ((SCD_WIN_SIZE <<
                             IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
                            IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
                           ((SCD_FRAME_LIMIT <<
                             IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
                            IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
    }

    iwl_write_prph(priv, IWLAGN_SCD_INTERRUPT_MASK,
                   IWL_MASK(0, priv->hw_params.max_txq_num));

    /* Activate all Tx DMA/FIFO channels */
    priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7));

    /* map queues to FIFOs: use the IPAN mapping when the PAN context is active */
    if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
        queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
    else
        queue_to_fifo = iwlagn_default_queue_to_tx_fifo;

    iwlagn_set_wr_ptrs(priv, priv->cmd_queue, 0);

    /* make sure all queues are not stopped */
    memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
    for (i = 0; i < 4; i++)
        atomic_set(&priv->queue_stop_count[i], 0);

    /* reset to 0 to enable all the queues first */
    priv->txq_ctx_active_msk = 0;

    BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) != 10);
    BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) != 10);

    for (i = 0; i < 10; i++) {
        int fifo = queue_to_fifo[i].fifo;
        int ac = queue_to_fifo[i].ac;

        iwl_txq_ctx_activate(priv, i);

        if (fifo == IWL_TX_FIFO_UNUSED)
            continue;

        if (ac != IWL_AC_UNSET)
            iwl_set_swq_id(&priv->txq[i], ac, i);
        iwlagn_tx_queue_set_status(priv, &priv->txq[i], fifo, 0);
    }

    spin_unlock_irqrestore(&priv->lock, flags);

    /* Enable L1-Active */
    iwl_clear_bits_prph(priv, APMG_PCIDEV_STT_REG,
                        APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

    iwlagn_send_wimax_coex(priv);

    iwlagn_set_Xtal_calib(priv);
    iwl_send_calib_results(priv);

    return 0;
}
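
Example #3 keeps per-queue active state in the txq_ctx_active_msk bitmask and builds contiguous bitmasks with IWL_MASK(). Here is a sketch of those helpers, reconstructed from driver headers of the same era and shown for reference only; verify against your kernel version.

/* Bitmask covering bits lo..hi inclusive, e.g. IWL_MASK(0, 7) == 0xff. */
#define IWL_MASK(lo, hi) \
	((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))

/* Mark a Tx queue as active/inactive in the driver's bookkeeping mask. */
static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
{
	set_bit(txq_id, &priv->txq_ctx_active_msk);
}

static inline void iwl_txq_ctx_deactivate(struct iwl_priv *priv, int txq_id)
{
	clear_bit(txq_id, &priv->txq_ctx_active_msk);
}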