Example no. 1
int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
	u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */

	if (!priv->cfg->use_isr_legacy)
		rb_timeout = RX_RB_TIMEOUT;

	if (priv->cfg->mod_params->amsdu_size_8K)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->dma_addr >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
			   rb_size |
			   (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	return 0;
}
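Note: the two base-address writes above drop the low 8 and low 4 address bits respectively, so the DMA buffers must be allocated with matching alignment. A minimal debug check one might add inside the function (the WARN_ON placement is our assumption, not part of the original):

	/* rxq->dma_addr is written >> 8: RBD ring must be 256-byte aligned */
	WARN_ON(rxq->dma_addr & 0xff);
	/* rxq->rb_stts_dma is written >> 4: status area must be 16-byte aligned */
	WARN_ON(rxq->rb_stts_dma & 0xf);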
Example no. 2
File: rx.c Project: sosilent/kis_pc
/**
 * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
 */
void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
                                   struct iwl_rx_queue *q)
{
    unsigned long flags;
    u32 reg;

    spin_lock_irqsave(&q->lock, flags);

    if (q->need_update == 0)
        goto exit_unlock;

    if (trans->cfg->base_params->shadow_reg_enable) {
        /* shadow register enabled */
        /* Device expects a multiple of 8 */
        q->write_actual = (q->write & ~0x7);
        iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, q->write_actual);
    } else {
        struct iwl_trans_pcie *trans_pcie =
            IWL_TRANS_GET_PCIE_TRANS(trans);

        /* If power-saving is in use, make sure device is awake */
        if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) {
            reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

            if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
                IWL_DEBUG_INFO(trans,
                               "Rx queue requesting wakeup,"
                               " GP1 = 0x%x\n", reg);
                iwl_set_bit(trans, CSR_GP_CNTRL,
                            CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
                goto exit_unlock;
            }

            q->write_actual = (q->write & ~0x7);
            iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
                               q->write_actual);

            /* Else device is assumed to be awake */
        } else {
            /* Device expects a multiple of 8 */
            q->write_actual = (q->write & ~0x7);
            iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
                               q->write_actual);
        }
    }
    q->need_update = 0;

exit_unlock:
    spin_unlock_irqrestore(&q->lock, flags);
}
void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
{
	u32 reg = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return;

	if (cfg(trans)->base_params->shadow_reg_enable) {
		/* shadow register enabled */
		iwl_write32(trans, HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));
	} else {
		/* if we're trying to save power */
		if (test_bit(STATUS_POWER_PMI, &trans->shrd->status)) {
			reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(trans,
					"Tx queue %d requesting wakeup,"
					" GP1 = 0x%x\n", txq_id, reg);
				iwl_set_bit(trans, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				return;
			}

			iwl_write_direct32(trans, HBUS_TARG_WRPTR,
				     txq->q.write_ptr | (txq_id << 8));

		} else
			iwl_write32(trans, HBUS_TARG_WRPTR,
				    txq->q.write_ptr | (txq_id << 8));
	}
	txq->need_update = 0;
}
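Note: both functions above round the software index down to a multiple of 8 before handing it to the device ("Device expects a multiple of 8"). A self-contained illustration of that rounding, outside the driver:

#include <stdio.h>

int main(void)
{
	unsigned int write;

	/* write & ~0x7 clears the three low bits, rounding down to a
	 * multiple of 8 -- the granularity the DMA engine accepts */
	for (write = 0; write < 18; write++)
		printf("write=%2u -> write_actual=%2u\n",
		       write, write & ~0x7u);
	return 0;
}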
Example no. 4
/**
 * iwlcore_verify_inst_sparse - verify runtime uCode image in card vs. host,
 *   using sample data 100 bytes apart.  If these sample points are good,
 *   it's a pretty good bet that everything between them is good, too.
 */
static int iwlcore_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
{
    u32 val;
    int ret = 0;
    u32 errcnt = 0;
    u32 i;

    IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);

    for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
        /* read data comes through single port, auto-incr addr */
        /* NOTE: Use the debugless read so we don't flood kernel log
         * if IWL_DL_IO is set */
        iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
                           i + IWLAGN_RTC_INST_LOWER_BOUND);
        val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
        if (val != le32_to_cpu(*image)) {
            ret = -EIO;
            errcnt++;
            if (errcnt >= 3)
                break;
        }
    }

    return ret;
}
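Note: the loop header advances the byte offset i and the word pointer image in lockstep; this stays coherent only because the 100-byte stride is a whole number of u32 words (100 / 4 = 25). A compile-time guard for that invariant (ours, not the driver's; C11):

#include <assert.h>
#include <stdint.h>

#define SPARSE_STRIDE 100	/* bytes between sampled words */

/* i += SPARSE_STRIDE and image += SPARSE_STRIDE / sizeof(uint32_t)
 * cover the same distance only when the stride is word-aligned */
static_assert(SPARSE_STRIDE % sizeof(uint32_t) == 0,
	      "sample stride must be a multiple of the word size");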
Example no. 5
void iwl_trans_set_wr_ptrs(struct iwl_priv *priv,
				int txq_id, u32 index)
{
	iwl_write_direct32(priv, HBUS_TARG_WRPTR,
			(index & 0xff) | (txq_id << 8));
	iwl_write_prph(priv, SCD_QUEUE_RDPTR(txq_id), index);
}
void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
			struct iwl_rx_queue *q)
{
	unsigned long flags;
	u32 reg;

	spin_lock_irqsave(&q->lock, flags);

	if (q->need_update == 0)
		goto exit_unlock;

	if (cfg(trans)->base_params->shadow_reg_enable) {
		/* shadow register enabled */
		/* Device expects a multiple of 8 */
		q->write_actual = (q->write & ~0x7);
		iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, q->write_actual);
	} else {
		/* If power-saving is in use, make sure device is awake */
		if (test_bit(STATUS_POWER_PMI, &trans->shrd->status)) {
			reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(trans,
					"Rx queue requesting wakeup,"
					" GP1 = 0x%x\n", reg);
				iwl_set_bit(trans, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				goto exit_unlock;
			}

			q->write_actual = (q->write & ~0x7);
			iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
					q->write_actual);

		/* Else device is assumed to be awake */
		} else {
			/* Device expects a multiple of 8 */
			q->write_actual = (q->write & ~0x7);
			iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
				q->write_actual);
		}
	}
	q->need_update = 0;

 exit_unlock:
	spin_unlock_irqrestore(&q->lock, flags);
}
Example no. 7
static int iwl_trans_rx_stop(struct iwl_priv *priv)
{

	/* stop Rx DMA */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	return iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
			    FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}
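Note: stopping Rx DMA is a two-step handshake: write 0 to the config register, then poll the status register until the channel reports idle. The loop behind iwl_poll_direct_bit is roughly the following sketch (the poll interval and timeout units are our assumptions):

static int poll_direct_bit_sketch(struct iwl_priv *priv,
				  u32 addr, u32 mask, int timeout)
{
	int t = 0;

	do {
		if (iwl_read_direct32(priv, addr) & mask)
			return t;	/* bit observed: channel idle */
		udelay(10);
		t += 10;
	} while (t < timeout);

	return -ETIMEDOUT;		/* bit never came up */
}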
Example no. 8
void iwl_trans_set_wr_ptrs(struct iwl_trans *trans,
				int txq_id, u32 index)
{
	IWL_DEBUG_TX_QUEUES(trans, "Q %d  WrPtr: %d", txq_id, index & 0xff);
	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
			(index & 0xff) | (txq_id << 8));
	iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), index);
}
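Note: both set_wr_ptrs variants pack the queue id and write index into a single HBUS_TARG_WRPTR word. The layout implied by the shifts (inferred from the code, not from a datasheet):

/* bits 0..7  : write index inside the queue (ring of up to 256 entries)
 * bits 8..15 : Tx queue id */
static u32 hbus_targ_wrptr_val(int txq_id, u32 index)
{
	return (index & 0xff) | (txq_id << 8);
}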
Example no. 9
static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */

	if (trans_pcie->rx_buf_size_8k)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   rb_size |
			   (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}
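Note: the closing comment pins the default at 2048 usecs while the register write is a single byte; that only adds up if the coalescing timer counts in coarser units. Assuming a 32-usec tick (our assumption):

/* assumed: CSR_INT_COALESCING tick = 32 usec
 * IWL_HOST_INT_TIMEOUT_DEF * 32 usec == 2048 usec
 * => IWL_HOST_INT_TIMEOUT_DEF == 0x40, which fits the 8-bit write */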
Example no. 10
/**
 * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
 */
int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)
{
	unsigned long flags;
	u32 rx_wrt_ptr_reg = priv->hw_params.rx_wrt_ptr_reg;
	u32 reg;
	int ret = 0;

	spin_lock_irqsave(&q->lock, flags);

	if (q->need_update == 0)
		goto exit_unlock;

	/* If power-saving is in use, make sure device is awake */
	if (test_bit(STATUS_POWER_PMI, &priv->status)) {
		reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(priv, "Rx queue requesting wakeup, GP1 = 0x%x\n",
				      reg);
			iwl_set_bit(priv, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			goto exit_unlock;
		}

		q->write_actual = (q->write & ~0x7);
		iwl_write_direct32(priv, rx_wrt_ptr_reg, q->write_actual);

	/* Else device is assumed to be awake */
	} else {
		/* Device expects a multiple of 8 */
		q->write_actual = (q->write & ~0x7);
		iwl_write_direct32(priv, rx_wrt_ptr_reg, q->write_actual);
	}

	q->need_update = 0;

 exit_unlock:
	spin_unlock_irqrestore(&q->lock, flags);
	return ret;
}
Example no. 11
int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags)
{
	u32 stat_flags = 0;
	struct iwl_host_cmd cmd = {
		.id = REPLY_STATISTICS_CMD,
		.meta.flags = flags,
		.len = sizeof(stat_flags),
		.data = (u8 *) &stat_flags,
	};
	return iwl_send_cmd(priv, &cmd);
}
EXPORT_SYMBOL(iwl_send_statistics_request);

/**
 * iwlcore_verify_inst_sparse - verify runtime uCode image in card vs. host,
 *   using sample data 100 bytes apart.  If these sample points are good,
 *   it's a pretty good bet that everything between them is good, too.
 */
static int iwlcore_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
{
	u32 val;
	int ret = 0;
	u32 errcnt = 0;
	u32 i;

	IWL_DEBUG_INFO("ucode inst image size is %u\n", len);

	ret = iwl_grab_nic_access(priv);
	if (ret)
		return ret;

	for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
		/* read data comes through single port, auto-incr addr */
		/* NOTE: Use the debugless read so we don't flood kernel log
		 * if IWL_DL_IO is set */
		iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
			i + RTC_INST_LOWER_BOUND);
		val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
		if (val != le32_to_cpu(*image)) {
			ret = -EIO;
			errcnt++;
			if (errcnt >= 3)
				break;
		}
	}

	iwl_release_nic_access(priv);

	return ret;
}
Example no. 12
/*
 * ucode
 */
static int iwlagn_load_section(struct iwl_priv *priv, const char *name,
                               struct fw_desc *image, u32 dst_addr)
{
    dma_addr_t phy_addr = image->p_addr;
    u32 byte_cnt = image->len;
    int ret;

    priv->ucode_write_complete = 0;

    iwl_write_direct32(priv,
                       FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
                       FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

    iwl_write_direct32(priv,
                       FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);

    iwl_write_direct32(priv,
                       FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
                       phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

    iwl_write_direct32(priv,
                       FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
                       (iwl_get_dma_hi_addr(phy_addr)
                        << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

    iwl_write_direct32(priv,
                       FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
                       1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
                       1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
                       FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

    iwl_write_direct32(priv,
                       FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
                       FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE	|
                       FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE	|
                       FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

    IWL_DEBUG_INFO(priv, "%s uCode section being loaded...\n", name);
    ret = wait_event_interruptible_timeout(priv->wait_command_queue,
                                           priv->ucode_write_complete, 5 * HZ);
    if (ret == -ERESTARTSYS) {
        IWL_ERR(priv, "Could not load the %s uCode section due "
                "to interrupt\n", name);
        return ret;
    }
    if (!ret) {
        IWL_ERR(priv, "Could not load the %s uCode section\n",
                name);
        return -ETIMEDOUT;
    }

    return 0;
}
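Note: the function kicks a single-TB DMA on the service channel, then sleeps on wait_command_queue until ucode_write_complete flips. The completing side lives in the interrupt path; a sketch of what it must do (the exact location in the ISR is an assumption):

/* in the Tx-done interrupt handling for FH_SRVC_CHNL: */
priv->ucode_write_complete = 1;
wake_up(&priv->wait_command_queue);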
Example no. 13
File: tx.c Project: dgarnier/linux
/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
{
	u32 reg = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return;

	if (trans->cfg->base_params->shadow_reg_enable) {
		/* shadow register enabled */
		iwl_write32(trans, HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));
	} else {
		struct iwl_trans_pcie *trans_pcie =
			IWL_TRANS_GET_PCIE_TRANS(trans);
		/* if we're trying to save power */
		if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) {
			/* wake up nic if it's powered down ...
			 * uCode will wake up, and interrupt us again, so next
			 * time we'll skip this part. */
			reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(trans,
					"Tx queue %d requesting wakeup,"
					" GP1 = 0x%x\n", txq_id, reg);
				iwl_set_bit(trans, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				return;
			}

			iwl_write_direct32(trans, HBUS_TARG_WRPTR,
				     txq->q.write_ptr | (txq_id << 8));

		/*
		 * else not in power-save mode,
		 * uCode will never sleep when we're
		 * trying to tx (during RFKILL, we're not trying to tx).
		 */
		} else
			iwl_write32(trans, HBUS_TARG_WRPTR,
				    txq->q.write_ptr | (txq_id << 8));
	}
	txq->need_update = 0;
}
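Note: the power-save branch above is a wakeup handshake: read CSR_UCODE_DRV_GP1, and if the MAC is asleep, request access via CSR_GP_CNTRL and bail out; the uCode interrupts us again once awake and the write is retried. The same pattern factored into a hypothetical helper (the helper name is ours):

/* returns true when the write must be deferred until the NIC wakes */
static bool nic_needs_wakeup_sketch(struct iwl_trans *trans)
{
	u32 reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

	if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
		/* ask for MAC access; retry from the next interrupt */
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
		return true;
	}
	return false;
}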
Example no. 14
/**
 * iwl_verify_inst_full - verify runtime uCode image in card vs. host,
 *     looking at all data.
 */
static int iwl_verify_inst_full(struct iwl_priv *priv, __le32 *image,
				 u32 len)
{
	u32 val;
	u32 save_len = len;
	int ret = 0;
	u32 errcnt;

	IWL_DEBUG_INFO("ucode inst image size is %u\n", len);

	ret = iwl_grab_nic_access(priv);
	if (ret)
		return ret;

	iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, RTC_INST_LOWER_BOUND);

	errcnt = 0;
	for (; len > 0; len -= sizeof(u32), image++) {
		/* read data comes through single port, auto-incr addr */
		/* NOTE: Use the debugless read so we don't flood kernel log
		 * if IWL_DL_IO is set */
		val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
		if (val != le32_to_cpu(*image)) {
			IWL_ERROR("uCode INST section is invalid at "
				  "offset 0x%x, is 0x%x, s/b 0x%x\n",
				  save_len - len, val, le32_to_cpu(*image));
			ret = -EIO;
			errcnt++;
			if (errcnt >= 20)
				break;
		}
	}

	iwl_release_nic_access(priv);

	if (!errcnt)
		IWL_DEBUG_INFO("ucode image in INSTRUCTION memory is good\n");

	return ret;
}
Example no. 15
/*
 * ucode
 */
static int iwl5000_load_section(struct iwl_priv *priv,
				struct fw_desc *image,
				u32 dst_addr)
{
	dma_addr_t phy_addr = image->p_addr;
	u32 byte_cnt = image->len;

	iwl_write_direct32(priv,
		FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	iwl_write_direct32(priv,
		FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);

	iwl_write_direct32(priv,
		FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
		phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	iwl_write_direct32(priv,
		FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
		(iwl_get_dma_hi_addr(phy_addr)
			<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	iwl_write_direct32(priv,
		FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
		1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
		1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
		FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	iwl_write_direct32(priv,
		FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE	|
		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE	|
		FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	return 0;
}
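Note: unlike iwlagn_load_section in Example no. 12, this variant only starts the DMA and returns immediately, so completion must be awaited by the caller. A plausible caller fragment reusing the names from Example no. 12 (an assumption about how this version was driven):

	priv->ucode_write_complete = 0;
	iwl5000_load_section(priv, image, dst_addr);
	ret = wait_event_interruptible_timeout(priv->wait_command_queue,
					       priv->ucode_write_complete,
					       5 * HZ);
	if (!ret)
		return -ETIMEDOUT;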
Example no. 16
static int iwl5000_alive_notify(struct iwl_priv *priv)
{
	u32 a;
	unsigned long flags;
	int i, chan;
	u32 reg_val;

	spin_lock_irqsave(&priv->lock, flags);

	priv->scd_base_addr = iwl_read_prph(priv, IWL50_SCD_SRAM_BASE_ADDR);
	a = priv->scd_base_addr + IWL50_SCD_CONTEXT_DATA_OFFSET;
	for (; a < priv->scd_base_addr + IWL50_SCD_TX_STTS_BITMAP_OFFSET;
		a += 4)
		iwl_write_targ_mem(priv, a, 0);
	for (; a < priv->scd_base_addr + IWL50_SCD_TRANSLATE_TBL_OFFSET;
		a += 4)
		iwl_write_targ_mem(priv, a, 0);
	for (; a < priv->scd_base_addr +
	       IWL50_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num);
	       a += 4)
		iwl_write_targ_mem(priv, a, 0);

	iwl_write_prph(priv, IWL50_SCD_DRAM_BASE_ADDR,
		       priv->scd_bc_tbls.dma >> 10);

	/* Enable DMA channel */
	for (chan = 0; chan < FH50_TCSR_CHNL_NUM ; chan++)
		iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	iwl_write_prph(priv, IWL50_SCD_QUEUECHAIN_SEL,
		IWL50_SCD_QUEUECHAIN_SEL_ALL(priv->hw_params.max_txq_num));
	iwl_write_prph(priv, IWL50_SCD_AGGR_SEL, 0);

	/* initiate the queues */
	for (i = 0; i < priv->hw_params.max_txq_num; i++) {
		iwl_write_prph(priv, IWL50_SCD_QUEUE_RDPTR(i), 0);
		iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
		iwl_write_targ_mem(priv, priv->scd_base_addr +
				IWL50_SCD_CONTEXT_QUEUE_OFFSET(i), 0);
		iwl_write_targ_mem(priv, priv->scd_base_addr +
				IWL50_SCD_CONTEXT_QUEUE_OFFSET(i) +
				sizeof(u32),
				((SCD_WIN_SIZE <<
				IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
				IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
				((SCD_FRAME_LIMIT <<
				IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
	}

	iwl_write_prph(priv, IWL50_SCD_INTERRUPT_MASK,
			IWL_MASK(0, priv->hw_params.max_txq_num));

	/* Activate all Tx DMA/FIFO channels */
	priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7));

	iwl5000_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);

	/* map qos queues to fifos one-to-one */
	for (i = 0; i < ARRAY_SIZE(iwl5000_default_queue_to_tx_fifo); i++) {
		int ac = iwl5000_default_queue_to_tx_fifo[i];
		iwl_txq_ctx_activate(priv, i);
		iwl5000_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
	}
	/* TODO - need to initialize those FIFOs inside the loop above,
	 * not only mark them as active */
	iwl_txq_ctx_activate(priv, 4);
	iwl_txq_ctx_activate(priv, 7);
	iwl_txq_ctx_activate(priv, 8);
	iwl_txq_ctx_activate(priv, 9);

	spin_unlock_irqrestore(&priv->lock, flags);


	iwl5000_send_wimax_coex(priv);

	iwl5000_set_Xtal_calib(priv);
	iwl_send_calib_results(priv);

	return 0;
}
/*
 * This function handles the user application commands for register access.
 *
 * It retrieves command ID carried with IWL_TM_ATTR_COMMAND and calls to the
 * handlers respectively.
 *
 * If it's an unknown command ID, -ENOSYS is returned; -ENOMSG is returned
 * if any of the mandatory fields (IWL_TM_ATTR_REG_OFFSET,
 * IWL_TM_ATTR_REG_VALUE32, IWL_TM_ATTR_REG_VALUE8) is missing. Otherwise
 * 0 is returned, indicating success of the command execution.
 *
 * If IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_REG_READ32, the register read
 * value is returned with IWL_TM_ATTR_REG_VALUE32.
 *
 * @hw: ieee80211_hw object that represents the device
 * @tb: gnl message fields from the user space
 */
static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
	u32 ofs, val32, cmd;
	u8 val8;
	struct sk_buff *skb;
	int status = 0;

	if (!tb[IWL_TM_ATTR_REG_OFFSET]) {
		IWL_ERR(priv, "Missing register offset\n");
		return -ENOMSG;
	}
	ofs = nla_get_u32(tb[IWL_TM_ATTR_REG_OFFSET]);
	IWL_INFO(priv, "testmode register access command offset 0x%x\n", ofs);

	/*
	 * Allow access only to FH/CSR/HBUS in direct mode.  Since we don't
	 * have the upper bounds for the CSR and HBUS segments, we will use
	 * only the upper bound of FH for the sanity check.
	 */
	cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
	if ((cmd == IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32 ||
		cmd == IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32 ||
		cmd == IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8) &&
		(ofs >= FH_MEM_UPPER_BOUND)) {
		IWL_ERR(priv, "offset out of segment (0x0 - 0x%x)\n",
			FH_MEM_UPPER_BOUND);
		return -EINVAL;
	}

	switch (cmd) {
	case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
		val32 = iwl_read_direct32(trans(priv), ofs);
		IWL_INFO(priv, "32bit value to read 0x%x\n", val32);

		skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
		if (!skb) {
			IWL_ERR(priv, "Memory allocation fail\n");
			return -ENOMEM;
		}
		NLA_PUT_U32(skb, IWL_TM_ATTR_REG_VALUE32, val32);
		status = cfg80211_testmode_reply(skb);
		if (status < 0)
			IWL_ERR(priv, "Error sending msg : %d\n", status);
		break;
	case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
		if (!tb[IWL_TM_ATTR_REG_VALUE32]) {
			IWL_ERR(priv, "Missing value to write\n");
			return -ENOMSG;
		} else {
			val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]);
			IWL_INFO(priv, "32bit value to write 0x%x\n", val32);
			iwl_write_direct32(trans(priv), ofs, val32);
		}
		break;
	case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
		if (!tb[IWL_TM_ATTR_REG_VALUE8]) {
			IWL_ERR(priv, "Missing value to write\n");
			return -ENOMSG;
		} else {
			val8 = nla_get_u8(tb[IWL_TM_ATTR_REG_VALUE8]);
			IWL_INFO(priv, "8bit value to write 0x%x\n", val8);
			iwl_write8(trans(priv), ofs, val8);
		}
		break;
	default:
		IWL_ERR(priv, "Unknown testmode register command ID\n");
		return -ENOSYS;
	}

	return status;

nla_put_failure:
	kfree_skb(skb);
	return -EMSGSIZE;
}
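Note: NLA_PUT_U32 above is the old netlink helper macro (since removed from the kernel) that jumps to the nla_put_failure label on error, which is why the function ends with that label. It expanded roughly to:

#define NLA_PUT_U32(skb, attrtype, value) \
	do { \
		if (nla_put_u32(skb, attrtype, value)) \
			goto nla_put_failure; \
	} while (0)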
Example no. 18
File: rx.c Project: andy-shev/linux
static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */

	switch (trans_pcie->rx_buf_size) {
	case IWL_AMSDU_4K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
		break;
	case IWL_AMSDU_8K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
		break;
	case IWL_AMSDU_12K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K;
		break;
	default:
		WARN_ON(1);
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
	}

	/* Stop Rx DMA */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	/* reset and flush pointers */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RDPTR, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k or 12k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   rb_size |
			   (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (trans->cfg->host_interrupt_operation_mode)
		iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
}
Example no. 19
int iwlagn_alive_notify(struct iwl_priv *priv)
{
    const struct queue_to_fifo_ac *queue_to_fifo;
    u32 a;
    unsigned long flags;
    int i, chan;
    u32 reg_val;

    spin_lock_irqsave(&priv->lock, flags);

    priv->scd_base_addr = iwl_read_prph(priv, IWLAGN_SCD_SRAM_BASE_ADDR);
    a = priv->scd_base_addr + IWLAGN_SCD_CONTEXT_DATA_OFFSET;
    for (; a < priv->scd_base_addr + IWLAGN_SCD_TX_STTS_BITMAP_OFFSET;
            a += 4)
        iwl_write_targ_mem(priv, a, 0);
    for (; a < priv->scd_base_addr + IWLAGN_SCD_TRANSLATE_TBL_OFFSET;
            a += 4)
        iwl_write_targ_mem(priv, a, 0);
    for (; a < priv->scd_base_addr +
            IWLAGN_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
        iwl_write_targ_mem(priv, a, 0);

    iwl_write_prph(priv, IWLAGN_SCD_DRAM_BASE_ADDR,
                   priv->scd_bc_tbls.dma >> 10);

    /* Enable DMA channel */
    for (chan = 0; chan < FH50_TCSR_CHNL_NUM ; chan++)
        iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
                           FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
                           FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

    /* Update FH chicken bits */
    reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
    iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
                       reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

    iwl_write_prph(priv, IWLAGN_SCD_QUEUECHAIN_SEL,
                   IWLAGN_SCD_QUEUECHAIN_SEL_ALL(priv));
    iwl_write_prph(priv, IWLAGN_SCD_AGGR_SEL, 0);

    /* initiate the queues */
    for (i = 0; i < priv->hw_params.max_txq_num; i++) {
        iwl_write_prph(priv, IWLAGN_SCD_QUEUE_RDPTR(i), 0);
        iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
        iwl_write_targ_mem(priv, priv->scd_base_addr +
                           IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(i), 0);
        iwl_write_targ_mem(priv, priv->scd_base_addr +
                           IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(i) +
                           sizeof(u32),
                           ((SCD_WIN_SIZE <<
                             IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
                            IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
                           ((SCD_FRAME_LIMIT <<
                             IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
                            IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
    }

    iwl_write_prph(priv, IWLAGN_SCD_INTERRUPT_MASK,
                   IWL_MASK(0, priv->hw_params.max_txq_num));

    /* Activate all Tx DMA/FIFO channels */
    priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7));

    /* map queues to FIFOs */
    if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
        queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
    else
        queue_to_fifo = iwlagn_default_queue_to_tx_fifo;

    iwlagn_set_wr_ptrs(priv, priv->cmd_queue, 0);

    /* make sure all queue are not stopped */
    memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
    for (i = 0; i < 4; i++)
        atomic_set(&priv->queue_stop_count[i], 0);

    /* reset to 0 to enable all the queue first */
    priv->txq_ctx_active_msk = 0;

    BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) != 10);
    BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) != 10);

    for (i = 0; i < 10; i++) {
        int fifo = queue_to_fifo[i].fifo;
        int ac = queue_to_fifo[i].ac;

        iwl_txq_ctx_activate(priv, i);

        if (fifo == IWL_TX_FIFO_UNUSED)
            continue;

        if (ac != IWL_AC_UNSET)
            iwl_set_swq_id(&priv->txq[i], ac, i);
        iwlagn_tx_queue_set_status(priv, &priv->txq[i], fifo, 0);
    }

    spin_unlock_irqrestore(&priv->lock, flags);

    /* Enable L1-Active */
    iwl_clear_bits_prph(priv, APMG_PCIDEV_STT_REG,
                        APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

    iwlagn_send_wimax_coex(priv);

    iwlagn_set_Xtal_calib(priv);
    iwl_send_calib_results(priv);

    return 0;
}
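Note: IWL_MASK(lo, hi) above selects a contiguous run of bits: IWL_MASK(0, 7) covers Tx DMA channels 0..7 (0xff), and IWL_MASK(0, max_txq_num) masks the Tx queue interrupts. A definition consistent with that usage (our reconstruction, not copied from the headers):

/* set bits lo..hi inclusive; IWL_MASK(0, 7) == 0xff */
#define IWL_MASK(lo, hi) \
	((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))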
Example no. 20
/*
 * iwl_pcie_rx_stop - stops the Rx DMA
 */
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
				   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}
Example no. 21
File: tx.c Project: dgarnier/linux
void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
			       int sta_id, int tid, int frame_limit, u16 ssn)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (test_and_set_bit(txq_id, trans_pcie->queue_used))
		WARN_ONCE(1, "queue %d already used - expect issues", txq_id);

	/* Stop this Tx queue before configuring it */
	iwl_txq_set_inactive(trans, txq_id);

	/* Set this queue as a chain-building queue unless it is CMD queue */
	if (txq_id != trans_pcie->cmd_queue)
		iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id));

	/* If this queue is mapped to a certain station: it is an AGG queue */
	if (sta_id != IWL_INVALID_STATION) {
		u16 ra_tid = BUILD_RAxTID(sta_id, tid);

		/* Map receiver-address / traffic-ID to this queue */
		iwl_txq_set_ratid_map(trans, ra_tid, txq_id);

		/* enable aggregations for the queue */
		iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
	} else {
		/*
		 * disable aggregations for the queue, this will also make the
		 * ra_tid mapping configuration irrelevant since it is now a
		 * non-AGG queue.
		 */
		iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
	}

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff);
	trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff);

	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
			   (ssn & 0xff) | (txq_id << 8));
	iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);

	/* Set up Tx window size and frame limit for this queue */
	iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
	iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
			((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
				SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
			((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
		       (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		       (fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
		       (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
		       SCD_QUEUE_STTS_REG_MSK);
	IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d WrPtr: %d\n",
			    txq_id, fifo, ssn & 0xff);
}
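Note: BUILD_RAxTID packs the station id and TID into the key used for the scheduler's RA/TID map. A definition consistent with its use here (our assumption about the exact layout):

/* TID in the low nibble, receiver (station) id above it */
#define BUILD_RAxTID(sta_id, tid)	(((sta_id) << 4) + (tid))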