static void iwl5000_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index)
{
	iwl_write_direct32(priv, HBUS_TARG_WRPTR,
			   (index & 0xff) | (txq_id << 8));
	iwl_write_prph(priv, IWL50_SCD_QUEUE_RDPTR(txq_id), index);
}
static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id)
{
	iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
		       (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		       (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}
static inline void iwl_mvm_restart_early_start(struct iwl_mvm *mvm)
{
	if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000)
		iwl_clear_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100);
	else
		iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 1);
}
void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index)
{
	IWL_DEBUG_TX_QUEUES(trans, "Q %d WrPtr: %d", txq_id, index & 0xff);
	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
			   (index & 0xff) | (txq_id << 8));
	iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), index);
}
static inline void iwl_txq_set_inactive(struct iwl_trans *trans, u16 txq_id)
{
	/* Simply stop the queue, but don't change any configuration;
	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit.
	 */
	iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
		       (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		       (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}
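/*
 * Standalone sketch (illustrative, not driver code): the status register
 * pairs the ACTIVE config bit with a write-enable bit, so writing ACTIVE=0
 * together with SCD_ACT_EN=1 stops the queue without touching the other
 * fields. The bit positions below are placeholders, not the real
 * SCD_QUEUE_STTS_REG_POS_* values.
 */
#include <stdint.h>

static uint32_t scd_stop_queue_value(void)
{
	const unsigned int pos_active = 0;	/* assumed position */
	const unsigned int pos_scd_act_en = 19;	/* assumed position */

	/* ACTIVE cleared, write-enable set: hardware latches only ACTIVE */
	return (0u << pos_active) | (1u << pos_scd_act_en);
}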
static void iwlagn_tx_queue_stop_scheduler(struct iwl_priv *priv, u16 txq_id)
{
	/* Simply stop the queue, but don't change any configuration;
	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit.
	 */
	iwl_write_prph(priv, SCD_QUEUE_STATUS_BITS(txq_id),
		       (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		       (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}
void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
				   struct iwl_tx_queue *txq, int tx_fifo_id,
				   bool active)
{
	int txq_id = txq->q.id;

	iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
		       (active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		       (tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
		       (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
		       SCD_QUEUE_STTS_REG_MSK);

	if (active)
		IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d\n",
				    txq_id, tx_fifo_id);
	else
		IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}
static int iwl5000_apm_init(struct iwl_priv *priv)
{
	int ret = 0;

	/* disable L0S exit timer (platform NMI Work/Around) */
	iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/* disable L0s without affecting L1; don't wait for ICH L0s bug W/A */
	iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(priv, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/* enable HAP INTA to move device L1a -> L0s */
	iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	if (priv->cfg->need_pll_cfg)
		iwl_set_bit(priv, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);

	/* set "initialization complete" bit to move adapter
	 * D0U* --> D0A* state */
	iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/* wait for clock stabilization */
	ret = iwl_poll_direct_bit(priv, CSR_GP_CNTRL,
				  CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				  25000);
	if (ret < 0) {
		IWL_DEBUG_INFO(priv, "Failed to init the card\n");
		return ret;
	}

	/* enable DMA */
	iwl_write_prph(priv, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);

	udelay(20);

	/* disable L1-Active */
	iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

	return ret;
}
void iwl_trans_tx_queue_set_status(struct iwl_priv *priv,
				   struct iwl_tx_queue *txq, int tx_fifo_id,
				   int scd_retry)
{
	int txq_id = txq->q.id;
	int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;

	iwl_write_prph(priv, SCD_QUEUE_STATUS_BITS(txq_id),
		       (active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		       (tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
		       (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
		       SCD_QUEUE_STTS_REG_MSK);

	txq->sched_retry = scd_retry;

	IWL_DEBUG_INFO(priv, "%s %s Queue %d on FIFO %d\n",
		       active ? "Activate" : "Deactivate",
		       scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
}
static int iwl_tx_init(struct iwl_priv *priv)
{
	int ret;
	int txq_id, slots_num;
	unsigned long flags;
	bool alloc = false;

	if (!priv->txq) {
		ret = iwl_trans_tx_alloc(priv);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock_irqsave(&priv->lock, flags);

	/* Turn off all Tx DMA fifos */
	iwl_write_prph(priv, SCD_TXFACT, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		slots_num = (txq_id == priv->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_init(priv, &priv->txq[txq_id], slots_num,
					 txq_id);
		if (ret) {
			IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return 0;

error:
	/* Upon error, free only if we allocated something */
	if (alloc)
		trans_tx_free(&priv->trans);

	return ret;
}
static int iwl5000_apm_reset(struct iwl_priv *priv)
{
	int ret = 0;

	iwl5000_apm_stop_master(priv);

	iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/* FIXME: put here the L1A -> L0S w/a */

	if (priv->cfg->need_pll_cfg)
		iwl_set_bit(priv, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);

	/* set "initialization complete" bit to move adapter
	 * D0U* --> D0A* state */
	iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/* wait for clock stabilization */
	ret = iwl_poll_direct_bit(priv, CSR_GP_CNTRL,
				  CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				  25000);
	if (ret < 0) {
		IWL_DEBUG_INFO(priv, "Failed to init the card\n");
		goto out;
	}

	/* enable DMA */
	iwl_write_prph(priv, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);

	udelay(20);

	/* disable L1-Active */
	iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

out:
	return ret;
}
static int iwl_testmode_indirect_write(struct iwl_priv *priv, u32 addr,
				       u32 size, unsigned char *buf)
{
	struct iwl_trans *trans = trans(priv);
	u32 val, i;
	unsigned long flags;

	if (IWL_TM_ABS_PRPH_START <= addr &&
	    addr < IWL_TM_ABS_PRPH_START + PRPH_END) {
		/* Periphery writes can be 1-3 bytes long, or DWORDs */
		if (size < 4) {
			memcpy(&val, buf, size);
			spin_lock_irqsave(&trans->reg_lock, flags);
			iwl_grab_nic_access(trans);
			iwl_write32(trans, HBUS_TARG_PRPH_WADDR,
				    (addr & 0x0000FFFF) | ((size - 1) << 24));
			iwl_write32(trans, HBUS_TARG_PRPH_WDAT, val);
			iwl_release_nic_access(trans);
			/* needed after consecutive writes w/o read */
			mmiowb();
			spin_unlock_irqrestore(&trans->reg_lock, flags);
		} else {
			if (size % 4)
				return -EINVAL;
			for (i = 0; i < size; i += 4)
				iwl_write_prph(trans, addr + i,
					       *(u32 *)(buf + i));
		}
	} else if (iwlagn_hw_valid_rtc_data_addr(addr) ||
		   (IWLAGN_RTC_INST_LOWER_BOUND <= addr &&
		    addr < IWLAGN_RTC_INST_UPPER_BOUND)) {
		_iwl_write_targ_mem_words(trans, addr, buf, size / 4);
	} else {
		return -EINVAL;
	}

	return 0;
}
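/*
 * Standalone sketch (illustrative, not driver code): sub-DWORD periphery
 * writes above encode the byte count in the upper bits of the address
 * register -- the low bits carry the periphery address, and (size - 1) in
 * bits 24+ tells the HBUS how many bytes of the data register are valid.
 */
#include <stdint.h>

static uint32_t hbus_prph_waddr(uint32_t addr, uint32_t nbytes)
{
	/* nbytes is 1..3 here; 0 in the count field means "one byte" */
	return (addr & 0x0000FFFF) | ((nbytes - 1) << 24);
}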
void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
				   struct iwl_tx_queue *txq, int tx_fifo_id,
				   int scd_retry)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id = txq->q.id;
	int active = test_bit(txq_id, &trans_pcie->txq_ctx_active_msk) ? 1 : 0;

	iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
		       (active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		       (tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
		       (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
		       SCD_QUEUE_STTS_REG_MSK);

	txq->sched_retry = scd_retry;

	if (active)
		IWL_DEBUG_TX_QUEUES(trans, "Activate %s Queue %d on FIFO %d\n",
				    scd_retry ? "BA" : "AC/CMD",
				    txq_id, tx_fifo_id);
	else
		IWL_DEBUG_TX_QUEUES(trans, "Deactivate %s Queue %d\n",
				    scd_retry ? "BA" : "AC/CMD", txq_id);
}
int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, const struct fw_img *fw)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_context_info *ctxt_info;
	struct iwl_context_info_rbd_cfg *rx_cfg;
	u32 control_flags = 0;
	int ret;

	ctxt_info = dma_alloc_coherent(trans->dev, sizeof(*ctxt_info),
				       &trans_pcie->ctxt_info_dma_addr,
				       GFP_KERNEL);
	if (!ctxt_info)
		return -ENOMEM;

	ctxt_info->version.version = 0;
	ctxt_info->version.mac_id =
		cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV));
	/* size is in DWs */
	ctxt_info->version.size = cpu_to_le16(sizeof(*ctxt_info) / 4);

	BUILD_BUG_ON(RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) > 0xF);
	control_flags = IWL_CTXT_INFO_RB_SIZE_4K |
			IWL_CTXT_INFO_TFD_FORMAT_LONG |
			RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) <<
			IWL_CTXT_INFO_RB_CB_SIZE_POS;
	ctxt_info->control.control_flags = cpu_to_le32(control_flags);

	/* initialize RX default queue */
	rx_cfg = &ctxt_info->rbd_cfg;
	rx_cfg->free_rbd_addr = cpu_to_le64(trans_pcie->rxq->bd_dma);
	rx_cfg->used_rbd_addr = cpu_to_le64(trans_pcie->rxq->used_bd_dma);
	rx_cfg->status_wr_ptr = cpu_to_le64(trans_pcie->rxq->rb_stts_dma);

	/* initialize TX command queue */
	ctxt_info->hcmd_cfg.cmd_queue_addr =
		cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr);
	ctxt_info->hcmd_cfg.cmd_queue_size =
		TFD_QUEUE_CB_SIZE(TFD_CMD_SLOTS);

	/* allocate ucode sections in dram and set addresses */
	ret = iwl_pcie_init_fw_sec(trans, fw, &ctxt_info->dram);
	if (ret) {
		dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info),
				  ctxt_info, trans_pcie->ctxt_info_dma_addr);
		return ret;
	}

	trans_pcie->ctxt_info = ctxt_info;

	iwl_enable_interrupts(trans);

	/* Configure debug, if exists */
	if (trans->dbg_dest_tlv)
		iwl_pcie_apply_destination(trans);

	/* kick FW self load */
	iwl_write64(trans, CSR_CTXT_INFO_BA, trans_pcie->ctxt_info_dma_addr);
	iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1);

	/* Context info will be released upon alive or failure to get one */

	return 0;
}
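/*
 * Standalone sketch (illustrative, not driver code): the BUILD_BUG_ON and
 * 4-bit field above suggest the RB circular-buffer size is encoded as a
 * small power-of-two exponent; a log2-style encoding would look like this.
 */
#include <stdint.h>

static uint32_t rb_cb_size_encode(uint32_t entries)
{
	uint32_t enc = 0;

	/* log2 of the entry count, assuming entries is a power of two */
	while (entries > 1) {
		entries >>= 1;
		enc++;
	}
	return enc;	/* must fit the 4-bit field (<= 0xF) */
}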
static int iwl_mvm_tm_send_hcmd(struct iwl_mvm *mvm,
				struct iwl_tm_data *data_in,
				struct iwl_tm_data *data_out)
{
	struct iwl_tm_cmd_request *hcmd_req = data_in->data;
	struct iwl_tm_cmd_request *cmd_resp;
	u32 reply_len, resp_size;
	struct iwl_rx_packet *pkt;
	struct iwl_host_cmd host_cmd = {
		.id = hcmd_req->id,
		.data[0] = hcmd_req->data,
		.len[0] = hcmd_req->len,
		.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
		.flags = CMD_SYNC,
	};
	int ret;

	if (hcmd_req->want_resp)
		host_cmd.flags |= CMD_WANT_SKB;

	mutex_lock(&mvm->mutex);
	ret = iwl_mvm_send_cmd(mvm, &host_cmd);
	mutex_unlock(&mvm->mutex);
	if (ret)
		return ret;

	/* if no reply is required, we are done */
	if (!(host_cmd.flags & CMD_WANT_SKB))
		return 0;

	/* Retrieve response packet */
	pkt = host_cmd.resp_pkt;
	if (!pkt) {
		IWL_ERR(mvm->trans, "HCMD received a null response packet\n");
		return -ENOMSG;
	}
	reply_len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;

	/* Set response data */
	resp_size = sizeof(struct iwl_tm_cmd_request) + reply_len;
	cmd_resp = kzalloc(resp_size, GFP_KERNEL);
	if (!cmd_resp) {
		iwl_free_resp(&host_cmd);
		return -ENOMEM;
	}
	cmd_resp->id = hcmd_req->id;
	cmd_resp->len = reply_len;
	memcpy(cmd_resp->data, &pkt->hdr, reply_len);
	iwl_free_resp(&host_cmd);

	data_out->data = cmd_resp;
	data_out->len = resp_size;

	return 0;
}

static void iwl_mvm_tm_execute_reg_ops(struct iwl_trans *trans,
				       struct iwl_tm_regs_request *request,
				       struct iwl_tm_regs_request *result)
{
	struct iwl_tm_reg_op *cur_op;
	u32 idx, read_idx;

	for (idx = 0, read_idx = 0; idx < request->num; idx++) {
		cur_op = &request->reg_ops[idx];
		if (cur_op->op_type == IWL_TM_REG_OP_READ) {
			cur_op->value = iwl_read32(trans, cur_op->address);
			memcpy(&result->reg_ops[read_idx], cur_op,
			       sizeof(*cur_op));
			read_idx++;
		} else {
			/* IWL_TM_REG_OP_WRITE is the only possible option */
			iwl_write32(trans, cur_op->address, cur_op->value);
		}
	}
}

static int iwl_mvm_tm_reg_ops(struct iwl_trans *trans,
			      struct iwl_tm_data *data_in,
			      struct iwl_tm_data *data_out)
{
	struct iwl_tm_reg_op *cur_op;
	struct iwl_tm_regs_request *request = data_in->data;
	struct iwl_tm_regs_request *result;
	u32 result_size;
	u32 idx, read_idx;
	bool is_grab_nic_access_required = true;
	unsigned long flags;

	/* Calculate result size (result is returned only for read ops) */
	for (idx = 0, read_idx = 0; idx < request->num; idx++) {
		if (request->reg_ops[idx].op_type == IWL_TM_REG_OP_READ)
			read_idx++;
		/*
		 * Check if there is an operation that is not in the CSR
		 * range (0x00000000 - 0x000003FF) and not in the AL range.
		 */
		cur_op = &request->reg_ops[idx];
		if (IS_AL_ADDR(cur_op->address) ||
		    cur_op->address < HBUS_BASE)
			is_grab_nic_access_required = false;
	}
	result_size = sizeof(struct iwl_tm_regs_request) +
		      read_idx * sizeof(struct iwl_tm_reg_op);

	result = kzalloc(result_size, GFP_KERNEL);
	if (!result)
		return -ENOMEM;
	result->num = read_idx;

	if (is_grab_nic_access_required) {
		if (!iwl_trans_grab_nic_access(trans, false, &flags)) {
			kfree(result);
			return -EBUSY;
		}
		iwl_mvm_tm_execute_reg_ops(trans, request, result);
		iwl_trans_release_nic_access(trans, &flags);
	} else {
		iwl_mvm_tm_execute_reg_ops(trans, request, result);
	}

	data_out->data = result;
	data_out->len = result_size;

	return 0;
}

static int iwl_tm_get_dev_info(struct iwl_mvm *mvm,
			       struct iwl_tm_data *data_out)
{
	struct iwl_tm_dev_info *dev_info;
	const u8 driver_ver[] = IWLWIFI_VERSION;

	if (!mvm->nvm_data)
		return -EINVAL;

	dev_info = kzalloc(sizeof(struct iwl_tm_dev_info) +
			   (strlen(driver_ver) + 1) * sizeof(u8),
			   GFP_KERNEL);
	if (!dev_info)
		return -ENOMEM;

	dev_info->dev_id = mvm->trans->hw_id;
	dev_info->fw_ver = mvm->fw->ucode_ver;
	dev_info->vendor_id = PCI_VENDOR_ID_INTEL;
	dev_info->silicon_step = mvm->nvm_data->radio_cfg_step;

	/* TODO: Assign real value when feature is implemented */
	dev_info->build_ver = 0x00;

	strcpy(dev_info->driver_ver, driver_ver);

	data_out->data = dev_info;
	data_out->len = sizeof(*dev_info);

	return 0;
}

static int iwl_tm_indirect_read(struct iwl_mvm *mvm,
				struct iwl_tm_data *data_in,
				struct iwl_tm_data *data_out)
{
	struct iwl_trans *trans = mvm->trans;
	struct iwl_tm_sram_read_request *cmd_in = data_in->data;
	u32 addr = cmd_in->offset;
	u32 size = cmd_in->length;
	u32 *buf32, size32, i;
	unsigned long flags;

	if (size & (sizeof(u32) - 1))
		return -EINVAL;

	data_out->data = kmalloc(size, GFP_KERNEL);
	if (!data_out->data)
		return -ENOMEM;

	data_out->len = size;

	size32 = size / sizeof(u32);
	buf32 = data_out->data;

	mutex_lock(&mvm->mutex);

	/* Hard-coded periphery absolute address */
	if (IWL_ABS_PRPH_START <= addr &&
	    addr < IWL_ABS_PRPH_START + PRPH_END) {
		if (!iwl_trans_grab_nic_access(trans, false, &flags)) {
			mutex_unlock(&mvm->mutex);
			return -EBUSY;
		}
		for (i = 0; i < size32; i++)
			buf32[i] = iwl_trans_read_prph(trans,
						       addr + i * sizeof(u32));
		iwl_trans_release_nic_access(trans, &flags);
	} else {
		/* target memory (SRAM) */
		iwl_trans_read_mem(trans, addr, buf32, size32);
	}

	mutex_unlock(&mvm->mutex);
	return 0;
}

static int iwl_tm_indirect_write(struct iwl_mvm *mvm,
				 struct iwl_tm_data *data_in)
{
	struct iwl_trans *trans = mvm->trans;
	struct iwl_tm_sram_write_request *cmd_in = data_in->data;
	u32 addr = cmd_in->offset;
	u32 size = cmd_in->len;
	u8 *buf = cmd_in->buffer;
	u32 *buf32 = (u32 *)buf, size32 = size / sizeof(u32);
	unsigned long flags;
	u32 val, i;

	mutex_lock(&mvm->mutex);
	if (IWL_ABS_PRPH_START <= addr &&
	    addr < IWL_ABS_PRPH_START + PRPH_END) {
		/* Periphery writes can be 1-3 bytes long, or DWORDs */
		if (size < 4) {
			memcpy(&val, buf, size);
			if (!iwl_trans_grab_nic_access(trans, false, &flags)) {
				mutex_unlock(&mvm->mutex);
				return -EBUSY;
			}
			iwl_write32(trans, HBUS_TARG_PRPH_WADDR,
				    (addr & 0x000FFFFF) | ((size - 1) << 24));
			iwl_write32(trans, HBUS_TARG_PRPH_WDAT, val);
			iwl_trans_release_nic_access(trans, &flags);
		} else {
			if (size % sizeof(u32)) {
				mutex_unlock(&mvm->mutex);
				return -EINVAL;
			}
			for (i = 0; i < size32; i++)
				iwl_write_prph(trans, addr + i * sizeof(u32),
					       buf32[i]);
		}
	} else {
		iwl_trans_write_mem(trans, addr, buf32, size32);
	}
	mutex_unlock(&mvm->mutex);

	return 0;
}

/**
 * iwl_mvm_tm_cmd_execute - Implementation of test command executor callback
 * @op_mode: Specific device's operation mode
 * @cmd: User space command's index
 * @data_in: Input data. The "data" field is to be cast to the relevant
 *	data structure. All verification must be done by the caller;
 *	it is therefore assumed that the input data length is valid.
 * @data_out: Will be allocated inside; freeing is the caller's
 *	responsibility
 */
int iwl_mvm_tm_cmd_execute(struct iwl_op_mode *op_mode, u32 cmd,
			   struct iwl_tm_data *data_in,
			   struct iwl_tm_data *data_out)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);

	if (WARN_ON_ONCE(!op_mode || !data_in))
		return -EINVAL;

	switch (cmd) {
	case IWL_TM_USER_CMD_HCMD:
		return iwl_mvm_tm_send_hcmd(mvm, data_in, data_out);
	case IWL_TM_USER_CMD_REG_ACCESS:
		return iwl_mvm_tm_reg_ops(mvm->trans, data_in, data_out);
	case IWL_TM_USER_CMD_SRAM_WRITE:
		return iwl_tm_indirect_write(mvm, data_in);
	case IWL_TM_USER_CMD_SRAM_READ:
		return iwl_tm_indirect_read(mvm, data_in, data_out);
	case IWL_TM_USER_CMD_GET_DEVICE_INFO:
		return iwl_tm_get_dev_info(mvm, data_out);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

/**
 * iwl_tm_mvm_send_rx() - Send a spontaneous rx message to user
 * @mvm: mvm opmode pointer
 * @rxb: Contains rx packet to be sent
 */
void iwl_tm_mvm_send_rx(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt;
	int length;

	pkt = rxb_addr(rxb);
	length = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;

	/* the length doesn't include len_n_flags field, so add it manually */
	length += sizeof(__le32);

	iwl_tm_gnl_send_msg(mvm->trans, IWL_TM_USER_CMD_NOTIF_UCODE_RX_PKT,
			    true, (void *)pkt, length, GFP_ATOMIC);
}
/*
 * This function handles the user application commands for register access.
 *
 * It retrieves the command ID carried with IWL_TM_ATTR_COMMAND and calls
 * the respective handlers.
 *
 * If it's an unknown command ID, -ENOSYS is returned; -ENOMSG if the
 * mandatory fields (IWL_TM_ATTR_REG_OFFSET, IWL_TM_ATTR_REG_VALUE32,
 * IWL_TM_ATTR_REG_VALUE8) are missing. Otherwise 0 is replied, indicating
 * the success of the command execution.
 *
 * If IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_REG_READ32, the register read
 * value is returned with IWL_TM_ATTR_REG_VALUE32.
 *
 * @hw: ieee80211_hw object that represents the device
 * @tb: gnl message fields from the user space
 */
static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
{
	struct iwl_priv *priv = hw->priv;
	u32 ofs, val32;
	u8 val8;
	struct sk_buff *skb;
	int status = 0;

	if (!tb[IWL_TM_ATTR_REG_OFFSET]) {
		IWL_DEBUG_INFO(priv, "Error finding register offset\n");
		return -ENOMSG;
	}
	ofs = nla_get_u32(tb[IWL_TM_ATTR_REG_OFFSET]);
	IWL_INFO(priv, "testmode register access command offset 0x%x\n", ofs);

	switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
	case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
		val32 = iwl_read32(bus(priv), ofs);
		IWL_INFO(priv, "32bit value to read 0x%x\n", val32);

		skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
		if (!skb) {
			IWL_DEBUG_INFO(priv, "Error allocating memory\n");
			return -ENOMEM;
		}
		NLA_PUT_U32(skb, IWL_TM_ATTR_REG_VALUE32, val32);
		status = cfg80211_testmode_reply(skb);
		if (status < 0)
			IWL_DEBUG_INFO(priv, "Error sending msg : %d\n",
				       status);
		break;
	case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
		if (!tb[IWL_TM_ATTR_REG_VALUE32]) {
			IWL_DEBUG_INFO(priv, "Error finding value to write\n");
			return -ENOMSG;
		} else {
			val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]);
			IWL_INFO(priv, "32bit value to write 0x%x\n", val32);
			iwl_write32(bus(priv), ofs, val32);
		}
		break;
	case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
		if (!tb[IWL_TM_ATTR_REG_VALUE8]) {
			IWL_DEBUG_INFO(priv, "Error finding value to write\n");
			return -ENOMSG;
		} else {
			val8 = nla_get_u8(tb[IWL_TM_ATTR_REG_VALUE8]);
			IWL_INFO(priv, "8bit value to write 0x%x\n", val8);
			iwl_write8(bus(priv), ofs, val8);
		}
		break;
	case IWL_TM_CMD_APP2DEV_INDIRECT_REG_READ32:
		val32 = iwl_read_prph(bus(priv), ofs);
		IWL_INFO(priv, "32bit value to read 0x%x\n", val32);

		skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
		if (!skb) {
			IWL_DEBUG_INFO(priv, "Error allocating memory\n");
			return -ENOMEM;
		}
		NLA_PUT_U32(skb, IWL_TM_ATTR_REG_VALUE32, val32);
		status = cfg80211_testmode_reply(skb);
		if (status < 0)
			IWL_DEBUG_INFO(priv, "Error sending msg : %d\n",
				       status);
		break;
	case IWL_TM_CMD_APP2DEV_INDIRECT_REG_WRITE32:
		if (!tb[IWL_TM_ATTR_REG_VALUE32]) {
			IWL_DEBUG_INFO(priv, "Error finding value to write\n");
			return -ENOMSG;
		} else {
			val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]);
			IWL_INFO(priv, "32bit value to write 0x%x\n", val32);
			iwl_write_prph(bus(priv), ofs, val32);
		}
		break;
	default:
		IWL_DEBUG_INFO(priv, "Unknown testmode register command ID\n");
		return -ENOSYS;
	}

	return status;

nla_put_failure:
	kfree_skb(skb);
	return -EMSGSIZE;
}
/*
 * Activate/Deactivate Tx DMA/FIFO channels according to the tx fifos mask;
 * must be called under priv->lock and with mac access held.
 */
static void iwl_trans_txq_set_sched(struct iwl_priv *priv, u32 mask)
{
	iwl_write_prph(priv, SCD_TXFACT, mask);
}
static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
				  struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	unsigned long flags;
	bool page_stolen = false;
	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
	u32 offset = 0;

	if (WARN_ON(!rxb))
		return;

	dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);

	while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
		struct iwl_rx_packet *pkt;
		struct iwl_device_cmd *cmd;
		u16 sequence;
		bool reclaim;
		int index, cmd_index, err, len;
		struct iwl_rx_cmd_buffer rxcb = {
			._offset = offset,
			._rx_page_order = trans_pcie->rx_page_order,
			._page = rxb->page,
			._page_stolen = false,
			.truesize = max_len,
		};

		pkt = rxb_addr(&rxcb);

		if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID))
			break;

		IWL_DEBUG_RX(trans, "cmd at offset %d: %s (0x%.2x)\n",
			     rxcb._offset,
			     get_cmd_string(trans_pcie, pkt->hdr.cmd),
			     pkt->hdr.cmd);

		len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
		len += sizeof(u32); /* account for status word */
		trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
		trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);

		/* Reclaim a command buffer only if this packet is a response
		 * to a (driver-originated) command.
		 * If the packet (e.g. Rx frame) originated from uCode,
		 * there is no command buffer to reclaim.
		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
		 * but apparently a few don't get set; catch them here. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
		if (reclaim) {
			int i;

			for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
				if (trans_pcie->no_reclaim_cmds[i] ==
				    pkt->hdr.cmd) {
					reclaim = false;
					break;
				}
			}
		}

		sequence = le16_to_cpu(pkt->hdr.sequence);
		index = SEQ_TO_INDEX(sequence);
		cmd_index = get_cmd_index(&txq->q, index);

		if (reclaim)
			cmd = txq->entries[cmd_index].cmd;
		else
			cmd = NULL;

		err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);

		if (reclaim) {
			kfree(txq->entries[cmd_index].free_buf);
			txq->entries[cmd_index].free_buf = NULL;
		}

		/*
		 * After here, we should always check rxcb._page_stolen:
		 * if it is true then one of the handlers took the page.
		 */
		if (reclaim) {
			/* Invoke any callbacks, transfer the buffer to caller,
			 * and fire off the (possibly) blocking
			 * iwl_trans_send_cmd()
			 * as we reclaim the driver command queue */
			if (!rxcb._page_stolen)
				iwl_pcie_hcmd_complete(trans, &rxcb, err);
			else
				IWL_WARN(trans, "Claim null rxb?\n");
		}

		page_stolen |= rxcb._page_stolen;
		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
	}

	/* page was stolen from us -- free our reference */
	if (page_stolen) {
		__free_pages(rxb->page, trans_pcie->rx_page_order);
		rxb->page = NULL;
	}

	/* Reuse the page if possible. For notification packets and
	 * SKBs that fail to Rx correctly, add them back into the
	 * rx_free list for reuse later. */
	spin_lock_irqsave(&rxq->lock, flags);
	if (rxb->page != NULL) {
		rxb->page_dma =
			dma_map_page(trans->dev, rxb->page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			/*
			 * free the page(s) as well to not break
			 * the invariant that the items on the used
			 * list have no page(s)
			 */
			__free_pages(rxb->page, trans_pcie->rx_page_order);
			rxb->page = NULL;
			list_add_tail(&rxb->list, &rxq->rx_used);
		} else {
			list_add_tail(&rxb->list, &rxq->rx_free);
			rxq->free_count++;
		}
	} else {
		list_add_tail(&rxb->list, &rxq->rx_used);
	}
	spin_unlock_irqrestore(&rxq->lock, flags);
}

/*
 * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
 */
static void iwl_pcie_rx_handle(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	u32 r, i;
	u8 fill_rx = 0;
	u32 count = 8;
	int total_empty;

	/* uCode's read index (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode). */
	r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
	i = rxq->read;

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);

	/* calculate the total frames that need to be restocked after RX */
	total_empty = r - rxq->write_actual;
	if (total_empty < 0)
		total_empty += RX_QUEUE_SIZE;

	if (total_empty > (RX_QUEUE_SIZE / 2))
		fill_rx = 1;

	while (i != r) {
		struct iwl_rx_mem_buffer *rxb;

		rxb = rxq->queue[i];
		rxq->queue[i] = NULL;

		IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n",
			     r, i, rxb);
		iwl_pcie_rx_handle_rb(trans, rxb);

		i = (i + 1) & RX_QUEUE_MASK;

		/* If there are a lot of unused frames,
		 * restock the Rx queue so ucode won't assert. */
		if (fill_rx) {
			count++;
			if (count >= 8) {
				rxq->read = i;
				iwl_pcie_rx_replenish_now(trans);
				count = 0;
			}
		}
	}

	/* Backtrack one entry */
	rxq->read = i;
	if (fill_rx)
		iwl_pcie_rx_replenish_now(trans);
	else
		iwl_pcie_rxq_restock(trans);
}

/*
 * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
 */
static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
	if (trans->cfg->internal_wimax_coex &&
	    (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
	       APMS_CLK_VAL_MRB_FUNC_MODE) ||
	     (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
	      APMG_PS_CTRL_VAL_RESET_REQ))) {
		clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
		iwl_op_mode_wimax_active(trans->op_mode);
		wake_up(&trans_pcie->wait_command_queue);
		return;
	}

	iwl_pcie_dump_csr(trans);
	iwl_pcie_dump_fh(trans, NULL);

	set_bit(STATUS_FW_ERROR, &trans_pcie->status);
	clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
	wake_up(&trans_pcie->wait_command_queue);

	local_bh_disable();
	iwl_op_mode_nic_error(trans->op_mode);
	local_bh_enable();
}

irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
{
	struct iwl_trans *trans = dev_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	u32 inta = 0;
	u32 handled = 0;
	unsigned long flags;
	u32 i;

	lock_map_acquire(&trans->sync_cmd_lockdep_map);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* Ack/clear/reset pending uCode interrupts.
	 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS.
	 */
	/* There is a hardware bug in the interrupt mask function that some
	 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
	 * they are disabled in the CSR_INT_MASK register.
	 * Furthermore, the ICT interrupt handling mechanism has another bug
	 * that might cause these unmasked interrupts to fail to be detected.
	 * We work around the hardware bugs here by ACKing all the possible
	 * interrupts so that interrupt coalescing can still be achieved.
	 */
	iwl_write32(trans, CSR_INT,
		    trans_pcie->inta | ~trans_pcie->inta_mask);

	inta = trans_pcie->inta;

	if (iwl_have_debug_level(IWL_DL_ISR))
		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
			      inta, iwl_read32(trans, CSR_INT_MASK));

	/* the interrupt is saved in inta, now we can reset trans_pcie->inta */
	trans_pcie->inta = 0;

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IWL_ERR(trans, "Hardware error detected. Restarting.\n");

		/* Tell the device to stop sending interrupts */
		iwl_disable_interrupts(trans);

		isr_stats->hw++;
		iwl_pcie_irq_handle_error(trans);

		handled |= CSR_INT_BIT_HW_ERR;

		goto out;
	}

	if (iwl_have_debug_level(IWL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			IWL_DEBUG_ISR(trans,
				      "Scheduler finished to transmit the frame/frames.\n");
			isr_stats->sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			IWL_DEBUG_ISR(trans, "Alive interrupt\n");
			isr_stats->alive++;
		}
	}

	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		bool hw_rfkill;

		hw_rfkill = iwl_is_rfkill_set(trans);
		IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
			 hw_rfkill ? "disable radio" : "enable radio");

		isr_stats->rfkill++;

		iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
		if (hw_rfkill) {
			/*
			 * Clear the interrupt in APMG if the NIC is going down.
			 * Note that when the NIC exits RFkill (else branch), we
			 * can't access prph and the NIC will be reset in
			 * start_hw anyway.
			 */
			iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
				       APMG_RTC_INT_STT_RFKILL);
			set_bit(STATUS_RFKILL, &trans_pcie->status);
			if (test_and_clear_bit(STATUS_HCMD_ACTIVE,
					       &trans_pcie->status))
				IWL_DEBUG_RF_KILL(trans,
						  "Rfkill while SYNC HCMD in flight\n");
			wake_up(&trans_pcie->wait_command_queue);
		} else {
			clear_bit(STATUS_RFKILL, &trans_pcie->status);
		}

		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IWL_ERR(trans, "Microcode CT kill error detected.\n");
		isr_stats->ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IWL_ERR(trans,
			"Microcode SW error detected. Restarting 0x%X.\n",
			inta);
		isr_stats->sw++;
		iwl_pcie_irq_handle_error(trans);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/* uCode wakes up after power-down sleep */
	if (inta & CSR_INT_BIT_WAKEUP) {
		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
		iwl_pcie_rxq_inc_wr_ptr(trans, &trans_pcie->rxq);
		for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
			iwl_pcie_txq_inc_wr_ptr(trans, &trans_pcie->txq[i]);

		isr_stats->wakeup++;

		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here. */
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
		    CSR_INT_BIT_RX_PERIODIC)) {
		IWL_DEBUG_ISR(trans, "Rx interrupt\n");
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
			handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
			iwl_write32(trans, CSR_FH_INT_STATUS,
				    CSR_FH_INT_RX_MASK);
		}
		if (inta & CSR_INT_BIT_RX_PERIODIC) {
			handled |= CSR_INT_BIT_RX_PERIODIC;
			iwl_write32(trans, CSR_INT, CSR_INT_BIT_RX_PERIODIC);
		}

		/* Sending an RX interrupt requires many steps to be done in
		 * the device:
		 * 1- write interrupt to current index in ICT table.
		 * 2- dma RX frame.
		 * 3- update RX shared data to indicate last write index.
		 * 4- send interrupt.
		 * This could lead to an RX race: the driver could receive the
		 * RX interrupt before the shared data changes reflect it;
		 * the periodic interrupt will detect any dangling Rx activity.
		 */

		/* Disable periodic interrupt; we use it as just a one-shot. */
		iwl_write8(trans, CSR_INT_PERIODIC_REG,
			   CSR_INT_PERIODIC_DIS);

		iwl_pcie_rx_handle(trans);

		/*
		 * Enable periodic interrupt in 8 msec only if we received
		 * a real RX interrupt (instead of just the periodic int), to
		 * catch any dangling Rx interrupt. If it was just the periodic
		 * interrupt, there was no dangling Rx activity, and no need
		 * to extend the periodic interrupt; one-shot is enough.
		 */
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
			iwl_write8(trans, CSR_INT_PERIODIC_REG,
				   CSR_INT_PERIODIC_ENA);

		isr_stats->rx++;
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
		isr_stats->tx++;
		handled |= CSR_INT_BIT_FH_TX;
		/* Wake up uCode load routine, now that load is complete */
		trans_pcie->ucode_write_complete = true;
		wake_up(&trans_pcie->ucode_write_waitq);
	}

	if (inta & ~handled) {
		IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
		isr_stats->unhandled++;
	}

	if (inta & ~(trans_pcie->inta_mask)) {
		IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
			 inta & ~trans_pcie->inta_mask);
	}

	/* Re-enable all interrupts */
	/* only Re-enable if disabled by irq */
	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status))
		iwl_enable_interrupts(trans);
	/* Re-enable RF_KILL if it occurred */
	else if (handled & CSR_INT_BIT_RF_KILL)
		iwl_enable_rfkill_int(trans);

out:
	lock_map_release(&trans->sync_cmd_lockdep_map);
	return IRQ_HANDLED;
}

/******************************************************************************
 *
 * ICT functions
 *
 ******************************************************************************/

/* a device (PCI-E) page is 4096 bytes long */
#define ICT_SHIFT	12
#define ICT_SIZE	(1 << ICT_SHIFT)
#define ICT_COUNT	(ICT_SIZE / sizeof(u32))

/* Free dram table */
void iwl_pcie_free_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->ict_tbl) {
		dma_free_coherent(trans->dev, ICT_SIZE,
				  trans_pcie->ict_tbl,
				  trans_pcie->ict_tbl_dma);
		trans_pcie->ict_tbl = NULL;
		trans_pcie->ict_tbl_dma = 0;
	}
}

/*
 * allocate dram shared table, it is an aligned memory
 * block of ICT_SIZE.
 * also reset all data related to ICT table interrupt.
 */
int iwl_pcie_alloc_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->ict_tbl =
		dma_alloc_coherent(trans->dev, ICT_SIZE,
				   &trans_pcie->ict_tbl_dma,
				   GFP_KERNEL);
	if (!trans_pcie->ict_tbl)
		return -ENOMEM;

	/* just an API sanity check ... it is guaranteed to be aligned */
	if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
		iwl_pcie_free_ict(trans);
		return -EINVAL;
	}

	IWL_DEBUG_ISR(trans, "ict dma addr %Lx\n",
		      (unsigned long long)trans_pcie->ict_tbl_dma);

	IWL_DEBUG_ISR(trans, "ict vir addr %p\n", trans_pcie->ict_tbl);

	/* reset table and index to all 0 */
	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
	trans_pcie->ict_index = 0;

	/* add periodic RX interrupt */
	trans_pcie->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
	return 0;
}

/* Device is going up: inform it that it is using the ICT interrupt table,
 * and tell the driver to start using the ICT interrupt. */
void iwl_pcie_reset_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 val;
	unsigned long flags;

	if (!trans_pcie->ict_tbl)
		return;

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);

	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);

	val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;

	val |= CSR_DRAM_INT_TBL_ENABLE;
	val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;

	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);

	iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
	trans_pcie->use_ict = true;
	trans_pcie->ict_index = 0;
	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
	iwl_enable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}

/* Device is going down: disable ict interrupt usage */
void iwl_pcie_disable_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	trans_pcie->use_ict = false;
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}

/* legacy (non-ICT) ISR. Assumes that trans_pcie->irq_lock is held */
static irqreturn_t iwl_pcie_isr(int irq, void *data)
{
	struct iwl_trans *trans = data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 inta, inta_mask;

	lockdep_assert_held(&trans_pcie->irq_lock);

	trace_iwlwifi_dev_irq(trans->dev);

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the irq thread will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here. */
	inta_mask = iwl_read32(trans, CSR_INT_MASK);
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	/* Discover which interrupts are active/pending */
	inta = iwl_read32(trans, CSR_INT);

	if (inta & (~inta_mask)) {
		IWL_DEBUG_ISR(trans,
			      "We got a masked interrupt (0x%08x)...Ack and ignore\n",
			      inta & (~inta_mask));
		iwl_write32(trans, CSR_INT, inta & (~inta_mask));
		inta &= inta_mask;
	}

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	if (!inta) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		goto none;
	}

	if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
		/* Hardware disappeared. It might have already raised
		 * an interrupt */
		IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
		return IRQ_HANDLED;
	}

	if (iwl_have_debug_level(IWL_DL_ISR))
		IWL_DEBUG_ISR(trans,
			      "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
			      inta, inta_mask,
			      iwl_read32(trans, CSR_FH_INT_STATUS));

	trans_pcie->inta |= inta;
	/* the thread will service interrupts and re-enable them */
	if (likely(inta))
		return IRQ_WAKE_THREAD;
	else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
		 !trans_pcie->inta)
		iwl_enable_interrupts(trans);
	return IRQ_HANDLED;

none:
	/* re-enable interrupts here since we don't have anything to service. */
	/* only Re-enable if disabled by irq and no tasklet was scheduled. */
	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
	    !trans_pcie->inta)
		iwl_enable_interrupts(trans);

	return IRQ_NONE;
}

/* Interrupt handler using the ICT table. With this handler, the driver stops
 * using the INTA register to get the device's interrupts, since reading that
 * register is expensive. Instead, the device writes interrupts into the ICT
 * dram table, increments the index, and fires an interrupt to the driver.
 * The driver ORs all ICT table entries from the current index up to the first
 * entry with a 0 value; the result is the interrupt we need to service. The
 * driver then sets the entries back to 0 and updates the index. */
irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
{
	struct iwl_trans *trans = data;
	struct iwl_trans_pcie *trans_pcie;
	u32 inta;
	u32 val = 0;
	u32 read;
	unsigned long flags;

	if (!trans)
		return IRQ_NONE;

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* dram interrupt table not set yet,
	 * use legacy interrupt.
	 */
	if (unlikely(!trans_pcie->use_ict)) {
		irqreturn_t ret = iwl_pcie_isr(irq, data);

		spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
		return ret;
	}

	trace_iwlwifi_dev_irq(trans->dev);

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here.
	 */
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
	if (!read) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		goto none;
	}

	/*
	 * Collect all entries up to the first 0, starting from ict_index;
	 * note we already read at ict_index.
	 */
	do {
		val |= read;
		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
			      trans_pcie->ict_index, read);
		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
		trans_pcie->ict_index =
			iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT);

		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
					   read);
	} while (read);

	/* We should not get this value, just ignore it. */
	if (val == 0xffffffff)
		val = 0;

	/*
	 * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
	 * (bit 15 before shifting it to 31) to clear when using interrupt
	 * coalescing. fortunately, bits 18 and 19 stay set when this happens
	 * so we use them to decide on the real state of the Rx bit.
	 * In other words, bit 15 is set if bit 18 or bit 19 are set.
	 */
	if (val & 0xC0000)
		val |= 0x8000;

	inta = (0xff & val) | ((0xff00 & val) << 16);
	IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled(sw) 0x%08x ict 0x%08x\n",
		      inta, trans_pcie->inta_mask, val);
	if (iwl_have_debug_level(IWL_DL_ISR))
		IWL_DEBUG_ISR(trans, "enabled(hw) 0x%08x\n",
			      iwl_read32(trans, CSR_INT_MASK));

	inta &= trans_pcie->inta_mask;
	trans_pcie->inta |= inta;

	/* iwl_pcie_tasklet() will service interrupts and re-enable them */
	if (likely(inta)) {
		spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
		return IRQ_WAKE_THREAD;
	} else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
		   !trans_pcie->inta) {
		/* Interrupts were disabled by this handler and no tasklet
		 * was scheduled to re-enable them, so re-enable here. */
		iwl_enable_interrupts(trans);
	}

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
	return IRQ_HANDLED;

none:
	/* re-enable interrupts here since we don't have anything to service.
	 * only Re-enable if disabled by irq. */
	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
	    !trans_pcie->inta)
		iwl_enable_interrupts(trans);

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
	return IRQ_NONE;
}
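/*
 * Standalone sketch (illustrative, not driver code) of the ICT entry decode
 * done above: each 32-bit table entry compresses the interrupt cause
 * register, so the low byte maps to INTA bits 0-7 and the second byte to
 * bits 24-31; the bit-18/19 test is the Rx-bit workaround.
 */
#include <stdint.h>

static uint32_t ict_entry_to_inta(uint32_t val)
{
	/* h/w w/a: bits 18/19 imply the Rx bit (bit 15 before shifting) */
	if (val & 0xC0000)
		val |= 0x8000;
	return (0xff & val) | ((0xff00 & val) << 16);
}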
void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
			       int sta_id, int tid, int frame_limit, u16 ssn)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (test_and_set_bit(txq_id, trans_pcie->queue_used))
		WARN_ONCE(1, "queue %d already used - expect issues", txq_id);

	/* Stop this Tx queue before configuring it */
	iwl_txq_set_inactive(trans, txq_id);

	/* Set this queue as a chain-building queue unless it is CMD queue */
	if (txq_id != trans_pcie->cmd_queue)
		iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id));

	/* If this queue is mapped to a certain station: it is an AGG queue */
	if (sta_id != IWL_INVALID_STATION) {
		u16 ra_tid = BUILD_RAxTID(sta_id, tid);

		/* Map receiver-address / traffic-ID to this queue */
		iwl_txq_set_ratid_map(trans, ra_tid, txq_id);

		/* enable aggregations for the queue */
		iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
	} else {
		/*
		 * disable aggregations for the queue, this will also make the
		 * ra_tid mapping configuration irrelevant since it is now a
		 * non-AGG queue.
		 */
		iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
	}

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff);
	trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff);
	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
			   (ssn & 0xff) | (txq_id << 8));
	iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);

	/* Set up Tx window size and frame limit for this queue */
	iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
			   SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
	iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
			   SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
			   ((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
			    SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
			   ((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
			    SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
		       (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		       (fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
		       (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
		       SCD_QUEUE_STTS_REG_MSK);

	IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d WrPtr: %d\n",
			    txq_id, fifo, ssn & 0xff);
}
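/*
 * Standalone sketch (illustrative, not driver code): the queue above holds
 * 256 TFDs, so only the low byte of the 12-bit SSN indexes the ring, while
 * the HBUS write-pointer register also packs the queue id into bits 8+.
 */
#include <stdint.h>

static uint32_t hbus_targ_wrptr_value(uint16_t ssn, int txq_id)
{
	return (uint32_t)(ssn & 0xff) | ((uint32_t)txq_id << 8);
}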
/*
 * This function handles the user application commands for register access.
 *
 * It retrieves the command ID carried with IWL_TM_ATTR_COMMAND and calls
 * the respective handlers.
 *
 * If it's an unknown command ID, -ENOSYS is returned; -ENOMSG if the
 * mandatory fields (IWL_TM_ATTR_REG_OFFSET, IWL_TM_ATTR_REG_VALUE32,
 * IWL_TM_ATTR_REG_VALUE8) are missing. Otherwise 0 is replied, indicating
 * the success of the command execution.
 *
 * If IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_REG_READ32, the register read
 * value is returned with IWL_TM_ATTR_REG_VALUE32.
 *
 * @hw: ieee80211_hw object that represents the device
 * @tb: gnl message fields from the user space
 */
static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
{
	struct iwl_priv *priv = hw->priv;
	u32 ofs, val32, cmd;
	u8 val8;
	struct sk_buff *skb;
	int status = 0;

	if (!tb[IWL_TM_ATTR_REG_OFFSET]) {
		IWL_DEBUG_INFO(priv, "Error finding register offset\n");
		return -ENOMSG;
	}
	ofs = nla_get_u32(tb[IWL_TM_ATTR_REG_OFFSET]);
	IWL_INFO(priv, "testmode register access command offset 0x%x\n", ofs);

	/*
	 * Allow access only to FH/CSR/HBUS in direct mode.
	 * Since we don't have the upper bounds for the CSR and HBUS
	 * segments, we use only the upper bound of FH for the sanity check.
	 */
	cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
	if ((cmd == IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32 ||
	     cmd == IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32 ||
	     cmd == IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8) &&
	    (ofs >= FH_MEM_UPPER_BOUND)) {
		IWL_DEBUG_INFO(priv, "offset out of segment (0x0 - 0x%x)\n",
			       FH_MEM_UPPER_BOUND);
		return -EINVAL;
	}

	switch (cmd) {
	case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
		val32 = iwl_read_direct32(trans(priv), ofs);
		IWL_INFO(priv, "32bit value to read 0x%x\n", val32);

		skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
		if (!skb) {
			IWL_DEBUG_INFO(priv, "Error allocating memory\n");
			return -ENOMEM;
		}
		NLA_PUT_U32(skb, IWL_TM_ATTR_REG_VALUE32, val32);
		status = cfg80211_testmode_reply(skb);
		if (status < 0)
			IWL_DEBUG_INFO(priv, "Error sending msg : %d\n",
				       status);
		break;
	case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
		if (!tb[IWL_TM_ATTR_REG_VALUE32]) {
			IWL_DEBUG_INFO(priv, "Error finding value to write\n");
			return -ENOMSG;
		} else {
			val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]);
			IWL_INFO(priv, "32bit value to write 0x%x\n", val32);
			iwl_write_direct32(trans(priv), ofs, val32);
		}
		break;
	case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
		if (!tb[IWL_TM_ATTR_REG_VALUE8]) {
			IWL_DEBUG_INFO(priv, "Error finding value to write\n");
			return -ENOMSG;
		} else {
			val8 = nla_get_u8(tb[IWL_TM_ATTR_REG_VALUE8]);
			IWL_INFO(priv, "8bit value to write 0x%x\n", val8);
			iwl_write8(trans(priv), ofs, val8);
		}
		break;
	case IWL_TM_CMD_APP2DEV_INDIRECT_REG_READ32:
		val32 = iwl_read_prph(trans(priv), ofs);
		IWL_INFO(priv, "32bit value to read 0x%x\n", val32);

		skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
		if (!skb) {
			IWL_DEBUG_INFO(priv, "Error allocating memory\n");
			return -ENOMEM;
		}
		NLA_PUT_U32(skb, IWL_TM_ATTR_REG_VALUE32, val32);
		status = cfg80211_testmode_reply(skb);
		if (status < 0)
			IWL_DEBUG_INFO(priv, "Error sending msg : %d\n",
				       status);
		break;
	case IWL_TM_CMD_APP2DEV_INDIRECT_REG_WRITE32:
		if (!tb[IWL_TM_ATTR_REG_VALUE32]) {
			IWL_DEBUG_INFO(priv, "Error finding value to write\n");
			return -ENOMSG;
		} else {
			val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]);
			IWL_INFO(priv, "32bit value to write 0x%x\n", val32);
			iwl_write_prph(trans(priv), ofs, val32);
		}
		break;
	default:
		IWL_DEBUG_INFO(priv, "Unknown testmode register command ID\n");
		return -ENOSYS;
	}

	return status;

nla_put_failure:
	kfree_skb(skb);
	return -EMSGSIZE;
}
int iwlagn_alive_notify(struct iwl_priv *priv)
{
	const struct queue_to_fifo_ac *queue_to_fifo;
	u32 a;
	unsigned long flags;
	int i, chan;
	u32 reg_val;

	spin_lock_irqsave(&priv->lock, flags);

	priv->scd_base_addr = iwl_read_prph(priv, IWLAGN_SCD_SRAM_BASE_ADDR);
	a = priv->scd_base_addr + IWLAGN_SCD_CONTEXT_DATA_OFFSET;
	for (; a < priv->scd_base_addr + IWLAGN_SCD_TX_STTS_BITMAP_OFFSET;
	     a += 4)
		iwl_write_targ_mem(priv, a, 0);
	for (; a < priv->scd_base_addr + IWLAGN_SCD_TRANSLATE_TBL_OFFSET;
	     a += 4)
		iwl_write_targ_mem(priv, a, 0);
	for (; a < priv->scd_base_addr +
	       IWLAGN_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num);
	     a += 4)
		iwl_write_targ_mem(priv, a, 0);

	iwl_write_prph(priv, IWLAGN_SCD_DRAM_BASE_ADDR,
		       priv->scd_bc_tbls.dma >> 10);

	/* Enable DMA channel */
	for (chan = 0; chan < FH50_TCSR_CHNL_NUM; chan++)
		iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	iwl_write_prph(priv, IWLAGN_SCD_QUEUECHAIN_SEL,
		       IWLAGN_SCD_QUEUECHAIN_SEL_ALL(priv));
	iwl_write_prph(priv, IWLAGN_SCD_AGGR_SEL, 0);

	/* initiate the queues */
	for (i = 0; i < priv->hw_params.max_txq_num; i++) {
		iwl_write_prph(priv, IWLAGN_SCD_QUEUE_RDPTR(i), 0);
		iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
		iwl_write_targ_mem(priv, priv->scd_base_addr +
				IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(i), 0);
		iwl_write_targ_mem(priv, priv->scd_base_addr +
				IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(i) +
				sizeof(u32),
				((SCD_WIN_SIZE <<
				IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
				IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
				((SCD_FRAME_LIMIT <<
				IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
	}

	iwl_write_prph(priv, IWLAGN_SCD_INTERRUPT_MASK,
		       IWL_MASK(0, priv->hw_params.max_txq_num));

	/* Activate all Tx DMA/FIFO channels */
	priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7));

	/* map queues to FIFOs */
	if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
		queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
	else
		queue_to_fifo = iwlagn_default_queue_to_tx_fifo;

	iwlagn_set_wr_ptrs(priv, priv->cmd_queue, 0);

	/* make sure all queues are not stopped */
	memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
	for (i = 0; i < 4; i++)
		atomic_set(&priv->queue_stop_count[i], 0);

	/* reset to 0 to enable all queues first */
	priv->txq_ctx_active_msk = 0;

	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) != 10);
	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) != 10);

	for (i = 0; i < 10; i++) {
		int fifo = queue_to_fifo[i].fifo;
		int ac = queue_to_fifo[i].ac;

		iwl_txq_ctx_activate(priv, i);

		if (fifo == IWL_TX_FIFO_UNUSED)
			continue;

		if (ac != IWL_AC_UNSET)
			iwl_set_swq_id(&priv->txq[i], ac, i);
		iwlagn_tx_queue_set_status(priv, &priv->txq[i], fifo, 0);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Enable L1-Active */
	iwl_clear_bits_prph(priv, APMG_PCIDEV_STT_REG,
			    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

	iwlagn_send_wimax_coex(priv);

	iwlagn_set_Xtal_calib(priv);
	iwl_send_calib_results(priv);

	return 0;
}
static int iwl5000_alive_notify(struct iwl_priv *priv)
{
	u32 a;
	unsigned long flags;
	int i, chan;
	u32 reg_val;

	spin_lock_irqsave(&priv->lock, flags);

	priv->scd_base_addr = iwl_read_prph(priv, IWL50_SCD_SRAM_BASE_ADDR);
	a = priv->scd_base_addr + IWL50_SCD_CONTEXT_DATA_OFFSET;
	for (; a < priv->scd_base_addr + IWL50_SCD_TX_STTS_BITMAP_OFFSET;
	     a += 4)
		iwl_write_targ_mem(priv, a, 0);
	for (; a < priv->scd_base_addr + IWL50_SCD_TRANSLATE_TBL_OFFSET;
	     a += 4)
		iwl_write_targ_mem(priv, a, 0);
	for (; a < priv->scd_base_addr + IWL50_SCD_TRANSLATE_TBL_OFFSET +
	       sizeof(u16) * priv->hw_params.max_txq_num;
	     a += 4)
		iwl_write_targ_mem(priv, a, 0);

	iwl_write_prph(priv, IWL50_SCD_DRAM_BASE_ADDR,
		       priv->scd_bc_tbls.dma >> 10);

	/* Enable DMA channel */
	for (chan = 0; chan < FH50_TCSR_CHNL_NUM; chan++)
		iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	iwl_write_prph(priv, IWL50_SCD_QUEUECHAIN_SEL,
		IWL50_SCD_QUEUECHAIN_SEL_ALL(priv->hw_params.max_txq_num));
	iwl_write_prph(priv, IWL50_SCD_AGGR_SEL, 0);

	/* initiate the queues */
	for (i = 0; i < priv->hw_params.max_txq_num; i++) {
		iwl_write_prph(priv, IWL50_SCD_QUEUE_RDPTR(i), 0);
		iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
		iwl_write_targ_mem(priv, priv->scd_base_addr +
				IWL50_SCD_CONTEXT_QUEUE_OFFSET(i), 0);
		iwl_write_targ_mem(priv, priv->scd_base_addr +
				IWL50_SCD_CONTEXT_QUEUE_OFFSET(i) +
				sizeof(u32),
				((SCD_WIN_SIZE <<
				IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
				IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
				((SCD_FRAME_LIMIT <<
				IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
	}

	iwl_write_prph(priv, IWL50_SCD_INTERRUPT_MASK,
		       IWL_MASK(0, priv->hw_params.max_txq_num));

	/* Activate all Tx DMA/FIFO channels */
	priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7));

	iwl5000_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);

	/* map qos queues to fifos one-to-one */
	for (i = 0; i < ARRAY_SIZE(iwl5000_default_queue_to_tx_fifo); i++) {
		int ac = iwl5000_default_queue_to_tx_fifo[i];

		iwl_txq_ctx_activate(priv, i);
		iwl5000_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
	}

	/* TODO: these FIFOs need to be initialized inside the loop above,
	 * not only marked as active */
	iwl_txq_ctx_activate(priv, 4);
	iwl_txq_ctx_activate(priv, 7);
	iwl_txq_ctx_activate(priv, 8);
	iwl_txq_ctx_activate(priv, 9);

	spin_unlock_irqrestore(&priv->lock, flags);

	iwl5000_send_wimax_coex(priv);

	iwl5000_set_Xtal_calib(priv);
	iwl_send_calib_results(priv);

	return 0;
}
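/*
 * Standalone sketch (illustrative, not driver code): several registers
 * above take DMA addresses in scaled units -- the scheduler byte-count
 * table base is written in 1KB units (dma >> 10), much as the keep-warm
 * buffer register elsewhere takes 16-byte units (dma >> 4).
 */
#include <stdint.h>

static uint32_t scd_dram_base_reg(uint64_t bc_tbl_dma)
{
	/* hardware expects a 1KB-aligned address, stored >> 10 */
	return (uint32_t)(bc_tbl_dma >> 10);
}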
/*
 * Activate/Deactivate Tx DMA/FIFO channels according to the tx fifos mask;
 * must be called under priv->lock and with mac access held.
 */
static void iwl5000_txq_set_sched(struct iwl_priv *priv, u32 mask)
{
	iwl_write_prph(priv, IWL50_SCD_TXFACT, mask);
}