int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
				  struct iwl_host_cmd *cmd)
{
	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
				  cmd->id);
		return -ERFKILL;
	}

	if (cmd->flags & CMD_ASYNC) {
		int ret;

		/* An asynchronous command can not expect an SKB to be set. */
		if (WARN_ON(cmd->flags & CMD_WANT_SKB))
			return -EINVAL;

		ret = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
		if (ret < 0) {
			IWL_ERR(trans,
				"Error sending %s: enqueue_hcmd failed: %d\n",
				iwl_get_cmd_string(trans, cmd->id), ret);
			return ret;
		}
		return 0;
	}

	return iwl_pcie_gen2_send_hcmd_sync(trans, cmd);
}
static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
				  struct iwl_rx_mem_buffer *rxb,
				  bool emergency)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	bool page_stolen = false;
	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
	u32 offset = 0;

	if (WARN_ON(!rxb))
		return;

	dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);

	while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
		struct iwl_rx_packet *pkt;
		u16 sequence;
		bool reclaim;
		int index, cmd_index, len;
		struct iwl_rx_cmd_buffer rxcb = {
			._offset = offset,
			._rx_page_order = trans_pcie->rx_page_order,
			._page = rxb->page,
			._page_stolen = false,
			.truesize = max_len,
		};

		pkt = rxb_addr(&rxcb);

		if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID))
			break;

		IWL_DEBUG_RX(trans,
			     "cmd at offset %d: %s (0x%.2x, seq 0x%x)\n",
			     rxcb._offset,
			     iwl_get_cmd_string(trans,
						iwl_cmd_id(pkt->hdr.cmd,
							   pkt->hdr.group_id,
							   0)),
			     pkt->hdr.cmd, le16_to_cpu(pkt->hdr.sequence));

		len = iwl_rx_packet_len(pkt);
		len += sizeof(u32); /* account for status word */
		trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
		trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);

		/* Reclaim a command buffer only if this packet is a response
		 *   to a (driver-originated) command.
		 * If the packet (e.g. Rx frame) originated from uCode,
		 *   there is no command buffer to reclaim.
		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
		 *   but apparently a few don't get set; catch them here.
		 */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
		if (reclaim) {
			int i;

			for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
				if (trans_pcie->no_reclaim_cmds[i] ==
							pkt->hdr.cmd) {
					reclaim = false;
					break;
				}
			}
		}

		sequence = le16_to_cpu(pkt->hdr.sequence);
		index = SEQ_TO_INDEX(sequence);
		cmd_index = get_cmd_index(&txq->q, index);

		iwl_op_mode_rx(trans->op_mode, &trans_pcie->napi, &rxcb);

		if (reclaim) {
			kzfree(txq->entries[cmd_index].free_buf);
			txq->entries[cmd_index].free_buf = NULL;
		}

		/*
		 * After here, we should always check rxcb._page_stolen,
		 * if it is true then one of the handlers took the page.
		 */

		if (reclaim) {
			/* Invoke any callbacks, transfer the buffer to caller,
			 * and fire off the (possibly) blocking
			 * iwl_trans_send_cmd()
			 * as we reclaim the driver command queue */
			if (!rxcb._page_stolen)
				iwl_pcie_hcmd_complete(trans, &rxcb);
			else
				IWL_WARN(trans, "Claim null rxb?\n");
		}

		page_stolen |= rxcb._page_stolen;
		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
	}

	/* page was stolen from us -- free our reference */
	if (page_stolen) {
		__free_pages(rxb->page, trans_pcie->rx_page_order);
		rxb->page = NULL;
	}

	/* Reuse the page if possible. For notification packets and
	 * SKBs that fail to Rx correctly, add them back into the
	 * rx_free list for reuse later.
	 */
	if (rxb->page != NULL) {
		rxb->page_dma =
			dma_map_page(trans->dev, rxb->page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			/*
			 * free the page(s) as well to not break
			 * the invariant that the items on the used
			 * list have no page(s)
			 */
			__free_pages(rxb->page, trans_pcie->rx_page_order);
			rxb->page = NULL;
			iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
		} else {
			list_add_tail(&rxb->list, &rxq->rx_free);
			rxq->free_count++;
		}
	} else {
		iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
	}
}

/*
 * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
 */
static void iwl_pcie_rx_handle(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	u32 r, i, j, count = 0;
	bool emergency = false;

restart:
	spin_lock(&rxq->lock);
	/* uCode's read index (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode).
	 */
	r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
	i = rxq->read;

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);

	while (i != r) {
		struct iwl_rx_mem_buffer *rxb;

		if (unlikely(rxq->used_count == RX_QUEUE_SIZE / 2))
			emergency = true;

		rxb = rxq->queue[i];
		rxq->queue[i] = NULL;

		IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d\n", r, i);
		iwl_pcie_rx_handle_rb(trans, rxb, emergency);

		i = (i + 1) & RX_QUEUE_MASK;

		/* If we have RX_CLAIM_REQ_ALLOC released rx buffers -
		 * try to claim the pre-allocated buffers from the allocator
		 */
		if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) {
			struct iwl_rb_allocator *rba = &trans_pcie->rba;
			struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC];

			if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 &&
			    !emergency) {
				/* Add the remaining 6 empty RBDs
				 * for allocator use
				 */
				spin_lock(&rba->lock);
				list_splice_tail_init(&rxq->rx_used,
						      &rba->rbd_empty);
				spin_unlock(&rba->lock);
			}

			/* If not ready - continue, will try to reclaim later.
			 * No need to reschedule work - allocator exits only on
			 * success
			 */
			if (!iwl_pcie_rx_allocator_get(trans, out)) {
				/* If success - then RX_CLAIM_REQ_ALLOC
				 * buffers were retrieved and should be added
				 * to free list
				 */
				rxq->used_count -= RX_CLAIM_REQ_ALLOC;
				for (j = 0; j < RX_CLAIM_REQ_ALLOC; j++) {
					list_add_tail(&out[j]->list,
						      &rxq->rx_free);
					rxq->free_count++;
				}
			}
		}
		if (emergency) {
			count++;
			if (count == 8) {
				count = 0;
				if (rxq->used_count < RX_QUEUE_SIZE / 3)
					emergency = false;
				spin_unlock(&rxq->lock);
				iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);
				spin_lock(&rxq->lock);
			}
		}
		/* handle restock for three cases, can be all of them at once:
		 * - we just pulled buffers from the allocator
		 * - we have 8+ unstolen pages accumulated
		 * - we are in emergency and allocated buffers
		 */
		if (rxq->free_count >= RX_CLAIM_REQ_ALLOC) {
			rxq->read = i;
			spin_unlock(&rxq->lock);
			iwl_pcie_rxq_restock(trans);
			goto restart;
		}
	}

	/* Backtrack one entry */
	rxq->read = i;
	spin_unlock(&rxq->lock);

	/*
	 * handle a case where in emergency there are some unallocated RBDs.
	 * those RBDs are in the used list, but are not tracked by the queue's
	 * used_count which counts allocator owned RBDs.
	 * unallocated emergency RBDs must be allocated on exit, otherwise
	 * when called again the function may not be in emergency mode and
	 * they will be handed to the allocator with no tracking in the RBD
	 * allocator counters, which will lead to them never being claimed back
	 * by the queue.
	 * by allocating them here, they are now in the queue free list, and
	 * will be restocked by the next call of iwl_pcie_rxq_restock.
	 */
	if (unlikely(emergency && count))
		iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);

	if (trans_pcie->napi.poll)
		napi_gro_flush(&trans_pcie->napi, false);
}

/*
 * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
 */
static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
	if (trans->cfg->internal_wimax_coex &&
	    !trans->cfg->apmg_not_supported &&
	    (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
			     APMS_CLK_VAL_MRB_FUNC_MODE) ||
	     (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
			    APMG_PS_CTRL_VAL_RESET_REQ))) {
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		iwl_op_mode_wimax_active(trans->op_mode);
		wake_up(&trans_pcie->wait_command_queue);
		return;
	}

	iwl_pcie_dump_csr(trans);
	iwl_dump_fh(trans, NULL);

	local_bh_disable();
	/* The STATUS_FW_ERROR bit is set in this function. This must happen
	 * before we wake up the command caller, to ensure a proper cleanup.
	 */
	iwl_trans_fw_error(trans);
	local_bh_enable();

	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
		del_timer(&trans_pcie->txq[i].stuck_timer);

	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
	wake_up(&trans_pcie->wait_command_queue);
}

static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
{
	u32 inta;

	lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);

	trace_iwlwifi_dev_irq(trans->dev);

	/* Discover which interrupts are active/pending */
	inta = iwl_read32(trans, CSR_INT);

	/* the thread will service interrupts and re-enable them */
	return inta;
}
static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans,
					struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
	struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
	int cmd_idx;
	int ret;

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str);

	if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status),
		 "Command %s: a command is already active!\n", cmd_str))
		return -EIO;

	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);

	if (pm_runtime_suspended(&trans_pcie->pci_dev->dev)) {
		ret = wait_event_timeout(trans_pcie->d0i3_waitq,
				 pm_runtime_active(&trans_pcie->pci_dev->dev),
				 msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT));
		if (!ret) {
			IWL_ERR(trans, "Timeout exiting D0i3 before hcmd\n");
			return -ETIMEDOUT;
		}
	}

	cmd_idx = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
			cmd_str, ret);
		return ret;
	}

	ret = wait_event_timeout(trans_pcie->wait_command_queue,
				 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
					   &trans->status),
				 HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
			cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

		IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
			txq->read_ptr, txq->write_ptr);

		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       cmd_str);
		ret = -ETIMEDOUT;

		iwl_force_nmi(trans);
		iwl_trans_fw_error(trans);

		goto cancel;
	}

	if (test_bit(STATUS_FW_ERROR, &trans->status)) {
		IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
		dump_stack();
		ret = -EIO;
		goto cancel;
	}

	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
		ret = -ERFKILL;
		goto cancel;
	}

	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str);
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
	}

	if (cmd->resp_pkt) {
		iwl_free_resp(cmd);
		cmd->resp_pkt = NULL;
	}

	return ret;
}
/*
 * iwl_pcie_gen2_enqueue_hcmd - enqueue a uCode command
 * @trans: transport private data
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate the operation
 * failed. On success, it returns the index (>= 0) of command in the
 * command queue.
 */
static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
				      struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	unsigned long flags;
	void *dup_buf = NULL;
	dma_addr_t phys_addr;
	int i, cmd_pos, idx;
	u16 copy_size, cmd_size, tb0_size;
	bool had_nocopy = false;
	u8 group_id = iwl_cmd_groupid(cmd->id);
	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
	struct iwl_tfh_tfd *tfd;

	copy_size = sizeof(struct iwl_cmd_header_wide);
	cmd_size = sizeof(struct iwl_cmd_header_wide);

	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		cmddata[i] = cmd->data[i];
		cmdlen[i] = cmd->len[i];

		if (!cmd->len[i])
			continue;

		/* need at least IWL_FIRST_TB_SIZE copied */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			int copy = IWL_FIRST_TB_SIZE - copy_size;

			if (copy > cmdlen[i])
				copy = cmdlen[i];
			cmdlen[i] -= copy;
			cmddata[i] += copy;
			copy_size += copy;
		}

		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
			if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
		} else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
			/*
			 * This is also a chunk that isn't copied
			 * to the static buffer so set had_nocopy.
			 */
			had_nocopy = true;

			/* only allowed once */
			if (WARN_ON(dup_buf)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}

			dup_buf = kmemdup(cmddata[i], cmdlen[i],
					  GFP_ATOMIC);
			if (!dup_buf)
				return -ENOMEM;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
			copy_size += cmdlen[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than the
	 * TFD_MAX_PAYLOAD_SIZE and they aren't dynamically allocated into
	 * separate TFDs, then we will need to increase the size of the buffers
	 */
	if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
		 "Command %s (%#x) is too large (%d bytes)\n",
		 iwl_get_cmd_string(trans, cmd->id), cmd->id, copy_size)) {
		idx = -EINVAL;
		goto free_dup_buf;
	}

	spin_lock_bh(&txq->lock);

	idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
	tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
	memset(tfd, 0, sizeof(*tfd));

	if (iwl_queue_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_bh(&txq->lock);

		IWL_ERR(trans, "No space in command queue\n");
		iwl_op_mode_cmd_queue_full(trans->op_mode);
		idx = -ENOSPC;
		goto free_dup_buf;
	}

	out_cmd = txq->entries[idx].cmd;
	out_meta = &txq->entries[idx].meta;

	/* re-initialize to NULL */
	memset(out_meta, 0, sizeof(*out_meta));
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */
	out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
	out_cmd->hdr_wide.group_id = group_id;
	out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
	out_cmd->hdr_wide.length =
		cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide));
	out_cmd->hdr_wide.reserved = 0;
	out_cmd->hdr_wide.sequence =
		cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
			    INDEX_TO_SEQ(txq->write_ptr));

	cmd_pos = sizeof(struct iwl_cmd_header_wide);
	copy_size = sizeof(struct iwl_cmd_header_wide);

	/* and copy the data that needs to be copied */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		int copy;

		if (!cmd->len[i])
			continue;

		/* copy everything if not nocopy/dup */
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP))) {
			copy = cmd->len[i];

			memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
			cmd_pos += copy;
			copy_size += copy;
			continue;
		}

		/*
		 * Otherwise we need at least IWL_FIRST_TB_SIZE copied
		 * in total (for bi-directional DMA), but copy up to what
		 * we can fit into the payload for debug dump purposes.
		 */
		copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);

		memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
		cmd_pos += copy;

		/* However, treat copy_size the proper way, we need it below */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			copy = IWL_FIRST_TB_SIZE - copy_size;

			if (copy > cmd->len[i])
				copy = cmd->len[i];
			copy_size += copy;
		}
	}

	IWL_DEBUG_HC(trans,
		     "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
		     iwl_get_cmd_string(trans, cmd->id), group_id,
		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
		     cmd_size, txq->write_ptr, idx, trans_pcie->cmd_queue);

	/* start the TFD with the minimum copy bytes */
	tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
	memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size);
	iwl_pcie_gen2_set_tb(trans, tfd, iwl_pcie_get_first_tb_dma(txq, idx),
			     tb0_size);

	/* map first command fragment, if any remains */
	if (copy_size > tb0_size) {
		phys_addr = dma_map_single(trans->dev,
					   ((u8 *)&out_cmd->hdr) + tb0_size,
					   copy_size - tb0_size,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			idx = -ENOMEM;
			iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
			goto out;
		}
		iwl_pcie_gen2_set_tb(trans, tfd, phys_addr,
				     copy_size - tb0_size);
	}

	/* map the remaining (adjusted) nocopy/dup fragments */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		const void *data = cmddata[i];

		if (!cmdlen[i])
			continue;
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP)))
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
			data = dup_buf;
		phys_addr = dma_map_single(trans->dev, (void *)data,
					   cmdlen[i], DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			idx = -ENOMEM;
			iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
			goto out;
		}
		iwl_pcie_gen2_set_tb(trans, tfd, phys_addr, cmdlen[i]);
	}

	BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
	out_meta->flags = cmd->flags;
	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
		kzfree(txq->entries[idx].free_buf);
	txq->entries[idx].free_buf = dup_buf;

	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);

	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
	if (!(cmd->flags & CMD_SEND_IN_IDLE) &&
	    !trans_pcie->ref_cmd_in_flight) {
		trans_pcie->ref_cmd_in_flight = true;
		IWL_DEBUG_RPM(trans, "set ref_cmd_in_flight - ref\n");
		iwl_trans_ref(trans);
	}
	/* Increment and update queue's write index */
	txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
	iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);

out:
	spin_unlock_bh(&txq->lock);
free_dup_buf:
	if (idx < 0)
		kfree(dup_buf);
	return idx;
}