/*
 * Hand a transmitted HTC skb back to the owning endpoint.
 *
 * Restores the skb to its pre-HTC state, then invokes the endpoint's
 * registered tx-complete callback.  If the endpoint registered no
 * callback, the buffer is dropped with a warning.
 */
void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep, struct sk_buff *skb)
{
	struct ath10k *ar = ep->htc->ar;

	ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %pK\n", __func__,
		   ep->eid, skb);

	ath10k_htc_restore_tx_skb(ep->htc, skb);

	if (ep->ep_ops.ep_tx_complete) {
		ep->ep_ops.ep_tx_complete(ep->htc->ar, skb);
		return;
	}

	ath10k_warn(ar, "no tx handler for eid %d\n", ep->eid);
	dev_kfree_skb_any(skb);
}
static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar) { struct sk_buff *skb; struct ath10k_skb_cb *skb_cb; skb = dev_alloc_skb(ATH10K_HTC_CONTROL_BUFFER_SIZE); if (!skb) return NULL; skb_reserve(skb, 20); /* FIXME: why 20 bytes? */ WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb"); skb_cb = ATH10K_SKB_CB(skb); memset(skb_cb, 0, sizeof(*skb_cb)); ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: skb %pK\n", __func__, skb); return skb; }
int ath10k_htt_tx_alloc(struct ath10k_htt *htt) { struct ath10k *ar = htt->ar; int ret, size; ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n", htt->max_num_pending_tx); mtx_init(&htt->tx_lock, device_get_nameunit(ar->sc_dev), "athp htt tx", MTX_DEF); mtx_init(&htt->tx_comp_lock, device_get_nameunit(ar->sc_dev), "athp htt comp tx", MTX_DEF); idr_init(&htt->pending_tx); htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->sc_dev, sizeof(struct ath10k_htt_txbuf), 4, 0); if (!htt->tx_pool) { ret = -ENOMEM; goto free_idr_pending_tx; } if (!ar->hw_params.continuous_frag_desc) goto skip_frag_desc_alloc; size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc); if (athp_descdma_alloc(ar, &htt->frag_desc.dd, "htt frag_desc", 8, size) != 0) { ath10k_warn(ar, "failed to alloc fragment desc memory\n"); ret = -ENOMEM; goto free_tx_pool; } htt->frag_desc.vaddr = (void *) htt->frag_desc.dd.dd_desc; htt->frag_desc.paddr = htt->frag_desc.dd.dd_desc_paddr; skip_frag_desc_alloc: return 0; free_tx_pool: dma_pool_destroy(htt->tx_pool); free_idr_pending_tx: mtx_destroy(&htt->tx_lock); idr_destroy(&htt->pending_tx); return ret; }
/*
 * Push the extended board-data section of the board file (fw) to the
 * target over BMI.
 *
 * The target advertises where extended data should land via the
 * hi_board_ext_data host-interest variable; an address of 0 means the
 * target takes no extended board data and this is a no-op.
 *
 * Returns 0 on success or a negative errno.
 */
static int ath10k_push_board_ext_data(struct ath10k *ar,
				      const struct firmware *fw)
{
	u32 board_data_size = QCA988X_BOARD_DATA_SZ;
	u32 board_ext_data_size = QCA988X_BOARD_EXT_DATA_SZ;
	u32 board_ext_data_addr;
	int ret;

	/* Ask the target where the extended board data should be written. */
	ret = ath10k_bmi_read32(ar, hi_board_ext_data, &board_ext_data_addr);
	if (ret) {
		ath10k_err("could not read board ext data addr (%d)\n", ret);
		return ret;
	}

	ath10k_dbg(ATH10K_DBG_CORE,
		   "ath10k: Board extended Data download addr: 0x%x\n",
		   board_ext_data_addr);

	/* Zero address: this target variant uses no extended board data. */
	if (board_ext_data_addr == 0)
		return 0;

	/* The board file must be exactly base + extended data. */
	if (fw->size != (board_data_size + board_ext_data_size)) {
		ath10k_err("invalid board (ext) data sizes %zu != %d+%d\n",
			   fw->size, board_data_size, board_ext_data_size);
		return -EINVAL;
	}

	/* Extended data sits immediately after the regular board data. */
	ret = ath10k_bmi_write_memory(ar, board_ext_data_addr,
				      fw->data + board_data_size,
				      board_ext_data_size);
	if (ret) {
		ath10k_err("could not write board ext data (%d)\n", ret);
		return ret;
	}

	/* Publish the extended-data length in the high half-word; the low
	 * bit presumably flags the data as valid — TODO confirm against
	 * firmware host-interest docs. */
	ret = ath10k_bmi_write32(ar, hi_board_ext_data_config,
				 (board_ext_data_size << 16) | 1);
	if (ret) {
		ath10k_err("could not write board ext data bit (%d)\n", ret);
		return ret;
	}

	return 0;
}
/*
 * Push the extended board-data section (taken from ar->board_data) to
 * the target over BMI.
 *
 * The target advertises the destination address via the hi_board_ext_data
 * host-interest variable; an address of 0 means the target takes no
 * extended board data and this is a no-op.
 *
 * Returns 0 on success or a negative errno.
 */
static int ath10k_push_board_ext_data(struct ath10k *ar)
{
	u32 board_data_size = QCA988X_BOARD_DATA_SZ;
	u32 board_ext_data_size = QCA988X_BOARD_EXT_DATA_SZ;
	u32 board_ext_data_addr;
	int ret;

	/* Ask the target where the extended board data should be written. */
	ret = ath10k_bmi_read32(ar, hi_board_ext_data, &board_ext_data_addr);
	if (ret) {
		ath10k_err("could not read board ext data addr (%d)\n", ret);
		return ret;
	}

	ath10k_dbg(ATH10K_DBG_BOOT,
		   "boot push board extended data addr 0x%x\n",
		   board_ext_data_addr);

	/* Zero address: this target variant uses no extended board data. */
	if (board_ext_data_addr == 0)
		return 0;

	/* The cached board image must be exactly base + extended data. */
	if (ar->board_len != (board_data_size + board_ext_data_size)) {
		ath10k_err("invalid board (ext) data sizes %zu != %d+%d\n",
			   ar->board_len, board_data_size,
			   board_ext_data_size);
		return -EINVAL;
	}

	/* Extended data sits immediately after the regular board data. */
	ret = ath10k_bmi_write_memory(ar, board_ext_data_addr,
				      ar->board_data + board_data_size,
				      board_ext_data_size);
	if (ret) {
		ath10k_err("could not write board ext data (%d)\n", ret);
		return ret;
	}

	/* Publish the extended-data length in the high half-word; the low
	 * bit presumably flags the data as valid — TODO confirm against
	 * firmware host-interest docs. */
	ret = ath10k_bmi_write32(ar, hi_board_ext_data_config,
				 (board_ext_data_size << 16) | 1);
	if (ret) {
		ath10k_err("could not write board ext data bit (%d)\n", ret);
		return ret;
	}

	return 0;
}
/*
 * Send the HTC "setup complete" message to the target, finishing HTC
 * bring-up, and then connect the pktlog service when the firmware
 * supports it.
 *
 * On success the skb is consumed by ath10k_htc_send(); it is only freed
 * here if the send itself fails.  Returns 0 or a negative errno.
 */
int ath10k_htc_start(struct ath10k_htc *htc)
{
	struct ath10k *ar = htc->ar;
	struct sk_buff *skb;
	int status = 0;
	struct ath10k_htc_msg *msg;

	skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
	if (!skb)
		return -ENOMEM;

	/* Message is the header plus the setup_complete_ext payload,
	 * zeroed before the fields are filled in. */
	skb_put(skb, sizeof(msg->hdr) + sizeof(msg->setup_complete_ext));
	memset(skb->data, 0, skb->len);

	msg = (struct ath10k_htc_msg *)skb->data;
	msg->hdr.message_id =
		__cpu_to_le16(ATH10K_HTC_MSG_SETUP_COMPLETE_EX_ID);

	if (ar->hif.bus == ATH10K_BUS_SDIO) {
		/* Extra setup params used by SDIO */
		msg->setup_complete_ext.flags =
			__cpu_to_le32(ATH10K_HTC_SETUP_COMPLETE_FLAGS_RX_BNDL_EN);
		msg->setup_complete_ext.max_msgs_per_bundled_recv =
			htc->max_msgs_per_htc_bundle;
	}
	ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC is using TX credit flow control\n");

	status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
	if (status) {
		/* Send failed, so ownership of the skb stays with us. */
		kfree_skb(skb);
		return status;
	}

	if (ath10k_htc_pktlog_svc_supported(ar)) {
		status = ath10k_htc_pktlog_connect(ar);
		if (status) {
			ath10k_err(ar, "failed to connect to pktlog: %d\n",
				   status);
			return status;
		}
	}
	return 0;
}
int ath10k_htt_tx_alloc(struct ath10k_htt *htt) { struct ath10k *ar = htt->ar; ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n", htt->max_num_pending_tx); spin_lock_init(&htt->tx_lock); idr_init(&htt->pending_tx); htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev, sizeof(struct ath10k_htt_txbuf), 4, 0); if (!htt->tx_pool) { idr_destroy(&htt->pending_tx); return -ENOMEM; } return 0; }
/*
 * Force-complete every MSDU still marked in-flight, discarding each one.
 *
 * Walks the used_msdu_ids bitmap under tx_lock and funnels each pending
 * id through the normal completion path (ath10k_txrx_tx_unref) with the
 * discard flag set, so accounting and buffer ownership unwind the same
 * way as a real completion.
 */
static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
{
	struct htt_tx_done tx_done = {0};
	int msdu_id;

	/* tx_lock guards the used_msdu_ids bitmap while we sweep it. */
	spin_lock_bh(&htt->tx_lock);
	for (msdu_id = 0; msdu_id < htt->max_num_pending_tx; msdu_id++) {
		if (!test_bit(msdu_id, htt->used_msdu_ids))
			continue;

		ath10k_dbg(ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n",
			   msdu_id);

		/* Report as discarded rather than transmitted. */
		tx_done.discard = 1;
		tx_done.msdu_id = msdu_id;

		ath10k_txrx_tx_unref(htt, &tx_done);
	}
	spin_unlock_bh(&htt->tx_lock);
}
/*
 * Read `length` bytes of target memory at `address` into `buffer` via
 * BMI, in chunks of at most BMI_MAX_DATA_SIZE.
 *
 * Disallowed after BMI_DONE has been sent (-EBUSY).  Returns 0 or a
 * negative errno from the HIF exchange.
 */
int ath10k_bmi_read_memory(struct ath10k *ar,
			   u32 address, char *buffer, u32 length)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.read_mem);
	u32 rxlen;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read address 0x%x length %d\n",
		   address, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	while (length) {
		rxlen = MIN(length, BMI_MAX_DATA_SIZE);

		cmd.id = __cpu_to_le32(BMI_READ_MEMORY);
		cmd.read_mem.addr = __cpu_to_le32(address);
		cmd.read_mem.len = __cpu_to_le32(rxlen);

		/* NOTE: rxlen is in/out — on return it holds the number of
		 * bytes the target actually sent, which the copy and the
		 * loop bookkeeping below rely on. */
		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen,
						  &resp, &rxlen);
		if (ret) {
			ath10k_warn(ar, "unable to read from the device (%d)\n",
				    ret);
			return ret;
		}

		memcpy(buffer, resp.read_mem.payload, rxlen);
		address += rxlen;
		buffer += rxlen;
		length -= rxlen;
	}

	return 0;
}
/*
 * Fetch firmware, trying container formats from newest to oldest.
 *
 * API 2 is preferred; if it cannot be fetched, fall back to the legacy
 * API 1 layout.  ar->fw_api records which format succeeded.
 */
static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
{
	int ret;

	ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API2_FILE);
	if (ret == 0) {
		ar->fw_api = 2;
	} else {
		/* API 2 unavailable — try the legacy single-file layout. */
		ret = ath10k_core_fetch_firmware_api_1(ar);
		if (ret)
			return ret;
		ar->fw_api = 1;
	}

	ath10k_dbg(ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api);

	return 0;
}
/*
 * Force-complete every MSDU still marked in-flight, discarding each one.
 *
 * Each pending id is funneled through the normal completion path
 * (ath10k_txrx_tx_unref) with the discard flag set so accounting and
 * buffer ownership unwind the same way as a real completion.
 */
static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
{
	struct htt_tx_done tx_done = {0};
	int msdu_id;

	/* No locks needed. Called after communication with the device has
	 * been stopped.
	 */

	for (msdu_id = 0; msdu_id < htt->max_num_pending_tx; msdu_id++) {
		if (!test_bit(msdu_id, htt->used_msdu_ids))
			continue;

		ath10k_dbg(ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n",
			   msdu_id);

		/* Report as discarded rather than transmitted. */
		tx_done.discard = 1;
		tx_done.msdu_id = msdu_id;

		ath10k_txrx_tx_unref(htt, &tx_done);
	}
}
static int ath10k_download_cal_file(struct ath10k *ar) { int ret; if (!ar->cal_file) return -ENOENT; if (IS_ERR(ar->cal_file)) return PTR_ERR(ar->cal_file); ret = ath10k_download_board_data(ar, ar->cal_file->data, ar->cal_file->size); if (ret) { ath10k_err(ar, "failed to download cal_file data: %d\n", ret); return ret; } ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cal file downloaded\n"); return 0; }
/*
 * Allocate HTT transmit-side state: the tx spinlock, the pending-tx idr,
 * the DMA pool of TX descriptor buffers and, for chips with
 * hw_params.continuous_frag_desc set, a coherent fragment-descriptor
 * block sized for the maximum number of pending MSDUs.
 *
 * Returns 0 on success or a negative errno; partial allocations are
 * unwound in reverse order.
 */
int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int ret, size;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
		   htt->max_num_pending_tx);

	spin_lock_init(&htt->tx_lock);
	idr_init(&htt->pending_tx);

	htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev,
				       sizeof(struct ath10k_htt_txbuf), 4, 0);
	if (!htt->tx_pool) {
		ret = -ENOMEM;
		goto free_idr_pending_tx;
	}

	/* Only some hardware needs the always-mapped frag desc bank. */
	if (!ar->hw_params.continuous_frag_desc)
		goto skip_frag_desc_alloc;

	size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
	htt->frag_desc.vaddr = dma_alloc_coherent(ar->dev, size,
						  &htt->frag_desc.paddr,
						  GFP_DMA);
	if (!htt->frag_desc.vaddr) {
		ath10k_warn(ar, "failed to alloc fragment desc memory\n");
		ret = -ENOMEM;
		goto free_tx_pool;
	}

skip_frag_desc_alloc:
	return 0;

free_tx_pool:
	dma_pool_destroy(htt->tx_pool);
free_idr_pending_tx:
	idr_destroy(&htt->pending_tx);
	return ret;
}
/*
 * Read `length` bytes of target memory at `address` into `buffer` via
 * BMI, in chunks of at most BMI_MAX_DATA_SIZE.
 *
 * Disallowed after BMI_DONE has been sent (-EBUSY).  Returns 0 or a
 * negative errno from the HIF exchange.
 *
 * NOTE(review): `buffer += rxlen` does arithmetic on a void pointer,
 * a GNU C extension (treated as byte-sized) — fine for kernel builds,
 * not strictly portable.
 */
int ath10k_bmi_read_memory(struct ath10k *ar,
			   u32 address, void *buffer, u32 length)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.read_mem);
	u32 rxlen;
	int ret;

	if (ar->bmi.done_sent) {
		ath10k_warn("command disallowed\n");
		return -EBUSY;
	}

	ath10k_dbg(ATH10K_DBG_CORE,
		   "%s: (device: 0x%p, address: 0x%x, length: %d)\n",
		   __func__, ar, address, length);

	while (length) {
		rxlen = min_t(u32, length, BMI_MAX_DATA_SIZE);

		cmd.id = __cpu_to_le32(BMI_READ_MEMORY);
		cmd.read_mem.addr = __cpu_to_le32(address);
		cmd.read_mem.len = __cpu_to_le32(rxlen);

		/* NOTE: rxlen is in/out — on return it holds the number of
		 * bytes actually received, used by the bookkeeping below. */
		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen,
						  &resp, &rxlen);
		if (ret) {
			ath10k_warn("unable to read from the device\n");
			return ret;
		}

		memcpy(buffer, resp.read_mem.payload, rxlen);
		address += rxlen;
		buffer += rxlen;
		length -= rxlen;
	}

	return 0;
}
/*
 * Download a (compressed) image to the target using the BMI LZ stream
 * interface.
 *
 * The LZ data command requires 4-byte multiples, so the buffer is split
 * into a 4-aligned head plus a zero-padded 4-byte trailer holding the
 * remaining 1-3 bytes.  A final stream-start to address 0 closes the
 * stream; per the original comment this mainly flushes target caches.
 *
 * Returns 0 on success or the first failing BMI call's errno.
 */
int ath10k_bmi_fast_download(struct ath10k *ar,
			     u32 address, const char *buffer, u32 length)
{
	u8 trailer[4] = {};
	u32 head_len = rounddown(length, 4);
	u32 trailer_len = length - head_len;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI,
		   "bmi fast download address 0x%x buffer 0x%p length %d\n",
		   address, buffer, length);

	ret = ath10k_bmi_lz_stream_start(ar, address);
	if (ret)
		return ret;

	/* copy the last word into a zero padded buffer */
	if (trailer_len > 0)
		memcpy(trailer, buffer + head_len, trailer_len);

	ret = ath10k_bmi_lz_data(ar, buffer, head_len);
	if (ret)
		return ret;

	/* NOTE: if trailer_len == 0 the following check re-tests the head
	 * transfer's ret, which is known to be 0 here — intentional. */
	if (trailer_len > 0)
		ret = ath10k_bmi_lz_data(ar, trailer, 4);

	if (ret != 0)
		return ret;

	/*
	 * Close compressed stream and open a new (fake) one.
	 * This serves mainly to flush Target caches.
	 */
	ret = ath10k_bmi_lz_stream_start(ar, 0x00);

	return ret;
}
/*
 * "Slow"-path PCI register write: wake the chip, write the register via
 * bus-space, then let it sleep again.
 *
 * If the wake fails the write is skipped entirely and the failure is
 * logged — callers get no error indication (void return).
 */
static void athp_pci_regio_s_write_reg(void *arg, uint32_t reg, uint32_t val)
{
	struct ath10k_pci *ar_pci = arg;
	struct ath10k *ar = &ar_pci->sc_sc;
	int tmp;

	/* Chip must be awake before touching registers. */
	tmp = ath10k_pci_wake(ar_pci);
	if (tmp) {
		device_printf(ar->sc_dev,
		    "%s: (reg=0x%08x) couldn't wake; err=%d\n",
		    __func__, reg, tmp);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_REGIO, "%s: %08x <- %08x\n",
	    __func__, reg, val);

	bus_space_write_4(ar_pci->sc_st, ar_pci->sc_sh, reg, val);

	/* Balance the wake above. */
	ath10k_pci_sleep(ar_pci);
}
/*
 * Power up the AHB HIF: reset the chip, bring up the copy engine pipes,
 * push the initial configuration and finally wake the target CPU.
 *
 * The steps must run in this order; failures after CE init tear the
 * copy engines back down.  Returns 0 or a negative errno.
 *
 * fw_mode is currently unused here — presumably consumed by a later
 * boot stage; TODO confirm.
 */
static int ath10k_ahb_hif_power_up(struct ath10k *ar,
				   enum ath10k_firmware_mode fw_mode)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot ahb hif power up\n");

	ret = ath10k_ahb_chip_reset(ar);
	if (ret) {
		ath10k_err(ar, "failed to reset chip: %d\n", ret);
		goto out;
	}

	ret = ath10k_pci_init_pipes(ar);
	if (ret) {
		ath10k_err(ar, "failed to initialize CE: %d\n", ret);
		goto out;
	}

	ret = ath10k_pci_init_config(ar);
	if (ret) {
		ath10k_err(ar, "failed to setup init config: %d\n", ret);
		goto err_ce_deinit;
	}

	ret = ath10k_ahb_wake_target_cpu(ar);
	if (ret) {
		ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
		goto err_ce_deinit;
	}

	return 0;

err_ce_deinit:
	ath10k_pci_ce_deinit(ar);
out:
	return ret;
}
/*
 * Feed `length` bytes of LZ-stream payload to the target, chunked so
 * header + payload fits in BMI_MAX_DATA_SIZE.
 *
 * Disallowed after BMI_DONE (-EBUSY).  Each chunk is expected to be a
 * 4-byte multiple (WARN_ON otherwise); callers pad via
 * ath10k_bmi_fast_download().  Returns 0 or a negative errno.
 */
int ath10k_bmi_lz_data(struct ath10k *ar, const char *buffer, u32 length)
{
	struct bmi_cmd cmd;
	u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.lz_data);
	u32 txlen;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%p length %d\n",
		   buffer, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	while (length) {
		/* Leave room for the command header in each transfer. */
		txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);

		WARN_ON(txlen & 3);

		cmd.id = __cpu_to_le32(BMI_LZ_DATA);
		cmd.lz_data.len = __cpu_to_le32(txlen);
		memcpy(cmd.lz_data.payload, buffer, txlen);

		/* No response expected for LZ data. */
		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
						  NULL, NULL);
		if (ret) {
			ath10k_warn(ar, "unable to write to the device\n");
			return ret;
		}

		buffer += txlen;
		length -= txlen;
	}

	return 0;
}
static int ath10k_download_fw(struct ath10k *ar, enum ath10k_firmware_mode mode) { u32 address, data_len; const char *mode_name; const void *data; int ret; address = ar->hw_params.patch_load_addr; switch (mode) { case ATH10K_FIRMWARE_MODE_NORMAL: data = ar->firmware_data; data_len = ar->firmware_len; mode_name = "normal"; break; case ATH10K_FIRMWARE_MODE_UTF: data = ar->testmode.utf->data; data_len = ar->testmode.utf->size; mode_name = "utf"; break; default: ath10k_err(ar, "unknown firmware mode: %d\n", mode); return -EINVAL; } ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot uploading firmware image %p len %d mode %s\n", data, data_len, mode_name); ret = ath10k_bmi_fast_download(ar, address, data, data_len); if (ret) { ath10k_err(ar, "failed to download %s firmware: %d\n", mode_name, ret); return ret; } return ret; }
/*
 * Return an athp_buf to its ring's inactive list.
 *
 * Sanity-checks (and loudly logs) two invariants before freeing: the
 * buffer must not still carry a node reference, and it must belong to
 * the ring it is being returned to.  Any attached mbuf is unmapped and
 * freed; the buf itself is recycled, not freed.
 */
void
athp_freebuf(struct ath10k *ar, struct athp_buf_ring *br,
    struct athp_buf *bf)
{
	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(bf);

	/* Complain if the buffer has a noderef left */
	if (cb->ni != NULL) {
		ath10k_err(ar,
		    "%s: TODO: pbuf=%p, mbuf=%p, ni is not null (%p) !\n",
		    __func__, bf, bf->m, cb->ni);
	}

	ATHP_BUF_LOCK(ar);

	/* Buffer returned to a ring it doesn't belong to — log, but
	 * proceed anyway. */
	if (br->btype != bf->btype) {
		ath10k_err(ar, "%s: ERROR: bf=%p, bf btype=%d, ring btype=%d\n",
		    __func__, bf, bf->btype, br->btype);
	}

	ath10k_dbg(ar, ATH10K_DBG_PBUF,
	    "%s: br=%d, m=%p, bf=%p, paddr=0x%lx\n",
	    __func__, br->btype, bf->m, bf, bf->mb.paddr);

	/* if there's an mbuf - unmap (if needed) and free it */
	if (bf->m != NULL)
		_athp_free_buf(ar, br, bf);

	/* Push it into the inactive queue */
	TAILQ_INSERT_TAIL(&br->br_inactive, bf, next);
	ATHP_BUF_UNLOCK(ar);
}
int ath10k_htt_tx_alloc(struct ath10k_htt *htt) { struct ath10k *ar = htt->ar; spin_lock_init(&htt->tx_lock); if (test_bit(ATH10K_FW_FEATURE_WMI_10X, htt->ar->fw_features)) htt->max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC; else htt->max_num_pending_tx = TARGET_NUM_MSDU_DESC; ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n", htt->max_num_pending_tx); htt->pending_tx = kzalloc(sizeof(*htt->pending_tx) * htt->max_num_pending_tx, GFP_KERNEL); if (!htt->pending_tx) return -ENOMEM; htt->used_msdu_ids = kzalloc(sizeof(unsigned long) * BITS_TO_LONGS(htt->max_num_pending_tx), GFP_KERNEL); if (!htt->used_msdu_ids) { kfree(htt->pending_tx); return -ENOMEM; } htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev, sizeof(struct ath10k_htt_txbuf), 4, 0); if (!htt->tx_pool) { kfree(htt->used_msdu_ids); kfree(htt->pending_tx); return -ENOMEM; } return 0; }
/*
 * (Re)initialize a copy-engine source ring against the hardware.
 *
 * Clears the host-owned descriptor area, resyncs the software indices
 * from the hardware read/write indices, then programs the CE registers
 * (base address, size, dmax, byte-swap, watermarks) for this ring.
 * Register writes happen in this specific order; do not reorder.
 *
 * Always returns 0.
 */
static int ath10k_ce_init_src_ring(struct ath10k *ar,
				   unsigned int ce_id,
				   const struct ce_attr *attr)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);

	nentries = roundup_pow_of_two(attr->src_nentries);

	memset(src_ring->base_addr_owner_space, 0,
	       nentries * sizeof(struct ce_desc));

	/* Resync software state from whatever the hardware reports. */
	src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
	src_ring->sw_index &= src_ring->nentries_mask;
	src_ring->hw_index = src_ring->sw_index;

	src_ring->write_index =
		ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
	src_ring->write_index &= src_ring->nentries_mask;

	ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
					 src_ring->base_addr_ce_space);
	ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
	ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
	ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "boot init ce src ring id %d entries %d base_addr %pK\n",
		   ce_id, nentries, src_ring->base_addr_owner_space);

	return 0;
}
int ath10k_bmi_get_target_info(struct ath10k *ar, struct bmi_target_info *target_info) { struct bmi_cmd cmd; union bmi_resp resp; u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.get_target_info); u32 resplen = sizeof(resp.get_target_info); int ret; ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi get target info\n"); if (ar->bmi.done_sent) { ath10k_warn(ar, "BMI Get Target Info Command disallowed\n"); return -EBUSY; } cmd.id = __cpu_to_le32(BMI_GET_TARGET_INFO); ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen); if (ret) { ath10k_warn(ar, "unable to get target info from device\n"); return ret; } if (resplen < sizeof(resp.get_target_info)) { ath10k_warn(ar, "invalid get_target_info response length (%d)\n", resplen); return -EIO; } target_info->version = __le32_to_cpu(resp.get_target_info.version); target_info->type = __le32_to_cpu(resp.get_target_info.type); return 0; }
/*
 * Queue a deferred-work entry onto the driver task queue.
 *
 * Fills in the entry's callback state, appends it to the queue under the
 * taskq lock and, if the queue is marked running, kicks the net80211
 * run task.  Returns 0, or EINVAL when no taskq head exists.
 *
 * Fix: e->on_queue was assigned twice — once outside the lock and again
 * inside it.  The unlocked store was redundant and raced with consumers
 * inspecting on_queue under the lock; keep only the locked assignment,
 * made together with the list insertion it describes.
 */
int
athp_taskq_queue(struct ath10k *ar, struct athp_taskq_entry *e,
    const char *str, athp_taskq_cmd_cb *cb)
{
	struct ieee80211com *ic = &ar->sc_ic;
	struct athp_taskq_head *h;
	int do_run = 0;

	h = ar->sc_taskq_head;
	if (h == NULL)
		return (EINVAL);

	ath10k_dbg(ar, ATH10K_DBG_TASKQ,
	    "%s: queuing cb %s %p (ptr %p)\n",
	    __func__, str, cb, e);

	e->ar = ar;
	e->cb = cb;
	e->cb_str = str;

	ATHP_TASKQ_LOCK(h);
	/* Mark on-queue under the lock, together with the insertion. */
	e->on_queue = 1;
	TAILQ_INSERT_TAIL(&h->list, e, node);
	if (h->is_running)
		do_run = 1;
	ATHP_TASKQ_UNLOCK(h);

	/* Only poke the run task when the queue is active. */
	if (do_run)
		ieee80211_runtask(ic, &h->run_task);

	return (0);
}
/*
 * Ask the BMI target to execute code at `address` with `*param` as the
 * argument; on success *param is overwritten with the execution result.
 *
 * Disallowed after BMI_DONE (-EBUSY).
 *
 * Fix: the short-response branch previously executed `return ret` with
 * ret == 0 (the exchange had succeeded), silently reporting success on a
 * truncated reply and leaving *param unset.  Return -EIO instead, as the
 * other BMI response-length checks do.
 */
int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.execute);
	u32 resplen = sizeof(resp.execute);
	int ret;

	if (ar->bmi.done_sent) {
		ath10k_warn("command disallowed\n");
		return -EBUSY;
	}

	ath10k_dbg(ATH10K_DBG_BMI,
		   "%s: (device: 0x%p, address: 0x%x, param: %d)\n",
		   __func__, ar, address, *param);

	cmd.id = __cpu_to_le32(BMI_EXECUTE);
	cmd.execute.addr = __cpu_to_le32(address);
	cmd.execute.param = __cpu_to_le32(*param);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
	if (ret) {
		ath10k_warn("unable to read from the device\n");
		return ret;
	}

	if (resplen < sizeof(resp.execute)) {
		ath10k_warn("invalid execute response length (%d)\n",
			    resplen);
		return -EIO; /* was "return ret" (== 0), masking the error */
	}

	*param = __le32_to_cpu(resp.execute.result);

	return 0;
}
int ath10k_swap_code_seg_configure(struct ath10k *ar, const struct ath10k_fw_file *fw_file) { int ret; struct ath10k_swap_code_seg_info *seg_info = NULL; if (!fw_file->firmware_swap_code_seg_info) return 0; ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot found firmware code swap binary\n"); seg_info = fw_file->firmware_swap_code_seg_info; ret = ath10k_bmi_write_memory(ar, seg_info->target_addr, &seg_info->seg_hw_info, sizeof(seg_info->seg_hw_info)); if (ret) { ath10k_err(ar, "failed to write Code swap segment information (%d)\n", ret); return ret; } return 0; }
/*
 * Queue one MSDU for transmission through HTT.
 *
 * Builds the HTC-carried HTT TX descriptor (txdesc) and a two-entry DMA
 * fragment list (txfrag), DMA-maps both the MSDU and the fragment list,
 * allocates an msdu_id slot, and hands the descriptor to HTC.  On
 * success the completion path (HTC + HTT completions, see the refcount
 * note below) releases everything; on failure the err: unwind runs.
 *
 * Returns 0 or a negative errno.
 */
int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct device *dev = htt->ar->dev;
	struct htt_cmd *cmd;
	struct htt_data_tx_desc_frag *tx_frags;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
	struct ath10k_skb_cb *skb_cb;
	struct sk_buff *txdesc = NULL;
	struct sk_buff *txfrag = NULL;
	u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id;
	u8 tid;
	int prefetch_len, desc_len, frag_len;
	dma_addr_t frags_paddr;
	int msdu_id = -1;
	int res;
	u8 flags0;
	u16 flags1;

	/* Reserve a pending-tx slot; fails when the target queue is full. */
	res = ath10k_htt_tx_inc_pending(htt);
	if (res)
		return res;

	/* Bytes of the frame copied into the descriptor for the target to
	 * prefetch, rounded up for 4-byte alignment. */
	prefetch_len = min(htt->prefetch_len, msdu->len);
	prefetch_len = roundup(prefetch_len, 4);

	desc_len = sizeof(cmd->hdr) + sizeof(cmd->data_tx) + prefetch_len;
	frag_len = sizeof(*tx_frags) * 2;

	txdesc = ath10k_htc_alloc_skb(desc_len);
	if (!txdesc) {
		res = -ENOMEM;
		goto err;
	}

	txfrag = dev_alloc_skb(frag_len);
	if (!txfrag) {
		res = -ENOMEM;
		goto err;
	}

	/* The HTT descriptor must be 4-byte aligned. */
	if (!IS_ALIGNED((unsigned long)txdesc->data, 4)) {
		ath10k_warn("htt alignment check failed. dropping packet.\n");
		res = -EIO;
		goto err;
	}

	/* Claim an msdu_id and record the descriptor under tx_lock. */
	spin_lock_bh(&htt->tx_lock);
	msdu_id = ath10k_htt_tx_alloc_msdu_id(htt);
	if (msdu_id < 0) {
		spin_unlock_bh(&htt->tx_lock);
		res = msdu_id;
		goto err;
	}
	htt->pending_tx[msdu_id] = txdesc;
	spin_unlock_bh(&htt->tx_lock);

	res = ath10k_skb_map(dev, msdu);
	if (res)
		goto err;

	/* tx fragment list must be terminated with zero-entry */
	skb_put(txfrag, frag_len);
	tx_frags = (struct htt_data_tx_desc_frag *)txfrag->data;
	tx_frags[0].paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
	tx_frags[0].len = __cpu_to_le32(msdu->len);
	tx_frags[1].paddr = __cpu_to_le32(0);
	tx_frags[1].len = __cpu_to_le32(0);

	res = ath10k_skb_map(dev, txfrag);
	if (res)
		goto err;

	ath10k_dbg(ATH10K_DBG_HTT, "txfrag 0x%llx msdu 0x%llx\n",
		   (unsigned long long) ATH10K_SKB_CB(txfrag)->paddr,
		   (unsigned long long) ATH10K_SKB_CB(msdu)->paddr);
	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "txfrag: ",
			txfrag->data, frag_len);
	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "msdu: ",
			msdu->data, msdu->len);

	/* Build the HTT TX_FRM command in the descriptor skb. */
	skb_put(txdesc, desc_len);
	cmd = (struct htt_cmd *)txdesc->data;
	memset(cmd, 0, desc_len);

	tid = ATH10K_SKB_CB(msdu)->htt.tid;

	ath10k_dbg(ATH10K_DBG_HTT, "htt data tx using tid %hhu\n", tid);

	flags0 = 0;
	if (!ieee80211_has_protected(hdr->frame_control))
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
	flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
	flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
		     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);

	flags1 = 0;
	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);

	frags_paddr = ATH10K_SKB_CB(txfrag)->paddr;

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
	cmd->data_tx.flags0 = flags0;
	cmd->data_tx.flags1 = __cpu_to_le16(flags1);
	cmd->data_tx.len = __cpu_to_le16(msdu->len);
	cmd->data_tx.id = __cpu_to_le16(msdu_id);
	cmd->data_tx.frags_paddr = __cpu_to_le32(frags_paddr);
	cmd->data_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID);

	memcpy(cmd->data_tx.prefetch, msdu->data, prefetch_len);

	/* refcount is decremented by HTC and HTT completions until it reaches
	 * zero and is freed */
	skb_cb = ATH10K_SKB_CB(txdesc);
	skb_cb->htt.msdu_id = msdu_id;
	skb_cb->htt.refcount = 2;
	skb_cb->htt.txfrag = txfrag;
	skb_cb->htt.msdu = msdu;

	res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
	if (res)
		goto err;

	return 0;

err:
	/* NOTE(review): txfrag and msdu are unmapped here even on early
	 * failure paths where they were never DMA-mapped — presumably
	 * ath10k_skb_unmap tolerates an unmapped skb; confirm.  Upstream
	 * later restructured this unwind into per-stage labels. */
	if (txfrag)
		ath10k_skb_unmap(dev, txfrag);
	if (txdesc)
		dev_kfree_skb_any(txdesc);
	if (txfrag)
		dev_kfree_skb_any(txfrag);
	if (msdu_id >= 0) {
		spin_lock_bh(&htt->tx_lock);
		htt->pending_tx[msdu_id] = NULL;
		ath10k_htt_tx_free_msdu_id(htt, msdu_id);
		spin_unlock_bh(&htt->tx_lock);
	}
	ath10k_htt_tx_dec_pending(htt);
	ath10k_skb_unmap(dev, msdu);
	return res;
}
/*
 * Fetch and parse an API-2+ firmware container file.
 *
 * The file is a magic string followed by a list of 4-byte-aligned
 * TLV information elements (struct ath10k_fw_ie).  Recognized IEs
 * populate fields on `ar` (version string, feature bitmap, firmware/OTP
 * image pointers, WMI/HTT op versions, code-swap image); unknown IEs
 * are skipped with a warning.  The image pointers reference the
 * still-held firmware blob — they stay valid until
 * ath10k_core_free_firmware_files() runs.
 *
 * Returns 0 on success; -EINVAL on malformed contents, -ENOMEDIUM when
 * no firmware image IE was present, or the fetch error.  All error
 * paths release the fetched file.
 */
static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
{
	size_t magic_len, len, ie_len;
	int ie_id, i, index, bit, ret;
	struct ath10k_fw_ie *hdr;
	const u8 *data;
	__le32 *timestamp, *version;

	/* first fetch the firmware file (firmware-*.bin) */
	ar->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, name);
	if (IS_ERR(ar->firmware)) {
		ath10k_err(ar, "could not fetch firmware file '%s/%s': %ld\n",
			   ar->hw_params.fw.dir, name, PTR_ERR(ar->firmware));
		return PTR_ERR(ar->firmware);
	}

	data = ar->firmware->data;
	len = ar->firmware->size;

	/* magic also includes the null byte, check that as well */
	magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1;

	if (len < magic_len) {
		ath10k_err(ar, "firmware file '%s/%s' too small to contain magic: %zu\n",
			   ar->hw_params.fw.dir, name, len);
		ret = -EINVAL;
		goto err;
	}

	if (memcmp(data, ATH10K_FIRMWARE_MAGIC, magic_len) != 0) {
		ath10k_err(ar, "invalid firmware magic\n");
		ret = -EINVAL;
		goto err;
	}

	/* jump over the padding */
	magic_len = ALIGN(magic_len, 4);

	len -= magic_len;
	data += magic_len;

	/* loop elements */
	while (len > sizeof(struct ath10k_fw_ie)) {
		hdr = (struct ath10k_fw_ie *)data;

		ie_id = le32_to_cpu(hdr->id);
		ie_len = le32_to_cpu(hdr->len);

		len -= sizeof(*hdr);
		data += sizeof(*hdr);

		/* Guard against a truncated or lying IE header. */
		if (len < ie_len) {
			ath10k_err(ar, "invalid length for FW IE %d (%zu < %zu)\n",
				   ie_id, len, ie_len);
			ret = -EINVAL;
			goto err;
		}

		switch (ie_id) {
		case ATH10K_FW_IE_FW_VERSION:
			/* Silently skip versions too long for the buffer. */
			if (ie_len > sizeof(ar->hw->wiphy->fw_version) - 1)
				break;

			memcpy(ar->hw->wiphy->fw_version, data, ie_len);
			ar->hw->wiphy->fw_version[ie_len] = '\0';

			ath10k_dbg(ar, ATH10K_DBG_BOOT,
				   "found fw version %s\n",
				   ar->hw->wiphy->fw_version);
			break;
		case ATH10K_FW_IE_TIMESTAMP:
			if (ie_len != sizeof(u32))
				break;

			timestamp = (__le32 *)data;

			ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw timestamp %d\n",
				   le32_to_cpup(timestamp));
			break;
		case ATH10K_FW_IE_FEATURES:
			ath10k_dbg(ar, ATH10K_DBG_BOOT,
				   "found firmware features ie (%zd B)\n",
				   ie_len);

			/* Feature flags are a little-endian bit array; only
			 * bits the IE actually covers are considered. */
			for (i = 0; i < ATH10K_FW_FEATURE_COUNT; i++) {
				index = i / 8;
				bit = i % 8;

				if (index == ie_len)
					break;

				if (data[index] & (1 << bit)) {
					ath10k_dbg(ar, ATH10K_DBG_BOOT,
						   "Enabling feature bit: %i\n",
						   i);
					__set_bit(i, ar->fw_features);
				}
			}

			ath10k_dbg_dump(ar, ATH10K_DBG_BOOT, "features", "",
					ar->fw_features,
					sizeof(ar->fw_features));
			break;
		case ATH10K_FW_IE_FW_IMAGE:
			ath10k_dbg(ar, ATH10K_DBG_BOOT,
				   "found fw image ie (%zd B)\n",
				   ie_len);

			/* Points into ar->firmware — no copy made. */
			ar->firmware_data = data;
			ar->firmware_len = ie_len;
			break;
		case ATH10K_FW_IE_OTP_IMAGE:
			ath10k_dbg(ar, ATH10K_DBG_BOOT,
				   "found otp image ie (%zd B)\n",
				   ie_len);

			ar->otp_data = data;
			ar->otp_len = ie_len;
			break;
		case ATH10K_FW_IE_WMI_OP_VERSION:
			if (ie_len != sizeof(u32))
				break;

			version = (__le32 *)data;

			ar->wmi.op_version = le32_to_cpup(version);

			ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie wmi op version %d\n",
				   ar->wmi.op_version);
			break;
		case ATH10K_FW_IE_HTT_OP_VERSION:
			if (ie_len != sizeof(u32))
				break;

			version = (__le32 *)data;

			ar->htt.op_version = le32_to_cpup(version);

			ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie htt op version %d\n",
				   ar->htt.op_version);
			break;
		case ATH10K_FW_IE_FW_CODE_SWAP_IMAGE:
			ath10k_dbg(ar, ATH10K_DBG_BOOT,
				   "found fw code swap image ie (%zd B)\n",
				   ie_len);
			ar->swap.firmware_codeswap_data = data;
			ar->swap.firmware_codeswap_len = ie_len;
			break;
		default:
			ath10k_warn(ar, "Unknown FW IE: %u\n",
				    le32_to_cpu(hdr->id));
			break;
		}

		/* jump over the padding */
		ie_len = ALIGN(ie_len, 4);

		len -= ie_len;
		data += ie_len;
	}

	/* A container without a firmware image is unusable. */
	if (!ar->firmware_data || !ar->firmware_len) {
		ath10k_warn(ar, "No ATH10K_FW_IE_FW_IMAGE found from '%s/%s', skipping\n",
			    ar->hw_params.fw.dir, name);
		ret = -ENOMEDIUM;
		goto err;
	}

	return 0;

err:
	ath10k_core_free_firmware_files(ar);
	return ret;
}
/*
 * HTC callback: the target has acknowledged a suspend request.
 * Wakes whoever is blocked on ar->target_suspend.
 */
static void ath10k_send_suspend_complete(struct ath10k *ar)
{
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot suspend complete\n");

	complete(&ar->target_suspend);
}
/*
 * Bring the target from reset to a fully booted state.
 *
 * Sequence: BMI config + calibration + firmware download, HTC/WMI/HTT
 * attach and allocation, HIF start, service connection, the WMI init
 * handshake, RX ring refill and finally HTT setup and debug start.
 * The order is mandated by the firmware boot protocol — do not reorder.
 *
 * mode selects normal vs UTF (testmode) firmware; several steps are
 * skipped in UTF mode.  Must be called with conf_mutex held.  Returns 0
 * or a negative errno; on failure everything brought up so far is torn
 * down via the layered error labels.
 */
int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
{
	int status;

	lockdep_assert_held(&ar->conf_mutex);

	clear_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags);

	ath10k_bmi_start(ar);

	if (ath10k_init_configure_target(ar)) {
		status = -EINVAL;
		goto err;
	}

	status = ath10k_download_cal_data(ar);
	if (status)
		goto err;

	/* Some of of qca988x solutions are having global reset issue
	 * during target initialization. Bypassing PLL setting before
	 * downloading firmware and letting the SoC run on REF_CLK is
	 * fixing the problem. Corresponding firmware change is also needed
	 * to set the clock source once the target is initialized.
	 */
	if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT,
		     ar->fw_features)) {
		status = ath10k_bmi_write32(ar, hi_skip_clock_init, 1);
		if (status) {
			ath10k_err(ar, "could not write to skip_clock_init: %d\n",
				   status);
			goto err;
		}
	}

	status = ath10k_download_fw(ar, mode);
	if (status)
		goto err;

	status = ath10k_init_uart(ar);
	if (status)
		goto err;

	/* Hook the suspend-complete notification before HTC comes up. */
	ar->htc.htc_ops.target_send_suspend_complete =
		ath10k_send_suspend_complete;

	status = ath10k_htc_init(ar);
	if (status) {
		ath10k_err(ar, "could not init HTC (%d)\n", status);
		goto err;
	}

	/* BMI phase over; the target boots its firmware after this. */
	status = ath10k_bmi_done(ar);
	if (status)
		goto err;

	status = ath10k_wmi_attach(ar);
	if (status) {
		ath10k_err(ar, "WMI attach failed: %d\n", status);
		goto err;
	}

	status = ath10k_htt_init(ar);
	if (status) {
		ath10k_err(ar, "failed to init htt: %d\n", status);
		goto err_wmi_detach;
	}

	status = ath10k_htt_tx_alloc(&ar->htt);
	if (status) {
		ath10k_err(ar, "failed to alloc htt tx: %d\n", status);
		goto err_wmi_detach;
	}

	status = ath10k_htt_rx_alloc(&ar->htt);
	if (status) {
		ath10k_err(ar, "failed to alloc htt rx: %d\n", status);
		goto err_htt_tx_detach;
	}

	status = ath10k_hif_start(ar);
	if (status) {
		ath10k_err(ar, "could not start HIF: %d\n", status);
		goto err_htt_rx_detach;
	}

	status = ath10k_htc_wait_target(&ar->htc);
	if (status) {
		ath10k_err(ar, "failed to connect to HTC: %d\n", status);
		goto err_hif_stop;
	}

	if (mode == ATH10K_FIRMWARE_MODE_NORMAL) {
		status = ath10k_htt_connect(&ar->htt);
		if (status) {
			ath10k_err(ar, "failed to connect htt (%d)\n", status);
			goto err_hif_stop;
		}
	}

	status = ath10k_wmi_connect(ar);
	if (status) {
		ath10k_err(ar, "could not connect wmi: %d\n", status);
		goto err_hif_stop;
	}

	status = ath10k_htc_start(&ar->htc);
	if (status) {
		ath10k_err(ar, "failed to start htc: %d\n", status);
		goto err_hif_stop;
	}

	if (mode == ATH10K_FIRMWARE_MODE_NORMAL) {
		status = ath10k_wmi_wait_for_service_ready(ar);
		if (status) {
			ath10k_warn(ar, "wmi service ready event not received");
			goto err_hif_stop;
		}
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "firmware %s booted\n",
		   ar->hw->wiphy->fw_version);

	status = ath10k_wmi_cmd_init(ar);
	if (status) {
		ath10k_err(ar, "could not send WMI init command (%d)\n",
			   status);
		goto err_hif_stop;
	}

	status = ath10k_wmi_wait_for_unified_ready(ar);
	if (status) {
		ath10k_err(ar, "wmi unified ready event not received\n");
		goto err_hif_stop;
	}

	/* If firmware indicates Full Rx Reorder support it must be used in a
	 * slightly different manner. Let HTT code know.
	 */
	ar->htt.rx_ring.in_ord_rx = !!(test_bit(WMI_SERVICE_RX_FULL_REORDER,
						ar->wmi.svc_map));

	status = ath10k_htt_rx_ring_refill(ar);
	if (status) {
		ath10k_err(ar, "failed to refill htt rx ring: %d\n", status);
		goto err_hif_stop;
	}

	/* we don't care about HTT in UTF mode */
	if (mode == ATH10K_FIRMWARE_MODE_NORMAL) {
		status = ath10k_htt_setup(&ar->htt);
		if (status) {
			ath10k_err(ar, "failed to setup htt: %d\n", status);
			goto err_hif_stop;
		}
	}

	status = ath10k_debug_start(ar);
	if (status)
		goto err_hif_stop;

	/* All vdev ids start out free. */
	ar->free_vdev_map = (1LL << ar->max_num_vdevs) - 1;

	INIT_LIST_HEAD(&ar->arvifs);

	return 0;

err_hif_stop:
	ath10k_hif_stop(ar);
err_htt_rx_detach:
	ath10k_htt_rx_free(&ar->htt);
err_htt_tx_detach:
	ath10k_htt_tx_free(&ar->htt);
err_wmi_detach:
	ath10k_wmi_detach(ar);
err:
	return status;
}