/*
 * Guts of ath10k_ce_completed_send_next.
 * The caller takes responsibility for any necessary locking.
 */
int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
					 void **per_transfer_contextp)
{
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int read_index;

	if (src_ring->hw_index == sw_index) {
		/*
		 * The SW completion index has caught up with the cached
		 * version of the HW completion index.
		 * Update the cached HW completion index to see whether
		 * the SW has really caught up to the HW, or if the cached
		 * value of the HW index has become stale.
		 */
		read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
		if (read_index == 0xffffffff)
			return -ENODEV;

		read_index &= nentries_mask;
		src_ring->hw_index = read_index;
	}

	read_index = src_ring->hw_index;

	if (read_index == sw_index)
		return -EIO;

	if (per_transfer_contextp)
		*per_transfer_contextp =
			src_ring->per_transfer_context[sw_index];

	/* sanity */
	src_ring->per_transfer_context[sw_index] = NULL;

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	src_ring->sw_index = sw_index;

	return 0;
}
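/*
 * A minimal sketch (not verbatim driver code) of how the _nolock variant
 * above is typically reached: a thin wrapper that serializes copy engine
 * access with the per-device ce_lock.  The wrapper body and its exact
 * signature are assumptions based on the locking convention noted in the
 * comment above.
 */
int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
				  void **per_transfer_contextp)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	spin_lock_bh(&ar_pci->ce_lock);
	ret = ath10k_ce_completed_send_next_nolock(ce_state,
						   per_transfer_contextp);
	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}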
void ath10k_ce_dump_registers(struct ath10k *ar,
			      struct ath10k_fw_crash_data *crash_data)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_crash_data ce;
	u32 addr, id;

	lockdep_assert_held(&ar->data_lock);

	ath10k_err(ar, "Copy Engine register dump:\n");

	spin_lock_bh(&ar_pci->ce_lock);
	for (id = 0; id < CE_COUNT; id++) {
		addr = ath10k_ce_base_address(ar, id);
		ce.base_addr = cpu_to_le32(addr);

		ce.src_wr_idx =
			cpu_to_le32(ath10k_ce_src_ring_write_index_get(ar, addr));
		ce.src_r_idx =
			cpu_to_le32(ath10k_ce_src_ring_read_index_get(ar, addr));
		ce.dst_wr_idx =
			cpu_to_le32(ath10k_ce_dest_ring_write_index_get(ar, addr));
		ce.dst_r_idx =
			cpu_to_le32(ath10k_ce_dest_ring_read_index_get(ar, addr));

		if (crash_data)
			crash_data->ce_crash_data[id] = ce;

		ath10k_err(ar, "[%02d]: 0x%08x %3u %3u %3u %3u", id,
			   le32_to_cpu(ce.base_addr),
			   le32_to_cpu(ce.src_wr_idx),
			   le32_to_cpu(ce.src_r_idx),
			   le32_to_cpu(ce.dst_wr_idx),
			   le32_to_cpu(ce.dst_r_idx));
	}

	spin_unlock_bh(&ar_pci->ce_lock);
}
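/*
 * Hedged usage sketch: ath10k_ce_dump_registers() asserts that ar->data_lock
 * is already held, so a caller (for example a firmware crash handler) would
 * take that lock first.  The caller name below is illustrative only.
 */
static void ath10k_ce_dump_registers_example(struct ath10k *ar,
					     struct ath10k_fw_crash_data *crash_data)
{
	spin_lock_bh(&ar->data_lock);
	ath10k_ce_dump_registers(ar, crash_data);
	spin_unlock_bh(&ar->data_lock);
}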
static int ath10k_ce_init_src_ring(struct ath10k *ar,
				   unsigned int ce_id,
				   const struct ce_attr *attr)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);

	nentries = roundup_pow_of_two(attr->src_nentries);

	memset(src_ring->base_addr_owner_space, 0,
	       nentries * sizeof(struct ce_desc));

	src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
	src_ring->sw_index &= src_ring->nentries_mask;
	src_ring->hw_index = src_ring->sw_index;

	src_ring->write_index =
		ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
	src_ring->write_index &= src_ring->nentries_mask;

	ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
					 src_ring->base_addr_ce_space);
	ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
	ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
	ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "boot init ce src ring id %d entries %d base_addr %pK\n",
		   ce_id, nentries, src_ring->base_addr_owner_space);

	return 0;
}
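/*
 * Hedged illustration of a ce_attr that could be passed to the init path
 * above.  Only src_nentries and src_sz_max are referenced by that function;
 * the remaining fields and the example values here are assumptions, not
 * taken from the driver's actual CE configuration table.
 */
static const struct ce_attr ce_attr_example = {
	.flags = 0,
	.src_nentries = 16,	/* rounded up to a power of two by the init path */
	.src_sz_max = 2048,
	.dest_nentries = 0,
};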
static int ath10k_ce_init_src_ring(struct ath10k *ar,
				   unsigned int ce_id,
				   struct ath10k_ce_pipe *ce_state,
				   const struct ce_attr *attr)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_ring *src_ring;
	unsigned int nentries = attr->src_nentries;
	unsigned int ce_nbytes;
	u32 ctrl_addr = ath10k_ce_base_address(ce_id);
	dma_addr_t base_addr;
	char *ptr;

	nentries = roundup_pow_of_two(nentries);

	if (ce_state->src_ring) {
		WARN_ON(ce_state->src_ring->nentries != nentries);
		return 0;
	}

	ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
	ptr = kzalloc(ce_nbytes, GFP_KERNEL);
	if (ptr == NULL)
		return -ENOMEM;

	ce_state->src_ring = (struct ath10k_ce_ring *)ptr;
	src_ring = ce_state->src_ring;

	ptr += sizeof(struct ath10k_ce_ring);
	src_ring->nentries = nentries;
	src_ring->nentries_mask = nentries - 1;

	src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
	src_ring->sw_index &= src_ring->nentries_mask;
	src_ring->hw_index = src_ring->sw_index;

	src_ring->write_index =
		ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
	src_ring->write_index &= src_ring->nentries_mask;

	src_ring->per_transfer_context = (void **)ptr;

	/*
	 * Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	src_ring->base_addr_owner_space_unaligned =
		pci_alloc_consistent(ar_pci->pdev,
				     (nentries * sizeof(struct ce_desc) +
				      CE_DESC_RING_ALIGN),
				     &base_addr);
	if (!src_ring->base_addr_owner_space_unaligned) {
		kfree(ce_state->src_ring);
		ce_state->src_ring = NULL;
		return -ENOMEM;
	}

	src_ring->base_addr_ce_space_unaligned = base_addr;

	src_ring->base_addr_owner_space = PTR_ALIGN(
			src_ring->base_addr_owner_space_unaligned,
			CE_DESC_RING_ALIGN);
	src_ring->base_addr_ce_space = ALIGN(
			src_ring->base_addr_ce_space_unaligned,
			CE_DESC_RING_ALIGN);

	/*
	 * Also allocate a shadow src ring in regular
	 * mem to use for faster access.
	 */
	src_ring->shadow_base_unaligned =
		kmalloc((nentries * sizeof(struct ce_desc) +
			 CE_DESC_RING_ALIGN), GFP_KERNEL);
	if (!src_ring->shadow_base_unaligned) {
		pci_free_consistent(ar_pci->pdev,
				    (nentries * sizeof(struct ce_desc) +
				     CE_DESC_RING_ALIGN),
				    src_ring->base_addr_owner_space,
				    src_ring->base_addr_ce_space);
		kfree(ce_state->src_ring);
		ce_state->src_ring = NULL;
		return -ENOMEM;
	}

	src_ring->shadow_base = PTR_ALIGN(
			src_ring->shadow_base_unaligned,
			CE_DESC_RING_ALIGN);

	ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
					 src_ring->base_addr_ce_space);
	ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
	ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
	ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);

	ath10k_dbg(ATH10K_DBG_BOOT,
		   "boot ce src ring id %d entries %d base_addr %p\n",
		   ce_id, nentries, src_ring->base_addr_owner_space);

	return 0;
}
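/*
 * For reference, a self-contained sketch of the alignment trick used above:
 * the DMA buffer is over-allocated by CE_DESC_RING_ALIGN bytes so that
 * rounding the base address up to the next CE_DESC_RING_ALIGN boundary
 * (what PTR_ALIGN/ALIGN do for power-of-two alignments) still leaves room
 * for all nentries descriptors.  The helper name is illustrative only.
 */
static inline void *ce_ring_ptr_align_example(void *unaligned,
					      unsigned long align)
{
	/* Round the address up to the next multiple of a power-of-two align. */
	return (void *)(((unsigned long)unaligned + align - 1) & ~(align - 1));
}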
/*
 * Guts of ath10k_ce_completed_send_next.
 * The caller takes responsibility for any necessary locking.
 */
static int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
						void **per_transfer_contextp,
						u32 *bufferp,
						unsigned int *nbytesp,
						unsigned int *transfer_idp)
{
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	struct ce_desc *sdesc, *sbase;
	unsigned int read_index;
	int ret;

	if (src_ring->hw_index == sw_index) {
		/*
		 * The SW completion index has caught up with the cached
		 * version of the HW completion index.
		 * Update the cached HW completion index to see whether
		 * the SW has really caught up to the HW, or if the cached
		 * value of the HW index has become stale.
		 */
		ret = ath10k_pci_wake(ar);
		if (ret)
			return ret;

		src_ring->hw_index =
			ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
		src_ring->hw_index &= nentries_mask;

		ath10k_pci_sleep(ar);
	}

	read_index = src_ring->hw_index;

	if ((read_index == sw_index) || (read_index == 0xffffffff))
		return -EIO;

	sbase = src_ring->shadow_base;
	sdesc = CE_SRC_RING_TO_DESC(sbase, sw_index);

	/* Return data from completed source descriptor */
	*bufferp = __le32_to_cpu(sdesc->addr);
	*nbytesp = __le16_to_cpu(sdesc->nbytes);
	*transfer_idp = MS(__le16_to_cpu(sdesc->flags),
			   CE_DESC_FLAGS_META_DATA);

	if (per_transfer_contextp)
		*per_transfer_contextp =
			src_ring->per_transfer_context[sw_index];

	/* sanity */
	src_ring->per_transfer_context[sw_index] = NULL;

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	src_ring->sw_index = sw_index;

	return 0;
}
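/*
 * For reference, a minimal sketch of the ring index arithmetic used by both
 * completion paths above: nentries is a power of two, so masking with
 * (nentries - 1) wraps the incremented index.  This mirrors how
 * CE_RING_IDX_INCR is assumed to behave; the authoritative definition lives
 * in the CE header, and the helper name here is illustrative only.
 */
static inline unsigned int ce_ring_idx_incr_example(unsigned int nentries_mask,
						    unsigned int idx)
{
	return (idx + 1) & nentries_mask;
}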