Code Example #1
/*
 * Initialize a Copy Engine based on caller-supplied attributes.
 * This may be called once to initialize both source and destination
 * rings or it may be called twice for separate source and destination
 * initialization. It may be that only one side or the other is
 * initialized by software/firmware.
 */
int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
			const struct ce_attr *attr)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
	int ret;

	/*
	 * Make sure there are enough CE ring buffer entries for HTT TX to
	 * avoid additional TX locking checks.
	 *
	 * For lack of a better place, do the check here.
	 */
	BUILD_BUG_ON(2*TARGET_NUM_MSDU_DESC >
		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
	BUILD_BUG_ON(2*TARGET_10X_NUM_MSDU_DESC >
		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));

	ret = ath10k_pci_wake(ar);
	if (ret)
		return ret;

	spin_lock_bh(&ar_pci->ce_lock);
	ce_state->ar = ar;
	ce_state->id = ce_id;
	ce_state->ctrl_addr = ath10k_ce_base_address(ce_id);
	ce_state->attr_flags = attr->flags;
	ce_state->src_sz_max = attr->src_sz_max;
	spin_unlock_bh(&ar_pci->ce_lock);

	if (attr->src_nentries) {
		ret = ath10k_ce_init_src_ring(ar, ce_id, attr);
		if (ret) {
			ath10k_err("Failed to initialize CE src ring for ID: %d (%d)\n",
				   ce_id, ret);
			goto out;
		}
	}

	if (attr->dest_nentries) {
		ret = ath10k_ce_init_dest_ring(ar, ce_id, attr);
		if (ret) {
			ath10k_err("Failed to initialize CE dest ring for ID: %d (%d)\n",
				   ce_id, ret);
			goto out;
		}
	}

out:
	ath10k_pci_sleep(ar);
	return ret;
}
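For context, a caller fills out a struct ce_attr per pipe and initializes each copy engine in turn. A minimal sketch of such a caller, assuming a hypothetical attribute table (the names, counts, and sizes below are illustrative, not the driver's real configuration):

/* Hypothetical per-pipe attribute table; entries are illustrative. */
static const struct ce_attr example_ce_config[] = {
	{ /* CE0: host->target control */
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},
	{ /* CE1: target->host responses */
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 512,
		.dest_nentries = 512,
	},
};

static int example_init_all_pipes(struct ath10k *ar)
{
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(example_ce_config); i++) {
		ret = ath10k_ce_init_pipe(ar, i, &example_ce_config[i]);
		if (ret) {
			ath10k_err("Failed to init CE pipe %d: %d\n", i, ret);
			return ret;
		}
	}

	return 0;
}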
Code Example #2
void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
{
	int ret;

	ret = ath10k_pci_wake(ar);
	if (ret)
		return;

	ath10k_ce_deinit_src_ring(ar, ce_id);
	ath10k_ce_deinit_dest_ring(ar, ce_id);

	ath10k_pci_sleep(ar);
}
Code Example #3
int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state,
			       void *per_recv_context,
			       u32 buffer)
{
	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index;
	unsigned int sw_index;
	int ret;

	spin_lock_bh(&ar_pci->ce_lock);
	write_index = dest_ring->write_index;
	sw_index = dest_ring->sw_index;

	ret = ath10k_pci_wake(ar);
	if (ret)
		goto out;

	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) {
		struct ce_desc *base = dest_ring->base_addr_owner_space;
		struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index);

		/* Update destination descriptor */
		desc->addr   = __cpu_to_le32(buffer);
		desc->nbytes = 0;

		dest_ring->per_transfer_context[write_index] =
							per_recv_context;

		/* Update Destination Ring Write Index */
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
		ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
		dest_ring->write_index = write_index;
		ret = 0;
	} else {
		ret = -EIO;
	}
	ath10k_pci_sleep(ar);

out:
	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}
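A consumer of this function typically allocates and DMA-maps a receive buffer, then posts it until the ring fills up and the enqueue returns -EIO. A rough sketch, assuming an illustrative buffer size and the standard DMA-API helpers:

/* Illustrative: keep posting rx buffers until the ring is full. */
#define EXAMPLE_RX_BUF_SZ 2048	/* buffer size chosen for illustration */

static void example_post_rx_buffers(struct ath10k *ar,
				    struct ath10k_ce_pipe *pipe)
{
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret;

	for (;;) {
		skb = dev_alloc_skb(EXAMPLE_RX_BUF_SZ);
		if (!skb)
			break;

		paddr = dma_map_single(ar->dev, skb->data,
				       EXAMPLE_RX_BUF_SZ, DMA_FROM_DEVICE);
		if (dma_mapping_error(ar->dev, paddr)) {
			dev_kfree_skb_any(skb);
			break;
		}

		/* the CE API of this era takes a 32-bit bus address */
		ret = ath10k_ce_recv_buf_enqueue(pipe, skb, (u32)paddr);
		if (ret) {
			/* ring full (or wake failed): undo and stop */
			dma_unmap_single(ar->dev, paddr,
					 EXAMPLE_RX_BUF_SZ, DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			break;
		}
	}
}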
Code Example #4
void ath10k_ce_disable_interrupts(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ce_id, ret;

	ret = ath10k_pci_wake(ar);
	if (ret)
		return;

	for (ce_id = 0; ce_id < ar_pci->ce_count; ce_id++) {
		struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
		u32 ctrl_addr = ce_state->ctrl_addr;

		ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
	}
	ath10k_pci_sleep(ar);
}
Code Example #5
/*
 * Initialize a Copy Engine based on caller-supplied attributes.
 * This may be called once to initialize both source and destination
 * rings or it may be called twice for separate source and destination
 * initialization. It may be that only one side or the other is
 * initialized by software/firmware.
 */
struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
				unsigned int ce_id,
				const struct ce_attr *attr)
{
	struct ath10k_ce_pipe *ce_state;
	u32 ctrl_addr = ath10k_ce_base_address(ce_id);
	int ret;

	ret = ath10k_pci_wake(ar);
	if (ret)
		return NULL;

	ce_state = ath10k_ce_init_state(ar, ce_id, attr);
	if (!ce_state) {
		ath10k_err("Failed to initialize CE state for ID: %d\n", ce_id);
		ath10k_pci_sleep(ar);	/* drop the wake reference taken above */
		return NULL;
	}

	if (attr->src_nentries) {
		ret = ath10k_ce_init_src_ring(ar, ce_id, ce_state, attr);
		if (ret) {
			ath10k_err("Failed to initialize CE src ring for ID: %d (%d)\n",
				   ce_id, ret);
			ath10k_ce_deinit(ce_state);
			ath10k_pci_sleep(ar);	/* drop the wake reference */
			return NULL;
		}
	}

	if (attr->dest_nentries) {
		ret = ath10k_ce_init_dest_ring(ar, ce_id, ce_state, attr);
		if (ret) {
			ath10k_err("Failed to initialize CE dest ring for ID: %d (%d)\n",
				   ce_id, ret);
			ath10k_ce_deinit(ce_state);
			ath10k_pci_sleep(ar);	/* drop the wake reference */
			return NULL;
		}
	}

	/* Enable CE error interrupts */
	ath10k_ce_error_intr_enable(ar, ctrl_addr);

	ath10k_pci_sleep(ar);

	return ce_state;
}
Code Example #6
int ath10k_ce_disable_interrupts(struct ath10k *ar)
{
	int ce_id, ret;

	ret = ath10k_pci_wake(ar);
	if (ret)
		return ret;

	for (ce_id = 0; ce_id < CE_COUNT; ce_id++) {
		u32 ctrl_addr = ath10k_ce_base_address(ce_id);

		ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
		ath10k_ce_error_intr_disable(ar, ctrl_addr);
		ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
	}

	ath10k_pci_sleep(ar);

	return 0;
}
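All of these examples follow the same discipline: take a wake reference before touching CE registers and drop it afterwards, on success and error paths alike. The skeleton of that idiom, extracted for clarity (everything except ath10k_pci_wake()/ath10k_pci_sleep() is a placeholder):

static int example_do_mmio(struct ath10k *ar)
{
	int ret;

	/* Bring the target out of its low-power state; bail out on failure. */
	ret = ath10k_pci_wake(ar);
	if (ret)
		return ret;

	/* ... access CE registers only inside the wake/sleep bracket ... */

	/* Drop the wake reference so the target may sleep again. */
	ath10k_pci_sleep(ar);

	return 0;
}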
Code Example #7
/*
 * Adjust interrupts for the copy complete handler.
 * If it's needed for either send or recv, then unmask
 * this interrupt; otherwise, mask it.
 *
 * Called with ce_lock held.
 */
static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state,
						int disable_copy_compl_intr)
{
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	int ret;

	ret = ath10k_pci_wake(ar);
	if (ret)
		return;

	if ((!disable_copy_compl_intr) &&
	    (ce_state->send_cb || ce_state->recv_cb))
		ath10k_ce_copy_complete_inter_enable(ar, ctrl_addr);
	else
		ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);

	ath10k_ce_watermark_intr_disable(ar, ctrl_addr);

	ath10k_pci_sleep(ar);
}
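Callers are expected to install their completion callbacks under ce_lock and then let this helper unmask or mask the copy-complete interrupt. A sketch of such a registration path (illustrative; the in-tree driver has equivalent *_cb_register helpers):

/* Illustrative: register a recv callback, then adjust the interrupt mask. */
static void example_set_recv_cb(struct ath10k_ce_pipe *ce_state,
				void (*cb)(struct ath10k_ce_pipe *))
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ce_state->ar);

	spin_lock_bh(&ar_pci->ce_lock);
	ce_state->recv_cb = cb;
	ath10k_ce_per_engine_handler_adjust(ce_state, 0);
	spin_unlock_bh(&ar_pci->ce_lock);
}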
Code Example #8
File: if_athp_pci.c  Project: erikarn/athp
static void
athp_pci_regio_s_write_reg(void *arg, uint32_t reg, uint32_t val)
{
	struct ath10k_pci *ar_pci = arg;
	struct ath10k *ar = &ar_pci->sc_sc;
	int tmp;

	tmp = ath10k_pci_wake(ar_pci);
	if (tmp) {
		device_printf(ar->sc_dev,
		    "%s: (reg=0x%08x) couldn't wake; err=%d\n",
		    __func__,
		    reg,
		    tmp);
		return;
	}
	ath10k_dbg(ar, ATH10K_DBG_REGIO,
	    "%s: %08x <- %08x\n",
	    __func__, reg, val);
	bus_space_write_4(ar_pci->sc_st, ar_pci->sc_sh, reg, val);
	ath10k_pci_sleep(ar_pci);
}
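The read path in the athp port presumably mirrors this shape. A sketch of what it might look like (the function name is hypothetical; bus_space_read_4() is the standard FreeBSD accessor):

/* Hypothetical read-side counterpart to the write path above. */
static uint32_t
athp_pci_regio_s_read_reg(void *arg, uint32_t reg)
{
	struct ath10k_pci *ar_pci = arg;
	struct ath10k *ar = &ar_pci->sc_sc;
	uint32_t val;
	int tmp;

	tmp = ath10k_pci_wake(ar_pci);
	if (tmp) {
		device_printf(ar->sc_dev,
		    "%s: (reg=0x%08x) couldn't wake; err=%d\n",
		    __func__, reg, tmp);
		return (0);	/* no error channel; 0 is a best-effort fallback */
	}
	val = bus_space_read_4(ar_pci->sc_st, ar_pci->sc_sh, reg);
	ath10k_dbg(ar, ATH10K_DBG_REGIO,
	    "%s: %08x -> %08x\n", __func__, reg, val);
	ath10k_pci_sleep(ar_pci);
	return (val);
}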
Code Example #9
void ath10k_ce_per_engine_service_any(struct ath10k *ar)
{
	int ce_id, ret;
	u32 intr_summary;

	ret = ath10k_pci_wake(ar);
	if (ret)
		return;

	intr_summary = CE_INTERRUPT_SUMMARY(ar);

	for (ce_id = 0; intr_summary && (ce_id < CE_COUNT); ce_id++) {
		if (intr_summary & (1 << ce_id))
			intr_summary &= ~(1 << ce_id);
		else
			/* no intr pending on this CE */
			continue;

		ath10k_ce_per_engine_service(ar, ce_id);
	}

	ath10k_pci_sleep(ar);
}
Code Example #10
/*
 * Guts of interrupt handler for per-engine interrupts on a particular CE.
 *
 * Invokes registered callbacks for recv_complete,
 * send_complete, and watermarks.
 */
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
	u32 ctrl_addr = ce_state->ctrl_addr;
	int ret;

	ret = ath10k_pci_wake(ar);
	if (ret)
		return;

	spin_lock_bh(&ar_pci->ce_lock);

	/* Clear the copy-complete interrupts that will be handled here. */
	ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
					  HOST_IS_COPY_COMPLETE_MASK);

	spin_unlock_bh(&ar_pci->ce_lock);

	if (ce_state->recv_cb)
		ce_state->recv_cb(ce_state);

	if (ce_state->send_cb)
		ce_state->send_cb(ce_state);

	spin_lock_bh(&ar_pci->ce_lock);

	/*
	 * Misc CE interrupts are not being handled, but still need
	 * to be cleared.
	 */
	ath10k_ce_engine_int_status_clear(ar, ctrl_addr, CE_WATERMARK_MASK);

	spin_unlock_bh(&ar_pci->ce_lock);
	ath10k_pci_sleep(ar);
}
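Note that both callbacks run without ce_lock held, so they may safely re-enter CE APIs. A recv callback of this era would typically drain completions via ath10k_ce_completed_recv_next(), roughly along these lines (buffer handling is elided; everything outside the CE call is illustrative):

/* Illustrative recv callback: drain every completed receive buffer. */
static void example_recv_cb(struct ath10k_ce_pipe *ce_state)
{
	void *ctx;
	u32 paddr;
	unsigned int nbytes, id, flags;

	while (ath10k_ce_completed_recv_next(ce_state, &ctx, &paddr,
					     &nbytes, &id, &flags) == 0) {
		/* hand 'ctx' (e.g. an skb) up the stack, then repost a buffer */
	}
}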
Code Example #11
/*
 * Guts of ath10k_ce_completed_send_next.
 * The caller takes responsibility for any necessary locking.
 */
static int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
						void **per_transfer_contextp,
						u32 *bufferp,
						unsigned int *nbytesp,
						unsigned int *transfer_idp)
{
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	struct ce_desc *sdesc, *sbase;
	unsigned int read_index;
	int ret;

	if (src_ring->hw_index == sw_index) {
		/*
		 * The SW completion index has caught up with the cached
		 * version of the HW completion index.
		 * Update the cached HW completion index to see whether
		 * the SW has really caught up to the HW, or if the cached
		 * value of the HW index has become stale.
		 */

		ret = ath10k_pci_wake(ar);
		if (ret)
			return ret;

		src_ring->hw_index =
			ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
		src_ring->hw_index &= nentries_mask;

		ath10k_pci_sleep(ar);
	}

	read_index = src_ring->hw_index;

	if ((read_index == sw_index) || (read_index == 0xffffffff))
		return -EIO;

	sbase = src_ring->shadow_base;
	sdesc = CE_SRC_RING_TO_DESC(sbase, sw_index);

	/* Return data from completed source descriptor */
	*bufferp = __le32_to_cpu(sdesc->addr);
	*nbytesp = __le16_to_cpu(sdesc->nbytes);
	*transfer_idp = MS(__le16_to_cpu(sdesc->flags),
			   CE_DESC_FLAGS_META_DATA);

	if (per_transfer_contextp)
		*per_transfer_contextp =
			src_ring->per_transfer_context[sw_index];

	/* sanity */
	src_ring->per_transfer_context[sw_index] = NULL;

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	src_ring->sw_index = sw_index;

	return 0;
}
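The ring bookkeeping above relies on power-of-two ring sizes, so every index operation reduces to a mask. The helpers used here are defined along these lines in ce.h:

/* Ring index helpers (power-of-two rings; nentries_mask == nentries - 1). */
#define CE_RING_DELTA(nentries_mask, fromidx, toidx) \
	(((int)(toidx) - (int)(fromidx)) & (nentries_mask))

#define CE_RING_IDX_INCR(nentries_mask, idx) \
	(((idx) + 1) & (nentries_mask))

/*
 * Example with an 8-entry ring (mask 0x7):
 *   CE_RING_IDX_INCR(0x7, 7) == 0   -- the index wraps around
 *   CE_RING_DELTA(0x7, 6, 2) == 4   -- four slots from index 6 up to 2
 */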
Code Example #12
/*
 * Guts of ath10k_ce_send, used by both ath10k_ce_send and
 * ath10k_ce_sendlist_send.
 * The caller takes responsibility for any needed locking.
 */
static int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
				 void *per_transfer_context,
				 u32 buffer,
				 unsigned int nbytes,
				 unsigned int transfer_id,
				 unsigned int flags)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	struct ce_desc *desc, *sdesc;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int write_index = src_ring->write_index;
	u32 ctrl_addr = ce_state->ctrl_addr;
	u32 desc_flags = 0;
	int ret = 0;

	if (nbytes > ce_state->src_sz_max)
		ath10k_warn("%s: send more we can (nbytes: %d, max: %d)\n",
			    __func__, nbytes, ce_state->src_sz_max);

	ret = ath10k_pci_wake(ar);
	if (ret)
		return ret;

	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) <= 0)) {
		ret = -ENOSR;
		goto exit;
	}

	desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
				   write_index);
	sdesc = CE_SRC_RING_TO_DESC(src_ring->shadow_base, write_index);

	desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);

	if (flags & CE_SEND_FLAG_GATHER)
		desc_flags |= CE_DESC_FLAGS_GATHER;
	if (flags & CE_SEND_FLAG_BYTE_SWAP)
		desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;

	sdesc->addr   = __cpu_to_le32(buffer);
	sdesc->nbytes = __cpu_to_le16(nbytes);
	sdesc->flags  = __cpu_to_le16(desc_flags);

	*desc = *sdesc;

	src_ring->per_transfer_context[write_index] = per_transfer_context;

	/* Update Source Ring Write Index */
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

	/* WORKAROUND */
	if (!(flags & CE_SEND_FLAG_GATHER))
		ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);

	src_ring->write_index = write_index;
exit:
	ath10k_pci_sleep(ar);
	return ret;
}
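Callers do not normally use ath10k_ce_send_nolock() directly; the public entry point simply brackets it with ce_lock. A sketch matching the shape of the in-tree wrapper of this era:

int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
		   void *per_transfer_context,
		   u32 buffer,
		   unsigned int nbytes,
		   unsigned int transfer_id,
		   unsigned int flags)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	spin_lock_bh(&ar_pci->ce_lock);
	ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
				    buffer, nbytes, transfer_id, flags);
	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}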