Example 1
int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
		   void *per_transfer_context,
		   u32 buffer,
		   unsigned int nbytes,
		   unsigned int transfer_id,
		   unsigned int flags)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	spin_lock_bh(&ar_pci->ce_lock);
	ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
				    buffer, nbytes, transfer_id, flags);
	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}
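The function above is a thin locking wrapper: it takes the CE spinlock, delegates to the _nolock variant, and releases the lock, a pattern that recurs throughout these examples. Below is a minimal userspace sketch of the same idea, assuming a pthread mutex in place of the kernel spinlock; the dev_state/send names are invented for illustration and are not part of ath10k.

#include <pthread.h>
#include <stdio.h>

/* Hypothetical state guarded by a lock, standing in for ath10k_pci. */
struct dev_state {
	pthread_mutex_t lock;
	int queued;
};

/* "_nolock" variant: the caller must already hold dev->lock. */
static int send_nolock(struct dev_state *dev, int nbytes)
{
	dev->queued += nbytes;
	return 0;
}

/* Public entry point: take the lock, delegate, release the lock. */
static int send(struct dev_state *dev, int nbytes)
{
	int ret;

	pthread_mutex_lock(&dev->lock);
	ret = send_nolock(dev, nbytes);
	pthread_mutex_unlock(&dev->lock);

	return ret;
}

int main(void)
{
	struct dev_state dev = { .lock = PTHREAD_MUTEX_INITIALIZER };

	send(&dev, 64);
	printf("queued bytes: %d\n", dev.queued);
	return 0;
}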
Example 2
static int ath10k_ahb_request_irq_legacy(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
	int ret;

	ret = request_irq(ar_ahb->irq,
			  ath10k_ahb_interrupt_handler,
			  IRQF_SHARED, "ath10k_ahb", ar);
	if (ret) {
		ath10k_warn(ar, "failed to request legacy irq %d: %d\n",
			    ar_ahb->irq, ret);
		return ret;
	}
	ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_LEGACY;

	return 0;
}
Example 3
int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
				  void **per_transfer_contextp,
				  u32 *bufferp,
				  unsigned int *nbytesp,
				  unsigned int *transfer_idp)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	spin_lock_bh(&ar_pci->ce_lock);
	ret = ath10k_ce_completed_send_next_nolock(ce_state,
						   per_transfer_contextp,
						   bufferp, nbytesp,
						   transfer_idp);
	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}
Example 4
static struct ath10k_ce_pipe *ath10k_ce_init_state(struct ath10k *ar,
					     unsigned int ce_id,
					     const struct ce_attr *attr)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
	u32 ctrl_addr = ath10k_ce_base_address(ce_id);

	spin_lock_bh(&ar_pci->ce_lock);

	ce_state->ar = ar;
	ce_state->id = ce_id;
	ce_state->ctrl_addr = ctrl_addr;
	ce_state->attr_flags = attr->flags;
	ce_state->src_sz_max = attr->src_sz_max;

	spin_unlock_bh(&ar_pci->ce_lock);

	return ce_state;
}
Example 5
File: ce.c Project: asmalldev/linux
void ath10k_ce_dump_registers(struct ath10k *ar,
			      struct ath10k_fw_crash_data *crash_data)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_crash_data ce;
	u32 addr, id;

	lockdep_assert_held(&ar->data_lock);

	ath10k_err(ar, "Copy Engine register dump:\n");

	spin_lock_bh(&ar_pci->ce_lock);
	for (id = 0; id < CE_COUNT; id++) {
		addr = ath10k_ce_base_address(ar, id);
		ce.base_addr = cpu_to_le32(addr);

		ce.src_wr_idx =
			cpu_to_le32(ath10k_ce_src_ring_write_index_get(ar, addr));
		ce.src_r_idx =
			cpu_to_le32(ath10k_ce_src_ring_read_index_get(ar, addr));
		ce.dst_wr_idx =
			cpu_to_le32(ath10k_ce_dest_ring_write_index_get(ar, addr));
		ce.dst_r_idx =
			cpu_to_le32(ath10k_ce_dest_ring_read_index_get(ar, addr));

		if (crash_data)
			crash_data->ce_crash_data[id] = ce;

		ath10k_err(ar, "[%02d]: 0x%08x %3u %3u %3u %3u", id,
			   le32_to_cpu(ce.base_addr),
			   le32_to_cpu(ce.src_wr_idx),
			   le32_to_cpu(ce.src_r_idx),
			   le32_to_cpu(ce.dst_wr_idx),
			   le32_to_cpu(ce.dst_r_idx));
	}

	spin_unlock_bh(&ar_pci->ce_lock);
}
Example 6
void ath10k_ce_per_engine_service_any(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ce_id, ret;
	u32 intr_summary;

	ret = ath10k_pci_wake(ar);
	if (ret)
		return;

	intr_summary = CE_INTERRUPT_SUMMARY(ar);

	for (ce_id = 0; intr_summary && (ce_id < ar_pci->ce_count); ce_id++) {
		if (intr_summary & (1 << ce_id))
			intr_summary &= ~(1 << ce_id);
		else
			/* no intr pending on this CE */
			continue;

		ath10k_ce_per_engine_service(ar, ce_id);
	}

	ath10k_pci_sleep(ar);
}
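The loop above scans a bitmask of pending interrupts, clears each bit as the matching engine is serviced, and exits early once the mask is empty. A standalone sketch of that scan follows; CE_COUNT and the summary value are invented for illustration.

#include <stdio.h>

#define CE_COUNT 8

static void service_engine(int ce_id)
{
	printf("servicing CE %d\n", ce_id);
}

int main(void)
{
	/* Pretend engines 1 and 4 have interrupts pending. */
	unsigned int intr_summary = (1u << 1) | (1u << 4);
	int ce_id;

	for (ce_id = 0; intr_summary && ce_id < CE_COUNT; ce_id++) {
		if (!(intr_summary & (1u << ce_id)))
			continue; /* no intr pending on this CE */

		intr_summary &= ~(1u << ce_id);
		service_engine(ce_id);
	}

	return 0;
}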
Example 7
/*
 * Guts of interrupt handler for per-engine interrupts on a particular CE.
 *
 * Invokes registered callbacks for recv_complete,
 * send_complete, and watermarks.
 */
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
	u32 ctrl_addr = ce_state->ctrl_addr;
	int ret;

	ret = ath10k_pci_wake(ar);
	if (ret)
		return;

	spin_lock_bh(&ar_pci->ce_lock);

	/* Clear the copy-complete interrupts that will be handled here. */
	ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
					  HOST_IS_COPY_COMPLETE_MASK);

	spin_unlock_bh(&ar_pci->ce_lock);

	if (ce_state->recv_cb)
		ce_state->recv_cb(ce_state);

	if (ce_state->send_cb)
		ce_state->send_cb(ce_state);

	spin_lock_bh(&ar_pci->ce_lock);

	/*
	 * Misc CE interrupts are not being handled, but still need
	 * to be cleared.
	 */
	ath10k_ce_engine_int_status_clear(ar, ctrl_addr, CE_WATERMARK_MASK);

	spin_unlock_bh(&ar_pci->ce_lock);
	ath10k_pci_sleep(ar);
}
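The handler above invokes only the completion callbacks that have been registered, skipping NULL pointers. A bare-bones sketch of that dispatch, with an invented struct pipe standing in for ath10k_ce_pipe:

#include <stdio.h>

struct pipe {
	void (*recv_cb)(struct pipe *);
	void (*send_cb)(struct pipe *);
};

static void on_recv(struct pipe *p)
{
	printf("recv complete on pipe %p\n", (void *)p);
}

int main(void)
{
	/* Only a receive callback is registered; send_cb stays NULL. */
	struct pipe p = { .recv_cb = on_recv };

	if (p.recv_cb)
		p.recv_cb(&p);
	if (p.send_cb)
		p.send_cb(&p);

	return 0;
}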
Example 8
File: ce.c Project: asmalldev/linux
static int ath10k_ce_init_src_ring(struct ath10k *ar,
				   unsigned int ce_id,
				   const struct ce_attr *attr)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);

	nentries = roundup_pow_of_two(attr->src_nentries);

	memset(src_ring->base_addr_owner_space, 0,
	       nentries * sizeof(struct ce_desc));

	src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
	src_ring->sw_index &= src_ring->nentries_mask;
	src_ring->hw_index = src_ring->sw_index;

	src_ring->write_index =
		ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
	src_ring->write_index &= src_ring->nentries_mask;

	ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
					 src_ring->base_addr_ce_space);
	ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
	ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
	ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "boot init ce src ring id %d entries %d base_addr %pK\n",
		   ce_id, nentries, src_ring->base_addr_owner_space);

	return 0;
}
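Ring sizes are rounded up to a power of two so that indices wrap with a cheap bitwise AND against nentries - 1 rather than a modulo, which is why sw_index and write_index are masked above. A small standalone demonstration of that masking, with a portable stand-in for the kernel's roundup_pow_of_two():

#include <stdio.h>

/* Round v up to the next power of two (assumes v >= 1). */
static unsigned int roundup_pow2(unsigned int v)
{
	unsigned int n = 1;

	while (n < v)
		n <<= 1;

	return n;
}

int main(void)
{
	unsigned int nentries = roundup_pow2(100);	/* -> 128 */
	unsigned int mask = nentries - 1;		/* -> 0x7f */
	unsigned int write_index = 127;

	/* Advancing past the last entry wraps back to 0. */
	write_index = (write_index + 1) & mask;

	printf("nentries=%u mask=0x%x wrapped index=%u\n",
	       nentries, mask, write_index);
	return 0;
}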
Example 9
File: ahb.c Project: Anjali05/linux
static int ath10k_ahb_probe(struct platform_device *pdev)
{
	struct ath10k *ar;
	struct ath10k_ahb *ar_ahb;
	struct ath10k_pci *ar_pci;
	const struct of_device_id *of_id;
	enum ath10k_hw_rev hw_rev;
	size_t size;
	int ret;
	struct ath10k_bus_params bus_params;

	of_id = of_match_device(ath10k_ahb_of_match, &pdev->dev);
	if (!of_id) {
		dev_err(&pdev->dev, "failed to find matching device tree id\n");
		return -EINVAL;
	}

	hw_rev = (enum ath10k_hw_rev)of_id->data;

	size = sizeof(*ar_pci) + sizeof(*ar_ahb);
	ar = ath10k_core_create(size, &pdev->dev, ATH10K_BUS_AHB,
				hw_rev, &ath10k_ahb_hif_ops);
	if (!ar) {
		dev_err(&pdev->dev, "failed to allocate core\n");
		return -ENOMEM;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "ahb probe\n");

	ar_pci = ath10k_pci_priv(ar);
	ar_ahb = ath10k_ahb_priv(ar);

	ar_ahb->pdev = pdev;
	platform_set_drvdata(pdev, ar);

	ret = ath10k_ahb_resource_init(ar);
	if (ret)
		goto err_core_destroy;

	ar->dev_id = 0;
	ar_pci->mem = ar_ahb->mem;
	ar_pci->mem_len = ar_ahb->mem_len;
	ar_pci->ar = ar;
	ar_pci->ce.bus_ops = &ath10k_ahb_bus_ops;
	ar_pci->targ_cpu_to_ce_addr = ath10k_ahb_qca4019_targ_cpu_to_ce_addr;
	ar->ce_priv = &ar_pci->ce;

	ret = ath10k_pci_setup_resource(ar);
	if (ret) {
		ath10k_err(ar, "failed to setup resource: %d\n", ret);
		goto err_resource_deinit;
	}

	ath10k_pci_init_napi(ar);

	ret = ath10k_ahb_request_irq_legacy(ar);
	if (ret)
		goto err_free_pipes;

	ret = ath10k_ahb_prepare_device(ar);
	if (ret)
		goto err_free_irq;

	ath10k_pci_ce_deinit(ar);

	bus_params.dev_type = ATH10K_DEV_TYPE_LL;
	bus_params.chip_id = ath10k_ahb_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
	if (bus_params.chip_id == 0xffffffff) {
		ath10k_err(ar, "failed to get chip id\n");
		ret = -ENODEV;
		goto err_halt_device;
	}

	ret = ath10k_core_register(ar, &bus_params);
	if (ret) {
		ath10k_err(ar, "failed to register driver core: %d\n", ret);
		goto err_halt_device;
	}

	return 0;

err_halt_device:
	ath10k_ahb_halt_chip(ar);
	ath10k_ahb_clock_disable(ar);

err_free_irq:
	ath10k_ahb_release_irq_legacy(ar);

err_free_pipes:
	ath10k_pci_free_pipes(ar);

err_resource_deinit:
	ath10k_ahb_resource_deinit(ar);

err_core_destroy:
	ath10k_core_destroy(ar);
	platform_set_drvdata(pdev, NULL);

	return ret;
}
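The probe function unwinds partial initialization with a ladder of goto labels, each label releasing everything acquired before the failing step, in reverse order of acquisition. A minimal sketch of the idiom follows; step_a, step_b, and undo_a are placeholders, not driver functions.

#include <stdio.h>

static int step_a(void) { return 0; }	/* acquire resource A */
static int step_b(void) { return -1; }	/* acquire resource B; fails here */
static void undo_a(void) { printf("undo A\n"); }

static int probe(void)
{
	int ret;

	ret = step_a();
	if (ret)
		goto err_out;

	ret = step_b();
	if (ret)
		goto err_undo_a;	/* only A needs rolling back */

	return 0;

err_undo_a:
	undo_a();
err_out:
	return ret;
}

int main(void)
{
	printf("probe returned %d\n", probe());
	return 0;
}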
Example 10
static int ath10k_ce_init_dest_ring(struct ath10k *ar,
				    unsigned int ce_id,
				    struct ath10k_ce_pipe *ce_state,
				    const struct ce_attr *attr)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_ring *dest_ring;
	unsigned int nentries = attr->dest_nentries;
	unsigned int ce_nbytes;
	u32 ctrl_addr = ath10k_ce_base_address(ce_id);
	dma_addr_t base_addr;
	char *ptr;

	nentries = roundup_pow_of_two(nentries);

	if (ce_state->dest_ring) {
		WARN_ON(ce_state->dest_ring->nentries != nentries);
		return 0;
	}

	ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
	ptr = kzalloc(ce_nbytes, GFP_KERNEL);
	if (ptr == NULL)
		return -ENOMEM;

	ce_state->dest_ring = (struct ath10k_ce_ring *)ptr;
	dest_ring = ce_state->dest_ring;

	ptr += sizeof(struct ath10k_ce_ring);
	dest_ring->nentries = nentries;
	dest_ring->nentries_mask = nentries - 1;

	dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
	dest_ring->sw_index &= dest_ring->nentries_mask;
	dest_ring->write_index =
		ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
	dest_ring->write_index &= dest_ring->nentries_mask;

	dest_ring->per_transfer_context = (void **)ptr;

	/*
	 * Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	dest_ring->base_addr_owner_space_unaligned =
		pci_alloc_consistent(ar_pci->pdev,
				     (nentries * sizeof(struct ce_desc) +
				      CE_DESC_RING_ALIGN),
				     &base_addr);
	if (!dest_ring->base_addr_owner_space_unaligned) {
		kfree(ce_state->dest_ring);
		ce_state->dest_ring = NULL;
		return -ENOMEM;
	}

	dest_ring->base_addr_ce_space_unaligned = base_addr;

	/*
	 * Initialize memory to 0 to prevent garbage data from
	 * crashing the system during firmware download.
	 */
	memset(dest_ring->base_addr_owner_space_unaligned, 0,
	       nentries * sizeof(struct ce_desc) + CE_DESC_RING_ALIGN);

	dest_ring->base_addr_owner_space = PTR_ALIGN(
			dest_ring->base_addr_owner_space_unaligned,
			CE_DESC_RING_ALIGN);
	dest_ring->base_addr_ce_space = ALIGN(
			dest_ring->base_addr_ce_space_unaligned,
			CE_DESC_RING_ALIGN);

	ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
					  dest_ring->base_addr_ce_space);
	ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
	ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);

	ath10k_dbg(ATH10K_DBG_BOOT,
		   "boot ce dest ring id %d entries %d base_addr %p\n",
		   ce_id, nentries, dest_ring->base_addr_owner_space);

	return 0;
}
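PTR_ALIGN and ALIGN above round the CPU and DMA addresses up to the next CE_DESC_RING_ALIGN boundary, and the allocation is padded by that alignment so the rounded pointer still has room for every descriptor. A standalone sketch of the arithmetic, using malloc and an example alignment of 8 bytes in place of the kernel helpers:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Round p up to the next multiple of align; align must be a power of
 * two. Mirrors the arithmetic behind the kernel's PTR_ALIGN(). */
static void *ptr_align(void *p, uintptr_t align)
{
	return (void *)(((uintptr_t)p + align - 1) & ~(align - 1));
}

int main(void)
{
	const uintptr_t align = 8;	/* example value, not CE_DESC_RING_ALIGN */
	size_t size = 1024;
	char *raw, *aligned;

	/* Over-allocate by the alignment so the aligned pointer still
	 * has the full usable size behind it. */
	raw = malloc(size + align);
	if (!raw)
		return 1;

	aligned = ptr_align(raw, align);
	printf("raw=%p aligned=%p\n", (void *)raw, (void *)aligned);

	free(raw);
	return 0;
}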
Example 11
static int ath10k_ce_init_src_ring(struct ath10k *ar,
				   unsigned int ce_id,
				   struct ath10k_ce_pipe *ce_state,
				   const struct ce_attr *attr)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_ring *src_ring;
	unsigned int nentries = attr->src_nentries;
	unsigned int ce_nbytes;
	u32 ctrl_addr = ath10k_ce_base_address(ce_id);
	dma_addr_t base_addr;
	char *ptr;

	nentries = roundup_pow_of_two(nentries);

	if (ce_state->src_ring) {
		WARN_ON(ce_state->src_ring->nentries != nentries);
		return 0;
	}

	ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
	ptr = kzalloc(ce_nbytes, GFP_KERNEL);
	if (ptr == NULL)
		return -ENOMEM;

	ce_state->src_ring = (struct ath10k_ce_ring *)ptr;
	src_ring = ce_state->src_ring;

	ptr += sizeof(struct ath10k_ce_ring);
	src_ring->nentries = nentries;
	src_ring->nentries_mask = nentries - 1;

	src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
	src_ring->sw_index &= src_ring->nentries_mask;
	src_ring->hw_index = src_ring->sw_index;

	src_ring->write_index =
		ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
	src_ring->write_index &= src_ring->nentries_mask;

	src_ring->per_transfer_context = (void **)ptr;

	/*
	 * Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	src_ring->base_addr_owner_space_unaligned =
		pci_alloc_consistent(ar_pci->pdev,
				     (nentries * sizeof(struct ce_desc) +
				      CE_DESC_RING_ALIGN),
				     &base_addr);
	if (!src_ring->base_addr_owner_space_unaligned) {
		kfree(ce_state->src_ring);
		ce_state->src_ring = NULL;
		return -ENOMEM;
	}

	src_ring->base_addr_ce_space_unaligned = base_addr;

	src_ring->base_addr_owner_space = PTR_ALIGN(
			src_ring->base_addr_owner_space_unaligned,
			CE_DESC_RING_ALIGN);
	src_ring->base_addr_ce_space = ALIGN(
			src_ring->base_addr_ce_space_unaligned,
			CE_DESC_RING_ALIGN);

	/*
	 * Also allocate a shadow src ring in regular
	 * mem to use for faster access.
	 */
	src_ring->shadow_base_unaligned =
		kmalloc((nentries * sizeof(struct ce_desc) +
			 CE_DESC_RING_ALIGN), GFP_KERNEL);
	if (!src_ring->shadow_base_unaligned) {
		pci_free_consistent(ar_pci->pdev,
				    (nentries * sizeof(struct ce_desc) +
				     CE_DESC_RING_ALIGN),
				    src_ring->base_addr_owner_space,
				    src_ring->base_addr_ce_space);
		kfree(ce_state->src_ring);
		ce_state->src_ring = NULL;
		return -ENOMEM;
	}

	src_ring->shadow_base = PTR_ALIGN(
			src_ring->shadow_base_unaligned,
			CE_DESC_RING_ALIGN);

	ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
					 src_ring->base_addr_ce_space);
	ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
	ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
	ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);

	ath10k_dbg(ATH10K_DBG_BOOT,
		   "boot ce src ring id %d entries %d base_addr %p\n",
		   ce_id, nentries, src_ring->base_addr_owner_space);

	return 0;
}