Example #1
File: rx.c Project: andy-shev/linux
/*
 * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
 *
 * Called when an RBD can be reused. The RBD is transferred to the allocator.
 * When there are 2 empty RBDs, a request for allocation is posted.
 */
static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
				  struct iwl_rx_mem_buffer *rxb,
				  struct iwl_rxq *rxq, bool emergency)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;

	/* Move the RBD to the used list; it will be moved to the allocator in
	 * batches before claiming or posting a request. */
	list_add_tail(&rxb->list, &rxq->rx_used);

	if (unlikely(emergency))
		return;

	/* Count the allocator owned RBDs */
	rxq->used_count++;

	/* If we have RX_POST_REQ_ALLOC newly released rx buffers,
	 * issue a request to the allocator. The modulo on RX_CLAIM_REQ_ALLOC
	 * covers the case where we failed to claim RX_CLAIM_REQ_ALLOC buffers
	 * but still need to post another request.
	 */
	if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
		/* Move the 2 RBDs to the allocator's ownership.
		 * The allocator has another 6 from the pool to complete the request. */
		spin_lock(&rba->lock);
		list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
		spin_unlock(&rba->lock);

		atomic_inc(&rba->req_pending);
		queue_work(rba->alloc_wq, &rba->rx_alloc);
	}
}
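The trigger above is plain counter arithmetic: a request is posted whenever used_count modulo RX_CLAIM_REQ_ALLOC equals RX_POST_REQ_ALLOC, so even if a previous batch was never claimed, the next request still fires. A minimal standalone sketch of that trigger follows; the values 2 and 8 are assumptions derived from the comments above ("2 empty RBDs" posted, "another 6 from pool"), not taken from the driver's headers.

#include <stdio.h>

/* Assumed values, derived from the comments in the example above
 * ("2 empty RBDs" trigger a request, "another 6 from pool" = a batch of 8). */
#define RX_POST_REQ_ALLOC	2
#define RX_CLAIM_REQ_ALLOC	8

int main(void)
{
	unsigned int used_count;

	/* Walk used_count the way iwl_pcie_rx_reuse_rbd() increments it and
	 * show where an allocation request would be posted. */
	for (used_count = 1; used_count <= 20; used_count++) {
		if ((used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC)
			printf("used_count=%2u -> post allocation request\n",
			       used_count);
	}
	return 0;
}

With these values the request fires at used_count 2, 10, 18, ...: if a full batch of 8 is never claimed, the counter simply keeps growing and the next request is still posted.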
Example #2
File: virt-dma.c Project: 03199618/linux
/*
 * This tasklet handles the completion of a DMA descriptor by
 * calling its callback and freeing it.
 */
static void vchan_complete(unsigned long arg)
{
	struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
	struct virt_dma_desc *vd;
	dma_async_tx_callback cb = NULL;
	void *cb_data = NULL;
	LIST_HEAD(head);

	spin_lock_irq(&vc->lock);
	list_splice_tail_init(&vc->desc_completed, &head);
	vd = vc->cyclic;
	if (vd) {
		vc->cyclic = NULL;
		cb = vd->tx.callback;
		cb_data = vd->tx.callback_param;
	}
	spin_unlock_irq(&vc->lock);

	if (cb)
		cb(cb_data);

	while (!list_empty(&head)) {
		vd = list_first_entry(&head, struct virt_dma_desc, node);
		cb = vd->tx.callback;
		cb_data = vd->tx.callback_param;

		list_del(&vd->node);

		vc->desc_free(vd);

		if (cb)
			cb(cb_data);
	}
}
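vchan_complete() shows a common locking pattern: hold the channel lock only long enough to splice the completed descriptors onto a private list, then run the potentially slow callbacks with the lock released. A compact userspace sketch of the same pattern, using pthreads and a hand-rolled singly-linked list (all names here are illustrative, not the virt-dma API):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct desc {
	struct desc *next;
	void (*callback)(void *param);
	void *param;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct desc *completed;		/* shared list, protected by 'lock' */

static void say_done(void *param)
{
	printf("descriptor %ld done\n", (long)param);
}

/* Producer side: mark a descriptor completed (in the kernel this would run
 * in interrupt context). */
static void complete_desc(struct desc *d)
{
	pthread_mutex_lock(&lock);
	d->next = completed;
	completed = d;
	pthread_mutex_unlock(&lock);
}

/* Consumer side: detach the whole list under the lock, then invoke the
 * callbacks and free the descriptors with the lock released, mirroring what
 * vchan_complete() does with list_splice_tail_init(). */
static void run_completions(void)
{
	struct desc *head, *d;

	pthread_mutex_lock(&lock);
	head = completed;
	completed = NULL;
	pthread_mutex_unlock(&lock);

	while ((d = head) != NULL) {
		head = d->next;
		if (d->callback)
			d->callback(d->param);
		free(d);
	}
}

int main(void)
{
	for (long i = 0; i < 3; i++) {
		struct desc *d = calloc(1, sizeof(*d));
		d->callback = say_done;
		d->param = (void *)i;
		complete_desc(d);
	}
	run_completions();
	return 0;
}

Unlike the kernel code, which preserves completion order by splicing to the tail of the private list, this sketch pushes onto a stack for brevity, so the callbacks run in reverse order.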
Example #3
static void
bfa_ioim_sgpg_alloced(void *cbarg)
{
	struct bfa_ioim_s *ioim = cbarg;

	ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
	list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q);
	bfa_ioim_sgpg_setup(ioim);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED);
}
Example #4
static void g2d_free_runqueue_node(struct g2d_data *g2d,
				   struct g2d_runqueue_node *runqueue_node)
{
	if (!runqueue_node)
		return;

	mutex_lock(&g2d->cmdlist_mutex);
	list_splice_tail_init(&runqueue_node->run_cmdlist, &g2d->free_cmdlist);
	mutex_unlock(&g2d->cmdlist_mutex);

	kmem_cache_free(g2d->runqueue_slab, runqueue_node);
}
Example #5
static void iio_dma_buffer_cleanup_worker(struct work_struct *work)
{
	struct iio_dma_buffer_block *block, *_block;
	LIST_HEAD(block_list);

	spin_lock_irq(&iio_dma_buffer_dead_blocks_lock);
	list_splice_tail_init(&iio_dma_buffer_dead_blocks, &block_list);
	spin_unlock_irq(&iio_dma_buffer_dead_blocks_lock);

	list_for_each_entry_safe(block, _block, &block_list, head)
		iio_buffer_block_release(&block->kref);
}
Example #6
static void g2d_free_runqueue_node(struct g2d_data *g2d,
				   struct g2d_runqueue_node *runqueue_node)
{
	struct g2d_cmdlist_node *node;

	if (!runqueue_node)
		return;

	mutex_lock(&g2d->cmdlist_mutex);
	/*
	 * commands in run_cmdlist have been completed so unmap all gem
	 * objects in each command node so that they are unreferenced.
	 */
	list_for_each_entry(node, &runqueue_node->run_cmdlist, list)
		g2d_unmap_cmdlist_gem(g2d, node);
	list_splice_tail_init(&runqueue_node->run_cmdlist, &g2d->free_cmdlist);
	mutex_unlock(&g2d->cmdlist_mutex);

	kmem_cache_free(g2d->runqueue_slab, runqueue_node);
}
Example #7
static enum htc_send_queue_result htc_try_send(struct htc_target *target,
					       struct htc_endpoint *ep,
					       struct list_head *txq)
{
	struct list_head send_queue;	/* temp queue to hold packets */
	struct htc_packet *packet, *tmp_pkt;
	struct ath6kl *ar = target->dev->ar;
	enum htc_send_full_action action;
	int tx_resources, overflow, txqueue_depth, i, good_pkts;
	u8 pipeid;

	ath6kl_dbg(ATH6KL_DBG_HTC, "%s: (queue:0x%p depth:%d)\n",
		   __func__, txq,
		   (txq == NULL) ? 0 : get_queue_depth(txq));

	/* init the local send queue */
	INIT_LIST_HEAD(&send_queue);

	/*
	 * A NULL txq means the caller didn't provide a queue and
	 * just wants us to check the queues and send.
	 */
	if (txq != NULL) {
		if (list_empty(txq)) {
			/* empty queue */
			return HTC_SEND_QUEUE_DROP;
		}

		spin_lock_bh(&target->tx_lock);
		txqueue_depth = get_queue_depth(&ep->txq);
		spin_unlock_bh(&target->tx_lock);

		if (txqueue_depth >= ep->max_txq_depth) {
			/* we've already overflowed */
			overflow = get_queue_depth(txq);
		} else {
			/* get how much we will overflow by */
			overflow = txqueue_depth;
			overflow += get_queue_depth(txq);
			/* get how much we will overflow the TX queue by */
			overflow -= ep->max_txq_depth;
		}

		/* if overflow is negative or zero, we are okay */
		if (overflow > 0) {
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "%s: Endpoint %d, TX queue will overflow :%d, Tx Depth:%d, Max:%d\n",
				   __func__, ep->eid, overflow, txqueue_depth,
				   ep->max_txq_depth);
		}
		if ((overflow <= 0) ||
		    (ep->ep_cb.tx_full == NULL)) {
			/*
			 * all packets will fit or caller did not provide send
			 * full indication handler -- just move all of them
			 * to the local send_queue object
			 */
			list_splice_tail_init(txq, &send_queue);
		} else {
			good_pkts = get_queue_depth(txq) - overflow;
			if (good_pkts < 0) {
				WARN_ON_ONCE(1);
				return HTC_SEND_QUEUE_DROP;
			}

			/* we have overflowed, and a callback is provided */
			/* dequeue all non-overflow packets to the sendqueue */
			for (i = 0; i < good_pkts; i++) {
				/* pop off caller's queue */
				packet = list_first_entry(txq,
							  struct htc_packet,
							  list);
				list_del(&packet->list);
				/* insert into local queue */
				list_add_tail(&packet->list, &send_queue);
			}

			/*
			 * The caller's queue now holds only the packets that
			 * won't fit; walk through it and indicate each one to
			 * the send-full handler.
			 */
			list_for_each_entry_safe(packet, tmp_pkt,
						 txq, list) {

				ath6kl_dbg(ATH6KL_DBG_HTC,
					   "%s: Indicat overflowed TX pkts: %p\n",
					   __func__, packet);
				action = ep->ep_cb.tx_full(ep->target, packet);
				if (action == HTC_SEND_FULL_DROP) {
					/* callback wants the packet dropped */
					ep->ep_st.tx_dropped += 1;

					/* leave this one in the caller's queue
					 * for cleanup */
				} else {
					/* callback wants to keep this packet,
					 * remove from caller's queue */
					list_del(&packet->list);
					/* put it in the send queue */
					list_add_tail(&packet->list,
						      &send_queue);
				}

			}

			if (list_empty(&send_queue)) {
				/* no packets made it in, caller will cleanup */
				return HTC_SEND_QUEUE_DROP;
			}
		}
	}
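The overflow arithmetic in the snippet above is easy to check by hand: in the non-overflowed branch, with txqueue_depth packets already queued on the endpoint and the caller offering get_queue_depth(txq) more, overflow = txqueue_depth + get_queue_depth(txq) - max_txq_depth, and only good_pkts = get_queue_depth(txq) - overflow packets are moved straight to send_queue. A trivial standalone check of that arithmetic (the numbers are invented for illustration):

#include <stdio.h>

int main(void)
{
	int max_txq_depth = 32;		/* assumed endpoint limit */
	int txqueue_depth = 28;		/* already queued on the endpoint */
	int caller_pkts = 10;		/* offered by the caller */

	/* Same computation as the non-overflowed branch above. */
	int overflow = txqueue_depth + caller_pkts - max_txq_depth;
	int good_pkts = caller_pkts - overflow;

	printf("overflow=%d good_pkts=%d\n", overflow, good_pkts);
	/* overflow=6: 6 packets go through the tx_full callback;
	 * good_pkts=4: 4 packets are moved straight to send_queue. */
	return 0;
}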
Example #8
void
bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q)
{
	INIT_LIST_HEAD(comp_q);
	list_splice_tail_init(&bfa->comp_q, comp_q);
}
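bfa_comp_deq() is the simplest usage in this collection: the destination list is initialized and then receives every pending completion, while the source list is left empty but still valid. A minimal userspace model of list_splice_tail_init()'s semantics, using a hand-rolled circular doubly-linked list in place of <linux/list.h> (the struct and helper names are hypothetical):

#include <stdio.h>

/* Minimal circular doubly-linked list, modeled on <linux/list.h>. */
struct node { struct node *prev, *next; int val; };

static void list_init(struct node *head)
{
	head->prev = head->next = head;
}

static void list_add_tail_node(struct node *head, struct node *n)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

/* Append every entry of 'src' to the tail of 'dst', then reinitialize 'src'
 * so it is a valid empty list again - the behaviour of
 * list_splice_tail_init(). */
static void splice_tail_init(struct node *src, struct node *dst)
{
	if (src->next == src)		/* source empty: nothing to do */
		return;
	src->next->prev = dst->prev;	/* first source entry follows dst tail */
	dst->prev->next = src->next;
	src->prev->next = dst;		/* last source entry becomes new dst tail */
	dst->prev = src->prev;
	list_init(src);			/* source is empty but still usable */
}

int main(void)
{
	struct node heads[2], items[4];
	int i;

	list_init(&heads[0]);
	list_init(&heads[1]);
	for (i = 0; i < 4; i++) {
		items[i].val = i;
		list_add_tail_node(i < 2 ? &heads[0] : &heads[1], &items[i]);
	}

	splice_tail_init(&heads[1], &heads[0]);	/* heads[0] now holds 0,1,2,3 */

	for (struct node *n = heads[0].next; n != &heads[0]; n = n->next)
		printf("%d ", n->val);
	printf("\nsource empty: %s\n", heads[1].next == &heads[1] ? "yes" : "no");
	return 0;
}

Compiled and run, this prints 0 1 2 3 and reports the source list as empty, which is the guarantee the kernel helper gives: entries are appended in order at the destination's tail and the donor head is reinitialized so it can be reused immediately.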
Example #9
File: rx.c Project: andy-shev/linux
static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
				struct iwl_rx_mem_buffer *rxb,
				bool emergency)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	bool page_stolen = false;
	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
	u32 offset = 0;

	if (WARN_ON(!rxb))
		return;

	dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);

	while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
		struct iwl_rx_packet *pkt;
		u16 sequence;
		bool reclaim;
		int index, cmd_index, len;
		struct iwl_rx_cmd_buffer rxcb = {
			._offset = offset,
			._rx_page_order = trans_pcie->rx_page_order,
			._page = rxb->page,
			._page_stolen = false,
			.truesize = max_len,
		};

		pkt = rxb_addr(&rxcb);

		if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID))
			break;

		IWL_DEBUG_RX(trans,
			     "cmd at offset %d: %s (0x%.2x, seq 0x%x)\n",
			     rxcb._offset,
			     iwl_get_cmd_string(trans,
						iwl_cmd_id(pkt->hdr.cmd,
							   pkt->hdr.group_id,
							   0)),
			     pkt->hdr.cmd, le16_to_cpu(pkt->hdr.sequence));

		len = iwl_rx_packet_len(pkt);
		len += sizeof(u32); /* account for status word */
		trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
		trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);

		/* Reclaim a command buffer only if this packet is a response
		 *   to a (driver-originated) command.
		 * If the packet (e.g. Rx frame) originated from uCode,
		 *   there is no command buffer to reclaim.
		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
		 *   but apparently a few don't get set; catch them here. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
		if (reclaim) {
			int i;

			for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
				if (trans_pcie->no_reclaim_cmds[i] ==
							pkt->hdr.cmd) {
					reclaim = false;
					break;
				}
			}
		}

		sequence = le16_to_cpu(pkt->hdr.sequence);
		index = SEQ_TO_INDEX(sequence);
		cmd_index = get_cmd_index(&txq->q, index);

		iwl_op_mode_rx(trans->op_mode, &trans_pcie->napi, &rxcb);

		if (reclaim) {
			kzfree(txq->entries[cmd_index].free_buf);
			txq->entries[cmd_index].free_buf = NULL;
		}

		/*
		 * After here, we should always check rxcb._page_stolen,
		 * if it is true then one of the handlers took the page.
		 */

		if (reclaim) {
			/* Invoke any callbacks, transfer the buffer to caller,
			 * and fire off the (possibly) blocking
			 * iwl_trans_send_cmd()
			 * as we reclaim the driver command queue */
			if (!rxcb._page_stolen)
				iwl_pcie_hcmd_complete(trans, &rxcb);
			else
				IWL_WARN(trans, "Claim null rxb?\n");
		}

		page_stolen |= rxcb._page_stolen;
		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
	}

	/* page was stolen from us -- free our reference */
	if (page_stolen) {
		__free_pages(rxb->page, trans_pcie->rx_page_order);
		rxb->page = NULL;
	}

	/* Reuse the page if possible. For notification packets and
	 * SKBs that fail to Rx correctly, add them back into the
	 * rx_free list for reuse later. */
	if (rxb->page != NULL) {
		rxb->page_dma =
			dma_map_page(trans->dev, rxb->page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			/*
			 * free the page(s) as well to not break
			 * the invariant that the items on the used
			 * list have no page(s)
			 */
			__free_pages(rxb->page, trans_pcie->rx_page_order);
			rxb->page = NULL;
			iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
		} else {
			list_add_tail(&rxb->list, &rxq->rx_free);
			rxq->free_count++;
		}
	} else
		iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
}

/*
 * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
 */
static void iwl_pcie_rx_handle(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	u32 r, i, j, count = 0;
	bool emergency = false;

restart:
	spin_lock(&rxq->lock);
	/* uCode's read index (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode). */
	r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
	i = rxq->read;

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);

	while (i != r) {
		struct iwl_rx_mem_buffer *rxb;

		if (unlikely(rxq->used_count == RX_QUEUE_SIZE / 2))
			emergency = true;

		rxb = rxq->queue[i];
		rxq->queue[i] = NULL;

		IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d\n", r, i);
		iwl_pcie_rx_handle_rb(trans, rxb, emergency);

		i = (i + 1) & RX_QUEUE_MASK;

		/* If we have RX_CLAIM_REQ_ALLOC released rx buffers -
		 * try to claim the pre-allocated buffers from the allocator */
		if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) {
			struct iwl_rb_allocator *rba = &trans_pcie->rba;
			struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC];

			if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 &&
			    !emergency) {
				/* Add the remaining 6 empty RBDs
				 * for allocator use.
				 */
				spin_lock(&rba->lock);
				list_splice_tail_init(&rxq->rx_used,
						      &rba->rbd_empty);
				spin_unlock(&rba->lock);
			}

			/* If not ready - continue, will try to reclaim later.
			 * No need to reschedule work - allocator exits only on
			 * success */
			if (!iwl_pcie_rx_allocator_get(trans, out)) {
				/* If success - then RX_CLAIM_REQ_ALLOC
				 * buffers were retrieved and should be added
				 * to free list */
				rxq->used_count -= RX_CLAIM_REQ_ALLOC;
				for (j = 0; j < RX_CLAIM_REQ_ALLOC; j++) {
					list_add_tail(&out[j]->list,
						      &rxq->rx_free);
					rxq->free_count++;
				}
			}
		}
		if (emergency) {
			count++;
			if (count == 8) {
				count = 0;
				if (rxq->used_count < RX_QUEUE_SIZE / 3)
					emergency = false;
				spin_unlock(&rxq->lock);
				iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);
				spin_lock(&rxq->lock);
			}
		}
		/* Handle restock for three cases, which can all apply at once:
		 * - we just pulled buffers from the allocator
		 * - we have 8+ unstolen pages accumulated
		 * - we are in emergency and allocated buffers
		 */
		if (rxq->free_count >=  RX_CLAIM_REQ_ALLOC) {
			rxq->read = i;
			spin_unlock(&rxq->lock);
			iwl_pcie_rxq_restock(trans);
			goto restart;
		}
	}

	/* Backtrack one entry */
	rxq->read = i;
	spin_unlock(&rxq->lock);

	/*
	 * Handle the case where in emergency there are some unallocated RBDs.
	 * Those RBDs are in the used list, but are not tracked by the queue's
	 * used_count, which only counts allocator-owned RBDs.
	 * Unallocated emergency RBDs must be allocated on exit; otherwise,
	 * when this function is called again it may not be in emergency mode
	 * and they will be handed to the allocator with no tracking in the RBD
	 * allocator counters, which will lead to them never being claimed back
	 * by the queue.
	 * By allocating them here, they are now in the queue's free list and
	 * will be restocked by the next call of iwl_pcie_rxq_restock.
	 */
	if (unlikely(emergency && count))
		iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);

	if (trans_pcie->napi.poll)
		napi_gro_flush(&trans_pcie->napi, false);
}
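iwl_pcie_rx_handle() walks the receive queue as a power-of-two ring: the read index is advanced with i = (i + 1) & RX_QUEUE_MASK, so it wraps back to 0 instead of running past the end. A tiny standalone illustration of that indexing (the ring size of 256 is an assumption for the sketch, not taken from the driver's headers):

#include <stdio.h>

#define RX_QUEUE_SIZE	256			/* assumed power-of-two ring size */
#define RX_QUEUE_MASK	(RX_QUEUE_SIZE - 1)	/* 0xFF: cheap modulo for the ring */

int main(void)
{
	unsigned int i = RX_QUEUE_SIZE - 2;	/* start near the end of the ring */
	int step;

	for (step = 0; step < 4; step++) {
		printf("index %u\n", i);
		i = (i + 1) & RX_QUEUE_MASK;	/* prints 254, 255, 0, 1: wraps around */
	}
	return 0;
}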

/*
 * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
 */
static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	/* W/A for WiFi/WiMAX coex when WiMAX owns the RF */
	if (trans->cfg->internal_wimax_coex &&
	    !trans->cfg->apmg_not_supported &&
	    (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
			     APMS_CLK_VAL_MRB_FUNC_MODE) ||
	     (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
			    APMG_PS_CTRL_VAL_RESET_REQ))) {
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		iwl_op_mode_wimax_active(trans->op_mode);
		wake_up(&trans_pcie->wait_command_queue);
		return;
	}

	iwl_pcie_dump_csr(trans);
	iwl_dump_fh(trans, NULL);

	local_bh_disable();
	/* The STATUS_FW_ERROR bit is set in this function. This must happen
	 * before we wake up the command caller, to ensure a proper cleanup. */
	iwl_trans_fw_error(trans);
	local_bh_enable();

	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
		del_timer(&trans_pcie->txq[i].stuck_timer);

	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
	wake_up(&trans_pcie->wait_command_queue);
}

static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
{
	u32 inta;

	lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);

	trace_iwlwifi_dev_irq(trans->dev);

	/* Discover which interrupts are active/pending */
	inta = iwl_read32(trans, CSR_INT);

	/* the thread will service interrupts and re-enable them */
	return inta;
}
Example #10
File: rx.c Project: andy-shev/linux
/*
 * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
 *
 * Allocates 8 pages for each received request.
 * Called as a scheduled work item.
 */
static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	struct list_head local_empty;
	int pending = atomic_xchg(&rba->req_pending, 0);

	IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending);

	/* If we were scheduled - there is at least one request */
	spin_lock(&rba->lock);
	/* swap out the rba->rbd_empty to a local list */
	list_replace_init(&rba->rbd_empty, &local_empty);
	spin_unlock(&rba->lock);

	while (pending) {
		int i;
		struct list_head local_allocated;

		INIT_LIST_HEAD(&local_allocated);

		for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
			struct iwl_rx_mem_buffer *rxb;
			struct page *page;

			/* The list should never be empty - each reused RBD is
			 * returned to the list, and the initial pool covers any
			 * possible gap between the time the page is allocated
			 * and the time the RBD is added.
			 */
			BUG_ON(list_empty(&local_empty));
			/* Get the first rxb from the rbd list */
			rxb = list_first_entry(&local_empty,
					       struct iwl_rx_mem_buffer, list);
			BUG_ON(rxb->page);

			/* Alloc a new receive buffer */
			page = iwl_pcie_rx_alloc_page(trans, GFP_KERNEL);
			if (!page)
				continue;
			rxb->page = page;

			/* Get physical address of the RB */
			rxb->page_dma = dma_map_page(trans->dev, page, 0,
					PAGE_SIZE << trans_pcie->rx_page_order,
					DMA_FROM_DEVICE);
			if (dma_mapping_error(trans->dev, rxb->page_dma)) {
				rxb->page = NULL;
				__free_pages(page, trans_pcie->rx_page_order);
				continue;
			}
			/* dma address must be no more than 36 bits */
			BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
			/* and also 256 byte aligned! */
			BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

			/* move the allocated entry to the out list */
			list_move(&rxb->list, &local_allocated);
			i++;
		}

		pending--;
		if (!pending) {
			pending = atomic_xchg(&rba->req_pending, 0);
			IWL_DEBUG_RX(trans,
				     "Pending allocation requests = %d\n",
				     pending);
		}

		spin_lock(&rba->lock);
		/* add the allocated rbds to the allocator allocated list */
		list_splice_tail(&local_allocated, &rba->rbd_allocated);
		/* get more empty RBDs for current pending requests */
		list_splice_tail_init(&rba->rbd_empty, &local_empty);
		spin_unlock(&rba->lock);

		atomic_inc(&rba->req_ready);
	}

	spin_lock(&rba->lock);
	/* return unused rbds to the allocator empty list */
	list_splice_tail(&local_empty, &rba->rbd_empty);
	spin_unlock(&rba->lock);
}
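The two BUG_ON() checks in the allocator encode the hardware's addressing rules as bit masks: page_dma & ~DMA_BIT_MASK(36) is non-zero when the address does not fit in 36 bits, and page_dma & DMA_BIT_MASK(8) is non-zero when the low 8 bits are not all zero, i.e. the address is not 256-byte aligned. A small userspace sketch of the same checks; DMA_BIT_MASK() is re-derived here the way the kernel defines it, and the sample addresses are invented:

#include <stdio.h>
#include <stdint.h>

/* Same definition as the kernel's DMA_BIT_MASK(). */
#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

static void check(uint64_t dma)
{
	int fits_36bit  = !(dma & ~DMA_BIT_MASK(36));	/* no bits above bit 35 */
	int aligned_256 = !(dma & DMA_BIT_MASK(8));	/* low 8 bits are zero */

	printf("0x%012llx: fits-36-bit=%d 256B-aligned=%d\n",
	       (unsigned long long)dma, fits_36bit, aligned_256);
}

int main(void)
{
	check(0x0012345600ULL);		/* fits and aligned: both checks pass */
	check(0x0012345678ULL);		/* low byte 0x78: not 256-byte aligned */
	check(0x1000000100ULL);		/* bit 36 set: does not fit in 36 bits */
	return 0;
}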