Example #1
static void ixgbe_clean_xdp_tx_buffer(struct ixgbe_ring *tx_ring,
				      struct ixgbe_tx_buffer *tx_bi)
{
	xdp_return_frame(tx_bi->xdpf);
	dma_unmap_single(tx_ring->dev,
			 dma_unmap_addr(tx_bi, dma),
			 dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
	dma_unmap_len_set(tx_bi, len, 0);
}
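
Note: the dma_unmap_addr()/dma_unmap_len() accessors used above (and in most of the examples below) only turn into real loads and stores when the buffer structure declares its DMA state with the matching macros. A minimal, hypothetical sketch of that pattern follows (example_tx_buffer and example_record_mapping are illustration names, not part of the ixgbe driver):

#include <linux/dma-mapping.h>

struct example_tx_buffer {
	DEFINE_DMA_UNMAP_ADDR(dma);	/* dma_addr_t field when map state is needed */
	DEFINE_DMA_UNMAP_LEN(len);	/* length field when map state is needed */
};

static void example_record_mapping(struct example_tx_buffer *tx_bi,
				   dma_addr_t mapping, unsigned int size)
{
	/* Remember the mapping so a cleanup helper such as
	 * ixgbe_clean_xdp_tx_buffer() can later hand it back to
	 * dma_unmap_single().
	 */
	dma_unmap_addr_set(tx_bi, dma, mapping);
	dma_unmap_len_set(tx_bi, len, size);
}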
Example #2
/**
 * arc_emac_tx - Starts the data transmission.
 * @skb:	sk_buff pointer that contains data to be transmitted.
 * @ndev:	Pointer to net_device structure.
 *
 * returns: NETDEV_TX_OK on success,
 *		NETDEV_TX_BUSY if any of the descriptors are not free.
 *
 * This function is invoked from upper layers to initiate transmission.
 */
static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	unsigned int len, *txbd_curr = &priv->txbd_curr;
	struct net_device_stats *stats = &priv->stats;
	__le32 *info = &priv->txbd[*txbd_curr].info;
	dma_addr_t addr;

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	len = max_t(unsigned int, ETH_ZLEN, skb->len);

	/* EMAC still holds this buffer in its possession.
	 * CPU must not modify this buffer descriptor
	 */
	if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC)) {
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	addr = dma_map_single(&ndev->dev, (void *)skb->data, len,
			      DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&ndev->dev, addr))) {
		stats->tx_dropped++;
		stats->tx_errors++;
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	dma_unmap_addr_set(&priv->tx_buff[*txbd_curr], addr, addr);
	dma_unmap_len_set(&priv->tx_buff[*txbd_curr], len, len);

	priv->tx_buff[*txbd_curr].skb = skb;
	priv->txbd[*txbd_curr].data = cpu_to_le32(addr);

	/* Make sure pointer to data buffer is set */
	wmb();

	skb_tx_timestamp(skb);

	*info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len);

	/* Increment index to point to the next BD */
	*txbd_curr = (*txbd_curr + 1) % TX_BD_NUM;

	/* Get "info" of the next BD */
	info = &priv->txbd[*txbd_curr].info;

	/* Check if Tx BD ring is full - next BD is still owned by EMAC */
	if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC))
		netif_stop_queue(ndev);

	arc_reg_set(priv, R_STATUS, TXPL_MASK);

	return NETDEV_TX_OK;
}
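
The mapping created above is undone on the completion side. A simplified sketch of what that Tx-clean step looks like for this ring layout (illustrative only; the driver's real routine also updates statistics and wakes the queue):

static void example_emac_tx_clean(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	unsigned int i;

	for (i = 0; i < TX_BD_NUM; i++) {
		unsigned int *txbd_dirty = &priv->txbd_dirty;
		struct arc_emac_bd *txbd = &priv->txbd[*txbd_dirty];
		struct buffer_state *tx_buff = &priv->tx_buff[*txbd_dirty];
		struct sk_buff *skb = tx_buff->skb;
		unsigned int info = le32_to_cpu(txbd->info);

		/* Stop at the first BD still owned by the EMAC */
		if ((info & OWN_MASK) == FOR_EMAC || !skb)
			break;

		/* Undo the dma_map_single() done in arc_emac_tx() */
		dma_unmap_single(&ndev->dev, dma_unmap_addr(tx_buff, addr),
				 dma_unmap_len(tx_buff, len), DMA_TO_DEVICE);

		/* Return the sk_buff and recycle the descriptor */
		dev_consume_skb_any(skb);
		tx_buff->skb = NULL;
		txbd->data = 0;
		txbd->info = 0;

		*txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM;
	}
}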
Example #3
void fm10k_unmap_and_free_tx_resource(struct fm10k_ring *ring,
				      struct fm10k_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}
	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}
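
A typical caller walks the whole ring and hands every slot to this helper, for example when the ring is being torn down. A minimal sketch, assuming the ring exposes the tx_buffer array and count fields as in the fm10k driver (example_clean_tx_ring is an illustration name):

static void example_clean_tx_ring(struct fm10k_ring *tx_ring)
{
	u16 i;

	/* Nothing to do if the buffer array was never allocated */
	if (!tx_ring->tx_buffer)
		return;

	/* Release every outstanding skb/page mapping in the ring */
	for (i = 0; i < tx_ring->count; i++)
		fm10k_unmap_and_free_tx_resource(tx_ring,
						 &tx_ring->tx_buffer[i]);
}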
Example #4
/**
 * arc_emac_open - Open the network device.
 * @ndev:	Pointer to the network device.
 *
 * returns: 0 on success, or a non-zero error value on failure.
 *
 * This function sets the MAC address, requests and enables an IRQ
 * for the EMAC device and starts the Tx queue.
 * It also connects to the phy device.
 */
static int arc_emac_open(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	struct phy_device *phy_dev = ndev->phydev;
	int i;

	phy_dev->autoneg = AUTONEG_ENABLE;
	phy_dev->speed = 0;
	phy_dev->duplex = 0;
	linkmode_and(phy_dev->advertising, phy_dev->advertising,
		     phy_dev->supported);

	priv->last_rx_bd = 0;

	/* Allocate and set buffers for Rx BD's */
	for (i = 0; i < RX_BD_NUM; i++) {
		dma_addr_t addr;
		unsigned int *last_rx_bd = &priv->last_rx_bd;
		struct arc_emac_bd *rxbd = &priv->rxbd[*last_rx_bd];
		struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd];

		rx_buff->skb = netdev_alloc_skb_ip_align(ndev,
							 EMAC_BUFFER_SIZE);
		if (unlikely(!rx_buff->skb))
			return -ENOMEM;

		addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data,
				      EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&ndev->dev, addr)) {
			netdev_err(ndev, "cannot dma map\n");
			dev_kfree_skb(rx_buff->skb);
			return -ENOMEM;
		}
		dma_unmap_addr_set(rx_buff, addr, addr);
		dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE);

		rxbd->data = cpu_to_le32(addr);

		/* Make sure pointer to data buffer is set */
		wmb();

		/* Return ownership to EMAC */
		rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);

		*last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM;
	}

	priv->txbd_curr = 0;
	priv->txbd_dirty = 0;

	/* Clean Tx BD's */
	memset(priv->txbd, 0, TX_RING_SZ);

	/* Initialize logical address filter */
	arc_reg_set(priv, R_LAFL, 0);
	arc_reg_set(priv, R_LAFH, 0);

	/* Set BD ring pointers for device side */
	arc_reg_set(priv, R_RX_RING, (unsigned int)priv->rxbd_dma);
	arc_reg_set(priv, R_TX_RING, (unsigned int)priv->txbd_dma);

	/* Enable interrupts */
	arc_reg_set(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);

	/* Set CONTROL */
	arc_reg_set(priv, R_CTRL,
		    (RX_BD_NUM << 24) |	/* RX BD table length */
		    (TX_BD_NUM << 16) |	/* TX BD table length */
		    TXRN_MASK | RXRN_MASK);

	napi_enable(&priv->napi);

	/* Enable EMAC */
	arc_reg_or(priv, R_CTRL, EN_MASK);

	phy_start(ndev->phydev);

	netif_start_queue(ndev);

	return 0;
}
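
With RXINT/TXINT enabled above, the interrupt handler only has to acknowledge the status bits, mask further Rx/Tx interrupts and defer the work to NAPI. A simplified sketch (arc_reg_get() and arc_reg_clr() are assumed read/clear helpers symmetrical to the arc_reg_set()/arc_reg_or() used in this section; error-interrupt accounting is omitted):

static irqreturn_t example_emac_intr(int irq, void *dev_instance)
{
	struct net_device *ndev = dev_instance;
	struct arc_emac_priv *priv = netdev_priv(ndev);
	unsigned int status;

	status = arc_reg_get(priv, R_STATUS);	/* assumed read helper */

	/* Acknowledge the interrupt causes we are about to handle */
	arc_reg_set(priv, R_STATUS, status);

	if (status & (RXINT_MASK | TXINT_MASK)) {
		if (likely(napi_schedule_prep(&priv->napi))) {
			/* Mask Rx/Tx interrupts until the poll re-enables them */
			arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
			__napi_schedule(&priv->napi);
		}
	}

	return IRQ_HANDLED;
}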
Example #5
/**
 * arc_emac_rx - processing of Rx packets.
 * @ndev:	Pointer to the network device.
 * @budget:	How many BDs to process in one call.
 *
 * returns:	Number of processed BDs
 *
 * Iterate through Rx BDs and deliver received packets to the upper layer.
 */
static int arc_emac_rx(struct net_device *ndev, int budget)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	unsigned int work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		unsigned int *last_rx_bd = &priv->last_rx_bd;
		struct net_device_stats *stats = &ndev->stats;
		struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd];
		struct arc_emac_bd *rxbd = &priv->rxbd[*last_rx_bd];
		unsigned int pktlen, info = le32_to_cpu(rxbd->info);
		struct sk_buff *skb;
		dma_addr_t addr;

		if (unlikely((info & OWN_MASK) == FOR_EMAC))
			break;

		/* Make a note that we saw a packet at this BD.
		 * So next time, driver starts from this + 1
		 */
		*last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM;

		if (unlikely((info & FIRST_OR_LAST_MASK) !=
			     FIRST_OR_LAST_MASK)) {
			/* We pre-allocate buffers of MTU size so incoming
			 * packets won't be split/chained.
			 */
			if (net_ratelimit())
				netdev_err(ndev, "incomplete packet received\n");

			/* Return ownership to EMAC */
			rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
			stats->rx_errors++;
			stats->rx_length_errors++;
			continue;
		}

		/* Prepare the BD for the next cycle. Call netif_receive_skb()
		 * only if a new skb was allocated and mapped, to avoid holes
		 * in the RX FIFO.
		 */
		skb = netdev_alloc_skb_ip_align(ndev, EMAC_BUFFER_SIZE);
		if (unlikely(!skb)) {
			if (net_ratelimit())
				netdev_err(ndev, "cannot allocate skb\n");
			/* Return ownership to EMAC */
			rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
			stats->rx_errors++;
			stats->rx_dropped++;
			continue;
		}

		addr = dma_map_single(&ndev->dev, (void *)skb->data,
				      EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&ndev->dev, addr)) {
			if (net_ratelimit())
				netdev_err(ndev, "cannot map dma buffer\n");
			dev_kfree_skb(skb);
			/* Return ownership to EMAC */
			rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
			stats->rx_errors++;
			stats->rx_dropped++;
			continue;
		}

		/* unmap previously mapped skb */
		dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
				 dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);

		pktlen = info & LEN_MASK;
		stats->rx_packets++;
		stats->rx_bytes += pktlen;
		skb_put(rx_buff->skb, pktlen);
		rx_buff->skb->dev = ndev;
		rx_buff->skb->protocol = eth_type_trans(rx_buff->skb, ndev);

		netif_receive_skb(rx_buff->skb);

		rx_buff->skb = skb;
		dma_unmap_addr_set(rx_buff, addr, addr);
		dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE);

		rxbd->data = cpu_to_le32(addr);

		/* Make sure pointer to data buffer is set */
		wmb();

		/* Return ownership to EMAC */
		rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
	}

	return work_done;
}
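
arc_emac_rx() is meant to be driven from a NAPI poll callback: process at most budget BDs and, only when fewer than budget were consumed, complete NAPI and unmask the device interrupts again. A minimal sketch of such a poll function (the exact unmask value is a simplified assumption and Tx-completion handling is omitted):

static int example_emac_poll(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct arc_emac_priv *priv = netdev_priv(ndev);
	unsigned int work_done;

	work_done = arc_emac_rx(ndev, budget);
	if (work_done < budget) {
		/* All pending packets handled: stop polling and unmask IRQs */
		napi_complete_done(napi, work_done);
		arc_reg_or(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
	}

	return work_done;
}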
Example #6
/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @priv: device private data pointer
 * @cmd: a pointer to the uCode command structure
 *
 * The function returns a value < 0 to indicate that the operation
 * failed. On success, it returns the index (> 0) of the command in the
 * command queue.
 */
static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
	struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	unsigned long flags;
	u32 idx;
	u16 copy_size, cmd_size;
	bool is_ct_kill = false;
	bool had_nocopy = false;
	int i;
	u8 *cmd_dest;
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_idx;
#endif

	if (test_bit(STATUS_FW_ERROR, &priv->status)) {
		IWL_WARN(priv, "fw recovery, no hcmd send\n");
		return -EIO;
	}

	if ((priv->ucode_owner == IWL_OWNERSHIP_TM) &&
	    !(cmd->flags & CMD_ON_DEMAND)) {
		IWL_DEBUG_HC(priv, "tm own the uCode, no regular hcmd send\n");
		return -EIO;
	}

	copy_size = sizeof(out_cmd->hdr);
	cmd_size = sizeof(out_cmd->hdr);

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy))
				return -EINVAL;
			copy_size += cmd->len[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
		return -EINVAL;

	if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) {
		IWL_WARN(priv, "Not sending command - %s KILL\n",
			 iwl_is_rfkill(priv) ? "RF" : "CT");
		return -EIO;
	}

	spin_lock_irqsave(&priv->hcmd_lock, flags);

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_irqrestore(&priv->hcmd_lock, flags);

		IWL_ERR(priv, "No space in command queue\n");
		is_ct_kill = iwl_check_for_ct_kill(priv);
		if (!is_ct_kill) {
			IWL_ERR(priv, "Restarting adapter due to queue full\n");
			iwlagn_fw_error(priv, false);
		}
		return -ENOSPC;
	}

	idx = get_cmd_index(q, q->write_ptr);
	out_cmd = txq->cmd[idx];
	out_meta = &txq->meta[idx];

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;
	if (cmd->flags & CMD_ASYNC)
		out_meta->callback = cmd->callback;

	/* set up the header */

	out_cmd->hdr.cmd = cmd->id;
	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) |
					    INDEX_TO_SEQ(q->write_ptr));

	/* and copy the data that needs to be copied */

	cmd_dest = &out_cmd->cmd.payload[0];
	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
			break;
		memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
		cmd_dest += cmd->len[i];
	}

	IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
			"%d bytes at %d[%d]:%d\n",
			get_cmd_string(out_cmd->hdr.cmd),
			out_cmd->hdr.cmd,
			le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
			q->write_ptr, idx, priv->cmd_queue);

	phys_addr = dma_map_single(priv->bus->dev, &out_cmd->hdr, copy_size,
				DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(priv->bus->dev, phys_addr))) {
		idx = -ENOMEM;
		goto out;
	}

	dma_unmap_addr_set(out_meta, mapping, phys_addr);
	dma_unmap_len_set(out_meta, len, copy_size);

	iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr, copy_size, 1);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_bufs[0] = &out_cmd->hdr;
	trace_lens[0] = copy_size;
	trace_idx = 1;
#endif

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
			continue;
		phys_addr = dma_map_single(priv->bus->dev, (void *)cmd->data[i],
					   cmd->len[i], DMA_BIDIRECTIONAL);
		if (dma_mapping_error(priv->bus->dev, phys_addr)) {
			iwlagn_unmap_tfd(priv, out_meta,
					 &txq->tfds[q->write_ptr],
					 DMA_BIDIRECTIONAL);
			idx = -ENOMEM;
			goto out;
		}

		iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr,
					     cmd->len[i], 0);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
		trace_bufs[trace_idx] = cmd->data[i];
		trace_lens[trace_idx] = cmd->len[i];
		trace_idx++;
#endif
	}

	out_meta->flags = cmd->flags;

	txq->need_update = 1;

	/* check that tracing gets all possible blocks */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_iwlwifi_dev_hcmd(priv, cmd->flags,
			       trace_bufs[0], trace_lens[0],
			       trace_bufs[1], trace_lens[1],
			       trace_bufs[2], trace_lens[2]);
#endif

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(priv, txq);

 out:
	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
	return idx;
}
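
The mapping and length stored in out_meta above are what the completion path later uses to undo the dma_map_single() once the firmware has answered the command. A minimal sketch of that unmap step (example_unmap_hcmd is an illustration name; the real driver's teardown also walks any NOCOPY fragments attached to the TFD):

static void example_unmap_hcmd(struct iwl_priv *priv, struct iwl_cmd_meta *meta)
{
	/* Undo the header/copy mapping recorded with dma_unmap_addr_set()
	 * and dma_unmap_len_set() when the command was enqueued.
	 */
	if (dma_unmap_len(meta, len))
		dma_unmap_single(priv->bus->dev,
				 dma_unmap_addr(meta, mapping),
				 dma_unmap_len(meta, len),
				 DMA_BIDIRECTIONAL);
	dma_unmap_len_set(meta, len, 0);
}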
Example #7
File: tx.c Project: dgarnier/linux
/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @trans: transport layer pointer
 * @cmd: a pointer to the uCode command structure
 *
 * The function returns a value < 0 to indicate that the operation
 * failed. On success, it returns the index (> 0) of the command in the
 * command queue.
 */
static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	u32 idx;
	u16 copy_size, cmd_size;
	bool had_nocopy = false;
	int i;
	u8 *cmd_dest;
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_idx;
#endif

	copy_size = sizeof(out_cmd->hdr);
	cmd_size = sizeof(out_cmd->hdr);

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy))
				return -EINVAL;
			copy_size += cmd->len[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
		return -EINVAL;

	spin_lock_bh(&txq->lock);

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_bh(&txq->lock);

		IWL_ERR(trans, "No space in command queue\n");
		iwl_op_mode_cmd_queue_full(trans->op_mode);
		return -ENOSPC;
	}

	idx = get_cmd_index(q, q->write_ptr);
	out_cmd = txq->entries[idx].cmd;
	out_meta = &txq->entries[idx].meta;

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */

	out_cmd->hdr.cmd = cmd->id;
	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence =
		cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
					 INDEX_TO_SEQ(q->write_ptr));

	/* and copy the data that needs to be copied */

	cmd_dest = out_cmd->payload;
	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
			break;
		memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
		cmd_dest += cmd->len[i];
	}

	IWL_DEBUG_HC(trans,
		     "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
		     trans_pcie_get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
		     cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);

	phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, copy_size,
				   DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
		idx = -ENOMEM;
		goto out;
	}

	dma_unmap_addr_set(out_meta, mapping, phys_addr);
	dma_unmap_len_set(out_meta, len, copy_size);

	iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr, copy_size, 1);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_bufs[0] = &out_cmd->hdr;
	trace_lens[0] = copy_size;
	trace_idx = 1;
#endif

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
			continue;
		phys_addr = dma_map_single(trans->dev, (void *)cmd->data[i],
					   cmd->len[i], DMA_BIDIRECTIONAL);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwl_unmap_tfd(trans, out_meta,
				      &txq->tfds[q->write_ptr],
				      DMA_BIDIRECTIONAL);
			idx = -ENOMEM;
			goto out;
		}

		iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
					     cmd->len[i], 0);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
		trace_bufs[trace_idx] = cmd->data[i];
		trace_lens[trace_idx] = cmd->len[i];
		trace_idx++;
#endif
	}

	out_meta->flags = cmd->flags;

	txq->need_update = 1;

	/* check that tracing gets all possible blocks */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_iwlwifi_dev_hcmd(trans->dev, cmd->flags,
			       trace_bufs[0], trace_lens[0],
			       trace_bufs[1], trace_lens[1],
			       trace_bufs[2], trace_lens[2]);
#endif

	/* start timer if queue currently empty */
	if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(trans, txq);

 out:
	spin_unlock_bh(&txq->lock);
	return idx;
}
Example #8
/**
 * iwl_legacy_enqueue_hcmd - enqueue a uCode command
 * @priv: device private data pointer
 * @cmd: a pointer to the uCode command structure
 *
 * The function returns a value < 0 to indicate that the operation
 * failed. On success, it returns the index (> 0) of the command in the
 * command queue.
 */
int iwl_legacy_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
	struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	unsigned long flags;
	int len;
	u32 idx;
	u16 fix_size;

	cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
	fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));

	/* If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and it is sent as a 'small' command, then
	 * we will need to increase the size of the TFD entries.
	 * Also, check that the command buffer does not exceed the size
	 * of device_cmd and max_cmd_size. */
	BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
	       !(cmd->flags & CMD_SIZE_HUGE));
	BUG_ON(fix_size > IWL_MAX_CMD_SIZE);

	if (iwl_legacy_is_rfkill(priv) || iwl_legacy_is_ctkill(priv)) {
		IWL_WARN(priv, "Not sending command - %s KILL\n",
			 iwl_legacy_is_rfkill(priv) ? "RF" : "CT");
		return -EIO;
	}

	spin_lock_irqsave(&priv->hcmd_lock, flags);

	if (iwl_legacy_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_irqrestore(&priv->hcmd_lock, flags);

		IWL_ERR(priv, "Restarting adapter due to command queue full\n");
		queue_work(priv->workqueue, &priv->restart);
		return -ENOSPC;
	}

	idx = iwl_legacy_get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
	out_cmd = txq->cmd[idx];
	out_meta = &txq->meta[idx];

	if (WARN_ON(out_meta->flags & CMD_MAPPED)) {
		spin_unlock_irqrestore(&priv->hcmd_lock, flags);
		return -ENOSPC;
	}

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	out_meta->flags = cmd->flags | CMD_MAPPED;
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;
	if (cmd->flags & CMD_ASYNC)
		out_meta->callback = cmd->callback;

	out_cmd->hdr.cmd = cmd->id;
	memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);

	/* At this point, the out_cmd now has all of the incoming cmd
	 * information */

	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) |
			INDEX_TO_SEQ(q->write_ptr));
	if (cmd->flags & CMD_SIZE_HUGE)
		out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
	len = sizeof(struct iwl_device_cmd);
	if (idx == TFD_CMD_SLOTS)
		len = IWL_MAX_CMD_SIZE;

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	switch (out_cmd->hdr.cmd) {
	case REPLY_TX_LINK_QUALITY_CMD:
	case SENSITIVITY_CMD:
		IWL_DEBUG_HC_DUMP(priv,
				"Sending command %s (#%x), seq: 0x%04X, "
				"%d bytes at %d[%d]:%d\n",
				iwl_legacy_get_cmd_string(out_cmd->hdr.cmd),
				out_cmd->hdr.cmd,
				le16_to_cpu(out_cmd->hdr.sequence), fix_size,
				q->write_ptr, idx, priv->cmd_queue);
		break;
	default:
		IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
				"%d bytes at %d[%d]:%d\n",
				iwl_legacy_get_cmd_string(out_cmd->hdr.cmd),
				out_cmd->hdr.cmd,
				le16_to_cpu(out_cmd->hdr.sequence), fix_size,
				q->write_ptr, idx, priv->cmd_queue);
	}
#endif
	txq->need_update = 1;

	if (priv->cfg->ops->lib->txq_update_byte_cnt_tbl)
		/* Set up entry in queue's byte count circular buffer */
		priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);

	phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
				   fix_size, PCI_DMA_BIDIRECTIONAL);
	dma_unmap_addr_set(out_meta, mapping, phys_addr);
	dma_unmap_len_set(out_meta, len, fix_size);

	trace_iwlwifi_legacy_dev_hcmd(priv, &out_cmd->hdr,
						fix_size, cmd->flags);

	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
						   phys_addr, fix_size, 1,
						   U32_PAD(cmd->len));

	/* Increment and update queue's write index */
	q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_legacy_txq_update_write_ptr(priv, txq);

	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
	return idx;
}
Example #9
/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @trans: transport layer pointer
 * @cmd: a pointer to the uCode command structure
 *
 * The function returns a value < 0 to indicate that the operation
 * failed. On success, it returns the index (> 0) of the command in the
 * command queue.
 */
static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	u32 idx;
	u16 copy_size, cmd_size, dma_size;
	bool had_nocopy = false;
	int i;
	u8 *cmd_dest;
	const u8 *cmddata[IWL_MAX_CMD_TFDS];
	u16 cmdlen[IWL_MAX_CMD_TFDS];
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_idx;
#endif

	if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
		IWL_WARN(trans, "fw recovery, no hcmd send\n");
		return -EIO;
	}

	copy_size = sizeof(out_cmd->hdr);
	cmd_size = sizeof(out_cmd->hdr);

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		cmddata[i] = cmd->data[i];
		cmdlen[i] = cmd->len[i];

		if (!cmd->len[i])
			continue;

		/* need at least IWL_HCMD_MIN_COPY_SIZE copied */
		if (copy_size < IWL_HCMD_MIN_COPY_SIZE) {
			int copy = IWL_HCMD_MIN_COPY_SIZE - copy_size;

			if (copy > cmdlen[i])
				copy = cmdlen[i];
			cmdlen[i] -= copy;
			cmddata[i] += copy;
			copy_size += copy;
		}

		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy))
				return -EINVAL;
			copy_size += cmdlen[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
		return -EINVAL;

	spin_lock_bh(&txq->lock);

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_bh(&txq->lock);

		IWL_ERR(trans, "No space in command queue\n");
		iwl_op_mode_cmd_queue_full(trans->op_mode);
		return -ENOSPC;
	}

	idx = get_cmd_index(q, q->write_ptr);
	out_cmd = txq->cmd[idx];
	out_meta = &txq->meta[idx];

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */

	out_cmd->hdr.cmd = cmd->id;
	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence =
		cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
					 INDEX_TO_SEQ(q->write_ptr));

	/* and copy the data that needs to be copied */

	cmd_dest = out_cmd->payload;
	copy_size = sizeof(out_cmd->hdr);
	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		int copy = 0;

		if (!cmd->len[i])
			continue;

		/* need at least IWL_HCMD_MIN_COPY_SIZE copied */
		if (copy_size < IWL_HCMD_MIN_COPY_SIZE) {
			copy = IWL_HCMD_MIN_COPY_SIZE - copy_size;

			if (copy > cmd->len[i])
				copy = cmd->len[i];
		}

		/* copy everything if not nocopy/dup */
		if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
			copy = cmd->len[i];

		if (copy) {
			memcpy(cmd_dest, cmd->data[i], copy);
			cmd_dest += copy;
			copy_size += copy;
		}
	}

	IWL_DEBUG_HC(trans, "Sending command %s (#%x), seq: 0x%04X, "
			"%d bytes at %d[%d]:%d\n",
			get_cmd_string(out_cmd->hdr.cmd),
			out_cmd->hdr.cmd,
			le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
			q->write_ptr, idx, trans_pcie->cmd_queue);

	/*
	 * If the entire command is smaller than IWL_HCMD_MIN_COPY_SIZE, we must
	 * still map at least that many bytes for the hardware to write back to.
	 * We have enough space, so that's not a problem.
	 */
	dma_size = max_t(u16, copy_size, IWL_HCMD_MIN_COPY_SIZE);

	phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, dma_size,
				DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
		idx = -ENOMEM;
		goto out;
	}

	dma_unmap_addr_set(out_meta, mapping, phys_addr);
	dma_unmap_len_set(out_meta, len, dma_size);

	iwlagn_txq_attach_buf_to_tfd(trans, txq,
					phys_addr, copy_size, 1);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_bufs[0] = &out_cmd->hdr;
	trace_lens[0] = copy_size;
	trace_idx = 1;
#endif

	/* map the remaining (adjusted) nocopy/dup fragments */
	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmdlen[i])
			continue;
		if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
			continue;
		phys_addr = dma_map_single(trans->dev,
					   (void *)cmddata[i],
					   cmdlen[i], DMA_BIDIRECTIONAL);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwlagn_unmap_tfd(trans, out_meta,
					 &txq->tfds[q->write_ptr],
					 DMA_BIDIRECTIONAL);
			idx = -ENOMEM;
			goto out;
		}

		iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
					     cmdlen[i], 0);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
		trace_bufs[trace_idx] = cmddata[i];
		trace_lens[trace_idx] = cmdlen[i];
		trace_idx++;
#endif
	}

	out_meta->flags = cmd->flags;

	txq->need_update = 1;

	/* check that tracing gets all possible blocks */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_iwlwifi_dev_hcmd(trans->dev, cmd->flags,
			       trace_bufs[0], trace_lens[0],
			       trace_bufs[1], trace_lens[1],
			       trace_bufs[2], trace_lens[2]);
#endif

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(trans, txq);

 out:
	spin_unlock_bh(&txq->lock);
	return idx;
}
Example #10
static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	u32 idx;
	u16 copy_size, cmd_size;
	bool had_nocopy = false;
	int i;
	u8 *cmd_dest;
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_idx;
#endif

	if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
		IWL_WARN(trans, "fw recovery, no hcmd send\n");
		return -EIO;
	}

	copy_size = sizeof(out_cmd->hdr);
	cmd_size = sizeof(out_cmd->hdr);

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy))
				return -EINVAL;
			copy_size += cmd->len[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
		return -EINVAL;

	spin_lock_bh(&txq->lock);

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_bh(&txq->lock);

		IWL_ERR(trans, "No space in command queue\n");
		iwl_op_mode_cmd_queue_full(trans->op_mode);
		return -ENOSPC;
	}

	idx = get_cmd_index(q, q->write_ptr);
	out_cmd = txq->cmd[idx];
	out_meta = &txq->meta[idx];

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */

	out_cmd->hdr.cmd = cmd->id;
	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence =
		cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
					 INDEX_TO_SEQ(q->write_ptr));

	/* and copy the data that needs to be copied */

	cmd_dest = out_cmd->payload;
	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
			break;
		memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
		cmd_dest += cmd->len[i];
	}

	IWL_DEBUG_HC(trans, "Sending command %s (#%x), seq: 0x%04X, "
			"%d bytes at %d[%d]:%d\n",
			get_cmd_string(out_cmd->hdr.cmd),
			out_cmd->hdr.cmd,
			le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
			q->write_ptr, idx, trans_pcie->cmd_queue);

	phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, copy_size,
				DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
		idx = -ENOMEM;
		goto out;
	}

	dma_unmap_addr_set(out_meta, mapping, phys_addr);
	dma_unmap_len_set(out_meta, len, copy_size);

	iwlagn_txq_attach_buf_to_tfd(trans, txq,
					phys_addr, copy_size, 1);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_bufs[0] = &out_cmd->hdr;
	trace_lens[0] = copy_size;
	trace_idx = 1;
#endif

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
			continue;
		phys_addr = dma_map_single(trans->dev,
					   (void *)cmd->data[i],
					   cmd->len[i], DMA_BIDIRECTIONAL);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwlagn_unmap_tfd(trans, out_meta,
					 &txq->tfds[q->write_ptr],
					 DMA_BIDIRECTIONAL);
			idx = -ENOMEM;
			goto out;
		}

		iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
					     cmd->len[i], 0);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
		trace_bufs[trace_idx] = cmd->data[i];
		trace_lens[trace_idx] = cmd->len[i];
		trace_idx++;
#endif
	}

	out_meta->flags = cmd->flags;

	txq->need_update = 1;

	/* check that tracing gets all possible blocks */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_iwlwifi_dev_hcmd(trans->dev, cmd->flags,
			       trace_bufs[0], trace_lens[0],
			       trace_bufs[1], trace_lens[1],
			       trace_bufs[2], trace_lens[2]);
#endif

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(trans, txq);

 out:
	spin_unlock_bh(&txq->lock);
	return idx;
}