Example #1

bnad_set_ringparam(): validate the requested ring depths, then tear down and rebuild any active RX/TX rings at the new depths, all under bnad->conf_mutex.

static int
bnad_set_ringparam(struct net_device *netdev,
		   struct ethtool_ringparam *ringparam)
{
	int i, current_err, err = 0;
	struct bnad *bnad = netdev_priv(netdev);

	mutex_lock(&bnad->conf_mutex);
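	/* Nothing to do if the requested depths already match. */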
	if (ringparam->rx_pending == bnad->rxq_depth &&
	    ringparam->tx_pending == bnad->txq_depth) {
		mutex_unlock(&bnad->conf_mutex);
		return 0;
	}

	/*
	 * Each depth must be a power of 2 within the supported range.  The
	 * RX limit is scaled down by bnad_rxqs_per_cq so that the combined
	 * depth of the RXQs feeding one CQ stays within BNAD_MAX_Q_DEPTH.
	 */
	if (ringparam->rx_pending < BNAD_MIN_Q_DEPTH ||
	    ringparam->rx_pending > BNAD_MAX_Q_DEPTH / bnad_rxqs_per_cq ||
	    !BNA_POWER_OF_2(ringparam->rx_pending)) {
		mutex_unlock(&bnad->conf_mutex);
		return -EINVAL;
	}
	if (ringparam->tx_pending < BNAD_MIN_Q_DEPTH ||
	    ringparam->tx_pending > BNAD_MAX_Q_DEPTH ||
	    !BNA_POWER_OF_2(ringparam->tx_pending)) {
		mutex_unlock(&bnad->conf_mutex);
		return -EINVAL;
	}

	/*
	 * Apply the new RX depth: tear down and re-create each active RX
	 * path, recording the first error but still rebuilding the rest.
	 */
	if (ringparam->rx_pending != bnad->rxq_depth) {
		bnad->rxq_depth = ringparam->rx_pending;
		for (i = 0; i < bnad->num_rx; i++) {
			if (!bnad->rx_info[i].rx)
				continue;
			bnad_cleanup_rx(bnad, i);
			current_err = bnad_setup_rx(bnad, i);
			if (current_err && !err)
				err = current_err;
		}
	}
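	/* Same procedure for the TX rings. */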
	if (ringparam->tx_pending != bnad->txq_depth) {
		bnad->txq_depth = ringparam->tx_pending;
		for (i = 0; i < bnad->num_tx; i++) {
			if (!bnad->tx_info[i].tx)
				continue;
			bnad_cleanup_tx(bnad, i);
			current_err = bnad_setup_tx(bnad, i);
			if (current_err && !err)
				err = current_err;
		}
	}

	mutex_unlock(&bnad->conf_mutex);
	return err;
}
Example #2

A variant of the same handler: it checks the RX and TX depths against separate BNAD_MAX_RXQ_DEPTH / BNAD_MAX_TXQ_DEPTH limits, skips the rebuild entirely when the interface is down, and restores the RX configuration (VLANs, default broadcast, MAC address, RX mode) after the RX rings are re-created.

static int
bnad_set_ringparam(struct net_device *netdev,
		   struct ethtool_ringparam *ringparam)
{
	int i, current_err, err = 0;
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	mutex_lock(&bnad->conf_mutex);
	if (ringparam->rx_pending == bnad->rxq_depth &&
	    ringparam->tx_pending == bnad->txq_depth) {
		mutex_unlock(&bnad->conf_mutex);
		return 0;
	}

	/* Depths must be powers of 2 within the per-queue-type limits. */
	if (ringparam->rx_pending < BNAD_MIN_Q_DEPTH ||
	    ringparam->rx_pending > BNAD_MAX_RXQ_DEPTH ||
	    !BNA_POWER_OF_2(ringparam->rx_pending)) {
		mutex_unlock(&bnad->conf_mutex);
		return -EINVAL;
	}
	if (ringparam->tx_pending < BNAD_MIN_Q_DEPTH ||
	    ringparam->tx_pending > BNAD_MAX_TXQ_DEPTH ||
	    !BNA_POWER_OF_2(ringparam->tx_pending)) {
		mutex_unlock(&bnad->conf_mutex);
		return -EINVAL;
	}

	if (ringparam->rx_pending != bnad->rxq_depth) {
		bnad->rxq_depth = ringparam->rx_pending;
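		/*
		 * If the interface is down, just record the new depth; the
		 * rings are created with it when the device is next opened.
		 */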
		if (!netif_running(netdev)) {
			mutex_unlock(&bnad->conf_mutex);
			return 0;
		}

		for (i = 0; i < bnad->num_rx; i++) {
			if (!bnad->rx_info[i].rx)
				continue;
			bnad_destroy_rx(bnad, i);
			current_err = bnad_setup_rx(bnad, i);
			if (current_err && !err)
				err = current_err;
		}

		if (!err && bnad->rx_info[0].rx) {
			/*
			 * Re-creating the RX path loses adapter state: restore
			 * VLAN filters, the default broadcast entry and the
			 * MAC address, then recompute the RX mode.
			 */
			bnad_restore_vlans(bnad, 0);
			bnad_enable_default_bcast(bnad);
			spin_lock_irqsave(&bnad->bna_lock, flags);
			bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
			spin_unlock_irqrestore(&bnad->bna_lock, flags);
			bnad->cfg_flags &= ~(BNAD_CF_ALLMULTI |
					     BNAD_CF_PROMISC);
			bnad_set_rx_mode(netdev);
		}
	}
	if (ringparam->tx_pending != bnad->txq_depth) {
		bnad->txq_depth = ringparam->tx_pending;
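		/* As above: when the interface is down, the new TX depth
		 * simply takes effect on the next open.
		 */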
		if (!netif_running(netdev)) {
			mutex_unlock(&bnad->conf_mutex);
			return 0;
		}

		for (i = 0; i < bnad->num_tx; i++) {
			if (!bnad->tx_info[i].tx)
				continue;
			bnad_destroy_tx(bnad, i);
			current_err = bnad_setup_tx(bnad, i);
			if (current_err && !err)
				err = current_err;
		}
	}

	mutex_unlock(&bnad->conf_mutex);
	return err;
}
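
For context, both versions implement the driver's ethtool set_ringparam operation. A minimal way to exercise it from user space, assuming a hypothetical interface name eth0:

ethtool -G eth0 rx 2048 tx 2048

The rx/tx values arrive as ringparam->rx_pending and ringparam->tx_pending and must pass the power-of-2 range checks above, otherwise the call fails with -EINVAL.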