Example #1
0
File: main.c  Project: avagin/linux
/* nfp_bpf_parse_cap_adjust_head() - parse the adjust_head capability TLV
 * @bpf:	BPF app state to populate
 * @value:	IOMEM pointer to the TLV payload
 * @length:	TLV payload length in bytes
 *
 * Read the adjust_head limits advertised by firmware and sanity-check
 * them.  Returns 0 on success (possibly with the capability disabled),
 * -EINVAL on a malformed TLV.
 */
static int
nfp_bpf_parse_cap_adjust_head(struct nfp_app_bpf *bpf, void __iomem *value,
			      u32 length)
{
	struct nfp_bpf_cap_tlv_adjust_head __iomem *cap = value;
	struct nfp_cpp *cpp = bpf->app->pf->cpp;

	if (length < sizeof(*cap)) {
		/* length is u32 - print with %u, not the signed %d */
		nfp_err(cpp, "truncated adjust_head TLV: %u\n", length);
		return -EINVAL;
	}

	bpf->adjust_head.flags = readl(&cap->flags);
	bpf->adjust_head.off_min = readl(&cap->off_min);
	bpf->adjust_head.off_max = readl(&cap->off_max);
	bpf->adjust_head.guaranteed_sub = readl(&cap->guaranteed_sub);
	bpf->adjust_head.guaranteed_add = readl(&cap->guaranteed_add);

	if (bpf->adjust_head.off_min > bpf->adjust_head.off_max) {
		nfp_err(cpp, "invalid adjust_head TLV: min > max\n");
		return -EINVAL;
	}
	/* The offsets are emitted as immediates in generated code - if
	 * they do not fit, disable the capability instead of failing.
	 */
	if (!FIELD_FIT(UR_REG_IMM_MAX, bpf->adjust_head.off_min) ||
	    !FIELD_FIT(UR_REG_IMM_MAX, bpf->adjust_head.off_max)) {
		nfp_warn(cpp, "disabling adjust_head - driver expects min/max to fit in as immediates\n");
		memset(&bpf->adjust_head, 0, sizeof(bpf->adjust_head));
		return 0;
	}

	return 0;
}
Example #2
0
/* Walk the HWINFO key/value database and verify every key and value
 * string lies entirely within the @size byte buffer.
 *
 * Return: 0 if the database is well-formed, -EINVAL otherwise.
 */
static int
hwinfo_db_walk(struct nfp_cpp *cpp, struct nfp_hwinfo *hwinfo, u32 size)
{
	const char *key, *val, *end = hwinfo->data + size;

	/* Check the bound before dereferencing - the previous order
	 * (*key && key < end) read one byte past the buffer when the
	 * last value string ended exactly at @end.
	 */
	for (key = hwinfo->data; key < end && *key;
	     key = val + strlen(val) + 1) {

		val = key + strlen(key) + 1;
		if (val >= end) {
			nfp_warn(cpp, "Bad HWINFO - overflowing key\n");
			return -EINVAL;
		}

		/* NOTE(review): strlen(val) assumes a NUL exists before
		 * @end - presumably guaranteed by the zero-padded HWINFO
		 * format; confirm against the database producer.
		 */
		if (val + strlen(val) + 1 > end) {
			nfp_warn(cpp, "Bad HWINFO - overflowing value\n");
			return -EINVAL;
		}
	}

	return 0;
}
Example #3
0
/**
 * nfp_resource_acquire() - Acquire a resource handle
 * @cpp:	NFP CPP handle
 * @name:	Name of the resource
 *
 * NOTE: This function locks the acquired resource
 *
 * Return: NFP Resource handle, or ERR_PTR()
 */
struct nfp_resource *
nfp_resource_acquire(struct nfp_cpp *cpp, const char *name)
{
	/* First "still waiting" warning after 15s, then every 60s */
	unsigned long warn_at = jiffies + 15 * HZ;
	struct nfp_cpp_mutex *dev_mutex;
	struct nfp_resource *res;
	int err;

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return ERR_PTR(-ENOMEM);

	/* NOTE(review): strncpy() may leave res->name unterminated when
	 * @name is exactly NFP_RESOURCE_ENTRY_NAME_SZ long - presumably
	 * the resource table stores fixed-width names that need not be
	 * NUL-terminated; confirm before "fixing" this.
	 */
	strncpy(res->name, name, NFP_RESOURCE_ENTRY_NAME_SZ);

	/* Device-global resource table mutex serializes all acquirers */
	dev_mutex = nfp_cpp_mutex_alloc(cpp, NFP_RESOURCE_TBL_TARGET,
					NFP_RESOURCE_TBL_BASE,
					NFP_RESOURCE_TBL_KEY);
	if (!dev_mutex) {
		kfree(res);
		return ERR_PTR(-ENOMEM);
	}

	/* Retry until acquired, sleeping 1ms between attempts */
	for (;;) {
		err = nfp_resource_try_acquire(cpp, res, dev_mutex);
		if (!err)
			break;
		if (err != -EBUSY)
			goto err_free;

		/* Non-zero means a signal interrupted the sleep */
		err = msleep_interruptible(1);
		if (err != 0) {
			err = -ERESTARTSYS;
			goto err_free;
		}

		if (time_is_before_eq_jiffies(warn_at)) {
			warn_at = jiffies + 60 * HZ;
			nfp_warn(cpp, "Warning: waiting for NFP resource %s\n",
				 name);
		}
	}

	/* Resource acquired (and left locked) - the table mutex handle
	 * is no longer needed.
	 */
	nfp_cpp_mutex_free(dev_mutex);

	return res;

err_free:
	nfp_cpp_mutex_free(dev_mutex);
	kfree(res);
	return ERR_PTR(err);
}
Example #4
0
/* Read memory and check if it could be a valid MIP */
static int
nfp_mip_try_read(struct nfp_cpp *cpp, u32 cpp_id, u64 addr, struct nfp_mip *mip)
{
	int bytes;

	/* A short read means there is no complete MIP at this address */
	bytes = nfp_cpp_read(cpp, cpp_id, addr, mip, sizeof(*mip));
	if (bytes != sizeof(*mip)) {
		nfp_err(cpp, "Failed to read MIP data (%d, %zu)\n",
			bytes, sizeof(*mip));
		return -EIO;
	}

	/* Validate the signature first, then the ABI version */
	if (mip->signature != NFP_MIP_SIGNATURE) {
		nfp_warn(cpp, "Incorrect MIP signature (0x%08x)\n",
			 le32_to_cpu(mip->signature));
		return -EINVAL;
	}

	if (mip->mip_version != NFP_MIP_VERSION) {
		nfp_warn(cpp, "Unsupported MIP version (%d)\n",
			 le32_to_cpu(mip->mip_version));
		return -EINVAL;
	}

	return 0;
}
Example #5
0
File: nfp_main.c  Project: asmalldev/linux
/* Read the FW-advertised VF limit; fall back to "no limit" (~0) when
 * the symbol is absent or the read fails.
 */
static void nfp_pcie_sriov_read_nfd_limit(struct nfp_pf *pf)
{
#ifdef CONFIG_PCI_IOV
	int err;

	pf->limit_vfs = nfp_rtsym_read_le(pf->cpp, "nfd_vf_cfg_max_vfs", &err);
	if (err) {
		/* Allow any setting for backwards compatibility if symbol not found */
		pf->limit_vfs = ~0;
		if (err != -ENOENT)
			nfp_warn(pf->cpp, "Warning: VF limit read failed: %d\n", err);
	}
#endif
}
Example #6
0
File: main.c  Project: krzk/linux
/* Clear a pending MTU-change request under the MTU config lock. */
static void nfp_flower_repr_clear_mtu_req(struct nfp_flower_priv *app_priv)
{
	spin_lock_bh(&app_priv->mtu_conf.lock);
	app_priv->mtu_conf.requested_val = 0;
	spin_unlock_bh(&app_priv->mtu_conf.lock);
}

/* nfp_flower_repr_change_mtu() - program FW for a repr MTU change
 * @app:	NFP app handle
 * @netdev:	representor netdev being reconfigured
 * @new_mtu:	requested MTU
 *
 * Only physical port reprs need FW involvement.  Records the request,
 * sends a portmod message and waits (10ms) for the FW acknowledgment.
 * Return: 0 on success or when no FW config is needed, -errno otherwise.
 */
static int
nfp_flower_repr_change_mtu(struct nfp_app *app, struct net_device *netdev,
			   int new_mtu)
{
	struct nfp_flower_priv *app_priv = app->priv;
	struct nfp_repr *repr = netdev_priv(netdev);
	int err, ack;

	/* Only need to config FW for physical port MTU change. */
	if (repr->port->type != NFP_PORT_PHYS_PORT)
		return 0;

	if (!(app_priv->flower_ext_feats & NFP_FL_NBI_MTU_SETTING)) {
		nfp_err(app->cpp, "Physical port MTU setting not supported\n");
		return -EINVAL;
	}

	spin_lock_bh(&app_priv->mtu_conf.lock);
	app_priv->mtu_conf.ack = false;
	app_priv->mtu_conf.requested_val = new_mtu;
	app_priv->mtu_conf.portnum = repr->dst->u.port_info.port_id;
	spin_unlock_bh(&app_priv->mtu_conf.lock);

	err = nfp_flower_cmsg_portmod(repr, netif_carrier_ok(netdev), new_mtu,
				      true);
	if (err) {
		/* Roll back the recorded request on send failure */
		nfp_flower_repr_clear_mtu_req(app_priv);
		return err;
	}

	/* Wait for fw to ack the change. */
	ack = wait_event_timeout(app_priv->mtu_conf.wait_q,
				 nfp_flower_check_ack(app_priv),
				 msecs_to_jiffies(10));

	if (!ack) {
		nfp_flower_repr_clear_mtu_req(app_priv);
		nfp_warn(app->cpp, "MTU change not verified with fw\n");
		return -EIO;
	}

	return 0;
}
Example #7
0
File: main.c  Project: krzk/linux
/* Tell FW a representor is going away and wait briefly for the single
 * expected REIFY reply.  Best effort - a send failure is only logged.
 */
static void
nfp_flower_repr_netdev_preclean(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_repr *repr = netdev_priv(netdev);
	atomic_t *replies = &priv->reify_replies;

	atomic_set(replies, 0);
	if (nfp_flower_cmsg_portreify(repr, false)) {
		nfp_warn(app->cpp, "Failed to notify firmware about repr destruction\n");
		return;
	}

	nfp_flower_wait_repr_reify(app, replies, 1);
}
Example #8
0
File: main.c  Project: krzk/linux
/* Set up the single flower data vNIC; any additional vNIC is given an
 * invalid port so it stays unused.
 */
static int nfp_flower_vnic_alloc(struct nfp_app *app, struct nfp_net *nn,
				 unsigned int id)
{
	if (id > 0) {
		nfp_warn(app->cpp, "FlowerNIC doesn't support more than one data vNIC\n");
		nn->port = nfp_port_alloc(app, NFP_PORT_INVALID, nn->dp.netdev);
		return PTR_ERR_OR_ZERO(nn->port);
	}

	eth_hw_addr_random(nn->dp.netdev);
	netif_keep_dst(nn->dp.netdev);
	nn->vnic_no_name = true;

	return 0;
}
Example #9
0
File: main.c  Project: avagin/linux
/* nfp_bpf_parse_cap_abi_version() - parse the BPF ABI version TLV
 * @bpf:	BPF app state to populate
 * @value:	IOMEM pointer to the TLV payload
 * @length:	TLV payload length in bytes
 *
 * Records the FW BPF ABI version; an unsupported version is recorded
 * as 0 (capability off) rather than failing probe.
 * Return: 0 on success, -EINVAL on a truncated TLV.
 */
static int
nfp_bpf_parse_cap_abi_version(struct nfp_app_bpf *bpf, void __iomem *value,
			      u32 length)
{
	if (length < 4) {
		/* length is u32 - print with %u, not the signed %d */
		nfp_err(bpf->app->cpp, "truncated ABI version TLV: %u\n",
			length);
		return -EINVAL;
	}

	bpf->abi_version = readl(value);
	/* Only ABI versions 2 and 3 are understood by this driver */
	if (bpf->abi_version < 2 || bpf->abi_version > 3) {
		nfp_warn(bpf->app->cpp, "unsupported BPF ABI version: %d\n",
			 bpf->abi_version);
		bpf->abi_version = 0;
	}

	return 0;
}
Example #10
0
File: nfp_main.c  Project: avagin/linux
/* Callers should hold the devlink instance lock */
int nfp_mbox_cmd(struct nfp_pf *pf, u32 cmd, void *in_data, u64 in_length,
		 void *out_data, u64 out_length)
{
	unsigned long err_at;
	u64 max_data_sz;
	u32 val = 0;
	int n, err;

	if (!pf->mbox)
		return -EOPNOTSUPP;

	max_data_sz = nfp_rtsym_size(pf->mbox) - NFP_MBOX_SYM_MIN_SIZE;

	/* Check if cmd field is clear */
	err = nfp_rtsym_readl(pf->cpp, pf->mbox, NFP_MBOX_CMD, &val);
	if (err || val) {
		nfp_warn(pf->cpp, "failed to issue command (%u): %u, err: %d\n",
			 cmd, val, err);
		return err ?: -EBUSY;
	}
Example #11
0
File: main.c  Project: krzk/linux
/* Wait up to 10ms for @tot_repl REIFY replies to arrive.
 * Return: 0 when all replies arrived (or none expected), -EIO on
 * timeout or interruption.
 */
static int
nfp_flower_wait_repr_reify(struct nfp_app *app, atomic_t *replies, int tot_repl)
{
	struct nfp_flower_priv *priv = app->priv;
	int ret;

	if (!tot_repl)
		return 0;

	lockdep_assert_held(&app->pf->lock);
	ret = wait_event_interruptible_timeout(priv->reify_wait_queue,
					       atomic_read(replies) >= tot_repl,
					       msecs_to_jiffies(10));
	if (ret > 0)
		return 0;

	/* Timed out (0) or interrupted (<0) before all reprs replied */
	nfp_warn(app->cpp, "Not all reprs responded to reify\n");
	return -EIO;
}
Example #12
0
/**
 * nfp_cpp_mutex_lock() - Lock a mutex handle, using the NFP MU Atomic Engine
 * @mutex:	NFP CPP Mutex handle
 *
 * Return: 0 on success, or -errno on failure
 */
int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex)
{
	/* Warn after NFP_MUTEX_WAIT_FIRST_WARN seconds (then periodically),
	 * give up with -EBUSY after NFP_MUTEX_WAIT_ERROR seconds.
	 */
	unsigned long warn_at = jiffies + NFP_MUTEX_WAIT_FIRST_WARN * HZ;
	unsigned long err_at = jiffies + NFP_MUTEX_WAIT_ERROR * HZ;
	unsigned int timeout_ms = 1;
	int err;

	/* We can't use a waitqueue here, because the unlocker
	 * might be on a separate CPU.
	 *
	 * So just wait for now.
	 */
	for (;;) {
		err = nfp_cpp_mutex_trylock(mutex);
		/* Done on success (0) or any error other than "still held" */
		if (err != -EBUSY)
			break;

		/* Non-zero return means a signal cut the sleep short */
		err = msleep_interruptible(timeout_ms);
		if (err != 0) {
			nfp_info(mutex->cpp,
				 "interrupted waiting for NFP mutex\n");
			return -ERESTARTSYS;
		}

		if (time_is_before_eq_jiffies(warn_at)) {
			warn_at = jiffies + NFP_MUTEX_WAIT_NEXT_WARN * HZ;
			nfp_warn(mutex->cpp,
				 "Warning: waiting for NFP mutex [depth:%hd target:%d addr:%llx key:%08x]\n",
				 mutex->depth,
				 mutex->target, mutex->address, mutex->key);
		}
		if (time_is_before_eq_jiffies(err_at)) {
			nfp_err(mutex->cpp, "Error: mutex wait timed out\n");
			return -EBUSY;
		}
	}

	return err;
}
Example #13
0
/* Perform a soft reset of the NFP6000:
 *   - Disable traffic ingress
 *   - Verify all NBI MAC packet buffers have returned
 *   - Wait for PCIE DMA Queues to empty
 *   - Stop all MEs
 *   - Clear all PCIe DMA Queues
 *   - Reset MAC NBI gaskets
 *   - Verify that all NBI/MAC buffers/credits have returned
 *   - Soft reset subcomponents relevant to this model
 *     - TODO: Crypto reset
 */
static int nfp6000_reset_soft(struct nfp_device *nfp)
{
	struct nfp_cpp *cpp = nfp_device_cpp(nfp);
	struct nfp_nbi_dev *nbi[2] = {};
	struct nfp_resource *res;
	int mac_enable[2];
	int i, p, err, nbi_mask = 0;
	u32 bpe[2][32];
	int bpes[2];

	/* Lock out the MAC from any stats updaters,
	 * such as the NSP
	 */
	res = nfp_resource_acquire(nfp, NFP_RESOURCE_MAC_STATISTICS);
	if (!res)
		return -EBUSY;

	for (i = 0; i < 2; i++) {
		u32 tmp;
		int state;

		err = nfp_power_get(nfp, NFP6000_DEVICE_NBI(i, 0), &state);
		if (err < 0) {
			if (err == -ENODEV) {
				nbi[i] = NULL;
				continue;
			}
			goto exit;
		}

		if (state != NFP_DEVICE_STATE_ON) {
			nbi[i] = NULL;
			continue;
		}

		nbi[i] = nfp_nbi_open(nfp, i);
		if (!nbi[i])
			continue;

		nbi_mask |= BIT(i);

		err = nfp_nbi_mac_regr(nbi[i], NFP_NBI_MACX_CSR,
				       NFP_NBI_MACX_MAC_BLOCK_RST,
				       &tmp);
		if (err < 0)
			goto exit;

		mac_enable[i] = 0;
		if (!(tmp & NFP_NBI_MACX_MAC_BLOCK_RST_MAC_HY0_STAT_RST))
			mac_enable[i] |= BIT(0);
		if (!(tmp & NFP_NBI_MACX_MAC_BLOCK_RST_MAC_HY1_STAT_RST))
			mac_enable[i] |= BIT(1);

		/* No MACs at all? Then we don't care. */
		if (mac_enable[i] == 0) {
			nfp_nbi_close(nbi[i]);
			nbi[i] = NULL;
			continue;
		}

		/* Make sure we have the BPE list */
		err = bpe_lookup(nfp, i, &bpe[i][0], ARRAY_SIZE(bpe[i]));
		if (err < 0)
			goto exit;

		bpes[i] = err;
	}

	/* Verify that traffic ingress is disabled */
	for (i = 0; i < 2; i++) {
		if (!nbi[i])
			continue;

		for (p = 0; p < 24; p++) {
			u32 r, mask, tmp;

			mask =  NFP_NBI_MACX_ETH_SEG_CMD_CONFIG_ETH_RX_ENA;
			r =  NFP_NBI_MACX_ETH_SEG_CMD_CONFIG(p % 12);

			err = nfp_nbi_mac_regr(nbi[i],
					       NFP_NBI_MACX_ETH(p / 12),
					       r, &tmp);
			if (err < 0) {
				nfp_err(nfp, "Can't verify RX is disabled for port %d.%d\n",
					i, p);
				goto exit;
			}

			if (tmp & mask) {
				nfp_warn(nfp, "HAZARD: RX for traffic was not disabled by firmware for port %d.%d\n",
					 i, p);
			}

			err = nfp_nbi_mac_regw(nbi[i], NFP_NBI_MACX_ETH(p / 12),
					       r, mask, 0);
			if (err < 0) {
				nfp_err(nfp, "Can't disable RX traffic for port %d.%d\n",
					i, p);
				goto exit;
			}
		}
	}

	/* Wait for packets to drain from NBI to NFD or to be freed.
	 * Worst case guess is:
	 *      512 pkts per CTM, 12 MEs per CTM, 800MHz clock rate
	 *      ~1000 cycles to sink a single packet.
	 *      512/12 = 42 pkts per ME, therefore 1000*42=42,000 cycles
	 *      42K cycles at 800Mhz = 52.5us. Round up to 60us.
	 *
	 * TODO: Account for cut-through traffic.
	 */
	usleep_range(60, 100);

	/* Verify all NBI MAC packet buffers have returned */
	for (i = 0; i < 2; i++) {
		if (!nbi[i])
			continue;

		err = nfp6000_nbi_mac_check_freebufs(nfp, nbi[i]);
		if (err < 0)
			goto exit;
	}

	/* Wait for PCIE DMA Queues to empty.
	 *
	 *  How we calculate the wait time for DMA Queues to be empty:
	 *
	 *  Max CTM buffers that could be enqueued to one island:
	 *  512 x (7 ME islands + 2 other islands) = 4608 CTM buffers
	 *
	 *  The minimum rate at which NFD would process that ring would
	 *  occur if NFD records the queues as "up" so that it DMAs the
	 *  whole packet to the host, and if the CTM buffers in the ring
	 *  are all associated with jumbo frames.
	 *
	 *  Jumbo frames are <10kB, and NFD 3.0 processes ToPCI jumbo
	 *  frames at ±35Gbps (measured on star fighter card).
	 *  35e9 / 10 x 1024 x 8 = 427kpps.
	 *
	 *  The time to empty a ring holding 4608 packets at 427kpps
	 *  is 10.79ms.
	 *
	 *  To be conservative we round up to nearest whole number, i.e. 11ms.
	 */
	mdelay(11);

	/* Check all PCIE DMA Queues are empty. */
	for (i = 0; i < 4; i++) {
		int state;
	int empty;
		unsigned int subdev = NFP6000_DEVICE_PCI(i,
					NFP6000_DEVICE_PCI_PCI);

		err = nfp_power_get(nfp, subdev, &state);
		if (err < 0) {
			if (err == -ENODEV)
				continue;
			goto exit;
		}

		if (state != NFP_DEVICE_STATE_ON)
			continue;

		err = nfp6000_check_empty_pcie_dma_queues(nfp, i, &empty);
		if (err < 0)
			goto exit;

		if (!empty) {
			nfp_err(nfp, "PCI%d DMA queues did not drain\n", i);
			err = -ETIMEDOUT;
			goto exit;
		}

		/* Set ARM PCIe Monitor to defaults */
		err = nfp6000_pcie_monitor_set(cpp, i, 0);
		if (err < 0)
			goto exit;
	}

	/* Stop all MEs */
	for (i = 0; i < 64; i++) {
		err = nfp6000_stop_me_island(nfp, i);
		if (err < 0)
			goto exit;
	}

	/* Verify again that PCIe DMA Queues are now empty */
	for (i = 0; i < 4; i++) {
		int state;
	int empty;
		unsigned int subdev = NFP6000_DEVICE_PCI(i,
					NFP6000_DEVICE_PCI_PCI);

		err = nfp_power_get(nfp, subdev, &state);
		if (err < 0) {
			if (err == -ENODEV)
				continue;
			goto exit;
		}

		if (state != NFP_DEVICE_STATE_ON)
			continue;

		err = nfp6000_check_empty_pcie_dma_queues(nfp, i, &empty);
		if (err < 0)
			goto exit;

		if (!empty) {
			nfp_err(nfp, "PCI%d DMA queue is not empty\n", i);
			err = -ETIMEDOUT;
			goto exit;
		}
	}

	/* Clear all PCIe DMA Queues */
	for (i = 0; i < 4; i++) {
		unsigned int subdev = NFP6000_DEVICE_PCI(i,
					NFP6000_DEVICE_PCI_PCI);
		int state;
		const u32 pci = NFP_CPP_ISLAND_ID(NFP_CPP_TARGET_PCIE,
						       3, 0, i + 4);

		err = nfp_power_get(nfp, subdev, &state);
		if (err < 0) {
			if (err == -ENODEV)
				continue;
			goto exit;
		}

		if (state != NFP_DEVICE_STATE_ON)
			continue;

		for (p = 0; p < 256; p++) {
			u32 q = NFP_PCIE_Q(p);

			err = nfp_cpp_writel(cpp, pci, q + NFP_QCTLR_STS_LO,
					     NFP_QCTLR_STS_LO_RPTR_ENABLE);
			if (err < 0)
				goto exit;

			err = nfp_cpp_writel(cpp, pci, q + NFP_QCTLR_STS_HI,
					     NFP_QCTLR_STS_HI_EMPTY);
			if (err < 0)
				goto exit;
		}
	}

	/* Reset MAC NBI gaskets */
	for (i = 0; i < 2; i++) {
		u32 mask = NFP_NBI_MACX_MAC_BLOCK_RST_MAC_TX_RST_MPB |
			NFP_NBI_MACX_MAC_BLOCK_RST_MAC_RX_RST_MPB |
			NFP_NBI_MACX_MAC_BLOCK_RST_MAC_TX_RST_CORE |
			NFP_NBI_MACX_MAC_BLOCK_RST_MAC_RX_RST_CORE |
			NFP_NBI_MACX_MAC_BLOCK_RST_MAC_HY0_STAT_RST |
			NFP_NBI_MACX_MAC_BLOCK_RST_MAC_HY1_STAT_RST;

		if (!nbi[i])
			continue;

		err = nfp_nbi_mac_regw(nbi[i], NFP_NBI_MACX_CSR,
				       NFP_NBI_MACX_MAC_BLOCK_RST, mask, mask);
		if (err < 0)
			goto exit;

		err = nfp_nbi_mac_regw(nbi[i], NFP_NBI_MACX_CSR,
				       NFP_NBI_MACX_MAC_BLOCK_RST, mask, 0);
		if (err < 0)
			goto exit;
	}

	/* Wait for the reset to propagate */
	usleep_range(60, 100);

	/* Verify all NBI MAC packet buffers have returned */
	for (i = 0; i < 2; i++) {
		if (!nbi[i])
			continue;

		err = nfp6000_nbi_mac_check_freebufs(nfp, nbi[i]);
		if (err < 0)
			goto exit;
	}

	/* Verify that all NBI/MAC credits have returned */
	for (i = 0; i < 2; i++) {
		if (!nbi[i])
			continue;

		err = nfp6000_nbi_check_dma_credits(nfp, nbi[i],
						    &bpe[i][0], bpes[i]);
		if (err < 0)
			goto exit;
	}

	/* Soft reset subcomponents relevant to this model */
	err = nfp6000_island_reset(nfp, nbi_mask);
	if (err < 0)
		goto exit;

	err = nfp6000_island_on(nfp, nbi_mask);
	if (err < 0)
		goto exit;

exit:
	/* No need for NBI access anymore.. */
	for (i = 0; i < 2; i++) {
		if (nbi[i])
			nfp_nbi_close(nbi[i]);
	}

	nfp_resource_release(res);

	return err;
}
Example #14
0
File: main.c  Project: krzk/linux
/* nfp_flower_spawn_vnic_reprs() - create PF/VF vNIC representors
 * @app:	NFP app handle
 * @vnic_type:	control message vNIC type for port ids
 * @repr_type:	representor type to create (PF or VF)
 * @cnt:	number of representors to create
 *
 * Allocates @cnt representor netdevs, registers them with the app and
 * notifies FW (REIFY), waiting briefly for the replies.  On error every
 * repr created so far is cleaned up.
 * Return: 0 on success, -errno on failure.
 */
static int
nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
			    enum nfp_flower_cmsg_port_vnic_type vnic_type,
			    enum nfp_repr_type repr_type, unsigned int cnt)
{
	u8 nfp_pcie = nfp_cppcore_pcie_unit(app->pf->cpp);
	struct nfp_flower_priv *priv = app->priv;
	atomic_t *replies = &priv->reify_replies;
	struct nfp_flower_repr_priv *repr_priv;
	enum nfp_port_type port_type;
	struct nfp_repr *nfp_repr;
	struct nfp_reprs *reprs;
	int i, err, reify_cnt;
	const u8 queue = 0;

	port_type = repr_type == NFP_REPR_TYPE_PF ? NFP_PORT_PF_PORT :
						    NFP_PORT_VF_PORT;

	reprs = nfp_reprs_alloc(cnt);
	if (!reprs)
		return -ENOMEM;

	for (i = 0; i < cnt; i++) {
		struct net_device *repr;
		struct nfp_port *port;
		u32 port_id;

		repr = nfp_repr_alloc(app);
		if (!repr) {
			err = -ENOMEM;
			goto err_reprs_clean;
		}

		repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
		if (!repr_priv) {
			err = -ENOMEM;
			/* repr is not yet in reprs[] - free it here */
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}

		nfp_repr = netdev_priv(repr);
		nfp_repr->app_priv = repr_priv;

		/* For now we only support 1 PF */
		WARN_ON(repr_type == NFP_REPR_TYPE_PF && i);

		port = nfp_port_alloc(app, port_type, repr);
		if (IS_ERR(port)) {
			err = PTR_ERR(port);
			/* repr_priv is only freed with the repr once
			 * nfp_repr_init() succeeded - free it manually
			 */
			kfree(repr_priv);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}
		if (repr_type == NFP_REPR_TYPE_PF) {
			port->pf_id = i;
			port->vnic = priv->nn->dp.ctrl_bar;
		} else {
			port->pf_id = 0;
			port->vf_id = i;
			port->vnic =
				app->pf->vf_cfg_mem + i * NFP_NET_CFG_BAR_SZ;
		}

		eth_hw_addr_random(repr);

		port_id = nfp_flower_cmsg_pcie_port(nfp_pcie, vnic_type,
						    i, queue);
		err = nfp_repr_init(app, repr,
				    port_id, port, priv->nn->dp.netdev);
		if (err) {
			kfree(repr_priv);
			nfp_port_free(port);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}

		RCU_INIT_POINTER(reprs->reprs[i], repr);
		nfp_info(app->cpp, "%s%d Representor(%s) created\n",
			 repr_type == NFP_REPR_TYPE_PF ? "PF" : "VF", i,
			 repr->name);
	}

	nfp_app_reprs_set(app, repr_type, reprs);

	atomic_set(replies, 0);
	reify_cnt = nfp_flower_reprs_reify(app, repr_type, true);
	if (reify_cnt < 0) {
		err = reify_cnt;
		nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n");
		goto err_reprs_remove;
	}

	err = nfp_flower_wait_repr_reify(app, replies, reify_cnt);
	if (err)
		goto err_reprs_remove;

	return 0;
err_reprs_remove:
	reprs = nfp_app_reprs_set(app, repr_type, NULL);
err_reprs_clean:
	nfp_reprs_clean_and_free(app, reprs);
	return err;
}
Example #15
0
File: main.c  Project: krzk/linux
/* nfp_flower_spawn_phy_reprs() - create physical port representors
 * @app:	NFP app handle
 * @priv:	flower app private state
 *
 * Creates one representor per entry in the PF eth table, registers them
 * and notifies FW (REIFY + MAC_REPR).  On error every repr created so
 * far is cleaned up.
 * Return: 0 on success, -errno on failure.
 */
static int
nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
{
	struct nfp_eth_table *eth_tbl = app->pf->eth_tbl;
	atomic_t *replies = &priv->reify_replies;
	struct nfp_flower_repr_priv *repr_priv;
	struct nfp_repr *nfp_repr;
	struct sk_buff *ctrl_skb;
	struct nfp_reprs *reprs;
	int err, reify_cnt;
	unsigned int i;

	ctrl_skb = nfp_flower_cmsg_mac_repr_start(app, eth_tbl->count);
	if (!ctrl_skb)
		return -ENOMEM;

	reprs = nfp_reprs_alloc(eth_tbl->max_index + 1);
	if (!reprs) {
		err = -ENOMEM;
		goto err_free_ctrl_skb;
	}

	for (i = 0; i < eth_tbl->count; i++) {
		unsigned int phys_port = eth_tbl->ports[i].index;
		struct net_device *repr;
		struct nfp_port *port;
		u32 cmsg_port_id;

		repr = nfp_repr_alloc(app);
		if (!repr) {
			err = -ENOMEM;
			goto err_reprs_clean;
		}

		repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
		if (!repr_priv) {
			err = -ENOMEM;
			/* repr is not yet in reprs[] - free it here */
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}

		nfp_repr = netdev_priv(repr);
		nfp_repr->app_priv = repr_priv;

		port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, repr);
		if (IS_ERR(port)) {
			err = PTR_ERR(port);
			/* repr_priv is only freed with the repr once
			 * nfp_repr_init() succeeded - free it manually
			 */
			kfree(repr_priv);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}
		err = nfp_port_init_phy_port(app->pf, app, port, i);
		if (err) {
			kfree(repr_priv);
			nfp_port_free(port);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}

		SET_NETDEV_DEV(repr, &priv->nn->pdev->dev);
		nfp_net_get_mac_addr(app->pf, repr, port);

		cmsg_port_id = nfp_flower_cmsg_phys_port(phys_port);
		err = nfp_repr_init(app, repr,
				    cmsg_port_id, port, priv->nn->dp.netdev);
		if (err) {
			kfree(repr_priv);
			nfp_port_free(port);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}

		nfp_flower_cmsg_mac_repr_add(ctrl_skb, i,
					     eth_tbl->ports[i].nbi,
					     eth_tbl->ports[i].base,
					     phys_port);

		RCU_INIT_POINTER(reprs->reprs[phys_port], repr);
		nfp_info(app->cpp, "Phys Port %d Representor(%s) created\n",
			 phys_port, repr->name);
	}

	nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, reprs);

	/* The REIFY/MAC_REPR control messages should be sent after the MAC
	 * representors are registered using nfp_app_reprs_set().  This is
	 * because the firmware may respond with control messages for the
	 * MAC representors, f.e. to provide the driver with information
	 * about their state, and without registration the driver will drop
	 * any such messages.
	 */
	atomic_set(replies, 0);
	reify_cnt = nfp_flower_reprs_reify(app, NFP_REPR_TYPE_PHYS_PORT, true);
	if (reify_cnt < 0) {
		err = reify_cnt;
		nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n");
		goto err_reprs_remove;
	}

	err = nfp_flower_wait_repr_reify(app, replies, reify_cnt);
	if (err)
		goto err_reprs_remove;

	nfp_ctrl_tx(app->ctrl, ctrl_skb);

	return 0;
err_reprs_remove:
	reprs = nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, NULL);
err_reprs_clean:
	nfp_reprs_clean_and_free(app, reprs);
err_free_ctrl_skb:
	kfree_skb(ctrl_skb);
	return err;
}
Example #16
0
File: main.c  Project: krzk/linux
/* nfp_flower_init() - probe-time initialization of the flower app
 * @app:	NFP app handle
 *
 * Validates the FW environment (eth table, BARs, flower version),
 * allocates and wires up the app private state, and negotiates optional
 * features (extra features symbol, LAG) with the firmware.
 * Return: 0 on success, -errno on failure.
 */
static int nfp_flower_init(struct nfp_app *app)
{
	const struct nfp_pf *pf = app->pf;
	struct nfp_flower_priv *app_priv;
	u64 version, features;
	int err;

	if (!pf->eth_tbl) {
		nfp_warn(app->cpp, "FlowerNIC requires eth table\n");
		return -EINVAL;
	}

	if (!pf->mac_stats_bar) {
		nfp_warn(app->cpp, "FlowerNIC requires mac_stats BAR\n");
		return -EINVAL;
	}

	if (!pf->vf_cfg_bar) {
		nfp_warn(app->cpp, "FlowerNIC requires vf_cfg BAR\n");
		return -EINVAL;
	}

	version = nfp_rtsym_read_le(app->pf->rtbl, "hw_flower_version", &err);
	if (err) {
		nfp_warn(app->cpp, "FlowerNIC requires hw_flower_version memory symbol\n");
		return err;
	}

	/* We need to ensure hardware has enough flower capabilities. */
	if (version != NFP_FLOWER_ALLOWED_VER) {
		nfp_warn(app->cpp, "FlowerNIC: unsupported firmware version\n");
		return -EINVAL;
	}

	/* sizeof(*ptr) keeps the allocation tied to the variable's type */
	app_priv = vzalloc(sizeof(*app_priv));
	if (!app_priv)
		return -ENOMEM;

	app->priv = app_priv;
	app_priv->app = app;
	skb_queue_head_init(&app_priv->cmsg_skbs_high);
	skb_queue_head_init(&app_priv->cmsg_skbs_low);
	INIT_WORK(&app_priv->cmsg_work, nfp_flower_cmsg_process_rx);
	init_waitqueue_head(&app_priv->reify_wait_queue);

	init_waitqueue_head(&app_priv->mtu_conf.wait_q);
	spin_lock_init(&app_priv->mtu_conf.lock);

	err = nfp_flower_metadata_init(app);
	if (err)
		goto err_free_app_priv;

	/* Extract the extra features supported by the firmware. */
	features = nfp_rtsym_read_le(app->pf->rtbl,
				     "_abi_flower_extra_features", &err);
	if (err)
		app_priv->flower_ext_feats = 0;
	else
		app_priv->flower_ext_feats = features;

	/* Tell the firmware that the driver supports lag. */
	err = nfp_rtsym_write_le(app->pf->rtbl,
				 "_abi_flower_balance_sync_enable", 1);
	if (!err) {
		app_priv->flower_ext_feats |= NFP_FL_FEATS_LAG;
		nfp_flower_lag_init(&app_priv->nfp_lag);
	} else if (err == -ENOENT) {
		/* Missing symbol just means older FW - not fatal */
		nfp_warn(app->cpp, "LAG not supported by FW.\n");
	} else {
		goto err_cleanup_metadata;
	}

	return 0;

err_cleanup_metadata:
	nfp_flower_metadata_cleanup(app);
err_free_app_priv:
	vfree(app->priv);
	return err;
}
Example #17
0
/**
 * nfp_nsp_command() - Execute a command on the NFP Service Processor
 * @nfp:	NFP Device handle
 * @code:	NSP Command Code
 * @option:	NSP command option word, passed through to the NSP
 * @buff_cpp:	CPP ID of the NSP command buffer
 * @buff_addr:	CPP address of the NSP command buffer
 *
 * Return: 0 for success with no result
 *
 *         1..255 for NSP completion with a result code
 *
 *         -EAGAIN if the NSP is not yet present
 *
 *         -ENODEV if the NSP is not a supported model
 *
 *         -EBUSY if the NSP is stuck
 *
 *         -EINTR if interrupted while waiting for completion
 *
 *         -ETIMEDOUT if the NSP took longer than 30 seconds to complete
 */
int nfp_nsp_command(struct nfp_device *nfp, uint16_t code, u32 option,
		    u32 buff_cpp, u64 buff_addr)
{
	struct nfp_cpp *cpp = nfp_device_cpp(nfp);
	struct nfp_nsp *nsp;
	u32 nsp_cpp;
	u64 nsp_base;
	u64 nsp_status;
	u64 nsp_command;
	u64 nsp_buffer;
	int err, ok;
	u64 tmp;
	int timeout = 30 * 10;	/* 30 seconds total */

	nsp = nfp_device_private(nfp, nfp_nsp_con);
	if (!nsp)
		return -EAGAIN;

	nsp_cpp = nfp_resource_cpp_id(nsp->res);
	nsp_base = nfp_resource_address(nsp->res);
	nsp_status = nsp_base + NSP_STATUS;
	nsp_command = nsp_base + NSP_COMMAND;
	nsp_buffer = nsp_base + NSP_BUFFER;

	err = nfp_cpp_readq(cpp, nsp_cpp, nsp_status, &tmp);
	if (err < 0)
		return err;

	if (NSP_MAGIC != NSP_STATUS_MAGIC_of(tmp)) {
		nfp_err(nfp, "NSP: Cannot detect NFP Service Processor\n");
		return -ENODEV;
	}

	/* ABI major must match; minor must be at least what @code needs */
	ok = NSP_STATUS_MAJOR_of(tmp) == NSP_CODE_MAJOR_of(code) &&
	     NSP_STATUS_MINOR_of(tmp) >= NSP_CODE_MINOR_of(code);
	if (!ok) {
		nfp_err(nfp, "NSP: Code 0x%04x not supported (ABI %d.%d)\n",
			code,
			(int)NSP_STATUS_MAJOR_of(tmp),
			(int)NSP_STATUS_MINOR_of(tmp));
		return -EINVAL;
	}

	if (tmp & NSP_STATUS_BUSY) {
		nfp_err(nfp, "NSP: Service processor busy!\n");
		return -EBUSY;
	}

	err = nfp_cpp_writeq(cpp, nsp_cpp, nsp_buffer,
			     NSP_BUFFER_CPP(buff_cpp) |
			     NSP_BUFFER_ADDRESS(buff_addr));
	if (err < 0)
		return err;

	err = nfp_cpp_writeq(cpp, nsp_cpp, nsp_command,
			     NSP_COMMAND_OPTION(option) |
			     NSP_COMMAND_CODE(code) | NSP_COMMAND_START);
	if (err < 0)
		return err;

	/* Wait for NSP_COMMAND_START to go to 0 */
	for (; timeout > 0; timeout--) {
		err = nfp_cpp_readq(cpp, nsp_cpp, nsp_command, &tmp);
		if (err < 0)
			return err;

		if (!(tmp & NSP_COMMAND_START))
			break;

		if (msleep_interruptible(100) > 0) {
			nfp_warn(nfp, "NSP: Interrupt waiting for code 0x%04x to start\n",
				 code);
			return -EINTR;
		}
	}

	/* The loop exits with timeout == 0 on expiry and timeout >= 1 on
	 * break, so test for exactly zero - the old "timeout < 0" check
	 * could never be true and -ETIMEDOUT was unreachable.
	 */
	if (!timeout) {
		nfp_warn(nfp, "NSP: Timeout waiting for code 0x%04x to start\n",
			 code);
		return -ETIMEDOUT;
	}

	/* Wait for NSP_STATUS_BUSY to go to 0, on the remaining budget */
	for (; timeout > 0; timeout--) {
		err = nfp_cpp_readq(cpp, nsp_cpp, nsp_status, &tmp);
		if (err < 0)
			return err;

		if (!(tmp & NSP_STATUS_BUSY))
			break;

		if (msleep_interruptible(100) > 0) {
			nfp_warn(nfp, "NSP: Interrupt waiting for code 0x%04x to complete\n",
				 code);
			return -EINTR;
		}
	}

	/* As above - zero means the wait expired without completion */
	if (!timeout) {
		nfp_warn(nfp, "NSP: Timeout waiting for code 0x%04x to complete\n",
			 code);
		return -ETIMEDOUT;
	}

	/* A non-zero NSP result code is reported as a negative value */
	err = NSP_STATUS_RESULT_of(tmp);
	if (err > 0)
		return -err;

	err = nfp_cpp_readq(cpp, nsp_cpp, nsp_command, &tmp);
	if (err < 0)
		return err;

	return NSP_COMMAND_OPTION_of(tmp);
}
Example #18
0
File: main.c  Project: Lyude/linux
/* Install or refresh a RED qdisc offload on @alink.
 *
 * When the qdisc already exists (@existing), only the queue level is
 * updated; otherwise per-queue state and stats baselines are set up.
 * On failure the offload is destroyed so the software qdisc keeps
 * working unoffloaded.
 */
static int
nfp_abm_red_replace(struct net_device *netdev, struct nfp_abm_link *alink,
		    struct tc_red_qopt_offload *opt)
{
	bool existing;
	int i, err;

	i = nfp_abm_red_find(alink, opt);
	existing = i >= 0;

	/* HW only supports min == max thresholds with ECN marking */
	if (opt->set.min != opt->set.max || !opt->set.is_ecn) {
		nfp_warn(alink->abm->app->cpp,
			 "RED offload failed - unsupported parameters\n");
		err = -EINVAL;
		goto err_destroy;
	}

	if (existing) {
		if (alink->parent == TC_H_ROOT)
			err = nfp_abm_ctrl_set_all_q_lvls(alink, opt->set.min);
		else
			err = nfp_abm_ctrl_set_q_lvl(alink, i, opt->set.min);
		if (err)
			goto err_destroy;
		return 0;
	}

	/* New offload - map the parent handle to a queue index */
	if (opt->parent == TC_H_ROOT) {
		i = 0;
		err = __nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 1,
					   opt->set.min);
	} else if (TC_H_MAJ(alink->parent) == TC_H_MAJ(opt->parent)) {
		i = TC_H_MIN(opt->parent) - 1;
		err = nfp_abm_ctrl_set_q_lvl(alink, i, opt->set.min);
	} else {
		return -EINVAL;
	}
	/* Set the handle to try full clean up, in case IO failed */
	alink->qdiscs[i].handle = opt->handle;
	if (err)
		goto err_destroy;

	/* Snapshot HW stats so later readouts are relative to this point */
	if (opt->parent == TC_H_ROOT)
		err = nfp_abm_ctrl_read_stats(alink, &alink->qdiscs[i].stats);
	else
		err = nfp_abm_ctrl_read_q_stats(alink, i,
						&alink->qdiscs[i].stats);
	if (err)
		goto err_destroy;

	if (opt->parent == TC_H_ROOT)
		err = nfp_abm_ctrl_read_xstats(alink,
					       &alink->qdiscs[i].xstats);
	else
		err = nfp_abm_ctrl_read_q_xstats(alink, i,
						 &alink->qdiscs[i].xstats);
	if (err)
		goto err_destroy;

	alink->qdiscs[i].stats.backlog_pkts = 0;
	alink->qdiscs[i].stats.backlog_bytes = 0;

	return 0;
err_destroy:
	/* If the qdisc keeps on living, but we can't offload undo changes */
	if (existing) {
		opt->set.qstats->qlen -= alink->qdiscs[i].stats.backlog_pkts;
		opt->set.qstats->backlog -=
			alink->qdiscs[i].stats.backlog_bytes;
	}
	nfp_abm_red_destroy(netdev, alink, opt->handle);

	return err;
}