Example #1
static int
nfp_bpf_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
{
	struct nfp_pf *pf = app->pf;
	struct nfp_bpf_vnic *bv;
	int err;

	if (!pf->eth_tbl) {
		nfp_err(pf->cpp, "No ETH table\n");
		return -EINVAL;
	}
	if (pf->max_data_vnics != pf->eth_tbl->count) {
		nfp_err(pf->cpp, "ETH entries don't match vNICs (%d vs %d)\n",
			pf->max_data_vnics, pf->eth_tbl->count);
		return -EINVAL;
	}

	bv = kzalloc(sizeof(*bv), GFP_KERNEL);
	if (!bv)
		return -ENOMEM;
	nn->app_priv = bv;

	err = nfp_app_nic_vnic_alloc(app, nn, id);
	if (err)
		goto err_free_priv;

	bv->start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
	bv->tgt_done = nn_readw(nn, NFP_NET_CFG_BPF_DONE);

	return 0;
err_free_priv:
	kfree(nn->app_priv);
	return err;
}
Example #2
static int
nfp_bpf_parse_cap_adjust_head(struct nfp_app_bpf *bpf, void __iomem *value,
			      u32 length)
{
	struct nfp_bpf_cap_tlv_adjust_head __iomem *cap = value;
	struct nfp_cpp *cpp = bpf->app->pf->cpp;

	if (length < sizeof(*cap)) {
		nfp_err(cpp, "truncated adjust_head TLV: %d\n", length);
		return -EINVAL;
	}

	bpf->adjust_head.flags = readl(&cap->flags);
	bpf->adjust_head.off_min = readl(&cap->off_min);
	bpf->adjust_head.off_max = readl(&cap->off_max);
	bpf->adjust_head.guaranteed_sub = readl(&cap->guaranteed_sub);
	bpf->adjust_head.guaranteed_add = readl(&cap->guaranteed_add);

	if (bpf->adjust_head.off_min > bpf->adjust_head.off_max) {
		nfp_err(cpp, "invalid adjust_head TLV: min > max\n");
		return -EINVAL;
	}
	if (!FIELD_FIT(UR_REG_IMM_MAX, bpf->adjust_head.off_min) ||
	    !FIELD_FIT(UR_REG_IMM_MAX, bpf->adjust_head.off_max)) {
		nfp_warn(cpp, "disabling adjust_head - driver expects min/max to fit in as immediates\n");
		memset(&bpf->adjust_head, 0, sizeof(bpf->adjust_head));
		return 0;
	}

	return 0;
}
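
The parser above reads five little-endian 32-bit fields straight out of device memory. Below is a layout sketch of the TLV value it assumes; the field order is taken from the readl() calls above, and the driver's real struct nfp_bpf_cap_tlv_adjust_head definition may differ in naming details.

/* Sketch only - mirrors the readl() accesses in the parser above,
 * not a verbatim copy of the driver's fw.h definition.
 */
struct nfp_bpf_cap_tlv_adjust_head_sketch {
	__le32 flags;		/* capability flags */
	__le32 off_min;		/* minimum supported adjustment offset */
	__le32 off_max;		/* maximum supported adjustment offset */
	__le32 guaranteed_sub;	/* headroom removal always available */
	__le32 guaranteed_add;	/* headroom addition always available */
};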
Example #3
/**
 * nfp_reset_soft() - Perform a soft reset of the NFP
 * @nfp:	NFP Device handle
 *
 * Return: 0, or -ERRNO
 */
int nfp_reset_soft(struct nfp_device *nfp)
{
	struct nfp_cpp *cpp = nfp_device_cpp(nfp);
	struct nfp_cpp_area *area;
	struct nfp_resource *res;
	u32 model;
	int i, err;

	model = nfp_cpp_model(cpp);

	/* Claim the nfp.nffw resource page */
	res = nfp_resource_acquire(nfp, NFP_RESOURCE_NFP_NFFW);
	if (IS_ERR(res)) {
		nfp_err(nfp, "Can't acquire %s resource\n",
			NFP_RESOURCE_NFP_NFFW);
		return -EBUSY;
	}

	if (NFP_CPP_MODEL_IS_3200(model))
		err = nfp3200_reset_soft(nfp);
	else if (NFP_CPP_MODEL_IS_6000(model))
		err = nfp6000_reset_soft(nfp);
	else
		err = -EINVAL;

	if (err < 0)
		goto exit;

	/* Clear the entire nfp.nffw resource page */
	area = nfp_cpp_area_alloc_acquire(cpp, nfp_resource_cpp_id(res),
					  nfp_resource_address(res),
					  nfp_resource_size(res));
	if (!area) {
		nfp_err(nfp, "Can't acquire area for %s resource\n",
			NFP_RESOURCE_NFP_NFFW);
		err = -ENOMEM;
		goto exit;
	}

	for (i = 0; i < nfp_resource_size(res); i += 8) {
		err = nfp_cpp_area_writeq(area, i, 0);
		if (err < 0)
			break;
	}
	nfp_cpp_area_release_free(area);

	if (err < 0) {
		nfp_err(nfp, "Can't erase area of %s resource\n",
			NFP_RESOURCE_NFP_NFFW);
		goto exit;
	}

	err = 0;

exit:
	nfp_resource_release(res);

	return err;
}
Example #4
static int bpe_lookup(struct nfp_device *nfp, int nbi, u32 *bpe, int bpe_max)
{
	int err, i;
	const struct nfp_rtsym *sym;
	u32 id, tmp;
	u32 __iomem *ptr;
	struct nfp_cpp_area *area;
	char buff[] = "nbi0_dma_bpe_credits";

	buff[3] += nbi;

	sym = nfp_rtsym_lookup(nfp, buff);
	if (!sym) {
		nfp_info(nfp, "%s: Symbol not present\n", buff);
		return 0;
	}

	id = NFP_CPP_ISLAND_ID(sym->target, NFP_CPP_ACTION_RW, 0, sym->domain);
	area = nfp_cpp_area_alloc_acquire(nfp_device_cpp(nfp), id, sym->addr,
					  sym->size);
	if (IS_ERR_OR_NULL(area)) {
		nfp_err(nfp, "%s: Can't acquire area\n", buff);
		return area ? PTR_ERR(area) : -ENOMEM;
	}

	ptr = nfp_cpp_area_iomem(area);
	if (IS_ERR_OR_NULL(ptr)) {
		nfp_err(nfp, "%s: Can't map area\n", buff);
		err = ptr ? PTR_ERR(ptr) : -ENOMEM;
		goto exit;
	}

	tmp = readl(ptr++);
	if (!BPECFG_MAGIC_CHECK(tmp)) {
		nfp_err(nfp, "%s: Magic value (0x%08x) unrecognized\n",
			buff, tmp);
		err = -EINVAL;
		goto exit;
	}

	if (BPECFG_MAGIC_COUNT(tmp) > bpe_max) {
		nfp_err(nfp, "%s: Magic count (%d) too large (> %d)\n",
			buff, BPECFG_MAGIC_COUNT(tmp), bpe_max);
		err = -EINVAL;
		goto exit;
	}

	for (i = 0; i < bpe_max; i++)
		bpe[i] = readl(ptr++);

	err = BPECFG_MAGIC_COUNT(tmp);

exit:
	nfp_cpp_area_release_free(area);
	return err;
}
Example #5
struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id)
{
	struct nfp_app *app;

	if (id >= ARRAY_SIZE(apps) || !apps[id]) {
		nfp_err(pf->cpp, "unknown FW app ID 0x%02hhx, driver too old or support for FW not built in\n", id);
		return ERR_PTR(-EINVAL);
	}

	if (WARN_ON(!apps[id]->name || !apps[id]->vnic_alloc))
		return ERR_PTR(-EINVAL);
	if (WARN_ON(!apps[id]->ctrl_msg_rx && apps[id]->ctrl_msg_rx_raw))
		return ERR_PTR(-EINVAL);

	app = kzalloc(sizeof(*app), GFP_KERNEL);
	if (!app)
		return ERR_PTR(-ENOMEM);

	app->pf = pf;
	app->cpp = pf->cpp;
	app->pdev = pf->pdev;
	app->type = apps[id];

	return app;
}
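
nfp_app_alloc() returns an ERR_PTR() on failure rather than NULL, so callers must test the result with IS_ERR(). A minimal caller sketch follows, assuming the pf keeps the handle in pf->app; the example_setup_app() wrapper is illustrative only.

/* Sketch only: typical ERR_PTR() handling for the allocator above. */
static int example_setup_app(struct nfp_pf *pf, enum nfp_app_id id)
{
	struct nfp_app *app;

	app = nfp_app_alloc(pf, id);
	if (IS_ERR(app))
		return PTR_ERR(app);	/* -EINVAL or -ENOMEM from above */

	pf->app = app;
	return 0;
}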
Example #6
static int
nfp_bpf_parse_cap_func(struct nfp_app_bpf *bpf, void __iomem *value, u32 length)
{
	struct nfp_bpf_cap_tlv_func __iomem *cap = value;

	if (length < sizeof(*cap)) {
		nfp_err(bpf->app->cpp, "truncated function TLV: %d\n", length);
		return -EINVAL;
	}

	switch (readl(&cap->func_id)) {
	case BPF_FUNC_map_lookup_elem:
		bpf->helpers.map_lookup = readl(&cap->func_addr);
		break;
	case BPF_FUNC_map_update_elem:
		bpf->helpers.map_update = readl(&cap->func_addr);
		break;
	case BPF_FUNC_map_delete_elem:
		bpf->helpers.map_delete = readl(&cap->func_addr);
		break;
	case BPF_FUNC_perf_event_output:
		bpf->helpers.perf_event_output = readl(&cap->func_addr);
		break;
	}

	return 0;
}
Example #7
static struct nfp_hwinfo *
hwinfo_try_fetch(struct nfp_cpp *cpp, size_t *cpp_size)
{
	struct nfp_hwinfo *header;
	struct nfp_resource *res;
	u64 cpp_addr;
	u32 cpp_id;
	int err;
	u8 *db;

	res = nfp_resource_acquire(cpp, NFP_RESOURCE_NFP_HWINFO);
	if (!IS_ERR(res)) {
		cpp_id = nfp_resource_cpp_id(res);
		cpp_addr = nfp_resource_address(res);
		*cpp_size = nfp_resource_size(res);

		nfp_resource_release(res);

		if (*cpp_size < HWINFO_SIZE_MIN)
			return NULL;
	} else if (PTR_ERR(res) == -ENOENT) {
		/* Try getting the HWInfo table from the 'classic' location */
		cpp_id = NFP_CPP_ISLAND_ID(NFP_CPP_TARGET_MU,
					   NFP_CPP_ACTION_RW, 0, 1);
		cpp_addr = 0x30000;
		*cpp_size = 0x0e000;
	} else {
		return NULL;
	}

	db = kmalloc(*cpp_size + 1, GFP_KERNEL);
	if (!db)
		return NULL;

	err = nfp_cpp_read(cpp, cpp_id, cpp_addr, db, *cpp_size);
	if (err != *cpp_size)
		goto exit_free;

	header = (void *)db;
	if (nfp_hwinfo_is_updating(header))
		goto exit_free;

	if (le32_to_cpu(header->version) != NFP_HWINFO_VERSION_2) {
		nfp_err(cpp, "Unknown HWInfo version: 0x%08x\n",
			le32_to_cpu(header->version));
		goto exit_free;
	}

	/* NULL-terminate for safety */
	db[*cpp_size] = '\0';

	return (void *)db;
exit_free:
	kfree(db);
	return NULL;
}
Example #8
static int
hwinfo_db_validate(struct nfp_cpp *cpp, struct nfp_hwinfo *db, u32 len)
{
	u32 size, crc;

	size = le32_to_cpu(db->size);
	if (size > len) {
		nfp_err(cpp, "Unsupported hwinfo size %u > %u\n", size, len);
		return -EINVAL;
	}

	size -= sizeof(u32);
	crc = crc32_posix(db, size);
	if (crc != get_unaligned_le32(db->start + size)) {
		nfp_err(cpp, "Corrupt hwinfo table (CRC mismatch), calculated 0x%x, expected 0x%x\n",
			crc, get_unaligned_le32(db->start + size));

		return -EINVAL;
	}

	return hwinfo_db_walk(cpp, db, size);
}
Example #9
static int nfp6000_stop_me(struct nfp_device *nfp, int island, int menum)
{
	int err;
	struct nfp_cpp *cpp = nfp_device_cpp(nfp);
	u32 tmp;
	u32 me_r = NFP_CPP_ID(NFP_CPP_TARGET_CT_XPB, 2, 1);
	u32 me_w = NFP_CPP_ID(NFP_CPP_TARGET_CT_XPB, 3, 1);
	u64 mecsr = (island << 24) | NFP_CT_ME(menum);

	err = nfp_cpp_readl(cpp, me_r, mecsr + NFP_ME_CTXENABLES, &tmp);
	if (err < 0)
		return err;

	tmp &= ~(NFP_ME_CTXENABLES_INUSECONTEXTS |
		 NFP_ME_CTXENABLES_CTXENABLES(0xff));
	tmp &= ~NFP_ME_CTXENABLES_CSECCERROR;
	tmp &= ~NFP_ME_CTXENABLES_BREAKPOINT;
	tmp &= ~NFP_ME_CTXENABLES_REGISTERPARITYERR;

	err = nfp_cpp_writel(cpp, me_w, mecsr + NFP_ME_CTXENABLES, tmp);
	if (err < 0)
		return err;

	mdelay(1);

	/* This may seem like a rushed test, but during the 1 ms delay above
	 * the ME has executed well over a thousand instructions, and even
	 * more during the time it took the host to execute this code and for
	 * the CPP command of the test read below to reach the CSR.
	 *
	 * If one of those instructions did not swap out, the code is a very
	 * inefficient single-threaded sequence of instructions, which would
	 * be very rare or very specific.
	 */

	err = nfp_cpp_readl(cpp, me_r, mecsr + NFP_ME_ACTCTXSTATUS, &tmp);
	if (err < 0)
		return err;

	if (tmp & NFP_ME_ACTCTXSTATUS_AB0) {
		nfp_err(nfp, "ME%d.%d did not stop after 1000us\n",
			island, menum);
		return -EIO;
	}

	return 0;
}
Example #10
File: main.c Project: krzk/linux
static int
nfp_flower_repr_change_mtu(struct nfp_app *app, struct net_device *netdev,
			   int new_mtu)
{
	struct nfp_flower_priv *app_priv = app->priv;
	struct nfp_repr *repr = netdev_priv(netdev);
	int err, ack;

	/* Only need to config FW for physical port MTU change. */
	if (repr->port->type != NFP_PORT_PHYS_PORT)
		return 0;

	if (!(app_priv->flower_ext_feats & NFP_FL_NBI_MTU_SETTING)) {
		nfp_err(app->cpp, "Physical port MTU setting not supported\n");
		return -EINVAL;
	}

	spin_lock_bh(&app_priv->mtu_conf.lock);
	app_priv->mtu_conf.ack = false;
	app_priv->mtu_conf.requested_val = new_mtu;
	app_priv->mtu_conf.portnum = repr->dst->u.port_info.port_id;
	spin_unlock_bh(&app_priv->mtu_conf.lock);

	err = nfp_flower_cmsg_portmod(repr, netif_carrier_ok(netdev), new_mtu,
				      true);
	if (err) {
		spin_lock_bh(&app_priv->mtu_conf.lock);
		app_priv->mtu_conf.requested_val = 0;
		spin_unlock_bh(&app_priv->mtu_conf.lock);
		return err;
	}

	/* Wait for fw to ack the change. */
	ack = wait_event_timeout(app_priv->mtu_conf.wait_q,
				 nfp_flower_check_ack(app_priv),
				 msecs_to_jiffies(10));

	if (!ack) {
		spin_lock_bh(&app_priv->mtu_conf.lock);
		app_priv->mtu_conf.requested_val = 0;
		spin_unlock_bh(&app_priv->mtu_conf.lock);
		nfp_warn(app->cpp, "MTU change not verified with fw\n");
		return -EIO;
	}

	return 0;
}
Example #11
/**
 * nfp_rtsym_read_le() - Read a simple unsigned scalar value from symbol
 * @cpp:	NFP CPP handle
 * @name:	Symbol name
 * @error:	Pointer to error code (optional)
 *
 * Look up a symbol, map it, read it and return its value. The value of the
 * symbol will be interpreted as a simple little-endian unsigned value. The
 * symbol can be 4 or 8 bytes in size.
 *
 * Return: value read, on error sets the error and returns ~0ULL.
 */
u64 nfp_rtsym_read_le(struct nfp_cpp *cpp, const char *name, int *error)
{
	const struct nfp_rtsym *sym;
	u32 val32, id;
	u64 val;
	int err;

	sym = nfp_rtsym_lookup(cpp, name);
	if (!sym) {
		err = -ENOENT;
		goto exit;
	}

	id = NFP_CPP_ISLAND_ID(sym->target, NFP_CPP_ACTION_RW, 0, sym->domain);

	switch (sym->size) {
	case 4:
		err = nfp_cpp_readl(cpp, id, sym->addr, &val32);
		val = val32;
		break;
	case 8:
		err = nfp_cpp_readq(cpp, id, sym->addr, &val);
		break;
	default:
		nfp_err(cpp,
			"rtsym '%s' unsupported or non-scalar size: %lld\n",
			name, sym->size);
		err = -EINVAL;
		break;
	}

	if (err == sym->size)
		err = 0;
	else if (err >= 0)
		err = -EIO;
exit:
	if (error)
		*error = err;

	if (err)
		return ~0ULL;
	return val;
}
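
A minimal caller sketch for the API documented above; the symbol name "_example_counter" and the example_read_counter() wrapper are purely illustrative, and the error-pointer handling mirrors Example #12 below.

/* Sketch: read an optional scalar firmware symbol, defaulting to 0.
 * "_example_counter" is a made-up symbol name for illustration only.
 */
static u64 example_read_counter(struct nfp_cpp *cpp)
{
	u64 val;
	int err;

	val = nfp_rtsym_read_le(cpp, "_example_counter", &err);
	if (err) {
		if (err != -ENOENT)
			nfp_err(cpp, "reading _example_counter failed: %d\n",
				err);
		return 0;
	}

	return val;
}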
Example #12
int nfp_pf_rtsym_read_optional(struct nfp_pf *pf, const char *format,
			       unsigned int default_val)
{
	char name[256];
	int err = 0;
	u64 val;

	snprintf(name, sizeof(name), format, nfp_cppcore_pcie_unit(pf->cpp));

	val = nfp_rtsym_read_le(pf->rtbl, name, &err);
	if (err) {
		if (err == -ENOENT)
			return default_val;
		nfp_err(pf->cpp, "Unable to read symbol %s\n", name);
		return err;
	}

	return val;
}
Example #13
static void nfp_fw_unload(struct nfp_pf *pf)
{
	struct nfp_nsp *nsp;
	int err;

	nsp = nfp_nsp_open(pf->cpp);
	if (IS_ERR(nsp)) {
		nfp_err(pf->cpp, "Reset failed, can't open NSP\n");
		return;
	}

	err = nfp_nsp_device_soft_reset(nsp);
	if (err < 0)
		dev_warn(&pf->pdev->dev, "Couldn't unload firmware: %d\n", err);
	else
		dev_info(&pf->pdev->dev, "Firmware safely unloaded\n");

	nfp_nsp_close(nsp);
}
Example #14
static int
nfp_bpf_parse_cap_abi_version(struct nfp_app_bpf *bpf, void __iomem *value,
			      u32 length)
{
	if (length < 4) {
		nfp_err(bpf->app->cpp, "truncated ABI version TLV: %d\n",
			length);
		return -EINVAL;
	}

	bpf->abi_version = readl(value);
	if (bpf->abi_version < 2 || bpf->abi_version > 3) {
		nfp_warn(bpf->app->cpp, "unsupported BPF ABI version: %d\n",
			 bpf->abi_version);
		bpf->abi_version = 0;
	}

	return 0;
}
Example #15
static int
nfp_bpf_parse_cap_maps(struct nfp_app_bpf *bpf, void __iomem *value, u32 length)
{
	struct nfp_bpf_cap_tlv_maps __iomem *cap = value;

	if (length < sizeof(*cap)) {
		nfp_err(bpf->app->cpp, "truncated maps TLV: %d\n", length);
		return -EINVAL;
	}

	bpf->maps.types = readl(&cap->types);
	bpf->maps.max_maps = readl(&cap->max_maps);
	bpf->maps.max_elems = readl(&cap->max_elems);
	bpf->maps.max_key_sz = readl(&cap->max_key_sz);
	bpf->maps.max_val_sz = readl(&cap->max_val_sz);
	bpf->maps.max_elem_sz = readl(&cap->max_elem_sz);

	return 0;
}
Example #16
static struct nfp_hwinfo *hwinfo_fetch(struct nfp_cpp *cpp, size_t *hwdb_size)
{
	const unsigned long wait_until = jiffies + HWINFO_WAIT * HZ;
	struct nfp_hwinfo *db;
	int err;

	for (;;) {
		const unsigned long start_time = jiffies;

		db = hwinfo_try_fetch(cpp, hwdb_size);
		if (db)
			return db;

		err = msleep_interruptible(100);
		if (err || time_after(start_time, wait_until)) {
			nfp_err(cpp, "NFP access error\n");
			return NULL;
		}
	}
}
Example #17
struct nfp_nsp_identify *__nfp_nsp_identify(struct nfp_nsp *nsp)
{
	struct nfp_nsp_identify *nspi = NULL;
	struct nsp_identify *ni;
	int ret;

	if (nfp_nsp_get_abi_ver_minor(nsp) < 15)
		return NULL;

	ni = kzalloc(sizeof(*ni), GFP_KERNEL);
	if (!ni)
		return NULL;

	ret = nfp_nsp_read_identify(nsp, ni, sizeof(*ni));
	if (ret < 0) {
		nfp_err(nfp_nsp_cpp(nsp), "reading bsp version failed %d\n",
			ret);
		goto exit_free;
	}

	nspi = kzalloc(sizeof(*nspi), GFP_KERNEL);
	if (!nspi)
		goto exit_free;

	memcpy(nspi->version, ni->version, sizeof(nspi->version));
	nspi->version[sizeof(nspi->version) - 1] = '\0';
	nspi->flags = ni->flags;
	nspi->br_primary = ni->br_primary;
	nspi->br_secondary = ni->br_secondary;
	nspi->br_nsp = ni->br_nsp;
	nspi->primary = le16_to_cpu(ni->primary);
	nspi->secondary = le16_to_cpu(ni->secondary);
	nspi->nsp = le16_to_cpu(ni->nsp);
	nspi->sensor_mask = le64_to_cpu(ni->sensor_mask);

exit_free:
	kfree(ni);
	return nspi;
}
Example #18
/**
 * nfp_cpp_mutex_lock() - Lock a mutex handle, using the NFP MU Atomic Engine
 * @mutex:	NFP CPP Mutex handle
 *
 * Return: 0 on success, or -errno on failure
 */
int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex)
{
	unsigned long warn_at = jiffies + NFP_MUTEX_WAIT_FIRST_WARN * HZ;
	unsigned long err_at = jiffies + NFP_MUTEX_WAIT_ERROR * HZ;
	unsigned int timeout_ms = 1;
	int err;

	/* We can't use a waitqueue here, because the unlocker
	 * might be on a separate CPU.
	 *
	 * So just wait for now.
	 */
	for (;;) {
		err = nfp_cpp_mutex_trylock(mutex);
		if (err != -EBUSY)
			break;

		err = msleep_interruptible(timeout_ms);
		if (err != 0) {
			nfp_info(mutex->cpp,
				 "interrupted waiting for NFP mutex\n");
			return -ERESTARTSYS;
		}

		if (time_is_before_eq_jiffies(warn_at)) {
			warn_at = jiffies + NFP_MUTEX_WAIT_NEXT_WARN * HZ;
			nfp_warn(mutex->cpp,
				 "Warning: waiting for NFP mutex [depth:%hd target:%d addr:%llx key:%08x]\n",
				 mutex->depth,
				 mutex->target, mutex->address, mutex->key);
		}
		if (time_is_before_eq_jiffies(err_at)) {
			nfp_err(mutex->cpp, "Error: mutex wait timed out\n");
			return -EBUSY;
		}
	}

	return err;
}
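
A usage sketch for the lock above, assuming the companion nfp_cpp_mutex_alloc(), nfp_cpp_mutex_unlock() and nfp_cpp_mutex_free() helpers exist alongside nfp_cpp_mutex_trylock(); the target/address/key triple is a placeholder.

/* Sketch: serialize a host<->firmware critical section with a CPP mutex.
 * The MU target, address 0x1000 and key 0xcafe below are illustrative only.
 */
static int example_locked_op(struct nfp_cpp *cpp)
{
	struct nfp_cpp_mutex *mutex;
	int err;

	mutex = nfp_cpp_mutex_alloc(cpp, NFP_CPP_TARGET_MU, 0x1000, 0xcafe);
	if (!mutex)
		return -ENOMEM;

	err = nfp_cpp_mutex_lock(mutex);	/* polls as shown above */
	if (!err) {
		/* ... touch the shared NFP state here ... */
		nfp_cpp_mutex_unlock(mutex);
	}

	nfp_cpp_mutex_free(mutex);
	return err;
}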
Example #19
/* Read memory and check if it could be a valid MIP */
static int
nfp_mip_try_read(struct nfp_cpp *cpp, u32 cpp_id, u64 addr, struct nfp_mip *mip)
{
	int ret;

	ret = nfp_cpp_read(cpp, cpp_id, addr, mip, sizeof(*mip));
	if (ret != sizeof(*mip)) {
		nfp_err(cpp, "Failed to read MIP data (%d, %zu)\n",
			ret, sizeof(*mip));
		return -EIO;
	}
	if (mip->signature != NFP_MIP_SIGNATURE) {
		nfp_warn(cpp, "Incorrect MIP signature (0x%08x)\n",
			 le32_to_cpu(mip->signature));
		return -EINVAL;
	}
	if (mip->mip_version != NFP_MIP_VERSION) {
		nfp_warn(cpp, "Unsupported MIP version (%d)\n",
			 le32_to_cpu(mip->mip_version));
		return -EINVAL;
	}

	return 0;
}
Example #20
/* Perform a soft reset of the NFP6000:
 *   - Disable traffic ingress
 *   - Verify all NBI MAC packet buffers have returned
 *   - Wait for PCIE DMA Queues to empty
 *   - Stop all MEs
 *   - Clear all PCIe DMA Queues
 *   - Reset MAC NBI gaskets
 *   - Verify that all NBI/MAC buffers/credits have returned
 *   - Soft reset subcomponents relevant to this model
 *     - TODO: Crypto reset
 */
static int nfp6000_reset_soft(struct nfp_device *nfp)
{
	struct nfp_cpp *cpp = nfp_device_cpp(nfp);
	struct nfp_nbi_dev *nbi[2] = {};
	struct nfp_resource *res;
	int mac_enable[2];
	int i, p, err, nbi_mask = 0;
	u32 bpe[2][32];
	int bpes[2];

	/* Lock out the MAC from any stats updaters,
	 * such as the NSP
	 */
	res = nfp_resource_acquire(nfp, NFP_RESOURCE_MAC_STATISTICS);
	if (IS_ERR_OR_NULL(res))
		return -EBUSY;

	for (i = 0; i < 2; i++) {
		u32 tmp;
		int state;

		err = nfp_power_get(nfp, NFP6000_DEVICE_NBI(i, 0), &state);
		if (err < 0) {
			if (err == -ENODEV) {
				nbi[i] = NULL;
				continue;
			}
			goto exit;
		}

		if (state != NFP_DEVICE_STATE_ON) {
			nbi[i] = NULL;
			continue;
		}

		nbi[i] = nfp_nbi_open(nfp, i);
		if (!nbi[i])
			continue;

		nbi_mask |= BIT(i);

		err = nfp_nbi_mac_regr(nbi[i], NFP_NBI_MACX_CSR,
				       NFP_NBI_MACX_MAC_BLOCK_RST,
				       &tmp);
		if (err < 0)
			goto exit;

		mac_enable[i] = 0;
		if (!(tmp & NFP_NBI_MACX_MAC_BLOCK_RST_MAC_HY0_STAT_RST))
			mac_enable[i] |= BIT(0);
		if (!(tmp & NFP_NBI_MACX_MAC_BLOCK_RST_MAC_HY1_STAT_RST))
			mac_enable[i] |= BIT(1);

		/* No MACs at all? Then we don't care. */
		if (mac_enable[i] == 0) {
			nfp_nbi_close(nbi[i]);
			nbi[i] = NULL;
			continue;
		}

		/* Make sure we have the BPE list */
		err = bpe_lookup(nfp, i, &bpe[i][0], ARRAY_SIZE(bpe[i]));
		if (err < 0)
			goto exit;

		bpes[i] = err;
	}

	/* Verify that traffic ingress is disabled */
	for (i = 0; i < 2; i++) {
		if (!nbi[i])
			continue;

		for (p = 0; p < 24; p++) {
			u32 r, mask, tmp;

			mask =  NFP_NBI_MACX_ETH_SEG_CMD_CONFIG_ETH_RX_ENA;
			r =  NFP_NBI_MACX_ETH_SEG_CMD_CONFIG(p % 12);

			err = nfp_nbi_mac_regr(nbi[i],
					       NFP_NBI_MACX_ETH(p / 12),
					       r, &tmp);
			if (err < 0) {
				nfp_err(nfp, "Can't verify RX is disabled for port %d.%d\n",
					i, p);
				goto exit;
			}

			if (tmp & mask) {
				nfp_warn(nfp, "HAZARD: RX for traffic was not disabled by firmware for port %d.%d\n",
					 i, p);
			}

			err = nfp_nbi_mac_regw(nbi[i], NFP_NBI_MACX_ETH(p / 12),
					       r, mask, 0);
			if (err < 0) {
				nfp_err(nfp, "Can't disable RX traffic for port %d.%d\n",
					i, p);
				goto exit;
			}
		}
	}

	/* Wait for packets to drain from NBI to NFD or to be freed.
	 * Worst case guess is:
	 *      512 pkts per CTM, 12 MEs per CTM, 800MHz clock rate
	 *      ~1000 cycles to sink a single packet.
	 *      512/12 = 42 pkts per ME, therefore 1000*42=42,000 cycles
	 *      42K cycles at 800MHz = 52.5us. Round up to 60us.
	 *
	 * TODO: Account for cut-through traffic.
	 */
	usleep_range(60, 100);

	/* Verify all NBI MAC packet buffers have returned */
	for (i = 0; i < 2; i++) {
		if (!nbi[i])
			continue;

		err = nfp6000_nbi_mac_check_freebufs(nfp, nbi[i]);
		if (err < 0)
			goto exit;
	}

	/* Wait for PCIE DMA Queues to empty.
	 *
	 *  How we calculate the wait time for DMA Queues to be empty:
	 *
	 *  Max CTM buffers that could be enqueued to one island:
	 *  512 x (7 ME islands + 2 other islands) = 4608 CTM buffers
	 *
	 *  The minimum rate at which NFD would process that ring would
	 *  occur if NFD records the queues as "up" so that it DMAs the
	 *  whole packet to the host, and if the CTM buffers in the ring
	 *  are all associated with jumbo frames.
	 *
	 *  Jumbo frames are <10kB, and NFD 3.0 processes ToPCI jumbo
	 *  frames at ±35Gbps (measured on star fighter card).
	 *  35e9 / 10 x 1024 x 8 = 427kpps.
	 *
	 *  The time to empty a ring holding 4608 packets at 427kpps
	 *  is 10.79ms.
	 *
	 *  To be conservative we round up to nearest whole number, i.e. 11ms.
	 */
	mdelay(11);

	/* Check all PCIE DMA Queues are empty. */
	for (i = 0; i < 4; i++) {
		int state;
		int empty;
		unsigned int subdev = NFP6000_DEVICE_PCI(i,
					NFP6000_DEVICE_PCI_PCI);

		err = nfp_power_get(nfp, subdev, &state);
		if (err < 0) {
			if (err == -ENODEV)
				continue;
			goto exit;
		}

		if (state != NFP_DEVICE_STATE_ON)
			continue;

		err = nfp6000_check_empty_pcie_dma_queues(nfp, i, &empty);
		if (err < 0)
			goto exit;

		if (!empty) {
			nfp_err(nfp, "PCI%d DMA queues did not drain\n", i);
			err = -ETIMEDOUT;
			goto exit;
		}

		/* Set ARM PCIe Monitor to defaults */
		err = nfp6000_pcie_monitor_set(cpp, i, 0);
		if (err < 0)
			goto exit;
	}

	/* Stop all MEs */
	for (i = 0; i < 64; i++) {
		err = nfp6000_stop_me_island(nfp, i);
		if (err < 0)
			goto exit;
	}

	/* Verify again that PCIe DMA Queues are now empty */
	for (i = 0; i < 4; i++) {
		int state;
		int empty;
		unsigned int subdev = NFP6000_DEVICE_PCI(i,
					NFP6000_DEVICE_PCI_PCI);

		err = nfp_power_get(nfp, subdev, &state);
		if (err < 0) {
			if (err == -ENODEV)
				continue;
			goto exit;
		}

		if (state != NFP_DEVICE_STATE_ON)
			continue;

		err = nfp6000_check_empty_pcie_dma_queues(nfp, i, &empty);
		if (err < 0)
			goto exit;

		if (!empty) {
			nfp_err(nfp, "PCI%d DMA queue is not empty\n", i);
			err = -ETIMEDOUT;
			goto exit;
		}
	}

	/* Clear all PCIe DMA Queues */
	for (i = 0; i < 4; i++) {
		unsigned int subdev = NFP6000_DEVICE_PCI(i,
					NFP6000_DEVICE_PCI_PCI);
		int state;
		const u32 pci = NFP_CPP_ISLAND_ID(NFP_CPP_TARGET_PCIE,
						       3, 0, i + 4);

		err = nfp_power_get(nfp, subdev, &state);
		if (err < 0) {
			if (err == -ENODEV)
				continue;
			goto exit;
		}

		if (state != NFP_DEVICE_STATE_ON)
			continue;

		for (p = 0; p < 256; p++) {
			u32 q = NFP_PCIE_Q(p);

			err = nfp_cpp_writel(cpp, pci, q + NFP_QCTLR_STS_LO,
					     NFP_QCTLR_STS_LO_RPTR_ENABLE);
			if (err < 0)
				goto exit;

			err = nfp_cpp_writel(cpp, pci, q + NFP_QCTLR_STS_HI,
					     NFP_QCTLR_STS_HI_EMPTY);
			if (err < 0)
				goto exit;
		}
	}

	/* Reset MAC NBI gaskets */
	for (i = 0; i < 2; i++) {
		u32 mask = NFP_NBI_MACX_MAC_BLOCK_RST_MAC_TX_RST_MPB |
			NFP_NBI_MACX_MAC_BLOCK_RST_MAC_RX_RST_MPB |
			NFP_NBI_MACX_MAC_BLOCK_RST_MAC_TX_RST_CORE |
			NFP_NBI_MACX_MAC_BLOCK_RST_MAC_RX_RST_CORE |
			NFP_NBI_MACX_MAC_BLOCK_RST_MAC_HY0_STAT_RST |
			NFP_NBI_MACX_MAC_BLOCK_RST_MAC_HY1_STAT_RST;

		if (!nbi[i])
			continue;

		err = nfp_nbi_mac_regw(nbi[i], NFP_NBI_MACX_CSR,
				       NFP_NBI_MACX_MAC_BLOCK_RST, mask, mask);
		if (err < 0)
			goto exit;

		err = nfp_nbi_mac_regw(nbi[i], NFP_NBI_MACX_CSR,
				       NFP_NBI_MACX_MAC_BLOCK_RST, mask, 0);
		if (err < 0)
			goto exit;
	}

	/* Wait for the reset to propagate */
	usleep_range(60, 100);

	/* Verify all NBI MAC packet buffers have returned */
	for (i = 0; i < 2; i++) {
		if (!nbi[i])
			continue;

		err = nfp6000_nbi_mac_check_freebufs(nfp, nbi[i]);
		if (err < 0)
			goto exit;
	}

	/* Verify that all NBI/MAC credits have returned */
	for (i = 0; i < 2; i++) {
		if (!nbi[i])
			continue;

		err = nfp6000_nbi_check_dma_credits(nfp, nbi[i],
						    &bpe[i][0], bpes[i]);
		if (err < 0)
			goto exit;
	}

	/* Soft reset subcomponents relevant to this model */
	err = nfp6000_island_reset(nfp, nbi_mask);
	if (err < 0)
		goto exit;

	err = nfp6000_island_on(nfp, nbi_mask);
	if (err < 0)
		goto exit;

exit:
	/* No need for NBI access anymore.. */
	for (i = 0; i < 2; i++) {
		if (nbi[i])
			nfp_nbi_close(nbi[i]);
	}

	nfp_resource_release(res);

	return err;
}
Example #21
static int nfp_bpf_parse_capabilities(struct nfp_app *app)
{
	struct nfp_cpp *cpp = app->pf->cpp;
	struct nfp_cpp_area *area;
	u8 __iomem *mem, *start;

	mem = nfp_rtsym_map(app->pf->rtbl, "_abi_bpf_capabilities", "bpf.cap",
			    8, &area);
	if (IS_ERR(mem))
		return PTR_ERR(mem) == -ENOENT ? 0 : PTR_ERR(mem);

	start = mem;
	while (mem - start + 8 <= nfp_cpp_area_size(area)) {
		u8 __iomem *value;
		u32 type, length;

		type = readl(mem);
		length = readl(mem + 4);
		value = mem + 8;

		mem += 8 + length;
		if (mem - start > nfp_cpp_area_size(area))
			goto err_release_free;

		switch (type) {
		case NFP_BPF_CAP_TYPE_FUNC:
			if (nfp_bpf_parse_cap_func(app->priv, value, length))
				goto err_release_free;
			break;
		case NFP_BPF_CAP_TYPE_ADJUST_HEAD:
			if (nfp_bpf_parse_cap_adjust_head(app->priv, value,
							  length))
				goto err_release_free;
			break;
		case NFP_BPF_CAP_TYPE_MAPS:
			if (nfp_bpf_parse_cap_maps(app->priv, value, length))
				goto err_release_free;
			break;
		case NFP_BPF_CAP_TYPE_RANDOM:
			if (nfp_bpf_parse_cap_random(app->priv, value, length))
				goto err_release_free;
			break;
		case NFP_BPF_CAP_TYPE_QUEUE_SELECT:
			if (nfp_bpf_parse_cap_qsel(app->priv, value, length))
				goto err_release_free;
			break;
		case NFP_BPF_CAP_TYPE_ADJUST_TAIL:
			if (nfp_bpf_parse_cap_adjust_tail(app->priv, value,
							  length))
				goto err_release_free;
			break;
		case NFP_BPF_CAP_TYPE_ABI_VERSION:
			if (nfp_bpf_parse_cap_abi_version(app->priv, value,
							  length))
				goto err_release_free;
			break;
		default:
			nfp_dbg(cpp, "unknown BPF capability: %d\n", type);
			break;
		}
	}
	if (mem - start != nfp_cpp_area_size(area)) {
		nfp_err(cpp, "BPF capabilities left after parsing, parsed:%zd total length:%zu\n",
			mem - start, nfp_cpp_area_size(area));
		goto err_release_free;
	}

	nfp_cpp_area_release_free(area);

	return 0;

err_release_free:
	nfp_err(cpp, "invalid BPF capabilities at offset:%zd\n", mem - start);
	nfp_cpp_area_release_free(area);
	return -EINVAL;
}
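
The walker above expects a flat stream of records, each starting with an 8-byte header of two little-endian 32-bit words (type, then length) followed by length bytes of value data. Below is a layout sketch of one record; it is illustrative and not the driver's actual header definition.

/* Sketch of one record in the "_abi_bpf_capabilities" area as read by
 * the loop above: an 8-byte header followed by the value bytes.
 */
struct nfp_bpf_cap_tlv_hdr_sketch {
	__le32 type;	/* one of the NFP_BPF_CAP_TYPE_* values */
	__le32 length;	/* size of the value that follows, in bytes */
	/* value[length] follows and is handed to the type-specific parser */
};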
Example #22
static int nfp6000_nbi_mac_check_freebufs(struct nfp_device *nfp,
					  struct nfp_nbi_dev *nbi)
{
	u32 tmp;
	int err, ok, split;
	const int timeout_ms = 500;
	struct timespec ts, timeout = {
		.tv_sec = 0,
		.tv_nsec = timeout_ms * 1000 * 1000,
	};
	const int igsplit = 1007;
	const int egsplit = 495;

	err = nfp_nbi_mac_regr(nbi, NFP_NBI_MACX_CSR,
			       NFP_NBI_MACX_MAC_SYS_SUPPORT_CTRL, &tmp);
	if (err < 0)
		return err;

	split = tmp & NFP_NBI_MACX_MAC_SYS_SUPPORT_CTRL_SPLIT_MEM_IG;

	ts = CURRENT_TIME;
	timeout = timespec_add(ts, timeout);

	ok = 1;
	do {
		int igcount, igcount1, egcount, egcount1;

		err = nfp_nbi_mac_regr(nbi, NFP_NBI_MACX_CSR,
				       NFP_NBI_MACX_IG_BCP_COUNT, &tmp);
		if (err < 0)
			return err;

		igcount = NFP_NBI_MACX_IG_BCP_COUNT_IG_BCC_of(tmp);
		igcount1 = NFP_NBI_MACX_IG_BCP_COUNT_IG_BCC1_of(tmp);

		err = nfp_nbi_mac_regr(nbi, NFP_NBI_MACX_CSR,
				       NFP_NBI_MACX_EG_BCP_COUNT, &tmp);
		if (err < 0)
			return err;

		egcount = NFP_NBI_MACX_EG_BCP_COUNT_EG_BCC_of(tmp);
		egcount1 = NFP_NBI_MACX_EG_BCP_COUNT_EG_BCC1_of(tmp);

		if (split) {
			ok &= (igcount >= igsplit);
			ok &= (egcount >= egsplit);
			ok &= (igcount1 >= igsplit);
			ok &= (egcount1 >= egsplit);
		} else {
			ok &= (igcount >= igsplit * 2);
			ok &= (egcount >= egsplit * 2);
		}

		if (!ok) {
			ts = CURRENT_TIME;
			if (timespec_compare(&ts, &timeout) >= 0) {
				nfp_err(nfp, "After %dms, NBI%d did not flush all packet buffers\n",
					timeout_ms, nfp_nbi_index(nbi));
				if (split) {
					nfp_err(nfp, "\t(ingress %d/%d != %d/%d, egress %d/%d != %d/%d)\n",
						igcount, igcount1,
						igsplit, igsplit,
						egcount, egcount1,
						egsplit, egsplit);
				} else {
					nfp_err(nfp, "\t(ingress %d != %d, egress %d != %d)\n",
						igcount, igsplit,
						egcount, egsplit);
				}
				return -ETIMEDOUT;
			}
		}
	} while (!ok);

	return 0;
}

static int nfp6000_nbi_check_dma_credits(struct nfp_device *nfp,
					 struct nfp_nbi_dev *nbi,
					 const u32 *bpe, int bpes)
{
	int err, p;
	u32 tmp;

	if (bpes < 1)
		return 0;

	for (p = 0; p < bpes; p++) {
		int ctm, pkt, buf, bp;
		int ctmb, pktb, bufb;

		err = nfp_nbi_mac_regr(nbi, NFP_NBI_DMAX_CSR,
				       NFP_NBI_DMAX_CSR_NBI_DMA_BPE_CFG(p),
				       &tmp);
		if (err < 0)
			return err;

		bp = NFP_NBI_DMAX_CSR_NBI_DMA_BPE_CFG_BPENUM_of(tmp);

		ctm = NFP_NBI_DMAX_CSR_NBI_DMA_BPE_CFG_CTM_of(tmp);
		ctmb = NFP_NBI_DMAX_CSR_NBI_DMA_BPE_CFG_CTM_of(bpe[bp]);

		pkt = NFP_NBI_DMAX_CSR_NBI_DMA_BPE_CFG_PKT_CREDIT_of(tmp);
		pktb = NFP_NBI_DMAX_CSR_NBI_DMA_BPE_CFG_PKT_CREDIT_of(bpe[bp]);

		buf = NFP_NBI_DMAX_CSR_NBI_DMA_BPE_CFG_BUF_CREDIT_of(tmp);
		bufb = NFP_NBI_DMAX_CSR_NBI_DMA_BPE_CFG_BUF_CREDIT_of(bpe[bp]);

		if (ctm != ctmb) {
			nfp_err(nfp, "NBI%d DMA BPE%d targets CTM%d, expected CTM%d\n",
				nfp_nbi_index(nbi), bp, ctm, ctmb);
			return -EBUSY;
		}

		if (pkt != pktb) {
			nfp_err(nfp, "NBI%d DMA BPE%d (CTM%d) outstanding packets (%d != %d)\n",
				nfp_nbi_index(nbi), bp, ctm, pkt, pktb);
			return -EBUSY;
		}

		if (buf != bufb) {
			nfp_err(nfp, "NBI%d DMA BPE%d (CTM%d) outstanding buffers (%d != %d)\n",
				nfp_nbi_index(nbi), bp, ctm, buf, bufb);
			return -EBUSY;
		}
	}

	return 0;
}
Example #23
/**
 * nfp_nsp_command() - Execute a command on the NFP Service Processor
 * @nfp:	NFP Device handle
 * @code:	NSP Command Code
 * @option:	NSP Command Argument (option word)
 * @buff_cpp:	NSP Buffer CPP ID
 * @buff_addr:	NSP Buffer address
 *
 * Return: 0 for success with no result
 *
 *         1..255 for NSP completion with a result code
 *
 *         -EAGAIN if the NSP is not yet present
 *
 *         -ENODEV if the NSP is not a supported model
 *
 *         -EBUSY if the NSP is stuck
 *
 *         -EINTR if interrupted while waiting for completion
 *
 *         -ETIMEDOUT if the NSP took longer than 30 seconds to complete
 */
int nfp_nsp_command(struct nfp_device *nfp, uint16_t code, u32 option,
		    u32 buff_cpp, u64 buff_addr)
{
	struct nfp_cpp *cpp = nfp_device_cpp(nfp);
	struct nfp_nsp *nsp;
	u32 nsp_cpp;
	u64 nsp_base;
	u64 nsp_status;
	u64 nsp_command;
	u64 nsp_buffer;
	int err, ok;
	u64 tmp;
	int timeout = 30 * 10;	/* 30 seconds total */

	nsp = nfp_device_private(nfp, nfp_nsp_con);
	if (!nsp)
		return -EAGAIN;

	nsp_cpp = nfp_resource_cpp_id(nsp->res);
	nsp_base = nfp_resource_address(nsp->res);
	nsp_status = nsp_base + NSP_STATUS;
	nsp_command = nsp_base + NSP_COMMAND;
	nsp_buffer = nsp_base + NSP_BUFFER;

	err = nfp_cpp_readq(cpp, nsp_cpp, nsp_status, &tmp);
	if (err < 0)
		return err;

	if (NSP_MAGIC != NSP_STATUS_MAGIC_of(tmp)) {
		nfp_err(nfp, "NSP: Cannot detect NFP Service Processor\n");
		return -ENODEV;
	}

	ok = NSP_STATUS_MAJOR_of(tmp) == NSP_CODE_MAJOR_of(code) &&
	     NSP_STATUS_MINOR_of(tmp) >= NSP_CODE_MINOR_of(code);
	if (!ok) {
		nfp_err(nfp, "NSP: Code 0x%04x not supported (ABI %d.%d)\n",
			code,
			(int)NSP_STATUS_MAJOR_of(tmp),
			(int)NSP_STATUS_MINOR_of(tmp));
		return -EINVAL;
	}

	if (tmp & NSP_STATUS_BUSY) {
		nfp_err(nfp, "NSP: Service processor busy!\n");
		return -EBUSY;
	}

	err = nfp_cpp_writeq(cpp, nsp_cpp, nsp_buffer,
			     NSP_BUFFER_CPP(buff_cpp) |
			     NSP_BUFFER_ADDRESS(buff_addr));
	if (err < 0)
		return err;

	err = nfp_cpp_writeq(cpp, nsp_cpp, nsp_command,
			     NSP_COMMAND_OPTION(option) |
			     NSP_COMMAND_CODE(code) | NSP_COMMAND_START);
	if (err < 0)
		return err;

	/* Wait for NSP_COMMAND_START to go to 0 */
	for (; timeout > 0; timeout--) {
		err = nfp_cpp_readq(cpp, nsp_cpp, nsp_command, &tmp);
		if (err < 0)
			return err;

		if (!(tmp & NSP_COMMAND_START))
			break;

		if (msleep_interruptible(100) > 0) {
			nfp_warn(nfp, "NSP: Interrupt waiting for code 0x%04x to start\n",
				 code);
			return -EINTR;
		}
	}

	if (timeout <= 0) {
		nfp_warn(nfp, "NSP: Timeout waiting for code 0x%04x to start\n",
			 code);
		return -ETIMEDOUT;
	}

	/* Wait for NSP_STATUS_BUSY to go to 0 */
	for (; timeout > 0; timeout--) {
		err = nfp_cpp_readq(cpp, nsp_cpp, nsp_status, &tmp);
		if (err < 0)
			return err;

		if (!(tmp & NSP_STATUS_BUSY))
			break;

		if (msleep_interruptible(100) > 0) {
			nfp_warn(nfp, "NSP: Interrupt waiting for code 0x%04x to complete\n",
				 code);
			return -EINTR;
		}
	}

	if (timeout <= 0) {
		nfp_warn(nfp, "NSP: Timeout waiting for code 0x%04x to complete\n",
			 code);
		return -ETIMEDOUT;
	}

	err = NSP_STATUS_RESULT_of(tmp);
	if (err > 0)
		return -err;

	err = nfp_cpp_readq(cpp, nsp_cpp, nsp_command, &tmp);
	if (err < 0)
		return err;

	return NSP_COMMAND_OPTION_of(tmp);
}
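
A caller sketch for the contract documented above; SPCODE_EXAMPLE and example_nsp_call() are made-up names, and the buffer arguments are left at zero.

/* Sketch: issue an NSP command without an attached buffer.
 * SPCODE_EXAMPLE is an illustrative placeholder, not a real command code.
 */
static int example_nsp_call(struct nfp_device *nfp)
{
	int ret;

	ret = nfp_nsp_command(nfp, SPCODE_EXAMPLE, 0, 0, 0);
	if (ret < 0)	/* -errno, or a negated NSP result code */
		return ret;

	/* non-negative: the command was accepted and completed */
	return 0;
}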