Example #1
void
dump_acl6_rule(struct rte_mbuf *m, uint32_t sig)
{
	unsigned i;
	uint32_t offset = sig & ~ACL_DENY_SIGNATURE;
	struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)(rte_pktmbuf_mtod(
	    m, unsigned char *)+sizeof(struct ether_hdr));

	acl_log("Packet Src");
	for (i = 0; i < RTE_DIM(ipv6_hdr->src_addr); i += sizeof(uint16_t))
		acl_log(":%.2x%.2x", ipv6_hdr->src_addr[i],
			ipv6_hdr->src_addr[i + 1]);

	acl_log("\nDst");
	for (i = 0; i < RTE_DIM(ipv6_hdr->dst_addr); i += sizeof(uint16_t))
		acl_log(":%.2x%.2x", ipv6_hdr->dst_addr[i],
			ipv6_hdr->dst_addr[i + 1]);

	acl_log("\nSrc port:%hu,Dst port:%hu ",
		rte_bswap16(*(uint16_t *)(ipv6_hdr + 1)),
		rte_bswap16(*((uint16_t *)(ipv6_hdr + 1) + 1)));
	acl_log("hit ACL %d - ", offset);

	print_one_ipv6_rule(acl_config.rule_ipv6 + offset, 1);

	acl_log("\n\n");
}
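Every snippet on this page relies on DPDK's RTE_DIM() macro to compute the number of elements in a statically sized array. For reference, it is the conventional sizeof idiom from rte_common.h; a minimal sketch:

#define RTE_DIM(a)	(sizeof(a) / sizeof((a)[0]))

Note that it only makes sense for true arrays: applied to a pointer it silently yields a wrong count.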
Example #2
int bnxt_dev_xstats_get_names_by_id_op(struct rte_eth_dev *dev,
				struct rte_eth_xstat_name *xstats_names,
				const uint64_t *ids, unsigned int limit)
{
	/* Account for the Tx drop pkts aka the Anti spoof counter */
	const unsigned int stat_cnt = RTE_DIM(bnxt_rx_stats_strings) +
				RTE_DIM(bnxt_tx_stats_strings) + 1;
	struct rte_eth_xstat_name xstats_names_copy[stat_cnt];
	uint16_t i;

	if (!ids)
		return bnxt_dev_xstats_get_names_op(dev, xstats_names,
						    stat_cnt);
	bnxt_dev_xstats_get_names_by_id_op(dev, xstats_names_copy, NULL,
			stat_cnt);

	for (i = 0; i < limit; i++) {
		if (ids[i] >= stat_cnt) {
			RTE_LOG(ERR, PMD, "id value isn't valid");
			return -1;
		}
		strcpy(xstats_names[i].name,
				xstats_names_copy[ids[i]].name);
	}
	return stat_cnt;
}
Example #3
static struct rte_acl_ctx *
setup_acl(struct rte_acl_rule *acl_base, unsigned int acl_num, int ipv6,
	  int socketid)
{
	char name[PATH_MAX];
	struct rte_acl_param acl_param;
	struct rte_acl_config acl_build_param;
	struct rte_acl_ctx *context;
	int dim = ipv6 ? RTE_DIM(ipv6_defs) : RTE_DIM(ipv4_defs);
	static uint32_t ctx_count[NB_SOCKETS] = {0};

	if (!acl_num)
		return NULL;

	/* Create ACL contexts */
	snprintf(name, sizeof(name), "%s%d-%d",
		 ipv6 ? L3FWD_ACL_IPV6_NAME : L3FWD_ACL_IPV4_NAME, socketid, ctx_count[socketid]++);

	acl_param.name = name;
	acl_param.socket_id = socketid;
	acl_param.rule_size = RTE_ACL_RULE_SZ(dim);
	acl_param.max_rule_num = MAX_ACL_RULE_NUM;

	if ((context = rte_acl_create(&acl_param)) == NULL) {
		acl_log("Failed to create ACL context\n");
		goto err;
	}

	if (acl_parm_config.aclavx2 &&
	    rte_acl_set_ctx_classify(context, RTE_ACL_CLASSIFY_AVX2) != 0) {
		acl_log("Failed to setup classify method for  ACL context\n");
		goto err;
	}

	if (rte_acl_add_rules(context, acl_base, acl_num) < 0) {
		acl_log("add rules failed\n");
		goto err;
	}

	/* Perform builds */
	memset(&acl_build_param, 0, sizeof(acl_build_param));

	acl_build_param.num_categories = DEFAULT_MAX_CATEGORIES;
	acl_build_param.num_fields = dim;
	memcpy(&acl_build_param.defs, ipv6 ? ipv6_defs : ipv4_defs,
	       ipv6 ? sizeof(ipv6_defs) : sizeof(ipv4_defs));

	if (rte_acl_build(context, &acl_build_param) != 0) {
		acl_log("Failed to build ACL trie\n");
		goto err;
	}

	rte_acl_dump(context);

	return context;
err:
	rte_acl_free(context);
	return NULL;
}
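Once setup_acl() has returned a built context, lookups go through rte_acl_classify(). The sketch below is illustrative only, assuming the caller has already filled data[] with pointers to search keys laid out per ipv4_defs/ipv6_defs; acl_ctx, data, results and num are placeholder names:

	uint32_t results[MAX_PKT_BURST];     /* one result slot per key */
	const uint8_t *data[MAX_PKT_BURST];  /* pointers to the search keys */

	/* ... fill data[0..num-1] from the received packets ... */

	if (rte_acl_classify(acl_ctx, data, results, num, 1) != 0)
		rte_exit(EXIT_FAILURE, "rte_acl_classify() failed\n");
	/* results[i] carries the userdata of the best matching rule, 0 if none matched. */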
Example #4
void
rt_init(struct socket_ctx *ctx, int socket_id, unsigned ep)
{
	char name[PATH_MAX];
	unsigned i;
	int ret;
	struct rte_lpm *lpm;
	struct ipv4_route *rt;
	char a, b, c, d;
	unsigned nb_routes;
	struct rte_lpm_config conf = { 0 };

	if (ctx == NULL)
		rte_exit(EXIT_FAILURE, "NULL context.\n");

	if (ctx->rt_ipv4 != NULL)
		rte_exit(EXIT_FAILURE, "Routing Table for socket %u already "
			"initialized\n", socket_id);

	printf("Creating Routing Table (RT) context with %u max routes\n",
			RT_IPV4_MAX_RULES);

	if (ep == 0) {
		rt = rt_ipv4_ep0;
		nb_routes = RTE_DIM(rt_ipv4_ep0);
	} else if (ep == 1) {
		rt = rt_ipv4_ep1;
		nb_routes = RTE_DIM(rt_ipv4_ep1);
	} else
		rte_exit(EXIT_FAILURE, "Invalid EP value %u. Only 0 or 1 "
			"supported.\n", ep);

	/* create the LPM table */
	snprintf(name, sizeof(name), "%s_%u", "rt_ipv4", socket_id);
	conf.max_rules = RT_IPV4_MAX_RULES;
	conf.number_tbl8s = RTE_LPM_TBL8_NUM_ENTRIES;
	lpm = rte_lpm_create(name, socket_id, &conf);
	if (lpm == NULL)
		rte_exit(EXIT_FAILURE, "Unable to create LPM table "
			"on socket %d\n", socket_id);

	/* populate the LPM table */
	for (i = 0; i < nb_routes; i++) {
		ret = rte_lpm_add(lpm, rt[i].ip, rt[i].depth, rt[i].if_out);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "Unable to add entry num %u to "
				"LPM table on socket %d\n", i, socket_id);

		uint32_t_to_char(rt[i].ip, &a, &b, &c, &d);
		printf("LPM: Adding route %hhu.%hhu.%hhu.%hhu/%hhu (%hhu)\n",
				a, b, c, d, rt[i].depth, rt[i].if_out);
	}

	ctx->rt_ipv4 = (struct rt_ctx *)lpm;
}
Example #5
int rte_ivshmem_metadata_create(const char *name)
{
	struct ivshmem_config * ivshmem_config;
	unsigned index;

	if (pagesz == 0)
		pagesz = getpagesize();

	if (name == NULL)
		return -1;

	rte_spinlock_lock(&global_cfg_sl);

	for (index = 0; index < RTE_DIM(ivshmem_global_config); index++) {
		if (ivshmem_global_config[index].metadata == NULL) {
			ivshmem_config = &ivshmem_global_config[index];
			break;
		}
	}

	if (index == RTE_DIM(ivshmem_global_config)) {
		RTE_LOG(ERR, EAL, "Cannot create more ivshmem config files. "
		"Maximum has been reached\n");
		rte_spinlock_unlock(&global_cfg_sl);
		return -1;
	}

	ivshmem_config->lock.l_type = F_WRLCK;
	ivshmem_config->lock.l_whence = SEEK_SET;

	ivshmem_config->lock.l_start = 0;
	ivshmem_config->lock.l_len = METADATA_SIZE_ALIGNED;

	ivshmem_global_config[index].metadata = ((struct rte_ivshmem_metadata *)
			ivshmem_metadata_create(
					name,
					sizeof(struct rte_ivshmem_metadata),
					&ivshmem_config->lock));

	if (ivshmem_global_config[index].metadata == NULL) {
		rte_spinlock_unlock(&global_cfg_sl);
		return -1;
	}

	/* Metadata setup */
	memset(ivshmem_config->metadata, 0, sizeof(struct rte_ivshmem_metadata));
	ivshmem_config->metadata->magic_number = IVSHMEM_MAGIC;
	snprintf(ivshmem_config->metadata->name,
			sizeof(ivshmem_config->metadata->name), "%s", name);

	rte_spinlock_unlock(&global_cfg_sl);

	return 0;
}
Example #6
static int32_t
fd_reserve(void)
{
	uint32_t i;

	for (i = 0; i != RTE_DIM(fd_port) && fd_port[i].port != FD_PORT_FREE;
			i++)
		;

	if (i == RTE_DIM(fd_port))
		return (-ENOMEM);

	fd_port[i].port = FD_PORT_RSRV;
	return (IDX_TO_FD(i));
}
Example #7
void
sa_init(struct socket_ctx *ctx, int socket_id, unsigned ep)
{
	const struct ipsec_sa *sa_out_entries, *sa_in_entries;
	unsigned nb_out_entries, nb_in_entries;
	const char *name;

	if (ctx == NULL)
		rte_exit(EXIT_FAILURE, "NULL context.\n");

	if (ctx->sa_ipv4_in != NULL)
		rte_exit(EXIT_FAILURE, "Inbound SA DB for socket %u already "
				"initialized\n", socket_id);

	if (ctx->sa_ipv4_out != NULL)
		rte_exit(EXIT_FAILURE, "Outbound SA DB for socket %u already "
				"initialized\n", socket_id);

	if (ep == 0) {
		sa_out_entries = sa_ep0_out;
		nb_out_entries = RTE_DIM(sa_ep0_out);
		sa_in_entries = sa_ep0_in;
		nb_in_entries = RTE_DIM(sa_ep0_in);
	} else if (ep == 1) {
		sa_out_entries = sa_ep1_out;
		nb_out_entries = RTE_DIM(sa_ep1_out);
		sa_in_entries = sa_ep1_in;
		nb_in_entries = RTE_DIM(sa_ep1_in);
	} else
		rte_exit(EXIT_FAILURE, "Invalid EP value %u. "
				"Only 0 or 1 supported.\n", ep);

	name = "sa_ipv4_in";
	ctx->sa_ipv4_in = sa_ipv4_create(name, socket_id);
	if (ctx->sa_ipv4_in == NULL)
		rte_exit(EXIT_FAILURE, "Error [%d] creating SA context %s "
				"in socket %d\n", rte_errno, name, socket_id);

	name = "sa_ipv4_out";
	ctx->sa_ipv4_out = sa_ipv4_create(name, socket_id);
	if (ctx->sa_ipv4_out == NULL)
		rte_exit(EXIT_FAILURE, "Error [%d] creating SA context %s "
				"in socket %d\n", rte_errno, name, socket_id);

	sa_in_add_rules(ctx->sa_ipv4_in, sa_in_entries, nb_in_entries);

	sa_out_add_rules(ctx->sa_ipv4_out, sa_out_entries, nb_out_entries);
}
Example #8
int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
			   struct rte_eth_xstat *xstats, unsigned int n)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

	unsigned int count, i;
	uint64_t tx_drop_pkts;

	if (!(bp->flags & BNXT_FLAG_PORT_STATS)) {
		RTE_LOG(ERR, PMD, "xstats not supported for VF\n");
		return 0;
	}

	bnxt_hwrm_port_qstats(bp);
	bnxt_hwrm_func_qstats_tx_drop(bp, 0xffff, &tx_drop_pkts);

	count = RTE_DIM(bnxt_rx_stats_strings) +
		RTE_DIM(bnxt_tx_stats_strings) + 1; /* For tx_drop_pkts */

	if (n < count)
		return count;

	count = 0;
	for (i = 0; i < RTE_DIM(bnxt_rx_stats_strings); i++) {
		uint64_t *rx_stats = (uint64_t *)bp->hw_rx_port_stats;
		xstats[count].id = count;
		xstats[count].value = rte_le_to_cpu_64(
				*(uint64_t *)((char *)rx_stats +
				bnxt_rx_stats_strings[i].offset));
		count++;
	}

	for (i = 0; i < RTE_DIM(bnxt_tx_stats_strings); i++) {
		uint64_t *tx_stats = (uint64_t *)bp->hw_tx_port_stats;
		xstats[count].id = count;
		xstats[count].value = rte_le_to_cpu_64(
				 *(uint64_t *)((char *)tx_stats +
				bnxt_tx_stats_strings[i].offset));
		count++;
	}

	/* The Tx drop pkts aka the Anti spoof counter */
	xstats[count].id = count;
	xstats[count].value = rte_le_to_cpu_64(tx_drop_pkts);
	count++;

	return count;
}
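On the application side this callback is reached through rte_eth_xstats_get(). A hedged sketch of the usual two-step pattern (query the required count, then fetch); port_id is a placeholder, and in DPDK releases of this vintage the port id is a uint8_t:

	int n = rte_eth_xstats_get(port_id, NULL, 0);   /* required entry count */
	if (n > 0) {
		struct rte_eth_xstat *xstats = malloc(n * sizeof(*xstats));
		if (xstats != NULL && rte_eth_xstats_get(port_id, xstats, n) == n) {
			/* xstats[i].id and xstats[i].value are valid here */
		}
		free(xstats);
	}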
Example #9
/**
 * Checks if the machine is adequate for running the binary. If it is not, the
 * program exits with status 1.
 */
void
rte_cpu_check_supported(void)
{
	/* This is generated at compile-time by the build system */
	static const enum rte_cpu_flag_t compile_time_flags[] = {
			RTE_COMPILE_TIME_CPUFLAGS
	};
	unsigned count = RTE_DIM(compile_time_flags), i;
	int ret;

	for (i = 0; i < count; i++) {
		ret = rte_cpu_get_flag_enabled(compile_time_flags[i]);

		if (ret < 0) {
			fprintf(stderr,
				"ERROR: CPU feature flag lookup failed with error %d\n",
				ret);
			exit(1);
		}
		if (!ret) {
			fprintf(stderr,
			        "ERROR: This system does not support \"%s\".\n"
			        "Please check that RTE_MACHINE is set correctly.\n",
			        rte_cpu_get_flag_name(compile_time_flags[i]));
			exit(1);
		}
	}
}
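rte_cpu_check_supported() is meant to run before anything exercises optional instruction sets; a minimal sketch of a call site early in startup (in practice the EAL performs an equivalent check during rte_eal_init()):

	int
	main(int argc, char **argv)
	{
		/* Abort with status 1 if the CPU lacks a flag the binary was built for. */
		rte_cpu_check_supported();

		/* ... rte_eal_init(argc, argv) and the rest of initialization ... */
		return 0;
	}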
Example #10
void
deactivate_slave(struct rte_eth_dev *eth_dev, uint8_t port_id)
{
	uint8_t slave_pos;
	struct bond_dev_private *internals = eth_dev->data->dev_private;
	uint8_t active_count = internals->active_slave_count;

	if (internals->mode == BONDING_MODE_8023AD) {
		bond_mode_8023ad_stop(eth_dev);
		bond_mode_8023ad_deactivate_slave(eth_dev, port_id);
	}

	slave_pos = find_slave_by_id(internals->active_slaves, active_count,
			port_id);

	/* If the slave was not at the end of the list,
	 * shift the remaining active slaves up in the array. */
	if (slave_pos < active_count) {
		active_count--;
		memmove(internals->active_slaves + slave_pos,
				internals->active_slaves + slave_pos + 1,
				(active_count - slave_pos) *
					sizeof(internals->active_slaves[0]));
	}

	RTE_VERIFY(active_count < RTE_DIM(internals->active_slaves));
	internals->active_slave_count = active_count;

	if (eth_dev->data->dev_started && internals->mode == BONDING_MODE_8023AD)
		bond_mode_8023ad_start(eth_dev);
}
Example #11
static int
parse_args(int argc, char **argv)
{
	int opt;

	while ((opt = getopt(argc, argv, "hi:")) != -1) {
		switch (opt) {
		case 'h':
			usage(argv[0]);
			rte_exit(EXIT_SUCCESS, "exiting...");
			break;
		case 'i':
			if (ports.num >= RTE_DIM(ports.p)) {
				usage(argv[0]);
				rte_exit(EXIT_FAILURE, "configs with %u "
					"ports are not supported\n",
					ports.num + 1);

			}

			ports.p[ports.num].str = optarg;
			ports.p[ports.num].id = parse_portid(optarg);
			ports.num++;
			break;
		default:
			usage(argv[0]);
			rte_exit(EXIT_FAILURE, "invalid option: %c\n", opt);
		}
	}

	return 0;
}
Example #12
static int
parse_cperf_test_type(struct cperf_options *opts, const char *arg)
{
	struct name_id_map cperftest_namemap[] = {
		{
			cperf_test_type_strs[CPERF_TEST_TYPE_THROUGHPUT],
			CPERF_TEST_TYPE_THROUGHPUT
		},
		{
			cperf_test_type_strs[CPERF_TEST_TYPE_VERIFY],
			CPERF_TEST_TYPE_VERIFY
		},
		{
			cperf_test_type_strs[CPERF_TEST_TYPE_LATENCY],
			CPERF_TEST_TYPE_LATENCY
		}
	};

	int id = get_str_key_id_mapping(
			(struct name_id_map *)cperftest_namemap,
			RTE_DIM(cperftest_namemap), arg);
	if (id < 0) {
		RTE_LOG(ERR, USER1, "failed to parse test type");
		return -1;
	}

	opts->test = (enum cperf_perf_test_type)id;

	return 0;
}
Example #13
/**
 * Register a Memory Region (MR) <-> Memory Pool (MP) association in
 * txq->mp2mr[]. If mp2mr[] is full, remove an entry first.
 *
 * This function should only be called by txq_mp2mr().
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param[in] mp
 *   Memory Pool for which a Memory Region lkey must be returned.
 * @param idx
 *   Index of the next available entry.
 *
 * @return
 *   mr->lkey on success, (uint32_t)-1 on failure.
 */
uint32_t
txq_mp2mr_reg(struct txq *txq, struct rte_mempool *mp, unsigned int idx)
{
	struct txq_ctrl *txq_ctrl = container_of(txq, struct txq_ctrl, txq);
	struct ibv_mr *mr;

	/* Add a new entry, register MR first. */
	DEBUG("%p: discovered new memory pool \"%s\" (%p)",
	      (void *)txq_ctrl, mp->name, (void *)mp);
	mr = mlx5_mp2mr(txq_ctrl->priv->pd, mp);
	if (unlikely(mr == NULL)) {
		DEBUG("%p: unable to configure MR, ibv_reg_mr() failed.",
		      (void *)txq_ctrl);
		return (uint32_t)-1;
	}
	if (unlikely(idx == RTE_DIM(txq_ctrl->txq.mp2mr))) {
		/* Table is full, remove oldest entry. */
		DEBUG("%p: MR <-> MP table full, dropping oldest entry.",
		      (void *)txq_ctrl);
		--idx;
		claim_zero(ibv_dereg_mr(txq_ctrl->txq.mp2mr[0].mr));
		memmove(&txq_ctrl->txq.mp2mr[0], &txq_ctrl->txq.mp2mr[1],
			(sizeof(txq_ctrl->txq.mp2mr) -
			 sizeof(txq_ctrl->txq.mp2mr[0])));
	}
	/* Store the new entry. */
	txq_ctrl->txq.mp2mr[idx].mp = mp;
	txq_ctrl->txq.mp2mr[idx].mr = mr;
	txq_ctrl->txq.mp2mr[idx].lkey = htonl(mr->lkey);
	DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIu32,
	      (void *)txq_ctrl, mp->name, (void *)mp,
	      txq_ctrl->txq.mp2mr[idx].lkey);
	return txq_ctrl->txq.mp2mr[idx].lkey;
}
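The doc comment above restricts callers to txq_mp2mr(); as a hedged sketch (not the verbatim mlx5 helper), that caller-side lookup amounts to scanning the table and registering on a miss:

	/* Illustrative: return the lkey for mp, registering it on first use. */
	static uint32_t
	txq_mp2mr_sketch(struct txq *txq, struct rte_mempool *mp)
	{
		unsigned int i;

		for (i = 0; i != RTE_DIM(txq->mp2mr); ++i) {
			if (txq->mp2mr[i].mp == NULL)
				break;                      /* first free slot: mp is unknown */
			if (txq->mp2mr[i].mp == mp)
				return txq->mp2mr[i].lkey;  /* already registered */
		}
		return txq_mp2mr_reg(txq, mp, i);
	}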
Example #14
int
vfio_has_supported_extensions(int vfio_container_fd)
{
	int ret;
	unsigned idx, n_extensions = 0;
	for (idx = 0; idx < RTE_DIM(iommu_types); idx++) {
		const struct vfio_iommu_type *t = &iommu_types[idx];

		ret = ioctl(vfio_container_fd, VFIO_CHECK_EXTENSION,
				t->type_id);
		if (ret < 0) {
			RTE_LOG(ERR, EAL, "  could not get IOMMU type, "
				"error %i (%s)\n", errno,
				strerror(errno));
			close(vfio_container_fd);
			return -1;
		} else if (ret == 1) {
			/* we found a supported extension */
			n_extensions++;
		}
		RTE_LOG(DEBUG, EAL, "  IOMMU type %d (%s) is %s\n",
				t->type_id, t->name,
				ret ? "supported" : "not supported");
	}

	/* if we didn't find any supported IOMMU types, fail */
	if (!n_extensions) {
		close(vfio_container_fd);
		return -1;
	}

	return 0;
}
Example #15
void
activate_slave(struct rte_eth_dev *eth_dev, uint8_t port_id)
{
	struct bond_dev_private *internals = eth_dev->data->dev_private;
	uint8_t active_count = internals->active_slave_count;

	if (internals->mode == BONDING_MODE_8023AD)
		bond_mode_8023ad_activate_slave(eth_dev, port_id);

	if (internals->mode == BONDING_MODE_TLB
			|| internals->mode == BONDING_MODE_ALB) {

		internals->tlb_slaves_order[active_count] = port_id;
	}

	RTE_VERIFY(internals->active_slave_count <
			(RTE_DIM(internals->active_slaves) - 1));

	internals->active_slaves[internals->active_slave_count] = port_id;
	internals->active_slave_count++;

	if (internals->mode == BONDING_MODE_TLB)
		bond_tlb_activate_slave(internals);
	if (internals->mode == BONDING_MODE_ALB)
		bond_mode_alb_client_list_upd(eth_dev);
}
Example #16
void
rte_ivshmem_metadata_dump(FILE *f, const char *name)
{
	unsigned i = 0;
	struct ivshmem_config * config;
	struct rte_ivshmem_metadata_entry *entry;
#ifdef RTE_LIBRTE_IVSHMEM_DEBUG
	uint64_t addr;
	uint64_t end, hugepage_sz;
	struct memseg_cache_entry e;
#endif

	if (name == NULL)
		return;

	/* return error if we try to use an unknown config file */
	config = get_config_by_name(name);
	if (config == NULL) {
		RTE_LOG(ERR, EAL, "Cannot find IVSHMEM config %s!\n", name);
		return;
	}

	rte_spinlock_lock(&config->sl);

	entry = &config->metadata->entry[0];

	while (entry->mz.addr != NULL && i < RTE_DIM(config->metadata->entry)) {

		fprintf(f, "Entry %u: name:<%-20s>, phys:0x%-15lx, len:0x%-15lx, "
			"virt:%-15p, off:0x%-15lx\n",
			i,
			entry->mz.name,
			entry->mz.phys_addr,
			entry->mz.len,
			entry->mz.addr,
			entry->offset);
		i++;

#ifdef RTE_LIBRTE_IVSHMEM_DEBUG
		fprintf(f, "\tHugepage files:\n");

		hugepage_sz = entry->mz.hugepage_sz;
		addr = RTE_ALIGN_FLOOR(entry->mz.addr_64, hugepage_sz);
		end = addr + RTE_ALIGN_CEIL(entry->mz.len + (entry->mz.addr_64 - addr),
				hugepage_sz);

		for (; addr < end; addr += hugepage_sz) {
			memset(&e, 0, sizeof(e));

			get_hugefile_by_virt_addr(addr, &e);

			fprintf(f, "\t0x%"PRIx64 "-0x%" PRIx64 " offset: 0x%" PRIx64 " %s\n",
					addr, addr + hugepage_sz, e.offset, e.filepath);
		}
#endif
		entry++;
	}

	rte_spinlock_unlock(&config->sl);
}
Example #17
static int
app_pipeline_fa_dscp_ls(struct app_params *app,
		uint32_t pipeline_id)
{
	struct app_pipeline_fa *p;
	uint32_t i;

	/* Check input arguments */
	if (app == NULL)
		return -1;

	p = app_pipeline_data_fe(app, pipeline_id,
		&pipeline_flow_actions);
	if (p == NULL)
		return -1;

	if (p->params.dscp_enabled == 0)
		return -1;

	for (i = 0; i < RTE_DIM(p->dscp); i++) {
		struct app_pipeline_fa_dscp *dscp = &p->dscp[i];

		printf("DSCP = %2" PRIu32 ": Traffic class = %" PRIu32
			", Color = %s\n",
			i,
			dscp->traffic_class,
			color_to_string(dscp->color));
	}

	return 0;
}
Example #18
static int
parse_ipv6_net(const char *in, struct rte_acl_field field[4])
{
	int32_t rc;
	const char *mp;
	uint32_t i, m, v[4];
	const uint32_t nbu32 = sizeof(uint32_t) * CHAR_BIT;

	// TODO may be replaced by inet_pton with some refactoring
	/* get address. */
	rc = parse_ipv6_addr(in, &mp, v, '/');
	if (rc != 0)
		return rc;

	/* get mask. */
	GET_CB_FIELD(mp, m, 0, CHAR_BIT * sizeof(v), 0);

	/* put all together. */
	for (i = 0; i != RTE_DIM(v); i++) {
		if (m >= (i + 1) * nbu32)
			field[i].mask_range.u32 = nbu32;
		else
			field[i].mask_range.u32 =
			    m > (i * nbu32) ? m - (i * 32) : 0;

		field[i].value.u32 = v[i];
	}

	return 0;
}
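As a worked example of the mask split: an input prefix of /48 gives m = 48, so the loop stores 32 in field[0].mask_range.u32 (the whole first 32-bit word is significant), 48 - 32 = 16 in field[1], and 0 in fields 2 and 3.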
Example #19
/**
 * Give the library a mempool of rte_mbufs with which it can do the
 * rte_mbuf <--> netmap slot conversions.
 */
int
rte_netmap_init(const struct rte_netmap_conf *conf)
{
	size_t buf_ofs, nmif_sz, sz;
	size_t port_rings, port_slots, port_bufs;
	uint32_t i, port_num;

	port_num = RTE_MAX_ETHPORTS;
	port_rings = 2 * conf->max_rings;
	port_slots = port_rings * conf->max_slots;
	port_bufs = port_slots;

	nmif_sz = NETMAP_IF_RING_OFS(port_rings, port_rings, port_slots);
	sz = nmif_sz * port_num;

	buf_ofs = RTE_ALIGN_CEIL(sz, CACHE_LINE_SIZE);
	sz = buf_ofs + port_bufs * conf->max_bufsz * port_num;

	if (sz > UINT32_MAX ||
			(netmap.mem = rte_zmalloc_socket(__func__, sz,
			CACHE_LINE_SIZE, conf->socket_id)) == NULL) {
		RTE_LOG(ERR, USER1, "%s: failed to allocate %zu bytes\n",
			__func__, sz);
		return (-ENOMEM);
	}

	netmap.mem_sz = sz;
	netmap.netif_memsz = nmif_sz;
	netmap.buf_start = (uintptr_t)netmap.mem + buf_ofs;
	netmap.conf = *conf;

	rte_spinlock_init(&netmap_lock);

	/* Mark all ports as unused and set NETIF pointer. */
	for (i = 0; i != RTE_DIM(ports); i++) {
		ports[i].fd = UINT32_MAX;
		ports[i].nmif = (struct netmap_if *)
			((uintptr_t)netmap.mem + nmif_sz * i);
	}

	/* Mark all fd_ports as unused. */
	for (i = 0; i != RTE_DIM(fd_port); i++) {
		fd_port[i].port = FD_PORT_FREE;
	}

	return (0);
}
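A hedged sketch of initializing the compatibility layer at startup; only the conf members actually read above (socket_id, max_bufsz, max_rings, max_slots) are assumed, and the values are placeholders rather than recommendations:

	struct rte_netmap_conf conf = {
		.socket_id = SOCKET_ID_ANY,
		.max_bufsz = 2048,   /* buffer size exposed through netmap slots */
		.max_rings = 4,      /* ring pairs per port */
		.max_slots = 1024,   /* slots per ring */
	};

	if (rte_netmap_init(&conf) != 0)
		rte_exit(EXIT_FAILURE, "rte_netmap_init() failed\n");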
Example #20
/*
 * Execute trie traversal with 2 traversals in parallel.
 */
static inline int
search_sse_2(const struct rte_acl_ctx *ctx, const uint8_t **data,
	uint32_t *results, uint32_t total_packets, uint32_t categories)
{
	int n;
	struct acl_flow_data flows;
	uint64_t index_array[MAX_SEARCHES_SSE2];
	struct completion cmplt[MAX_SEARCHES_SSE2];
	struct parms parms[MAX_SEARCHES_SSE2];
	xmm_t input, indicies;

	acl_set_flow(&flows, cmplt, RTE_DIM(cmplt), data, results,
		total_packets, categories, ctx->trans_table);

	for (n = 0; n < MAX_SEARCHES_SSE2; n++) {
		cmplt[n].count = 0;
		index_array[n] = acl_start_next_trie(&flows, parms, n, ctx);
	}

	indicies = MM_LOADU((xmm_t *) &index_array[0]);

	/* Check for any matches. */
	acl_match_check_x2(0, ctx, parms, &flows, &indicies, mm_match_mask64.m);

	while (flows.started > 0) {

		/* Gather 4 bytes of input data for each stream. */
		input = MM_INSERT32(mm_ones_16.m, GET_NEXT_4BYTES(parms, 0), 0);
		input = MM_INSERT32(input, GET_NEXT_4BYTES(parms, 1), 1);

		/* Process the 4 bytes of input on each stream. */

		input = transition2(mm_index_mask64.m, input,
			mm_shuffle_input64.m, mm_ones_16.m,
			mm_bytes64.m, mm_type_quad_range64.m,
			flows.trans, &indicies);

		input = transition2(mm_index_mask64.m, input,
			mm_shuffle_input64.m, mm_ones_16.m,
			mm_bytes64.m, mm_type_quad_range64.m,
			flows.trans, &indicies);

		input = transition2(mm_index_mask64.m, input,
			mm_shuffle_input64.m, mm_ones_16.m,
			mm_bytes64.m, mm_type_quad_range64.m,
			flows.trans, &indicies);

		input = transition2(mm_index_mask64.m, input,
			mm_shuffle_input64.m, mm_ones_16.m,
			mm_bytes64.m, mm_type_quad_range64.m,
			flows.trans, &indicies);

		/* Check for any matches. */
		acl_match_check_x2(0, ctx, parms, &flows, &indicies,
			mm_match_mask64.m);
	}

	return 0;
}
Example #21
int mg_5tuple_build_filter(struct rte_acl_ctx * acx, uint32_t num_categories){
  struct rte_acl_config cfg;

  /* Clear the config so optional fields (e.g. max_size) are not left uninitialized. */
  memset(&cfg, 0, sizeof(cfg));
  printf("build: num_categories = %u\n", num_categories);
  cfg.num_categories = num_categories;
  cfg.num_fields = RTE_DIM(ipv4_defs);
  memcpy(cfg.defs, ipv4_defs, sizeof(ipv4_defs));
  return rte_acl_build(acx, &cfg);
}
Example #22
File: sp.c Project: 0day-ci/dpdk
static struct rte_acl_ctx *
acl4_init(const char *name, int socketid, const struct acl4_rules *rules,
		unsigned rules_nb)
{
	char s[PATH_MAX];
	struct rte_acl_param acl_param;
	struct rte_acl_config acl_build_param;
	struct rte_acl_ctx *ctx;

	printf("Creating SP context with %u max rules\n", MAX_ACL_RULE_NUM);

	memset(&acl_param, 0, sizeof(acl_param));

	/* Create ACL contexts */
	snprintf(s, sizeof(s), "%s_%d", name, socketid);

	printf("IPv4 %s entries [%u]:\n", s, rules_nb);
	dump_ipv4_rules(rules, rules_nb, 1);

	acl_param.name = s;
	acl_param.socket_id = socketid;
	acl_param.rule_size = RTE_ACL_RULE_SZ(RTE_DIM(ipv4_defs));
	acl_param.max_rule_num = MAX_ACL_RULE_NUM;

	ctx = rte_acl_create(&acl_param);
	if (ctx == NULL)
		rte_exit(EXIT_FAILURE, "Failed to create ACL context\n");

	if (rte_acl_add_rules(ctx, (const struct rte_acl_rule *)rules,
				rules_nb) < 0)
		rte_exit(EXIT_FAILURE, "add rules failed\n");

	/* Perform builds */
	memset(&acl_build_param, 0, sizeof(acl_build_param));

	acl_build_param.num_categories = DEFAULT_MAX_CATEGORIES;
	acl_build_param.num_fields = RTE_DIM(ipv4_defs);
	memcpy(&acl_build_param.defs, ipv4_defs, sizeof(ipv4_defs));

	if (rte_acl_build(ctx, &acl_build_param) != 0)
		rte_exit(EXIT_FAILURE, "Failed to build ACL trie\n");

	rte_acl_dump(ctx);

	return ctx;
}
Example #23
static int
add_memzone_to_metadata(const struct rte_memzone * mz,
		struct ivshmem_config * config)
{
	struct rte_ivshmem_metadata_entry * entry;
	unsigned i;

	rte_spinlock_lock(&config->sl);

	/* find free slot in this config */
	for (i = 0; i < RTE_DIM(config->metadata->entry); i++) {
		entry = &config->metadata->entry[i];

		if (entry->mz.addr_64 != 0 && overlap(mz, &entry->mz)) {
			RTE_LOG(ERR, EAL, "Overlapping memzones!\n");
			goto fail;
		}

		/* if addr is zero, the memzone is probably free */
		if (entry->mz.addr_64 == 0) {
			RTE_LOG(DEBUG, EAL, "Adding memzone '%s' at %p to metadata %s\n",
					mz->name, mz->addr, config->metadata->name);
			memcpy(&entry->mz, mz, sizeof(struct rte_memzone));

			/* run config file parser */
			if (build_config(config->metadata) < 0)
				goto fail;

			break;
		}
	}

	/* if we reached the maximum, that means we have no place in config */
	if (i == RTE_DIM(config->metadata->entry)) {
		RTE_LOG(ERR, EAL, "No space left in IVSHMEM metadata %s!\n",
				config->metadata->name);
		goto fail;
	}

	rte_spinlock_unlock(&config->sl);
	return 0;
fail:
	rte_spinlock_unlock(&config->sl);
	return -1;
}
Example #24
/**
 * Get Memory Region (MR) <-> Memory Pool (MP) association from txq->mp2mr[].
 * Add MP to txq->mp2mr[] if it's not registered yet. If mp2mr[] is full,
 * remove an entry first.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param[in] mp
 *   Memory Pool for which a Memory Region lkey must be returned.
 *
 * @return
 *   mr->lkey on success, (uint32_t)-1 on failure.
 */
static uint32_t
txq_mp2mr(struct txq *txq, struct rte_mempool *mp)
{
	unsigned int i;
	struct ibv_mr *mr;

	for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) {
		if (unlikely(txq->mp2mr[i].mp == NULL)) {
			/* Unknown MP, add a new MR for it. */
			break;
		}
		if (txq->mp2mr[i].mp == mp) {
			assert(txq->mp2mr[i].lkey != (uint32_t)-1);
			assert(txq->mp2mr[i].mr->lkey == txq->mp2mr[i].lkey);
			return txq->mp2mr[i].lkey;
		}
	}
	/* Add a new entry, register MR first. */
	DEBUG("%p: discovered new memory pool %p", (void *)txq, (void *)mp);
	mr = ibv_reg_mr(txq->priv->pd,
			(void *)mp->elt_va_start,
			(mp->elt_va_end - mp->elt_va_start),
			(IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE));
	if (unlikely(mr == NULL)) {
		DEBUG("%p: unable to configure MR, ibv_reg_mr() failed.",
		      (void *)txq);
		return (uint32_t)-1;
	}
	if (unlikely(i == RTE_DIM(txq->mp2mr))) {
		/* Table is full, remove oldest entry. */
		DEBUG("%p: MR <-> MP table full, dropping oldest entry.",
		      (void *)txq);
		--i;
		claim_zero(ibv_dereg_mr(txq->mp2mr[i].mr));
		memmove(&txq->mp2mr[0], &txq->mp2mr[1],
			(sizeof(txq->mp2mr) - sizeof(txq->mp2mr[0])));
	}
	/* Store the new entry. */
	txq->mp2mr[i].mp = mp;
	txq->mp2mr[i].mr = mr;
	txq->mp2mr[i].lkey = mr->lkey;
	DEBUG("%p: new MR lkey for MP %p: 0x%08" PRIu32,
	      (void *)txq, (void *)mp, txq->mp2mr[i].lkey);
	return txq->mp2mr[i].lkey;
}
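In the TX burst path the lookup is keyed by the mempool the mbuf came from; a minimal sketch of that use, assuming buf points to the rte_mbuf about to be posted:

	uint32_t lkey = txq_mp2mr(txq, buf->pool);
	if (lkey == (uint32_t)-1) {
		/* MR registration failed: drop the packet or fall back */
	}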
Example #25
static void
app_configure_flow_table(void)
{
	uint32_t i, j;

	for (i = 0, j = 0; i < APP_FLOWS_MAX; i++, j = (j + 1) % RTE_DIM(PARAMS)) {
		FUNC_CONFIG(&app_flows[i], &PARAMS[j]);
	}
}
Example #26
static __rte_always_inline void
sw_refill_pp_buf(struct sw_evdev *sw, struct sw_port *port)
{
	RTE_SET_USED(sw);
	struct rte_event_ring *worker = port->rx_worker_ring;
	port->pp_buf_start = 0;
	port->pp_buf_count = rte_event_ring_dequeue_burst(worker, port->pp_buf,
			RTE_DIM(port->pp_buf), NULL);
}
Example #27
extern int
rte_acl_set_ctx_classify(struct rte_acl_ctx *ctx, enum rte_acl_classify_alg alg)
{
	if (ctx == NULL || (uint32_t)alg >= RTE_DIM(classify_fns))
		return -EINVAL;

	ctx->alg = alg;
	return 0;
}
Example #28
/*
 * Test all hash functions.
 */
static void
run_hash_func_perf_tests(void)
{
	unsigned i, j, k;

	printf(" *** Hash function performance test results ***\n");
	printf(" Number of iterations for each test = %d\n",
			HASHTEST_ITERATIONS);
	printf("Hash Func.  , Key Length (bytes), Initial value, Ticks/Op.\n");

	for (i = 0; i < RTE_DIM(hashtest_initvals); i++) {
		for (j = 0; j < RTE_DIM(hashtest_key_lens); j++) {
			for (k = 0; k < RTE_DIM(hashtest_funcs); k++) {
				run_hash_func_perf_test(hashtest_key_lens[j],
						hashtest_initvals[i],
						hashtest_funcs[k]);
			}
		}
	}
}
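run_hash_func_perf_test() itself is not shown on this page; a hypothetical sketch of the ticks-per-operation measurement it implies, using rte_rdtsc() and the rte_hash_function pointer type from rte_hash.h (key buffer size and output format are illustrative):

	static void
	measure_hash_ticks(uint32_t key_len, uint32_t init_val, rte_hash_function f)
	{
		uint8_t key[64] = {0};      /* large enough for the tested key lengths */
		volatile uint32_t sink = 0; /* keep the calls from being optimized away */
		uint64_t start, ticks;
		unsigned int n;

		start = rte_rdtsc();
		for (n = 0; n < HASHTEST_ITERATIONS; n++)
			sink ^= f(key, key_len, init_val);
		ticks = rte_rdtsc() - start;
		printf("ticks/op = %.2f\n", (double)ticks / HASHTEST_ITERATIONS);
	}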
Example #29
/**
 * DPDK callback to get information about the device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[out] info
 *   Info structure output buffer.
 */
void
mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
{
	struct priv *priv = mlx5_get_priv(dev);
	unsigned int max;
	char ifname[IF_NAMESIZE];

	priv_lock(priv);
	/* FIXME: we should ask the device for these values. */
	info->min_rx_bufsize = 32;
	info->max_rx_pktlen = 65536;
	/*
	 * Since we need one CQ per QP, the limit is the minimum number
	 * between the two values.
	 */
	max = ((priv->device_attr.max_cq > priv->device_attr.max_qp) ?
	       priv->device_attr.max_qp : priv->device_attr.max_cq);
	/* max_rx_queues is uint16_t, so clamp max to 65535. */
	if (max >= 65535)
		max = 65535;
	info->max_rx_queues = max;
	info->max_tx_queues = max;
	info->max_mac_addrs = RTE_DIM(priv->mac);
	info->rx_offload_capa =
		(priv->hw_csum ?
		 (DEV_RX_OFFLOAD_IPV4_CKSUM |
		  DEV_RX_OFFLOAD_UDP_CKSUM |
		  DEV_RX_OFFLOAD_TCP_CKSUM) :
		 0);
	info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
	if (priv->hw_csum)
		info->tx_offload_capa |=
			(DEV_TX_OFFLOAD_IPV4_CKSUM |
			 DEV_TX_OFFLOAD_UDP_CKSUM |
			 DEV_TX_OFFLOAD_TCP_CKSUM);
	if (priv_get_ifname(priv, &ifname) == 0)
		info->if_index = if_nametoindex(ifname);
	/* FIXME: RETA update/query API expects the callee to know the size of
	 * the indirection table, for this PMD the size varies depending on
	 * the number of RX queues, it becomes impossible to find the correct
	 * size if it is not fixed.
	 * The API should be updated to solve this problem. */
	info->reta_size = priv->ind_table_max_size;
	info->speed_capa =
			ETH_LINK_SPEED_1G |
			ETH_LINK_SPEED_10G |
			ETH_LINK_SPEED_20G |
			ETH_LINK_SPEED_25G |
			ETH_LINK_SPEED_40G |
			ETH_LINK_SPEED_50G |
			ETH_LINK_SPEED_56G |
			ETH_LINK_SPEED_100G;
	priv_unlock(priv);
}
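Applications reach this callback through rte_eth_dev_info_get(); a minimal sketch of the application-side call, with port_id as a placeholder for an initialized mlx5 port (the function returns void in DPDK releases of this era):

	struct rte_eth_dev_info dev_info;

	memset(&dev_info, 0, sizeof(dev_info));
	rte_eth_dev_info_get(port_id, &dev_info);
	printf("port %u: max_rx_queues=%u, reta_size=%u\n",
	       port_id, dev_info.max_rx_queues, dev_info.reta_size);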
Example #30
void set_task_defaults(struct prox_cfg* prox_cfg, struct lcore_cfg* lcore_cfg_init)
{
	prox_cfg->master = RTE_MAX_LCORE;

	for (uint32_t i = 0; i < RTE_DIM(prox_cfg->cpe_table_ports); ++i) {
		prox_cfg->cpe_table_ports[i] = -1;
	}

	for (uint8_t lcore_id = 0; lcore_id < RTE_MAX_LCORE; ++lcore_id) {
		struct lcore_cfg *cur_lcore_cfg_init = &lcore_cfg_init[lcore_id];
		cur_lcore_cfg_init->id = lcore_id;
		for (uint8_t task_id = 0; task_id < MAX_TASKS_PER_CORE; ++task_id) {
			struct task_args *targ = &cur_lcore_cfg_init->targs[task_id];
			for (uint8_t port_id = 0; port_id < PROX_MAX_PORTS; ++port_id) {
				targ->rx_ports[port_id] = NO_PORT_AVAIL;
			}
			targ->flags |= TASK_ARG_DROP;
			targ->flags |= TASK_ARG_QINQ_ACL;
			targ->cpe_table_timeout_ms = DEFAULT_CPE_TIMEOUT_MS;
			targ->n_flows = NB_PIPES;
			/* configure default values for QoS (can be overwritten by config) */
			targ->qos_conf.port_params = port_params_default;
			targ->qos_conf.pipe_params[0] = pipe_params_default;
			targ->qos_conf.subport_params[0] = subport_params_default;
			targ->qos_conf.port_params.pipe_profiles = targ->qos_conf.pipe_params;
			targ->qos_conf.port_params.rate = TEN_GIGABIT;
			targ->qinq_tag = ETYPE_8021ad;
			targ->n_concur_conn = 8192*2;

			for (uint8_t port_id = 0; port_id < PROX_MAX_PORTS; ++port_id) {
				targ->tx_port_queue[port_id].port = NO_PORT_AVAIL;
			}

			for (uint8_t i = 0; i < PROX_MAX_PORTS; ++i) {
				targ->mapping[i] = i; // identity
			}

			targ->cbs = ETHER_MAX_LEN;
			targ->ebs = ETHER_MAX_LEN;
			targ->pbs = ETHER_MAX_LEN;

			targ->n_max_rules = 1024;
			targ->ring_size = RING_RX_SIZE;
			targ->nb_cache_mbuf = MAX_PKT_BURST * 4;
			targ->overhead = ETHER_CRC_LEN + 20;

			targ->tunnel_hop_limit = 3;
			targ->ctrl_freq = 1000;
			targ->lb_friend_core = 0xFF;
			targ->mbuf_size = MBUF_SIZE;
		}
	}
}