Example #1
/*
 * Call add, lookup and delete for a single rule with depth <= 24
 */
int32_t
test6(void)
{
	struct rte_lpm *lpm = NULL;
	uint32_t ip = IPv4(0, 0, 0, 0);
	uint8_t depth = 24, next_hop_add = 100, next_hop_return = 0;
	int32_t status = 0;

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
	TEST_LPM_ASSERT(lpm != NULL);

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_free(lpm);

	return PASS;
}
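
These unit tests lean on helpers from DPDK's LPM test harness and on the IPv4() address macro from rte_ip.h. A minimal sketch of the assumed definitions is given below so the examples read standalone; the exact MAX_RULES, PASS and FAIL values are assumptions here, not the harness's authoritative ones.

#include <stdio.h>
#include <stdint.h>
#include <rte_lpm.h>

/* Assumed harness definitions (sketch; the real test_lpm.c may differ). */
#define MAX_RULES 256
#define PASS 0
#define FAIL -1

#define TEST_LPM_ASSERT(cond) do {                        \
	if (!(cond)) {                                    \
		printf("Error at line %d:\n", __LINE__);  \
		return FAIL;                              \
	}                                                 \
} while (0)

/* IPv4() builds a host-order address from four octets; older rte_ip.h
 * defines it roughly like this (newer releases spell it RTE_IPV4). */
#ifndef IPv4
#define IPv4(a, b, c, d) ((uint32_t)((((a) & 0xff) << 24) | \
	(((b) & 0xff) << 16) | (((c) & 0xff) << 8) | ((d) & 0xff)))
#endif
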
Example #2
/*
 * Check that rte_lpm_lookup fails gracefully for incorrect user input
 * arguments
 */
int32_t
test5(void)
{
#if defined(RTE_LIBRTE_LPM_DEBUG)
	struct rte_lpm *lpm = NULL;
	uint32_t ip = IPv4(0, 0, 0, 0);
	uint8_t next_hop_return = 0;
	int32_t status = 0;

	/* rte_lpm_lookup: lpm == NULL */
	status = rte_lpm_lookup(NULL, ip, &next_hop_return);
	TEST_LPM_ASSERT(status < 0);

	/* Create valid lpm to use in rest of test. */
	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
	TEST_LPM_ASSERT(lpm != NULL);

	/* rte_lpm_lookup: next_hop = NULL */
	status = rte_lpm_lookup(lpm, ip, NULL);
	TEST_LPM_ASSERT(status < 0);

	rte_lpm_free(lpm);
#endif
	return PASS;
}
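
The body of test5 is compiled only when RTE_LIBRTE_LPM_DEBUG is defined, because the argument checks inside rte_lpm_lookup() are themselves compiled out in non-debug builds. Below is a sketch of the guard pattern the library uses for those checks, based on older rte_lpm.h; treat the macro spelling as an assumption for your release.

/* Sketch of the debug-only argument checking used by the LPM library.
 * When RTE_LIBRTE_LPM_DEBUG is not defined at build time, the checks
 * disappear and test5 above degenerates to an empty PASS. */
#if defined(RTE_LIBRTE_LPM_DEBUG)
#define RTE_LPM_RETURN_IF_TRUE(cond, retval) do {	\
	if (cond)					\
		return (retval);			\
} while (0)
#else
#define RTE_LPM_RETURN_IF_TRUE(cond, retval)
#endif
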
Example #3
int32_t
test13(void)
{
	struct rte_lpm *lpm = NULL;
	uint32_t ip, i;
	uint8_t depth, next_hop_add_1, next_hop_add_2, next_hop_return;
	int32_t status = 0;

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
	TEST_LPM_ASSERT(lpm != NULL);

	ip = IPv4(128, 0, 0, 0);
	depth = 24;
	next_hop_add_1 = 100;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add_1);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));

	depth = 32;
	next_hop_add_2 = 101;

	for (i = 0; i < 1000; i++) {
		status = rte_lpm_add(lpm, ip, depth, next_hop_add_2);
		TEST_LPM_ASSERT(status == 0);

		status = rte_lpm_lookup(lpm, ip, &next_hop_return);
		TEST_LPM_ASSERT((status == 0) &&
				(next_hop_return == next_hop_add_2));

		status = rte_lpm_delete(lpm, ip, depth);
		TEST_LPM_ASSERT(status == 0);

		status = rte_lpm_lookup(lpm, ip, &next_hop_return);
		TEST_LPM_ASSERT((status == 0) &&
				(next_hop_return == next_hop_add_1));
	}

	depth = 24;

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_free(lpm);

	return PASS;
}
Example #4
int lpm_entry_lookup(unsigned int ip, int socketid) {
	uint32_t next_hop;

	return (uint8_t) ((rte_lpm_lookup(ipv4_l3fwd_lpm_lookup_struct[socketid],
		ip,
		&next_hop) == 0) ? next_hop : 255);
}
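
The wrapper above collapses a lookup miss into the sentinel value 255, in the style of the l3fwd sample's next-hop resolution. A hedged sketch of how a caller might consume that sentinel follows; resolve_dst_port() and the fall-back-to-receiving-port policy are illustrative assumptions, not a fixed l3fwd API.

/* Sketch (hypothetical helper): resolve the output port for one IPv4
 * destination, falling back to the receiving port when the LPM reports
 * no route (sentinel 255 from lpm_entry_lookup above). */
static inline uint8_t
resolve_dst_port(uint32_t dst_ip, uint8_t rx_port, int socketid)
{
	uint8_t dst_port = (uint8_t) lpm_entry_lookup(dst_ip, socketid);

	if (dst_port == 255)	/* lookup miss */
		dst_port = rx_port;

	return dst_port;
}
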
Example #5
int32_t
test12(void)
{
	__m128i ipx4;
	uint16_t hop[4];
	struct rte_lpm *lpm = NULL;
	uint32_t ip, i;
	uint8_t depth, next_hop_add, next_hop_return;
	int32_t status = 0;

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
	TEST_LPM_ASSERT(lpm != NULL);

	ip = IPv4(128, 0, 0, 0);
	depth = 32;
	next_hop_add = 100;

	for (i = 0; i < 1000; i++) {
		status = rte_lpm_add(lpm, ip, depth, next_hop_add);
		TEST_LPM_ASSERT(status == 0);

		status = rte_lpm_lookup(lpm, ip, &next_hop_return);
		TEST_LPM_ASSERT((status == 0) &&
				(next_hop_return == next_hop_add));

		ipx4 = _mm_set_epi32(ip, ip + 1, ip, ip - 1);
		rte_lpm_lookupx4(lpm, ipx4, hop, UINT16_MAX);
		TEST_LPM_ASSERT(hop[0] == UINT16_MAX);
		TEST_LPM_ASSERT(hop[1] == next_hop_add);
		TEST_LPM_ASSERT(hop[2] == UINT16_MAX);
		TEST_LPM_ASSERT(hop[3] == next_hop_add);

		status = rte_lpm_delete(lpm, ip, depth);
		TEST_LPM_ASSERT(status == 0);

		status = rte_lpm_lookup(lpm, ip, &next_hop_return);
		TEST_LPM_ASSERT(status == -ENOENT);
	}

	rte_lpm_free(lpm);

	return PASS;
}
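
rte_lpm_lookupx4() takes a final "default value" argument that is written into any lane whose address has no matching rule; the loop above passes UINT16_MAX so misses are easy to assert on. A minimal sketch of that pattern in isolation (the miss-marker choice is just the convention these tests use):

#include <stdio.h>
#include <rte_lpm.h>	/* pulls in the vector types used by lookupx4 */

/* Sketch: look up four addresses at once; lanes with no route come back
 * as UINT16_MAX because that is the default value we pass. */
static void
lookup4_example(struct rte_lpm *lpm, uint32_t ip0, uint32_t ip1,
		uint32_t ip2, uint32_t ip3)
{
	__m128i ipx4 = _mm_set_epi32(ip3, ip2, ip1, ip0);
	uint16_t hop[4];
	unsigned int k;

	rte_lpm_lookupx4(lpm, ipx4, hop, UINT16_MAX);
	for (k = 0; k < 4; k++)
		if (hop[k] == UINT16_MAX)
			printf("lane %u: no route\n", k);
}
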
Example #6
/*
 * Force TBL8 extension exhaustion. Add 256 rules that each require a tbl8
 * extension. No more tbl8 extensions will be allowed. Now add one more rule
 * that requires a tbl8 extension and check that it fails.
 */
int32_t
test14(void)
{

	/* We only use depth = 32 in the loop below so we must make sure
	 * that we have enough storage for all rules at that depth. */

	struct rte_lpm *lpm = NULL;
	uint32_t ip;
	uint8_t depth, next_hop_add, next_hop_return;
	int32_t status = 0;

	/* Add enough space for 256 rules for every depth */
	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, 256 * 32, 0);
	TEST_LPM_ASSERT(lpm != NULL);

	depth = 32;
	next_hop_add = 100;
	ip = IPv4(0, 0, 0, 0);

	/* Add 256 rules that require a tbl8 extension */
	for (; ip <= IPv4(0, 0, 255, 0); ip += 256) {
		status = rte_lpm_add(lpm, ip, depth, next_hop_add);
		TEST_LPM_ASSERT(status == 0);

		status = rte_lpm_lookup(lpm, ip, &next_hop_return);
		TEST_LPM_ASSERT((status == 0) &&
				(next_hop_return == next_hop_add));
	}

	/* All tbl8 extensions have been used above. Try to add one more rule
	 * and verify that it fails. */
	ip = IPv4(1, 0, 0, 0);
	depth = 32;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status < 0);

	rte_lpm_free(lpm);

	return PASS;
}
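
The limit test14 runs into is the fixed pool of tbl8 groups, not the rule capacity passed to rte_lpm_create(): every rule deeper than /24 whose covering /24 is not yet extended consumes one tbl8 group, and older rte_lpm ships 256 of them (RTE_LPM_TBL8_NUM_GROUPS; treat the exact constant as an assumption and check rte_lpm.h for your release). A short sketch of the arithmetic behind the loop above:

#include <stdio.h>
#include <rte_lpm.h>

/* Sketch: why 256 depth-32 rules in distinct /24 ranges exhaust the tbl8
 * pool, so the next extension request in test14 must fail. */
static void
tbl8_budget_example(void)
{
	/* IPv4(0,0,0,0) .. IPv4(0,0,255,0) in steps of 256 covers 256
	 * different /24 prefixes; each one allocates its own tbl8 group. */
	unsigned int rules_needing_tbl8 = (IPv4(0, 0, 255, 0) / 256) + 1;
	unsigned int groups_available = RTE_LPM_TBL8_NUM_GROUPS;

	printf("tbl8 groups available = %u, consumed = %u -> next add fails\n",
			groups_available, rules_needing_tbl8);
}
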
Example #7
static int
rte_table_lpm_lookup(
	void *table,
	struct rte_mbuf **pkts,
	uint64_t pkts_mask,
	uint64_t *lookup_hit_mask,
	void **entries)
{
	struct rte_table_lpm *lpm = (struct rte_table_lpm *) table;
	uint64_t pkts_out_mask = 0;
	uint32_t i;

	__rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask);
	RTE_TABLE_LPM_STATS_PKTS_IN_ADD(lpm, n_pkts_in);

	pkts_out_mask = 0;
	for (i = 0; i < (uint32_t)(RTE_PORT_IN_BURST_SIZE_MAX -
		__builtin_clzll(pkts_mask)); i++) {
		uint64_t pkt_mask = 1LLU << i;

		if (pkt_mask & pkts_mask) {
			struct rte_mbuf *pkt = pkts[i];
			uint32_t ip = rte_bswap32(
				RTE_MBUF_METADATA_UINT32(pkt, lpm->offset));
			int status;
			uint8_t nht_pos;

			status = rte_lpm_lookup(lpm->lpm, ip, &nht_pos);
			if (status == 0) {
				pkts_out_mask |= pkt_mask;
				entries[i] = (void *) &lpm->nht[nht_pos *
					lpm->entry_size];
			}
		}
	}

	*lookup_hit_mask = pkts_out_mask;
	RTE_TABLE_LPM_STATS_PKTS_LOOKUP_MISS(lpm, n_pkts_in - __builtin_popcountll(pkts_out_mask));
	return 0;
}
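
The loop bound above is the position of the most significant set bit in pkts_mask, with RTE_PORT_IN_BURST_SIZE_MAX (64 in the rte_port API) as the word width. One caveat: __builtin_clzll() is undefined for a zero argument, which is exactly the case Example #14 below guards against. A small sketch that captures both points:

#include <stdint.h>

/* Sketch: how many low-order packet slots must be scanned to cover every
 * bit set in a 64-bit burst mask. Returns 0 for an empty mask instead of
 * invoking __builtin_clzll(0), which is undefined behaviour. */
static inline uint32_t
burst_loop_bound(uint64_t pkts_mask)
{
	if (pkts_mask == 0)
		return 0;

	return 64 - (uint32_t)__builtin_clzll(pkts_mask);
}
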
Example #8
/*
 * - Add rule that covers a TBL24 range previously invalid & lookup (& delete &
 *   lookup)
 * - Add rule that extends a TBL24 invalid entry & lookup (& delete & lookup)
 * - Add rule that extends a TBL24 valid entry & lookup for both rules (&
 *   delete & lookup)
 * - Add rule that updates the next hop in TBL24 & lookup (& delete & lookup)
 * - Add rule that updates the next hop in TBL8 & lookup (& delete & lookup)
 * - Delete a rule that is not present in the TBL24 & lookup
 * - Delete a rule that is not present in the TBL8 & lookup
 *
 */
int32_t
test10(void)
{

	struct rte_lpm *lpm = NULL;
	uint32_t ip;
	uint8_t depth, next_hop_add, next_hop_return;
	int32_t status = 0;

	/* Add rule that covers a TBL24 range previously invalid & lookup
	 * (& delete & lookup) */
	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
	TEST_LPM_ASSERT(lpm != NULL);

	ip = IPv4(128, 0, 0, 0);
	depth = 16;
	next_hop_add = 100;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_delete_all(lpm);

	ip = IPv4(128, 0, 0, 0);
	depth = 25;
	next_hop_add = 100;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	rte_lpm_delete_all(lpm);

	/* Add rule that extends a TBL24 valid entry & lookup for both rules
	 * (& delete & lookup) */

	ip = IPv4(128, 0, 0, 0);
	depth = 24;
	next_hop_add = 100;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	ip = IPv4(128, 0, 0, 10);
	depth = 32;
	next_hop_add = 101;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	ip = IPv4(128, 0, 0, 0);
	next_hop_add = 100;

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	ip = IPv4(128, 0, 0, 0);
	depth = 24;

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	ip = IPv4(128, 0, 0, 10);
	depth = 32;

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_delete_all(lpm);

	/* Add rule that updates the next hop in TBL24 & lookup
	 * (& delete & lookup) */

	ip = IPv4(128, 0, 0, 0);
	depth = 24;
	next_hop_add = 100;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	next_hop_add = 101;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_delete_all(lpm);

	/* Add rule that updates the next hop in TBL8 & lookup
	 * (& delete & lookup) */

	ip = IPv4(128, 0, 0, 0);
	depth = 32;
	next_hop_add = 100;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	next_hop_add = 101;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_delete_all(lpm);

	/* Delete a rule that is not present in the TBL24 & lookup */

	ip = IPv4(128, 0, 0, 0);
	depth = 24;

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status < 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_delete_all(lpm);

	/* Delete a rule that is not present in the TBL8 & lookup */

	ip = IPv4(128, 0, 0, 0);
	depth = 32;

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status < 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_free(lpm);

	return PASS;
}
Example #9
/*
 * - Add & lookup to hit invalid TBL24 entry
 * - Add & lookup to hit valid TBL24 entry not extended
 * - Add & lookup to hit valid extended TBL24 entry with invalid TBL8 entry
 * - Add & lookup to hit valid extended TBL24 entry with valid TBL8 entry
 *
 */
int32_t
test9(void)
{
	struct rte_lpm *lpm = NULL;
	uint32_t ip, ip_1, ip_2;
	uint8_t depth, depth_1, depth_2, next_hop_add, next_hop_add_1,
		next_hop_add_2, next_hop_return;
	int32_t status = 0;

	/* Add & lookup to hit invalid TBL24 entry */
	ip = IPv4(128, 0, 0, 0);
	depth = 24;
	next_hop_add = 100;

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
	TEST_LPM_ASSERT(lpm != NULL);

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_delete_all(lpm);

	/* Add & lookup to hit valid TBL24 entry not extended */
	ip = IPv4(128, 0, 0, 0);
	depth = 23;
	next_hop_add = 100;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	depth = 24;
	next_hop_add = 101;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	depth = 24;

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	depth = 23;

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_delete_all(lpm);

	/* Add & lookup to hit valid extended TBL24 entry with invalid TBL8
	 * entry */
	ip = IPv4(128, 0, 0, 0);
	depth = 32;
	next_hop_add = 100;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	ip = IPv4(128, 0, 0, 5);
	depth = 32;
	next_hop_add = 101;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	ip = IPv4(128, 0, 0, 0);
	depth = 32;
	next_hop_add = 100;

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_delete_all(lpm);

	/* Add & lookup to hit valid extended TBL24 entry with valid TBL8
	 * entry */
	ip_1 = IPv4(128, 0, 0, 0);
	depth_1 = 25;
	next_hop_add_1 = 101;

	ip_2 = IPv4(128, 0, 0, 5);
	depth_2 = 32;
	next_hop_add_2 = 102;

	next_hop_return = 0;

	status = rte_lpm_add(lpm, ip_1, depth_1, next_hop_add_1);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip_1, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));

	status = rte_lpm_add(lpm, ip_2, depth_2, next_hop_add_2);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip_2, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_2));

	status = rte_lpm_delete(lpm, ip_2, depth_2);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip_2, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));

	status = rte_lpm_delete(lpm, ip_1, depth_1);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip_1, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_free(lpm);

	return PASS;
}
Example #10
/*
 * Use rte_lpm_add to add rules which affect only the second half of the lpm
 * table. Use all possible depths ranging from 1..32. Set the next hop equal
 * to the depth. Check for a lookup hit on every add and check for a lookup
 * miss on the first half of the lpm table after each add. Finally delete all
 * rules going backwards (i.e. from depth = 32..1) and carry out a lookup
 * after each delete. The lookup should return the next_hop_add value related
 * to the previous depth value (i.e. depth - 1).
 */
int32_t
test8(void)
{
	__m128i ipx4;
	uint16_t hop[4];
	struct rte_lpm *lpm = NULL;
	uint32_t ip1 = IPv4(127, 255, 255, 255), ip2 = IPv4(128, 0, 0, 0);
	uint8_t depth, next_hop_add, next_hop_return;
	int32_t status = 0;

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
	TEST_LPM_ASSERT(lpm != NULL);

	/* Loop with rte_lpm_add. */
	for (depth = 1; depth <= 32; depth++) {
		/* Let the next_hop_add value equal the depth, just for variety. */
		next_hop_add = depth;

		status = rte_lpm_add(lpm, ip2, depth, next_hop_add);
		TEST_LPM_ASSERT(status == 0);

		/* Check IP in first half of tbl24 which should be empty. */
		status = rte_lpm_lookup(lpm, ip1, &next_hop_return);
		TEST_LPM_ASSERT(status == -ENOENT);

		status = rte_lpm_lookup(lpm, ip2, &next_hop_return);
		TEST_LPM_ASSERT((status == 0) &&
			(next_hop_return == next_hop_add));

		ipx4 = _mm_set_epi32(ip2, ip1, ip2, ip1);
		rte_lpm_lookupx4(lpm, ipx4, hop, UINT16_MAX);
		TEST_LPM_ASSERT(hop[0] == UINT16_MAX);
		TEST_LPM_ASSERT(hop[1] == next_hop_add);
		TEST_LPM_ASSERT(hop[2] == UINT16_MAX);
		TEST_LPM_ASSERT(hop[3] == next_hop_add);
	}

	/* Loop with rte_lpm_delete. */
	for (depth = 32; depth >= 1; depth--) {
		next_hop_add = (uint8_t) (depth - 1);

		status = rte_lpm_delete(lpm, ip2, depth);
		TEST_LPM_ASSERT(status == 0);

		status = rte_lpm_lookup(lpm, ip2, &next_hop_return);

		if (depth != 1) {
			TEST_LPM_ASSERT((status == 0) &&
				(next_hop_return == next_hop_add));
		}
		else {
			TEST_LPM_ASSERT(status == -ENOENT);
		}

		status = rte_lpm_lookup(lpm, ip1, &next_hop_return);
		TEST_LPM_ASSERT(status == -ENOENT);

		ipx4 = _mm_set_epi32(ip1, ip1, ip2, ip2);
		rte_lpm_lookupx4(lpm, ipx4, hop, UINT16_MAX);
		if (depth != 1) {
			TEST_LPM_ASSERT(hop[0] == next_hop_add);
			TEST_LPM_ASSERT(hop[1] == next_hop_add);
		} else {
			TEST_LPM_ASSERT(hop[0] == UINT16_MAX);
			TEST_LPM_ASSERT(hop[1] == UINT16_MAX);
		}
		TEST_LPM_ASSERT(hop[2] == UINT16_MAX);
		TEST_LPM_ASSERT(hop[3] == UINT16_MAX);
	}

	rte_lpm_free(lpm);

	return PASS;
}
Example #11
int32_t
perf_test(void)
{
	struct rte_lpm *lpm = NULL;
	uint64_t begin, total_time, lpm_used_entries = 0;
	unsigned i, j;
	uint8_t next_hop_add = 0xAA, next_hop_return = 0;
	int status = 0;
	uint64_t cache_line_counter = 0;
	int64_t count = 0;

	rte_srand(rte_rdtsc());

	printf("No. routes = %u\n", (unsigned) NUM_ROUTE_ENTRIES);

	print_route_distribution(large_route_table, (uint32_t) NUM_ROUTE_ENTRIES);

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, 1000000, 0);
	TEST_LPM_ASSERT(lpm != NULL);

	/* Measure add. */
	begin = rte_rdtsc();

	for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
		if (rte_lpm_add(lpm, large_route_table[i].ip,
				large_route_table[i].depth, next_hop_add) == 0)
			status++;
	}
	/* End Timer. */
	total_time = rte_rdtsc() - begin;

	printf("Unique added entries = %d\n", status);
	/* Obtain add statistics. */
	for (i = 0; i < RTE_LPM_TBL24_NUM_ENTRIES; i++) {
		if (lpm->tbl24[i].valid)
			lpm_used_entries++;

		if (i % 32 == 0){
			if ((uint64_t)count < lpm_used_entries) {
				cache_line_counter++;
				count = lpm_used_entries;
			}
		}
	}

	printf("Used table 24 entries = %u (%g%%)\n",
			(unsigned) lpm_used_entries,
			(lpm_used_entries * 100.0) / RTE_LPM_TBL24_NUM_ENTRIES);
	printf("64 byte Cache entries used = %u (%u bytes)\n",
			(unsigned) cache_line_counter, (unsigned) cache_line_counter * 64);

	printf("Average LPM Add: %g cycles\n", (double)total_time / NUM_ROUTE_ENTRIES);

	/* Measure single Lookup */
	total_time = 0;
	count = 0;

	for (i = 0; i < ITERATIONS; i ++) {
		static uint32_t ip_batch[BATCH_SIZE];

		for (j = 0; j < BATCH_SIZE; j ++)
			ip_batch[j] = rte_rand();

		/* Lookup per batch */
		begin = rte_rdtsc();

		for (j = 0; j < BATCH_SIZE; j ++) {
			if (rte_lpm_lookup(lpm, ip_batch[j], &next_hop_return) != 0)
				count++;
		}

		total_time += rte_rdtsc() - begin;

	}
	printf("Average LPM Lookup: %.1f cycles (fails = %.1f%%)\n",
			(double)total_time / ((double)ITERATIONS * BATCH_SIZE),
			(count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));

	/* Measure bulk Lookup */
	total_time = 0;
	count = 0;
	for (i = 0; i < ITERATIONS; i ++) {
		static uint32_t ip_batch[BATCH_SIZE];
		uint16_t next_hops[BULK_SIZE];

		/* Create array of random IP addresses */
		for (j = 0; j < BATCH_SIZE; j ++)
			ip_batch[j] = rte_rand();

		/* Lookup per batch */
		begin = rte_rdtsc();
		for (j = 0; j < BATCH_SIZE; j += BULK_SIZE) {
			unsigned k;
			rte_lpm_lookup_bulk(lpm, &ip_batch[j], next_hops, BULK_SIZE);
			for (k = 0; k < BULK_SIZE; k++)
				if (unlikely(!(next_hops[k] & RTE_LPM_LOOKUP_SUCCESS)))
					count++;
		}

		total_time += rte_rdtsc() - begin;
	}
	printf("BULK LPM Lookup: %.1f cycles (fails = %.1f%%)\n",
			(double)total_time / ((double)ITERATIONS * BATCH_SIZE),
			(count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));

	/* Measure LookupX4 */
	total_time = 0;
	count = 0;
	for (i = 0; i < ITERATIONS; i++) {
		static uint32_t ip_batch[BATCH_SIZE];
		uint16_t next_hops[4];

		/* Create array of random IP addresses */
		for (j = 0; j < BATCH_SIZE; j++)
			ip_batch[j] = rte_rand();

		/* Lookup per batch */
		begin = rte_rdtsc();
		for (j = 0; j < BATCH_SIZE; j += RTE_DIM(next_hops)) {
			unsigned k;
			__m128i ipx4;

			ipx4 = _mm_loadu_si128((__m128i *)(ip_batch + j));
			rte_lpm_lookupx4(lpm, ipx4, next_hops, UINT16_MAX);
			for (k = 0; k < RTE_DIM(next_hops); k++)
				if (unlikely(next_hops[k] == UINT16_MAX))
					count++;
		}

		total_time += rte_rdtsc() - begin;
	}
	printf("LPM LookupX4: %.1f cycles (fails = %.1f%%)\n",
			(double)total_time / ((double)ITERATIONS * BATCH_SIZE),
			(count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));

	/* Delete */
	status = 0;
	begin = rte_rdtsc();

	for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
		/* rte_lpm_delete(lpm, ip, depth) */
		status += rte_lpm_delete(lpm, large_route_table[i].ip,
				large_route_table[i].depth);
	}

	total_time = rte_rdtsc() - begin;

	printf("Average LPM Delete: %g cycles\n",
			(double)total_time / NUM_ROUTE_ENTRIES);

	rte_lpm_delete_all(lpm);
	rte_lpm_free(lpm);

	return PASS;
}
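
perf_test depends on a handful of benchmark parameters and on a pre-generated large_route_table[] that live elsewhere in the test harness. The sketch below shows plausible definitions so the averages printed above can be followed; the concrete values and the route-table declaration are assumptions, not the harness's exact ones.

#include <stdint.h>

/* Assumed benchmark parameters (sketch; the real harness may use others). */
#define ITERATIONS	(1 << 10)	/* measurement repetitions */
#define BATCH_SIZE	(1 << 12)	/* random addresses looked up per iteration */
#define BULK_SIZE	32		/* addresses per rte_lpm_lookup_bulk() call */

/* Route set: (ip, depth) pairs generated elsewhere in the harness. */
struct route_rule {
	uint32_t ip;
	uint8_t depth;
};

extern struct route_rule large_route_table[];
extern unsigned int num_route_entries;
#define NUM_ROUTE_ENTRIES num_route_entries
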
Example #12
/*
 * Test for overwriting of tbl8:
 *  - add rule /32 and lookup
 *  - add new rule /24 and lookup
 *  - add third rule /25 and lookup
 *  - lookup /32 and /24 rule to ensure the table has not been overwritten.
 */
int32_t
test17(void)
{
	struct rte_lpm *lpm = NULL;
	const uint32_t ip_10_32 = IPv4(10, 10, 10, 2);
	const uint32_t ip_10_24 = IPv4(10, 10, 10, 0);
	const uint32_t ip_20_25 = IPv4(10, 10, 20, 2);
	const uint8_t d_ip_10_32 = 32,
			d_ip_10_24 = 24,
			d_ip_20_25 = 25;
	const uint8_t next_hop_ip_10_32 = 100,
			next_hop_ip_10_24 = 105,
			next_hop_ip_20_25 = 111;
	uint8_t next_hop_return = 0;
	int32_t status = 0;

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
	TEST_LPM_ASSERT(lpm != NULL);

	if ((status = rte_lpm_add(lpm, ip_10_32, d_ip_10_32,
			next_hop_ip_10_32)) < 0)
		return -1;

	status = rte_lpm_lookup(lpm, ip_10_32, &next_hop_return);
	uint8_t test_hop_10_32 = next_hop_return;
	TEST_LPM_ASSERT(status == 0);
	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);

	if ((status = rte_lpm_add(lpm, ip_10_24, d_ip_10_24,
			next_hop_ip_10_24)) < 0)
			return -1;

	status = rte_lpm_lookup(lpm, ip_10_24, &next_hop_return);
	uint8_t test_hop_10_24 = next_hop_return;
	TEST_LPM_ASSERT(status == 0);
	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24);

	if ((status = rte_lpm_add(lpm, ip_20_25, d_ip_20_25,
			next_hop_ip_20_25)) < 0)
		return -1;

	status = rte_lpm_lookup(lpm, ip_20_25, &next_hop_return);
	uint8_t test_hop_20_25 = next_hop_return;
	TEST_LPM_ASSERT(status == 0);
	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_20_25);

	if (test_hop_10_32 == test_hop_10_24) {
		printf("Next hop return equal\n");
		return -1;
	}

	if (test_hop_10_24 == test_hop_20_25){
		printf("Next hop return equal\n");
		return -1;
	}

	status = rte_lpm_lookup(lpm, ip_10_32, &next_hop_return);
	TEST_LPM_ASSERT(status == 0);
	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);

	status = rte_lpm_lookup(lpm, ip_10_24, &next_hop_return);
	TEST_LPM_ASSERT(status == 0);
	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24);

	rte_lpm_free(lpm);

	return PASS;
}
Example #13
static int
neighbor4(neighbor_action_t action,
	  __s32 port_id,
	  struct in_addr* addr,
	  struct ether_addr* lladdr,
	  __u8 flags,
	  __rte_unused __u16 vlan_id,
	  void* args)
{
	// if port_id is not handled
	//   ignore, return immediately
	// if neighbor add
	//   lookup neighbor
	//   if exists
	//     update lladdr, set flag as REACHABLE/STALE/DELAY
	//   else
	//     // This should not happen
	//     insert new nexthop
	//     set insert date=now, refcount = 0, flag=REACHABLE/STALE/DELAY
	// if neighbor delete
	//   lookup neighbor
	//   if exists
	//     if refcount != 0
	//       set nexthop as invalid
	//     else
	//       set flag empty
	//   else
	//     do nothing
	//     // this should not happen

	struct control_handle* handle = args;
	assert(handle != NULL);
	int s;
	uint16_t nexthop_id;
	uint32_t find_id;
	int32_t socket_id = handle->socket_id;
	char ipbuf[INET_ADDRSTRLEN];

	assert(neighbor4_struct != NULL);

	if (addr == NULL)
		return -1;
	inet_ntop(AF_INET, addr, ipbuf, INET_ADDRSTRLEN);

	if (action == NEIGHBOR_ADD) {
		if (lladdr == NULL)
			return -1;
		char ibuf[IFNAMSIZ];
		unsigned kni_vlan;

		if_indextoname(port_id, ibuf);
		s = sscanf(ibuf, "dpdk%10u.%10u", &port_id, &kni_vlan);
		if (s <= 0) {
			RTE_LOG(ERR, PKTJ_CTRL1,
				"received a neighbor "
				"announce for an unmanaged "
				"iface %s\n",
				ibuf);
			return -1;
		}

		s = neighbor4_lookup_nexthop(neighbor4_struct[socket_id], addr,
					     &nexthop_id);
		if (s < 0) {
			if (flags != NUD_NONE && flags != NUD_NOARP &&
			    flags != NUD_STALE) {
				RTE_LOG(ERR, PKTJ_CTRL1,
					"failed to change state in neighbor4 "
					"table (state %d, %s)...\n",
					flags, ipbuf);
				return -1;
			}

			{
				RTE_LOG(DEBUG, PKTJ_CTRL1,
					"adding ipv4 neighbor %s with port %s "
					"vlan_id %d...\n",
					ipbuf, ibuf, kni_vlan);
			}

			s = neighbor4_add_nexthop(neighbor4_struct[socket_id],
						  addr, &nexthop_id,
						  NEI_ACTION_FWD);
			if (s < 0) {
				RTE_LOG(ERR, PKTJ_CTRL1,
					"failed to add a "
					"nexthop in neighbor "
					"table...\n");
				return -1;
			}

			if (rte_lpm_lookup(ipv4_pktj_lookup_struct[socket_id],
					   rte_be_to_cpu_32(addr->s_addr),
					   &find_id) == 0) {
				s = rte_lpm_add(
				    ipv4_pktj_lookup_struct[socket_id],
				    rte_be_to_cpu_32(addr->s_addr), 32,
				    nexthop_id);
				if (s < 0) {
					lpm4_stats[socket_id].nb_add_ko++;
					RTE_LOG(ERR, PKTJ_CTRL1,
						"failed to add a route in "
						"lpm during neighbor "
						"adding...\n");
					return -1;
				}
				lpm4_stats[socket_id].nb_add_ok++;
			}
		}

		if (flags == NUD_FAILED) {
			neighbor4_set_action(neighbor4_struct[socket_id],
					     nexthop_id, NEI_ACTION_KNI);
		} else {
			neighbor4_set_action(neighbor4_struct[socket_id],
					     nexthop_id, NEI_ACTION_FWD);
		}
		RTE_LOG(DEBUG, PKTJ_CTRL1,
			"set neighbor4 with port_id %d state %d\n", port_id,
			flags);
		neighbor4_set_lladdr_port(neighbor4_struct[socket_id],
					  nexthop_id, &ports_eth_addr[port_id],
					  lladdr, port_id, kni_vlan);
		neighbor4_set_state(neighbor4_struct[socket_id], nexthop_id,
				    flags);
	}
	if (action == NEIGHBOR_DELETE) {
		if (flags != NUD_FAILED && flags != NUD_STALE) {
			RTE_LOG(
			    DEBUG, PKTJ_CTRL1,
			    "neighbor4 delete ope failed, bad NUD state: %d \n",
			    flags);
			return -1;
		}

		RTE_LOG(DEBUG, PKTJ_CTRL1, "deleting ipv4 neighbor...\n");
		s = neighbor4_lookup_nexthop(neighbor4_struct[socket_id], addr,
					     &nexthop_id);
		if (s < 0) {
			RTE_LOG(ERR, PKTJ_CTRL1,
				"failed to find a nexthop to "
				"delete in neighbor "
				"table...\n");
			return 0;
		}
		neighbor4_delete(neighbor4_struct[socket_id], nexthop_id);
		// FIXME not thread safe
		if (neighbor4_struct[socket_id]
			->entries.t4[nexthop_id]
			.neighbor.refcnt == 0) {
			s = rte_lpm_delete(ipv4_pktj_lookup_struct[socket_id],
					   rte_be_to_cpu_32(addr->s_addr), 32);
			if (s < 0) {
				lpm4_stats[socket_id].nb_del_ko++;
				RTE_LOG(ERR, PKTJ_CTRL1,
					"failed to delete route...\n");
				return -1;
			}
			lpm4_stats[socket_id].nb_del_ok++;
		}
	}
	RTE_LOG(DEBUG, PKTJ_CTRL1, "neigh %s ope success\n", ipbuf);
	return 0;
}
Example #14
int
mg_table_lpm_lookup(
	void *table,
	struct rte_mbuf **pkts,
	uint64_t pkts_mask,
	uint64_t *lookup_hit_mask,
	void **entries)
{
  //printf("ENTRIES = %p\n", entries);
	struct rte_table_lpm *lpm = (struct rte_table_lpm *) table;
	uint64_t pkts_out_mask = 0;
	uint32_t i;

  //struct rte_pktmbuf pkt0 = pkts[0]->pkt;
  //printf("headroom: %d\n", rte_pktmbuf_headroom(pkts[0]));
  ////void * data = pkt0.data+128;
  //void * data = pkt0.data;
  //printhex("data          = ", data, 256);
  //printhex("data buf addr = ", pkts[0]->buf_addr, 256);
  //printhex("pktinmask = ", &pkts_mask, 8);
  //printhex("ipaddr = ", pkts[0]->buf_addr + lpm->offset, 4);

	pkts_out_mask = 0;
  if(!pkts_mask){
    // workaround for DPDK bug:
    // __builtin_clzll(x) is undefined for x = 0
    *lookup_hit_mask = pkts_out_mask;
    return 0;
  }
	for (i = 0; i < (uint32_t)(RTE_PORT_IN_BURST_SIZE_MAX -
		__builtin_clzll(pkts_mask)); i++) {
    //printf("loop %d\n", i);
		uint64_t pkt_mask = 1LLU << i;

		if (pkt_mask & pkts_mask) {
      //printf("pktmaskmatch\n");
			struct rte_mbuf *pkt = pkts[i];
			//uint32_t ip = rte_bswap32(
			//	*((uint32_t*)(&RTE_MBUF_METADATA_UINT8(pkt, lpm->offset))));
			uint32_t ip = rte_bswap32( *((uint32_t*)(pkt->buf_addr + lpm->offset)) );
			//uint32_t ip = ( *((uint32_t*)(pkt->buf_addr + lpm->offset)) );
      //printhex("checking ip: ", &ip, 4);
			int status;
			uint8_t nht_pos;

			status = rte_lpm_lookup(lpm->lpm, ip, &nht_pos);
      //printf(" status: %d\n", status);
			if (status == 0) {
        //printf("HIT HIT HIT\n");
				pkts_out_mask |= pkt_mask;
				entries[i] = (void *) &lpm->nht[nht_pos *
					lpm->entry_size];
      }else{
        entries[i] = NULL;
      }
      //printf("r: entries[%d\t] = %p\n", i, entries[i]);
      //printf("r: entries pp[%d\t] = %p\n", i, entries+i);
      //printf("r: entries[%d\t] = %p\n", i, *(entries+i));
      //printf("  iface = %d\n", ((uint8_t*)(entries[i]))[4]);
		}
    // FIXME: if input mask does not match should we also set entry ptr to NULL?
	}

	*lookup_hit_mask = pkts_out_mask;

	return 0;
}