/*
 * Add two rules (a /24 and a more-specific /32 inside it), look up to hit
 * each of them, then delete both rules, verifying after each delete that
 * the corresponding lookup now misses.
 */
int32_t test11(void)
{
	struct rte_lpm *lpm = NULL;
	uint32_t ip;
	uint8_t depth, next_hop_add, next_hop_return;
	int32_t status = 0;

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
	TEST_LPM_ASSERT(lpm != NULL);

	/* Less specific rule: 128.0.0.0/24 -> next hop 100. */
	ip = IPv4(128, 0, 0, 0);
	depth = 24;
	next_hop_add = 100;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	/* More specific rule: 128.0.0.10/32 -> next hop 101. */
	ip = IPv4(128, 0, 0, 10);
	depth = 32;
	next_hop_add = 101;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	/* Lookup of 128.0.0.10 must hit the /32 rule. */
	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	/* Lookup of 128.0.0.0 must hit the /24 rule. */
	ip = IPv4(128, 0, 0, 0);
	next_hop_add = 100;

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	/* Delete the /24; 128.0.0.0 no longer matches any rule. */
	ip = IPv4(128, 0, 0, 0);
	depth = 24;

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	/* Delete the /32; 128.0.0.10 no longer matches any rule. */
	ip = IPv4(128, 0, 0, 10);
	depth = 32;

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_free(lpm);

	return PASS;
}
/* * Check that rte_lpm_delete fails gracefully for incorrect user input * arguments */ int32_t test4(void) { struct rte_lpm *lpm = NULL; uint32_t ip = IPv4(0, 0, 0, 0); uint8_t depth = 24; int32_t status = 0; /* rte_lpm_delete: lpm == NULL */ status = rte_lpm_delete(NULL, ip, depth); TEST_LPM_ASSERT(status < 0); /*Create vaild lpm to use in rest of test. */ lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0); TEST_LPM_ASSERT(lpm != NULL); /* rte_lpm_delete: depth < 1 */ status = rte_lpm_delete(lpm, ip, 0); TEST_LPM_ASSERT(status < 0); /* rte_lpm_delete: depth > MAX_DEPTH */ status = rte_lpm_delete(lpm, ip, (MAX_DEPTH + 1)); TEST_LPM_ASSERT(status < 0); rte_lpm_free(lpm); return PASS; }
/*
 * Dispatch one captured packet to the decoder matching its pcap link type.
 * Returns the decoder's verdict, or 0 for link types that are recognized
 * but not decoded (802.11, Linux SLL) or unknown.
 */
static char PktCheck(unsigned int dlt, struct pcap_pkthdr *h, u_char *bytes)
{
	if (dlt == DLT_EN10MB)
		return Ethernet(bytes, h->caplen);
	if (dlt == DLT_RAW)
		return IPv4(bytes, h->caplen);
	if (dlt == DLT_PPP)
		return Ppp(bytes, h->caplen);
	/* DLT_IEEE802_11 and DLT_LINUX_SLL: no decoder wired up yet. */
	return 0;
}
/*
 * Decode an Ethernet frame: strip the MAC header and hand the payload to
 * the protocol decoder selected by the EtherType.  When the 'def' filter
 * is active, only the address family selected by 'ipv6f' is decoded.
 */
static char Ethernet(u_char *bytes, unsigned long len)
{
	struct ethhdr *eth = (struct ethhdr *)bytes;
	u_char *payload = bytes + sizeof(struct ethhdr);

	len -= sizeof(struct ethhdr);

	switch (ntohs(eth->h_proto)) {
	case ETHERTYPE_IP:
		if (def && ipv6f)
			return 0;	/* filtered: IPv6-only mode */
		return IPv4(payload, len);
	case ETHERTYPE_IPv6:
		if (def && ipv6f == 0)
			return 0;	/* filtered: IPv4-only mode */
		return IPv6(payload, len);
	case ETHERTYPE_PPPOES:
		return PPPoE(payload, len);
	}
	return 0;
}
/*
 * Hand-craft a QinQ (double VLAN) Ethernet + IPv4 frame inside 'mbuf' so
 * that the scheduler classifier maps it to SUBPORT/PIPE/TC/QUEUE, then
 * pre-write that classification into the mbuf with colour yellow.
 */
static void prepare_pkt(struct rte_mbuf *mbuf)
{
	struct ether_hdr *eth_hdr;
	struct vlan_hdr *vlan1, *vlan2;
	struct ipv4_hdr *ip_hdr;

	/* Simulate a classifier */
	eth_hdr = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
	/* Two stacked VLAN tags begin where the outer ether_type sits. */
	vlan1 = (struct vlan_hdr *)(&eth_hdr->ether_type );
	vlan2 = (struct vlan_hdr *)((uintptr_t)&eth_hdr->ether_type + sizeof(struct vlan_hdr));
	/* Re-point eth_hdr past both tags; its ether_type is the inner one. */
	eth_hdr = (struct ether_hdr *)((uintptr_t)&eth_hdr->ether_type + 2 *sizeof(struct vlan_hdr));
	/* NOTE(review): IP header placed right after the inner ether_type
	 * (eth_hdr base + 2 bytes) — confirm this matches the classifier's
	 * expected QinQ frame layout. */
	ip_hdr = (struct ipv4_hdr *)((uintptr_t)eth_hdr + sizeof(eth_hdr->ether_type));

	vlan1->vlan_tci = rte_cpu_to_be_16(SUBPORT);	/* outer tag selects the subport */
	vlan2->vlan_tci = rte_cpu_to_be_16(PIPE);	/* inner tag selects the pipe */
	eth_hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
	/* Destination address bytes encode traffic class and queue. */
	ip_hdr->dst_addr = IPv4(0,0,TC,QUEUE);

	rte_sched_port_pkt_write(mbuf, SUBPORT, PIPE, TC, QUEUE, e_RTE_METER_YELLOW);

	/* 64 byte packet */
	mbuf->pkt.pkt_len = 60;
	mbuf->pkt.data_len = 60;
}
/*
 * Serialize one paxos_message into a freshly allocated mbuf as an
 * Ethernet/IPv4/UDP packet addressed to the acceptor and transmit it on
 * port 0.  Accept-message fields are written in network byte order into
 * the paxos header that follows the UDP header.
 *
 * Fixes: check the mbuf allocation; drop the duplicated px->inst
 * assignment; free the mbuf only when it was NOT transmitted — on
 * success rte_eth_tx_burst transfers ownership to the driver, so the
 * old unconditional free was a double free.
 */
static void send_paxos_message(paxos_message *pm) {
	uint8_t port_id = 0;
	struct rte_mbuf *created_pkt = rte_pktmbuf_alloc(mbuf_pool);
	if (created_pkt == NULL) {
		rte_log(RTE_LOG_DEBUG, RTE_LOGTYPE_USER8,
			"mbuf alloc failed: message not sent\n");
		return;
	}
	created_pkt->l2_len = sizeof(struct ether_hdr);
	created_pkt->l3_len = sizeof(struct ipv4_hdr);
	created_pkt->l4_len = sizeof(struct udp_hdr) + sizeof(paxos_message);
	craft_new_packet(&created_pkt, IPv4(192,168,4,99), ACCEPTOR_ADDR,
			PROPOSER_PORT, ACCEPTOR_PORT,
			sizeof(paxos_message), port_id);
	/* paxos header sits after the ether + IPv4 + UDP headers */
	size_t udp_offset = sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr);
	size_t paxos_offset = udp_offset + sizeof(struct udp_hdr);
	struct paxos_hdr *px = rte_pktmbuf_mtod_offset(created_pkt,
			struct paxos_hdr *, paxos_offset);
	px->msgtype = rte_cpu_to_be_16(pm->type);
	px->inst = rte_cpu_to_be_32(pm->u.accept.iid);
	px->rnd = rte_cpu_to_be_16(pm->u.accept.ballot);
	px->vrnd = rte_cpu_to_be_16(pm->u.accept.value_ballot);
	px->acptid = 0;
	rte_memcpy(px->paxosval, pm->u.accept.value.paxos_value_val,
			pm->u.accept.value.paxos_value_len);
	/* request hardware IP/UDP checksum offload */
	created_pkt->ol_flags = PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_UDP_CKSUM;
	const uint16_t nb_tx = rte_eth_tx_burst(port_id, 0, &created_pkt, 1);
	if (nb_tx == 0)
		rte_pktmbuf_free(created_pkt);
	rte_log(RTE_LOG_DEBUG, RTE_LOGTYPE_USER8, "Send %d messages\n", nb_tx);
}
/* * Check that rte_lpm_lookup fails gracefully for incorrect user input * arguments */ int32_t test5(void) { #if defined(RTE_LIBRTE_LPM_DEBUG) struct rte_lpm *lpm = NULL; uint32_t ip = IPv4(0, 0, 0, 0); uint8_t next_hop_return = 0; int32_t status = 0; /* rte_lpm_lookup: lpm == NULL */ status = rte_lpm_lookup(NULL, ip, &next_hop_return); TEST_LPM_ASSERT(status < 0); /*Create vaild lpm to use in rest of test. */ lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0); TEST_LPM_ASSERT(lpm != NULL); /* rte_lpm_lookup: depth < 1 */ status = rte_lpm_lookup(lpm, ip, NULL); TEST_LPM_ASSERT(status < 0); rte_lpm_free(lpm); #endif return PASS; }
/*
 * Force TBL8 extension exhaustion: add 256 rules that each require a tbl8
 * extension until none are left, then check that one further tbl8-needing
 * rule is refused.
 */
int32_t test14(void)
{
	/* We only use depth = 32 in the loop below so we must make sure
	 * that we have enough storage for all rules at that depth*/

	struct rte_lpm *lpm = NULL;
	uint32_t ip;
	uint8_t depth, next_hop_add, next_hop_return;
	int32_t status = 0;

	/* Add enough space for 256 rules for every depth */
	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, 256 * 32, 0);
	TEST_LPM_ASSERT(lpm != NULL);

	depth = 32;
	next_hop_add = 100;
	ip = IPv4(0, 0, 0, 0);

	/* Add 256 rules that require a tbl8 extension */
	for (; ip <= IPv4(0, 0, 255, 0); ip += 256) {
		status = rte_lpm_add(lpm, ip, depth, next_hop_add);
		TEST_LPM_ASSERT(status == 0);

		status = rte_lpm_lookup(lpm, ip, &next_hop_return);
		TEST_LPM_ASSERT((status == 0) &&
				(next_hop_return == next_hop_add));
	}

	/* All tbl8 extensions have been used above. Try to add one more and
	 * we get a fail */
	ip = IPv4(1, 0, 0, 0);
	depth = 32;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status < 0);

	rte_lpm_free(lpm);

	return PASS;
}
/*
 * Decode a PPP frame (full HDLC framing or with compressed address and
 * control fields), strip the protocol field and dispatch the payload to
 * the IPv4 or IPv6 decoder, honouring the 'def'/'ipv6f' family filter.
 *
 * Bug fix: 'len' was zeroed before the header size was subtracted from
 * it, so (len being unsigned) the payload length passed on wrapped to a
 * huge bogus value instead of the real remaining length.
 */
static char Ppp(u_char *bytes, unsigned long len)
{
	unsigned char prot;
	int proto_offset;
	unsigned short ppp_prot;
	int nlen;
	u_char *next;

	/* PPP HDLC encapsulation: 0xff address byte + control byte. */
	if (*bytes == 0xff) {
		proto_offset = 2;
	} else {
		/* address and control are compressed (NULL) */
		proto_offset = 0;
	}

	prot = *(bytes + proto_offset);

	if (prot & PFC_BIT) {
		/* Compressed protocol field - just the byte we fetched. */
		ppp_prot = prot;
		nlen = 1;
	} else {
		/* NOTE(review): 16-bit read at an arbitrary offset; may be
		 * misaligned on strict-alignment targets. */
		ppp_prot = ntohs(*((uint16_t *)(bytes + proto_offset)));
		nlen = 2;
	}

	/* pdu: skip framing + protocol field */
	next = bytes + nlen + proto_offset;
	len -= nlen + proto_offset;

	switch (ppp_prot) {
	case ETHERTYPE_IP:
		if (def) {
			if (ipv6f)
				return 0;	/* filtered: IPv6-only mode */
		}
		return IPv4(next, len);
	case ETHERTYPE_IPv6:
		if (def) {
			if (ipv6f == 0)
				return 0;	/* filtered: IPv4-only mode */
		}
		return IPv6(next, len);
	}
	return 0;
}
static void test_antispoof_arp_gratuitous(void) { # include "arp_gratuitous.c" const unsigned char *pkts[] = {pkt1}; int pkts_size[] = {42}; uint16_t pkts_nb = 1; struct ether_addr inside_mac; uint32_t inside_ip; pg_scan_ether_addr(&inside_mac, "00:23:df:ff:c9:23"); inside_ip = htobe32(IPv4(192, 168, 22, 56)); test_antispoof_generic(pkts, pkts_size, pkts_nb, inside_mac, inside_ip); }
static void test_antispoof_arp_request(void) { # include "arp_request.c" const unsigned char *pkts[] = {pkt1}; int pkts_size[] = {42}; uint16_t pkts_nb = 1; struct ether_addr inside_mac; uint32_t inside_ip; pg_scan_ether_addr(&inside_mac, "00:e0:81:d5:02:91"); inside_ip = htobe32(IPv4(192, 168, 21, 253)); test_antispoof_generic(pkts, pkts_size, pkts_nb, inside_mac, inside_ip); }
static void test_antispoof_arp_response(void) { # include "arp_response.c" const unsigned char *pkts[] = {pkt1}; int pkts_size[] = {42}; uint16_t pkts_nb = 1; struct ether_addr inside_mac; uint32_t inside_ip; pg_scan_ether_addr(&inside_mac, "00:18:b9:56:2e:73"); inside_ip = htobe32(IPv4(192, 168, 21, 2)); test_antispoof_generic(pkts, pkts_size, pkts_nb, inside_mac, inside_ip); }
/*
 * Parse ClassBench rules file.
 * Expected format:
 * '@'<src_ipv4_addr>'/'<masklen> <space> \
 * <dst_ipv4_addr>'/'<masklen> <space> \
 * <src_port_low> <space> ":" <src_port_high> <space> \
 * <dst_port_low> <space> ":" <dst_port_high> <space> \
 * <proto>'/'<mask>
 */

/*
 * Parse one dotted-quad prefix "a.b.c.d/m" from 'in' into a host-order
 * address (*addr) and prefix length (*mask_len).  Returns 0 on success.
 * NOTE(review): assumes GET_CB_FIELD (defined elsewhere) validates the
 * numeric range, consumes the delimiter, and returns from this function
 * on malformed input — confirm against its definition.
 */
static int
parse_ipv4_net(const char *in, uint32_t *addr, uint32_t *mask_len)
{
	uint8_t a, b, c, d, m;

	// TODO may be replaced by inet_pton with some refactoring
	GET_CB_FIELD(in, a, 0, UINT8_MAX, '.');
	GET_CB_FIELD(in, b, 0, UINT8_MAX, '.');
	GET_CB_FIELD(in, c, 0, UINT8_MAX, '.');
	GET_CB_FIELD(in, d, 0, UINT8_MAX, '/');
	/* Mask length is allowed to be 0..32 (sizeof(uint32_t) * CHAR_BIT). */
	GET_CB_FIELD(in, m, 0, sizeof(uint32_t) * CHAR_BIT, 0);

	addr[0] = IPv4(a, b, c, d);
	mask_len[0] = m;

	return 0;
}
// Called by go code int ipv4_route_init(uint32_t nb_entries) { uint32_t i; uint32_t bcast_ip = IPv4(0xff, 0xff, 0xff, 0xff); ipv4_route_table = (mtrie_t*) malloc (sizeof(mtrie_t) * nb_entries); if (!ipv4_route_table) return -EAGAIN; bcast_nh.data = NULL; bcast_nh.fn = bcast_pkt_handler; for (i = 0; i < nb_entries; i++) { mtrie_init(&ipv4_route_table[i], 3); ipv4_route_add(i, (uint8_t*)&bcast_ip, &bcast_nh); } return 0; }
/*
 * Repeatedly (1000x) add a /32 rule, verify it via scalar and 4-lane
 * vector lookups, then delete it and verify the subsequent miss.
 */
int32_t test12(void)
{
	__m128i ipx4;
	uint16_t hop[4];
	struct rte_lpm *lpm = NULL;
	uint32_t ip, i;
	uint8_t depth, next_hop_add, next_hop_return;
	int32_t status = 0;

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
	TEST_LPM_ASSERT(lpm != NULL);

	ip = IPv4(128, 0, 0, 0);
	depth = 32;
	next_hop_add = 100;

	for (i = 0; i < 1000; i++) {
		status = rte_lpm_add(lpm, ip, depth, next_hop_add);
		TEST_LPM_ASSERT(status == 0);

		status = rte_lpm_lookup(lpm, ip, &next_hop_return);
		TEST_LPM_ASSERT((status == 0) &&
				(next_hop_return == next_hop_add));

		/* _mm_set_epi32 stores args high-to-low: lanes 0 and 2 hold
		 * ip-1 and ip+1 (not in the table, must miss), lanes 1 and 3
		 * hold ip (must hit). */
		ipx4 = _mm_set_epi32(ip, ip + 1, ip, ip - 1);
		rte_lpm_lookupx4(lpm, ipx4, hop, UINT16_MAX);
		TEST_LPM_ASSERT(hop[0] == UINT16_MAX);
		TEST_LPM_ASSERT(hop[1] == next_hop_add);
		TEST_LPM_ASSERT(hop[2] == UINT16_MAX);
		TEST_LPM_ASSERT(hop[3] == next_hop_add);

		status = rte_lpm_delete(lpm, ip, depth);
		TEST_LPM_ASSERT(status == 0);

		status = rte_lpm_lookup(lpm, ip, &next_hop_return);
		TEST_LPM_ASSERT(status == -ENOENT);
	}

	rte_lpm_free(lpm);

	return PASS;
}
/* RNH (Rocket Net Hub) lwIP configuration and per-service socket addresses. */
struct lwipthread_opts * RNH_LWIP = make_lwipopts(RNH_MAC, RNH_IP, NETMASK, GATEWAY);
const struct sockaddr * RNH_BATTERY_ADDR = make_addr(RNH_IP, RNH_BATTERY);
const struct sockaddr * RNH_PORT_ADDR = make_addr(RNH_IP, RNH_PORT);
const struct sockaddr * RNH_ALARM_ADDR = make_addr(RNH_IP, RNH_ALARM);
const struct sockaddr * RNH_UMBDET_ADDR = make_addr(RNH_IP, RNH_UMBDET);

/* Rocket Tracks Controller */
#define RTX_IP IPv4(10, 0, 0, 40)
#define RTX_MAC (uint8_t[6]){0xE6, 0x10, 0x20, 0x30, 0x40, 0xdd}
#define RTX_MANUAL 36200 // Manual Control listener
#define RTX_NEUTRAL 36201 // Axis Neutral data
#define RTX_FROMSLA 36202 // Sightline listener
#define RTX_DIAG 36205 // Axis Diagnostic data
/* Controller network stack; gateway is 10.0.0.1. */
struct lwipthread_opts * RTX_LWIP = make_lwipopts(RTX_MAC, RTX_IP, NETMASK, IPv4(10,0,0,1));
const struct sockaddr * RTX_MANUAL_ADDR = make_addr(RTX_IP, RTX_MANUAL);
const struct sockaddr * RTX_NEUTRAL_ADDR = make_addr(RTX_IP, RTX_NEUTRAL);
const struct sockaddr * RTX_FROMSLA_ADDR = make_addr(RTX_IP, RTX_FROMSLA);
const struct sockaddr * RTX_DIAG_ADDR = make_addr(RTX_IP, RTX_DIAG);

/* Rocket Tracks Manual Control Box */
#define RTXMAN_IP IPv4(10, 0, 0, 45)
#define RTXMAN_MAC (uint8_t[6]){0xE6, 0x10, 0x20, 0x30, 0x40, 0xee}
#define RTXMAN_OUT 36203 // Manual Control data
#define RTXMAN_NEUTRAL 36204 // Axis Neutral listener
#define RTXMAN_DIAG 36206 // Axis Diagnostic listener (NOTE(review): original comment said "Axis Neutral listener", likely copy-paste)
/* Manual box network stack; gateway is 10.0.0.1. */
struct lwipthread_opts * RTXMAN_LWIP = make_lwipopts(RTXMAN_MAC, RTXMAN_IP, NETMASK, IPv4(10,0,0,1));
const struct sockaddr * RTXMAN_OUT_ADDR = make_addr(RTXMAN_IP, RTXMAN_OUT);
const struct sockaddr * RTXMAN_NEUTRAL_ADDR = make_addr(RTXMAN_IP, RTXMAN_NEUTRAL);
// Combine two addresses with a byte-wise exclusive-or and return the result.
IPv4 IPv4::XOR(IPv4 ip1, IPv4 ip2)
{
	const int b0 = ip1._ip[0] ^ ip2._ip[0];
	const int b1 = ip1._ip[1] ^ ip2._ip[1];
	const int b2 = ip1._ip[2] ^ ip2._ip[2];
	const int b3 = ip1._ip[3] ^ ip2._ip[3];

	return IPv4(b0, b1, b2, b3);
}
* Security Associations */ #include <netinet/ip.h> #include <rte_memzone.h> #include <rte_crypto.h> #include <rte_cryptodev.h> #include <rte_byteorder.h> #include <rte_errno.h> #include "ipsec.h" #include "esp.h" /* SAs EP0 Outbound */ const struct ipsec_sa sa_ep0_out[] = { { 5, 0, IPv4(172, 16, 1, 5), IPv4(172, 16, 2, 5), NULL, NULL, esp4_tunnel_outbound_pre_crypto, esp4_tunnel_outbound_post_crypto, RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC, 12, 16, 16, 0, 0 }, { 6, 0, IPv4(172, 16, 1, 6), IPv4(172, 16, 2, 6), NULL, NULL, esp4_tunnel_outbound_pre_crypto, esp4_tunnel_outbound_post_crypto, RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC, 12, 16, 16, 0, 0 }, { 7, 0, IPv4(172, 16, 1, 7), IPv4(172, 16, 2, 7), NULL, NULL,
/*
 * Core antispoof scenario: replay 'pkts' through a
 * [generator]--[antispoof]--[collector] chain, first with ARP antispoof
 * enabled on the matching 'inside_ip' (every packet must pass), then
 * re-enabled with a different IP (every packet must be dropped).
 */
static void test_antispoof_generic(const unsigned char **pkts, int *pkts_size,
				   uint16_t pkts_nb, struct ether_addr inside_mac,
				   uint32_t inside_ip)
{
	struct pg_brick *gen_west;
	struct pg_brick *antispoof;
	struct pg_brick *col_east;
	struct pg_error *error = NULL;
	uint16_t packet_count;
	uint16_t i;
	struct rte_mbuf *packet;
	uint64_t filtered_pkts_mask;
	struct rte_mbuf **filtered_pkts;

	/* [generator>]--[antispoof]--[collector] */
	gen_west = pg_packetsgen_new("gen_west", 1, 1, EAST_SIDE,
				     &packet, 1, &error);
	g_assert(!error);
	antispoof = pg_antispoof_new("antispoof", 1, 1, EAST_SIDE,
				     inside_mac, &error);
	g_assert(!error);
	col_east = pg_collect_new("col_east", 1, 1, &error);
	g_assert(!error);

	pg_brick_link(gen_west, antispoof, &error);
	g_assert(!error);
	pg_brick_link(antispoof, col_east, &error);
	g_assert(!error);

	/* enable ARP antispoof with the correct IP */
	pg_antispoof_arp_enable(antispoof, inside_ip);

	/* replay traffic */
	for (i = 0; i < pkts_nb; i++) {
		packet = build_packet(pkts[i], pkts_size[i]);
		pg_brick_poll(gen_west, &packet_count, &error);
		g_assert(!error);
		g_assert(packet_count == 1);
		/* legitimate traffic: exactly one packet must reach the
		 * collector */
		filtered_pkts = pg_brick_west_burst_get(col_east,
							&filtered_pkts_mask,
							&error);
		g_assert(!error);
		g_assert(pg_mask_count(filtered_pkts_mask) == 1);
		pg_packets_free(filtered_pkts, filtered_pkts_mask);
		rte_pktmbuf_free(packet);
	}

	/* set another IP, should not pass */
	inside_ip = htobe32(IPv4(42, 0, 42, 0));
	pg_antispoof_arp_enable(antispoof, inside_ip);

	/* replay traffic */
	for (i = 0; i < pkts_nb; i++) {
		packet = build_packet(pkts[i], pkts_size[i]);
		pg_brick_poll(gen_west, &packet_count, &error);
		g_assert(!error);
		g_assert(packet_count == 1);
		/* spoofed traffic: nothing may reach the collector */
		filtered_pkts = pg_brick_west_burst_get(col_east,
							&filtered_pkts_mask,
							&error);
		g_assert(!error);
		g_assert(pg_mask_count(filtered_pkts_mask) == 0);
		pg_packets_free(filtered_pkts, filtered_pkts_mask);
		rte_pktmbuf_free(packet);
	}

	pg_brick_destroy(gen_west);
	pg_brick_destroy(antispoof);
	pg_brick_destroy(col_east);
}
/*
 * Enabling ARP antispoof with an IP that does not match the replayed ARP
 * request must drop the packet; after pg_antispoof_arp_disable the same
 * packet must pass through.
 */
static void test_pg_antispoof_arp_disable(void)
{
# include "arp_request.c"
	const unsigned char *pkts[] = {pkt1};
	int pkts_size[] = {42};
	uint16_t pkts_nb = 1;
	struct ether_addr inside_mac;
	uint32_t inside_ip;
	struct pg_brick *gen_west;
	struct pg_brick *antispoof;
	struct pg_brick *col_east;
	struct pg_error *error = NULL;
	uint16_t packet_count;
	uint16_t i;
	struct rte_mbuf *packet;
	uint64_t filtered_pkts_mask;
	struct rte_mbuf **filtered_pkts;

	/* MAC matches the recorded frame; IP deliberately does not. */
	pg_scan_ether_addr(&inside_mac, "00:e0:81:d5:02:91");
	inside_ip = htobe32(IPv4(0, 0, 0, 42));

	/* [generator>]--[antispoof]--[collector] */
	gen_west = pg_packetsgen_new("gen_west", 1, 1, EAST_SIDE,
				     &packet, 1, &error);
	g_assert(!error);
	antispoof = pg_antispoof_new("antispoof", 1, 1, EAST_SIDE,
				     inside_mac, &error);
	g_assert(!error);
	col_east = pg_collect_new("col_east", 1, 1, &error);
	g_assert(!error);

	pg_brick_link(gen_west, antispoof, &error);
	g_assert(!error);
	pg_brick_link(antispoof, col_east, &error);
	g_assert(!error);

	/* enable ARP antispoof with a wrong IP */
	pg_antispoof_arp_enable(antispoof, inside_ip);

	/* replay traffic */
	for (i = 0; i < pkts_nb; i++) {
		packet = build_packet(pkts[i], pkts_size[i]);
		pg_brick_poll(gen_west, &packet_count, &error);
		g_assert(!error);
		g_assert(packet_count == 1);
		/* mismatching IP: the ARP packet must be filtered out */
		filtered_pkts = pg_brick_west_burst_get(col_east,
							&filtered_pkts_mask,
							&error);
		g_assert(!error);
		g_assert(pg_mask_count(filtered_pkts_mask) == 0);
		pg_packets_free(filtered_pkts, filtered_pkts_mask);
		rte_pktmbuf_free(packet);
	}

	/* disable ARP antispoof, should now pass */
	pg_antispoof_arp_disable(antispoof);

	/* replay traffic */
	for (i = 0; i < pkts_nb; i++) {
		packet = build_packet(pkts[i], pkts_size[i]);
		pg_brick_poll(gen_west, &packet_count, &error);
		g_assert(!error);
		g_assert(packet_count == 1);
		/* antispoof disabled: the packet must pass through */
		filtered_pkts = pg_brick_west_burst_get(col_east,
							&filtered_pkts_mask,
							&error);
		g_assert(!error);
		g_assert(pg_mask_count(filtered_pkts_mask) == 1);
		pg_packets_free(filtered_pkts, filtered_pkts_mask);
		rte_pktmbuf_free(packet);
	}

	pg_brick_destroy(gen_west);
	pg_brick_destroy(antispoof);
	pg_brick_destroy(col_east);
}
.type = RTE_ACL_FIELD_TYPE_RANGE, .size = sizeof(uint16_t), .field_index = DSTP_FIELD_IPV4, .input_index = RTE_ACL_IPV4_PORTS, .offset = sizeof(struct ip) - offsetof(struct ip, ip_p) + sizeof(uint16_t) }, }; RTE_ACL_RULE_DEF(acl4_rules, RTE_DIM(ipv4_defs)); const struct acl4_rules acl4_rules_in[] = { { .data = {.userdata = PROTECT(5), .category_mask = 1, .priority = 1}, /* destination IPv4 */ .field[2] = {.value.u32 = IPv4(192, 168, 105, 0), .mask_range.u32 = 24,}, /* source port */ .field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}, /* destination port */ .field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,} }, { .data = {.userdata = PROTECT(6), .category_mask = 1, .priority = 2}, /* destination IPv4 */ .field[2] = {.value.u32 = IPv4(192, 168, 106, 0), .mask_range.u32 = 24,}, /* source port */ .field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}, /* destination port */ .field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
// Combine two addresses with a byte-wise bitwise-and and return the result.
IPv4 IPv4::AND(IPv4 ip1, IPv4 ip2)
{
	const int b0 = ip1._ip[0] & ip2._ip[0];
	const int b1 = ip1._ip[1] & ip2._ip[1];
	const int b2 = ip1._ip[2] & ip2._ip[2];
	const int b3 = ip1._ip[3] & ip2._ip[3];

	return IPv4(b0, b1, b2, b3);
}
#include "ipsec.h"

#define RT_IPV4_MAX_RULES 64

/* One IPv4 route: prefix (ip/depth) and the output port index. */
struct ipv4_route {
	uint32_t ip;
	uint8_t depth;
	uint8_t if_out;
};

/* In the default routing table we have:
 * ep0 protected ports 0 and 1, and unprotected ports 2 and 3.
 */
static struct ipv4_route rt_ipv4_ep0[] = {
	/* Tunnel endpoint hosts go out the protected ports. */
	{ IPv4(172, 16, 2, 5), 32, 0 },
	{ IPv4(172, 16, 2, 6), 32, 0 },
	{ IPv4(172, 16, 2, 7), 32, 1 },
	{ IPv4(172, 16, 2, 8), 32, 1 },
	/* Cleartext subnets go out the unprotected ports. */
	{ IPv4(192, 168, 115, 0), 24, 2 },
	{ IPv4(192, 168, 116, 0), 24, 2 },
	{ IPv4(192, 168, 117, 0), 24, 3 },
	{ IPv4(192, 168, 118, 0), 24, 3 },
	{ IPv4(192, 168, 210, 0), 24, 2 },
	{ IPv4(192, 168, 240, 0), 24, 2 },
	/* NOTE(review): .250 is routed to protected port 0 — confirm intended. */
	{ IPv4(192, 168, 250, 0), 24, 0 }
};
/*
 * Test for overwriting of tbl8:
 * - add rule /32 and lookup
 * - add new rule /24 and lookup
 * - add third rule /25 and lookup
 * - lookup /32 and /24 rule to ensure the table has not been overwritten.
 */
int32_t test17(void)
{
	struct rte_lpm *lpm = NULL;
	const uint32_t ip_10_32 = IPv4(10, 10, 10, 2);
	const uint32_t ip_10_24 = IPv4(10, 10, 10, 0);
	const uint32_t ip_20_25 = IPv4(10, 10, 20, 2);
	const uint8_t d_ip_10_32 = 32, d_ip_10_24 = 24, d_ip_20_25 = 25;
	const uint8_t next_hop_ip_10_32 = 100, next_hop_ip_10_24 = 105,
		next_hop_ip_20_25 = 111;
	uint8_t next_hop_return = 0;
	int32_t status = 0;

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
	TEST_LPM_ASSERT(lpm != NULL);

	if ((status = rte_lpm_add(lpm, ip_10_32, d_ip_10_32,
			next_hop_ip_10_32)) < 0)
		return -1;

	status = rte_lpm_lookup(lpm, ip_10_32, &next_hop_return);
	uint8_t test_hop_10_32 = next_hop_return;
	TEST_LPM_ASSERT(status == 0);
	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);

	if ((status = rte_lpm_add(lpm, ip_10_24, d_ip_10_24,
			next_hop_ip_10_24)) < 0)
		return -1;

	status = rte_lpm_lookup(lpm, ip_10_24, &next_hop_return);
	uint8_t test_hop_10_24 = next_hop_return;
	TEST_LPM_ASSERT(status == 0);
	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24);

	if ((status = rte_lpm_add(lpm, ip_20_25, d_ip_20_25,
			next_hop_ip_20_25)) < 0)
		return -1;

	status = rte_lpm_lookup(lpm, ip_20_25, &next_hop_return);
	uint8_t test_hop_20_25 = next_hop_return;
	TEST_LPM_ASSERT(status == 0);
	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_20_25);

	/* The three prefixes must have produced distinct next hops. */
	if (test_hop_10_32 == test_hop_10_24) {
		printf("Next hop return equal\n");
		return -1;
	}

	if (test_hop_10_24 == test_hop_20_25){
		printf("Next hop return equal\n");
		return -1;
	}

	/* Re-check the first two rules: the later adds must not have
	 * overwritten their tbl8 entries. */
	status = rte_lpm_lookup(lpm, ip_10_32, &next_hop_return);
	TEST_LPM_ASSERT(status == 0);
	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);

	status = rte_lpm_lookup(lpm, ip_10_24, &next_hop_return);
	TEST_LPM_ASSERT(status == 0);
	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24);

	rte_lpm_free(lpm);

	return PASS;
}
char *WhereGr(FILE *fp,char *address, char *uqhn, char *domain, char *ipv4, char *ipv6) { // This model of "where?" is based on IP addresses and portnumbers, for cloud services // alternative models for "other worlds" can be added... static char where[CGN_BUFSIZE] = {0}; char attr[CGN_BUFSIZE]; if (domain == NULL || strlen(domain) == 0) { domain = "unknown domain"; } if (ipv6 && strlen(ipv6) > 0) { snprintf(where,CGN_BUFSIZE,"host location %s.%s IPv4 %s ipv6 %s",uqhn,domain,ipv4,ipv6); } else { snprintf(where,CGN_BUFSIZE,"host location %s.%s IPv4 %s",uqhn,domain,ipv4); } if (address && strlen(address) > 0) { snprintf(attr,CGN_BUFSIZE,"%s,%s,%s,%s,address %s",Hostname(uqhn),Domain(domain),IPv4(ipv4),IPv6(ipv6),address); } else { snprintf(attr,CGN_BUFSIZE,"%s,%s,%s,%s",Hostname(uqhn),Domain(domain),IPv4(ipv4),IPv6(ipv6)); } RoleGr(fp,where,"where",attr, "host location identification"); RoleGr(fp,Domain(domain),"dns domain name",domain,"host location identification"); char *hostname = Hostname(uqhn); RoleGr(fp,hostname,"hostname",uqhn,"host location identification"); Gr(fp,where,a_alias,hostname,"host location identification"); // Alias for quick association Gr(fp,Domain(domain),a_contains,hostname,"host location identification"); char *identity = HostID(uqhn); Gr(fp,hostname,a_alias,identity,"host location identification"); RoleGr(fp,IPv4(ipv4),"ipv4 address", ipv4,"host location identification"); Gr(fp,where,a_alias,IPv4(ipv4),"host location identification"); // Alias for quick association Gr(fp,Domain(domain),a_contains,IPv4(ipv4),"host location identification"); Gr(fp,IPv4(ipv4),a_alias,HostID(ipv4),"host location identification"); if (ipv6 && strlen(ipv6) > 0) { RoleGr(fp,IPv6(ipv6),"ipv6 address", ipv6,"host location identification"); Gr(fp,where,a_alias,IPv6(ipv6),"host location identification"); // Alias for quick association Gr(fp,Domain(domain),a_contains,IPv6(ipv6),"host location identification"); identity = HostID(ipv6); 
Gr(fp,IPv6(ipv6),a_alias,identity,"host location identification"); Gr(fp,hostname,a_alias,IPv6(ipv6),"host location identification"); } if (address && address > 0) { char addressx[CGN_BUFSIZE]; snprintf(addressx,CGN_BUFSIZE,"description address %s",address); RoleGr(fp,addressx,"description address",address,"host location identification"); Gr(fp,Domain(domain),a_origin,addressx,"host location identification"); Gr(fp,"description address",a_related_to,"street address","host location identification"); } Gr(fp,hostname,a_alias,IPv4(ipv4),"host location identification"); return where; }
/*
 * - Add rule that covers a TBL24 range previously invalid & lookup (& delete &
 *   lookup)
 * - Add rule that extends a TBL24 invalid entry & lookup (& delete & lookup)
 * - Add rule that extends a TBL24 valid entry & lookup for both rules (&
 *   delete & lookup)
 * - Add rule that updates the next hop in TBL24 & lookup (& delete & lookup)
 * - Add rule that updates the next hop in TBL8 & lookup (& delete & lookup)
 * - Delete a rule that is not present in the TBL24 & lookup
 * - Delete a rule that is not present in the TBL8 & lookup
 */
int32_t test10(void)
{
	struct rte_lpm *lpm = NULL;
	uint32_t ip;
	uint8_t depth, next_hop_add, next_hop_return;
	int32_t status = 0;

	/* Add rule that covers a TBL24 range previously invalid & lookup
	 * (& delete & lookup) */
	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
	TEST_LPM_ASSERT(lpm != NULL);

	ip = IPv4(128, 0, 0, 0);
	depth = 16;
	next_hop_add = 100;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_delete_all(lpm);

	/* Add rule that extends a TBL24 invalid entry & lookup
	 * (& delete & lookup) */
	ip = IPv4(128, 0, 0, 0);
	depth = 25;
	next_hop_add = 100;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	rte_lpm_delete_all(lpm);

	/* Add rule that extends a TBL24 valid entry & lookup for both rules
	 * (& delete & lookup) */
	ip = IPv4(128, 0, 0, 0);
	depth = 24;
	next_hop_add = 100;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	ip = IPv4(128, 0, 0, 10);
	depth = 32;
	next_hop_add = 101;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	ip = IPv4(128, 0, 0, 0);
	next_hop_add = 100;

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	ip = IPv4(128, 0, 0, 0);
	depth = 24;

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	ip = IPv4(128, 0, 0, 10);
	depth = 32;

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_delete_all(lpm);

	/* Add rule that updates the next hop in TBL24 & lookup
	 * (& delete & lookup) */
	ip = IPv4(128, 0, 0, 0);
	depth = 24;
	next_hop_add = 100;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	next_hop_add = 101;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_delete_all(lpm);

	/* Add rule that updates the next hop in TBL8 & lookup
	 * (& delete & lookup) */
	ip = IPv4(128, 0, 0, 0);
	depth = 32;
	next_hop_add = 100;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	next_hop_add = 101;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_delete_all(lpm);

	/* Delete a rule that is not present in the TBL24 & lookup */
	ip = IPv4(128, 0, 0, 0);
	depth = 24;

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status < 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_delete_all(lpm);

	/* Delete a rule that is not present in the TBL8 & lookup */
	ip = IPv4(128, 0, 0, 0);
	depth = 32;

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status < 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_free(lpm);

	return PASS;
}
/*
 * - Add & lookup to hit invalid TBL24 entry
 * - Add & lookup to hit valid TBL24 entry not extended
 * - Add & lookup to hit valid extended TBL24 entry with invalid TBL8 entry
 * - Add & lookup to hit valid extended TBL24 entry with valid TBL8 entry
 */
int32_t test9(void)
{
	struct rte_lpm *lpm = NULL;
	uint32_t ip, ip_1, ip_2;
	uint8_t depth, depth_1, depth_2, next_hop_add, next_hop_add_1,
		next_hop_add_2, next_hop_return;
	int32_t status = 0;

	/* Add & lookup to hit invalid TBL24 entry */
	ip = IPv4(128, 0, 0, 0);
	depth = 24;
	next_hop_add = 100;

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
	TEST_LPM_ASSERT(lpm != NULL);

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_delete_all(lpm);

	/* Add & lookup to hit valid TBL24 entry not extended */
	ip = IPv4(128, 0, 0, 0);
	depth = 23;
	next_hop_add = 100;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	/* Overlay a /24 on the same address; it is now the better match. */
	depth = 24;
	next_hop_add = 101;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	depth = 24;
	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	depth = 23;
	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_delete_all(lpm);

	/* Add & lookup to hit valid extended TBL24 entry with invalid TBL8
	 * entry */
	ip = IPv4(128, 0, 0, 0);
	depth = 32;
	next_hop_add = 100;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	ip = IPv4(128, 0, 0, 5);
	depth = 32;
	next_hop_add = 101;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	/* The first /32 in the same tbl8 must be unaffected. */
	ip = IPv4(128, 0, 0, 0);
	depth = 32;
	next_hop_add = 100;

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_delete_all(lpm);

	/* Add & lookup to hit valid extended TBL24 entry with valid TBL8
	 * entry */
	ip_1 = IPv4(128, 0, 0, 0);
	depth_1 = 25;
	next_hop_add_1 = 101;

	ip_2 = IPv4(128, 0, 0, 5);
	depth_2 = 32;
	next_hop_add_2 = 102;

	next_hop_return = 0;

	status = rte_lpm_add(lpm, ip_1, depth_1, next_hop_add_1);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip_1, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));

	status = rte_lpm_add(lpm, ip_2, depth_2, next_hop_add_2);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip_2, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_2));

	/* Deleting the /32 must fall back to the covering /25. */
	status = rte_lpm_delete(lpm, ip_2, depth_2);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip_2, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));

	status = rte_lpm_delete(lpm, ip_1, depth_1);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip_1, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_free(lpm);

	return PASS;
}
/*
 * Use rte_lpm_add to add rules which effect only the second half of the lpm
 * table. Use all possible depths ranging from 1..32. Set the next hop = to the
 * depth. Check lookup hit for on every add and check for lookup miss on the
 * first half of the lpm table after each add. Finally delete all rules going
 * backwards (i.e. from depth = 32 ..1) and carry out a lookup after each
 * delete. The lookup should return the next_hop_add value related to the
 * previous depth value (i.e. depth -1).
 */
int32_t test8(void)
{
	__m128i ipx4;
	uint16_t hop[4];
	struct rte_lpm *lpm = NULL;
	/* ip1 is the last address of the first half; ip2 the first of the
	 * second half — every rule added covers ip2 but never ip1. */
	uint32_t ip1 = IPv4(127, 255, 255, 255), ip2 = IPv4(128, 0, 0, 0);
	uint8_t depth, next_hop_add, next_hop_return;
	int32_t status = 0;

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
	TEST_LPM_ASSERT(lpm != NULL);

	/* Loop with rte_lpm_add. */
	for (depth = 1; depth <= 32; depth++) {
		/* Let the next_hop_add value = depth. Just for change. */
		next_hop_add = depth;

		status = rte_lpm_add(lpm, ip2, depth, next_hop_add);
		TEST_LPM_ASSERT(status == 0);

		/* Check IP in first half of tbl24 which should be empty. */
		status = rte_lpm_lookup(lpm, ip1, &next_hop_return);
		TEST_LPM_ASSERT(status == -ENOENT);

		status = rte_lpm_lookup(lpm, ip2, &next_hop_return);
		TEST_LPM_ASSERT((status == 0) &&
			(next_hop_return == next_hop_add));

		/* Vector lookup: lanes 0/2 hold ip1 (miss), 1/3 ip2 (hit). */
		ipx4 = _mm_set_epi32(ip2, ip1, ip2, ip1);
		rte_lpm_lookupx4(lpm, ipx4, hop, UINT16_MAX);
		TEST_LPM_ASSERT(hop[0] == UINT16_MAX);
		TEST_LPM_ASSERT(hop[1] == next_hop_add);
		TEST_LPM_ASSERT(hop[2] == UINT16_MAX);
		TEST_LPM_ASSERT(hop[3] == next_hop_add);
	}

	/* Loop with rte_lpm_delete. */
	for (depth = 32; depth >= 1; depth--) {
		next_hop_add = (uint8_t) (depth - 1);

		status = rte_lpm_delete(lpm, ip2, depth);
		TEST_LPM_ASSERT(status == 0);

		/* After deleting the /depth rule, the next-shorter prefix
		 * (next hop == depth - 1) must answer — except when no rule
		 * remains. */
		status = rte_lpm_lookup(lpm, ip2, &next_hop_return);

		if (depth != 1) {
			TEST_LPM_ASSERT((status == 0) &&
				(next_hop_return == next_hop_add));
		} else {
			TEST_LPM_ASSERT(status == -ENOENT);
		}

		status = rte_lpm_lookup(lpm, ip1, &next_hop_return);
		TEST_LPM_ASSERT(status == -ENOENT);

		/* Vector lookup: lanes 0/1 hold ip2, lanes 2/3 hold ip1. */
		ipx4 = _mm_set_epi32(ip1, ip1, ip2, ip2);
		rte_lpm_lookupx4(lpm, ipx4, hop, UINT16_MAX);
		if (depth != 1) {
			TEST_LPM_ASSERT(hop[0] == next_hop_add);
			TEST_LPM_ASSERT(hop[1] == next_hop_add);
		} else {
			TEST_LPM_ASSERT(hop[0] == UINT16_MAX);
			TEST_LPM_ASSERT(hop[1] == UINT16_MAX);
		}
		TEST_LPM_ASSERT(hop[2] == UINT16_MAX);
		TEST_LPM_ASSERT(hop[3] == UINT16_MAX);
	}

	rte_lpm_free(lpm);

	return PASS;
}
// Render the endpoint as "<ipv4>:<port>".
// NOTE(review): assumes IPv4(host) yields a std::string (or something
// concatenable with operator+) — defined elsewhere; confirm.
std::string str() const { return IPv4(host)+":"+boost::lexical_cast<std::string>(port); }