/* Calculate RSS hash for IPv4/6 */
static inline uint32_t
do_softrss(struct rte_mbuf *m, const uint8_t *rss_key_be)
{
	uint32_t input_len;
	void *tuple;
	struct rte_ipv4_tuple ipv4_tuple;
	struct rte_ipv6_tuple ipv6_tuple;
	struct ipv4_hdr *ipv4_hdr;
	struct ipv6_hdr *ipv6_hdr;

	mtoip(m, &ipv4_hdr, &ipv6_hdr);

	if (ipv4_hdr) {
		ipv4_tuple.src_addr = rte_be_to_cpu_32(ipv4_hdr->src_addr);
		ipv4_tuple.dst_addr = rte_be_to_cpu_32(ipv4_hdr->dst_addr);
		tuple = &ipv4_tuple;
		input_len = RTE_THASH_V4_L3_LEN;
	} else if (ipv6_hdr) {
		rte_thash_load_v6_addrs(ipv6_hdr,
					(union rte_thash_tuple *)&ipv6_tuple);
		tuple = &ipv6_tuple;
		input_len = RTE_THASH_V6_L3_LEN;
	} else
		return 0;

	return rte_softrss_be(tuple, input_len, rss_key_be);
}
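A minimal setup sketch for the example above (not part of the original source): rte_softrss_be() expects the RSS key in byte-swapped form, which rte_convert_rss_key() from rte_thash.h can prepare once at initialization. The key name below is illustrative.

/* Convert a 40-byte Toeplitz key once so do_softrss() can use
 * rte_softrss_be() on the hot path.
 */
static uint8_t rss_key_be[40];

static void
softrss_key_init(const uint8_t *default_rss_key)
{
	rte_convert_rss_key((const uint32_t *)default_rss_key,
			    (uint32_t *)rss_key_be, sizeof(rss_key_be));
}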
Example #2
/* byteswap to cpu or network order */
static void
bswap_test_data(struct ipv4_7tuple *data, int len, int to_be)
{
	int i;

	for (i = 0; i < len; i++) {

		if (to_be) {
			/* swap all bytes so that they are in network order */
			data[i].ip_dst = rte_cpu_to_be_32(data[i].ip_dst);
			data[i].ip_src = rte_cpu_to_be_32(data[i].ip_src);
			data[i].port_dst = rte_cpu_to_be_16(data[i].port_dst);
			data[i].port_src = rte_cpu_to_be_16(data[i].port_src);
			data[i].vlan = rte_cpu_to_be_16(data[i].vlan);
			data[i].domain = rte_cpu_to_be_16(data[i].domain);
		} else {
			data[i].ip_dst = rte_be_to_cpu_32(data[i].ip_dst);
			data[i].ip_src = rte_be_to_cpu_32(data[i].ip_src);
			data[i].port_dst = rte_be_to_cpu_16(data[i].port_dst);
			data[i].port_src = rte_be_to_cpu_16(data[i].port_src);
			data[i].vlan = rte_be_to_cpu_16(data[i].vlan);
			data[i].domain = rte_be_to_cpu_16(data[i].domain);
		}
	}
}
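For context, the fields swapped above imply a tuple layout roughly like the sketch below; the authoritative definition lives in the test's own header.

/* Sketch only: inferred from the fields bswap_test_data() touches. */
struct ipv4_7tuple {
	uint16_t vlan;
	uint16_t domain;
	uint8_t  proto;	/* single byte, never byte-swapped */
	uint32_t ip_src;
	uint32_t ip_dst;
	uint16_t port_src;
	uint16_t port_dst;
};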
Example #3
int
rate_limit_address(cmdline_ipaddr_t* ip, uint32_t num, int socket_id)
{
	int i, res;
	uint32_t netmask, netaddr, maxhost, j;

	res = 0;
	if (ip->family == AF_INET) {
		if (ip->prefixlen > 0) {
			// rate limit range
			netmask = ~(UINT32_MAX >> ip->prefixlen);
			netaddr =
			    rte_be_to_cpu_32(ip->addr.ipv4.s_addr) & netmask;
			maxhost = netaddr + (1 << (32 - ip->prefixlen));
			if (socket_id == SOCKET_ID_ANY) {
				for (i = 0; i < NB_SOCKETS; i++) {
					for (j = netaddr; j < maxhost; j++) {
						rate_limit_ipv4(
						    (union rlimit_addr*)&j, num,
						    i);
					}
				}
			} else {
				for (j = netaddr; j < maxhost; j++) {
					rate_limit_ipv4((union rlimit_addr*)&j,
							num, socket_id);
				}
			}
		} else {
Example #4
int
arp_input(struct rte_mbuf *m)
{
    struct arp_hdr *arph;
    int rc = 0;

    arph = rte_pktmbuf_mtod(m, struct arp_hdr *);
    if (ust_ip_addr != rte_be_to_cpu_32(arph->arp_data.arp_tip)) {
        goto out;
    }

    switch (rte_be_to_cpu_16(arph->arp_op)) {
    default:
        rc = -EINVAL;
        break;
    case ARP_OP_REQUEST:
        rc = process_request(arph);
        break;
    case ARP_OP_REPLY:
        break;
    }

out:
    rte_pktmbuf_free(m);
    return rc;
}
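Example #5
/* Update a count-min sketch from a burst of mbufs: the IPv4 source address
 * of each packet is used as the key and the packet length as the weight,
 * which is added to one counter per sketch row.
 */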
inline void
cm_update_bulk_mbuf(struct count_min_t *cm, struct rte_mbuf **mbufs, size_t n) {
    static uint64_t h[RTE_PORT_IN_BURST_SIZE_MAX] __rte_cache_aligned;
    static size_t l[RTE_PORT_IN_BURST_SIZE_MAX] __rte_cache_aligned;
    static uint64_t j[RTE_PORT_IN_BURST_SIZE_MAX * CM_DEPTH] __rte_cache_aligned;

    for(size_t i = 0; i < n; i++) {
        struct ether_hdr* eth_hdr = rte_pktmbuf_mtod(mbufs[i], struct ether_hdr *);
        struct ipv4_hdr *ipv4_hdr =
            (struct ipv4_hdr*) ((uint8_t *)eth_hdr + sizeof(struct ether_hdr));

        h[i] = rte_be_to_cpu_32(ipv4_hdr->src_addr);
        l[i] = mbufs[i]->pkt_len;
    }

    for(size_t p_i = 0; p_i < n; p_i++) {
        for (size_t i = 0; i < CM_DEPTH; i++) {
            j[p_i * CM_DEPTH + i] = (cm->a[i] * h[p_i] + cm->b[i]) & (CM_WIDTH - 1);
        }
    }

    for(size_t p_i = 0; p_i < n; p_i++) {
        for(size_t i = 0; i < CM_DEPTH; i++) {
            cm->counters[i][j[p_i * CM_DEPTH + i]] += l[p_i];
        }
    }
}
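A count-min estimate is read back by taking the minimum of the row counters selected by the same per-row hash; a hypothetical point-query companion (assuming the count_min_t fields used above and a power-of-two CM_WIDTH) could look like this.

/* Hypothetical companion to the bulk update above: count-min point query. */
static inline uint64_t
cm_query(const struct count_min_t *cm, uint64_t key) {
    uint64_t est = UINT64_MAX;

    for (size_t i = 0; i < CM_DEPTH; i++) {
        uint64_t col = (cm->a[i] * key + cm->b[i]) & (CM_WIDTH - 1);
        if (cm->counters[i][col] < est)
            est = cm->counters[i][col];
    }
    return est;
}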
Example #6
int
control_add_ipv4_local_entry(struct in_addr* nexthop,
			     struct in_addr* saddr,
			     uint8_t depth,
			     uint32_t port_id,
			     int32_t socket_id)
{
	int s;
	uint16_t nexthop_id;

	s = neighbor4_lookup_nexthop(neighbor4_struct[socket_id], nexthop,
				     &nexthop_id);
	if (s < 0) {
		s = neighbor4_add_nexthop(neighbor4_struct[socket_id], nexthop,
					  &nexthop_id, NEI_ACTION_KNI);
		if (s < 0) {
			RTE_LOG(
			    ERR, PKTJ_CTRL1,
			    "failed to add a nexthop during route adding...\n");
			return -1;
		}
	}
	neighbor4_set_port(neighbor4_struct[socket_id], nexthop_id, port_id);
	s = rte_lpm_add(ipv4_pktj_lookup_struct[socket_id],
			rte_be_to_cpu_32(saddr->s_addr), depth, nexthop_id);
	if (s < 0) {
		RTE_LOG(
		    ERR, PKTJ_CTRL1,
		    "failed to add a route in lpm during route adding...\n");
		return -1;
	}
	neighbor4_refcount_incr(neighbor4_struct[socket_id], nexthop_id);
	return nexthop_id;
}
Example #7
static void
ipv4_addr_to_dot(uint32_t be_ipv4_addr, char *buf)
{
	uint32_t ipv4_addr;

	ipv4_addr = rte_be_to_cpu_32(be_ipv4_addr);
	sprintf(buf, "%d.%d.%d.%d", (ipv4_addr >> 24) & 0xFF,
		(ipv4_addr >> 16) & 0xFF, (ipv4_addr >> 8) & 0xFF,
		ipv4_addr & 0xFF);
}
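The output buffer must hold the longest dotted quad, "255.255.255.255", i.e. 15 characters plus the terminating NUL; a small usage sketch:

static void
example_ipv4_addr_to_dot(void)
{
	char buf[16];	/* "255.255.255.255" + NUL */
	uint32_t addr_be = rte_cpu_to_be_32((192u << 24) | (168u << 16) | 1u);

	ipv4_addr_to_dot(addr_be, buf);	/* buf now holds "192.168.0.1" */
}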
Example #8
/* Copy Flow Director filter to a VIC ipv4 filter (for Cisco VICs
 * without advanced filter support).
 */
void
copy_fltr_v1(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
	     __rte_unused struct rte_eth_fdir_masks *masks)
{
	fltr->type = FILTER_IPV4_5TUPLE;
	fltr->u.ipv4.src_addr = rte_be_to_cpu_32(
		input->flow.ip4_flow.src_ip);
	fltr->u.ipv4.dst_addr = rte_be_to_cpu_32(
		input->flow.ip4_flow.dst_ip);
	fltr->u.ipv4.src_port = rte_be_to_cpu_16(
		input->flow.udp4_flow.src_port);
	fltr->u.ipv4.dst_port = rte_be_to_cpu_16(
		input->flow.udp4_flow.dst_port);

	if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP)
		fltr->u.ipv4.protocol = PROTO_TCP;
	else
		fltr->u.ipv4.protocol = PROTO_UDP;

	fltr->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
}
Example #9
int lpm_get_dst_port(struct rte_mbuf *m, int socketid) {
    struct ether_hdr *eth_hdr;
    struct ipv4_hdr *ipv4_hdr;

    eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

    if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
        /* Handle IPv4 headers.*/
        ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
                sizeof(struct ether_hdr));

        return lpm_entry_lookup(rte_be_to_cpu_32(ipv4_hdr->dst_addr), socketid);
    }
Example #10
static int
neighbor4(neighbor_action_t action,
	  __s32 port_id,
	  struct in_addr* addr,
	  struct ether_addr* lladdr,
	  __u8 flags,
	  __rte_unused __u16 vlan_id,
	  void* args)
{
	// if port_id is not handled
	//   ignore, return immediately
	// if neighbor add
	//   lookup neighbor
	//   if exists
	//     update lladdr, set flag as REACHABLE/STALE/DELAY
	//   else
	//     // This should not happen
	//     insert new nexthop
	//     set insert date=now, refcount = 0, flag=REACHABLE/STALE/DELAY
	// if neighbor delete
	//   lookup neighbor
	//   if exists
	//     if refcount != 0
	//       set nexthop as invalid
	//     else
	//       set flag empty
	//   else
	//     do nothing
	//     // this should not happen

	struct control_handle* handle = args;
	assert(handle != NULL);
	int s;
	uint16_t nexthop_id;
	uint32_t find_id;
	int32_t socket_id = handle->socket_id;
	char ipbuf[INET_ADDRSTRLEN];

	assert(neighbor4_struct != NULL);

	if (addr == NULL)
		return -1;
	inet_ntop(AF_INET, addr, ipbuf, INET_ADDRSTRLEN);

	if (action == NEIGHBOR_ADD) {
		if (lladdr == NULL)
			return -1;
		char ibuf[IFNAMSIZ];
		unsigned kni_vlan;

		if_indextoname(port_id, ibuf);
		s = sscanf(ibuf, "dpdk%10u.%10u", &port_id, &kni_vlan);
		if (s <= 0) {
			RTE_LOG(ERR, PKTJ_CTRL1,
				"received a neighbor "
				"announce for an unmanaged "
				"iface %s\n",
				ibuf);
			return -1;
		}

		s = neighbor4_lookup_nexthop(neighbor4_struct[socket_id], addr,
					     &nexthop_id);
		if (s < 0) {
			if (flags != NUD_NONE && flags != NUD_NOARP &&
			    flags != NUD_STALE) {
				RTE_LOG(ERR, PKTJ_CTRL1,
					"failed to change state in neighbor4 "
					"table (state %d, %s)...\n",
					flags, ipbuf);
				return -1;
			}

			{
				RTE_LOG(DEBUG, PKTJ_CTRL1,
					"adding ipv4 neighbor %s with port %s "
					"vlan_id %d...\n",
					ipbuf, ibuf, kni_vlan);
			}

			s = neighbor4_add_nexthop(neighbor4_struct[socket_id],
						  addr, &nexthop_id,
						  NEI_ACTION_FWD);
			if (s < 0) {
				RTE_LOG(ERR, PKTJ_CTRL1,
					"failed to add a "
					"nexthop in neighbor "
					"table...\n");
				return -1;
			}

			if (rte_lpm_lookup(ipv4_pktj_lookup_struct[socket_id],
					   rte_be_to_cpu_32(addr->s_addr),
					   &find_id) == 0) {
				s = rte_lpm_add(
				    ipv4_pktj_lookup_struct[socket_id],
				    rte_be_to_cpu_32(addr->s_addr), 32,
				    nexthop_id);
				if (s < 0) {
					lpm4_stats[socket_id].nb_add_ko++;
					RTE_LOG(ERR, PKTJ_CTRL1,
						"failed to add a route in "
						"lpm during neighbor "
						"adding...\n");
					return -1;
				}
				lpm4_stats[socket_id].nb_add_ok++;
			}
		}

		if (flags == NUD_FAILED) {
			neighbor4_set_action(neighbor4_struct[socket_id],
					     nexthop_id, NEI_ACTION_KNI);
		} else {
			neighbor4_set_action(neighbor4_struct[socket_id],
					     nexthop_id, NEI_ACTION_FWD);
		}
		RTE_LOG(DEBUG, PKTJ_CTRL1,
			"set neighbor4 with port_id %d state %d\n", port_id,
			flags);
		neighbor4_set_lladdr_port(neighbor4_struct[socket_id],
					  nexthop_id, &ports_eth_addr[port_id],
					  lladdr, port_id, kni_vlan);
		neighbor4_set_state(neighbor4_struct[socket_id], nexthop_id,
				    flags);
	}
	if (action == NEIGHBOR_DELETE) {
		if (flags != NUD_FAILED && flags != NUD_STALE) {
			RTE_LOG(
			    DEBUG, PKTJ_CTRL1,
			    "neighbor4 delete operation failed, bad NUD state: %d\n",
			    flags);
			return -1;
		}

		RTE_LOG(DEBUG, PKTJ_CTRL1, "deleting ipv4 neighbor...\n");
		s = neighbor4_lookup_nexthop(neighbor4_struct[socket_id], addr,
					     &nexthop_id);
		if (s < 0) {
			RTE_LOG(ERR, PKTJ_CTRL1,
				"failed to find a nexthop to "
				"delete in neighbor "
				"table...\n");
			return 0;
		}
		neighbor4_delete(neighbor4_struct[socket_id], nexthop_id);
		// FIXME not thread safe
		if (neighbor4_struct[socket_id]
			->entries.t4[nexthop_id]
			.neighbor.refcnt == 0) {
			s = rte_lpm_delete(ipv4_pktj_lookup_struct[socket_id],
					   rte_be_to_cpu_32(addr->s_addr), 32);
			if (s < 0) {
				lpm4_stats[socket_id].nb_del_ko++;
				RTE_LOG(ERR, PKTJ_CTRL1,
					"failed to delete route...\n");
				return -1;
			}
			lpm4_stats[socket_id].nb_del_ok++;
		}
	}
	RTE_LOG(DEBUG, PKTJ_CTRL1, "neigh %s operation succeeded\n", ipbuf);
	return 0;
}
Example #11
int
app_pipeline_fc_load_file_ipv4(char *filename,
	struct pipeline_fc_key *keys,
	uint32_t *port_ids,
	uint32_t *flow_ids,
	uint32_t *n_keys,
	uint32_t *line)
{
	FILE *f = NULL;
	char file_buf[1024];
	uint32_t i, l;

	/* Check input arguments */
	if ((filename == NULL) ||
		(keys == NULL) ||
		(port_ids == NULL) ||
		(flow_ids == NULL) ||
		(n_keys == NULL) ||
		(*n_keys == 0) ||
		(line == NULL)) {
		if (line)
			*line = 0;
		return -1;
		}

	/* Open input file */
	f = fopen(filename, "r");
	if (f == NULL) {
		*line = 0;
		return -1;
	}

	/* Read file */
	for (i = 0, l = 1; i < *n_keys; l++) {
		char *tokens[32];
		uint32_t n_tokens = RTE_DIM(tokens);

		struct in_addr sipaddr, dipaddr;
		uint16_t sport, dport;
		uint8_t proto;
		uint32_t portid, flowid;
		int status;

		if (fgets(file_buf, sizeof(file_buf), f) == NULL)
			break;

		status = parse_tokenize_string(file_buf, tokens, &n_tokens);
		if (status)
			goto error2;

		if ((n_tokens == 0) || (tokens[0][0] == '#'))
			continue;

		if ((n_tokens != 10) ||
			strcmp(tokens[0], "ipv4") ||
			parse_ipv4_addr(tokens[1], &sipaddr) ||
			parse_ipv4_addr(tokens[2], &dipaddr) ||
			parser_read_uint16(&sport, tokens[3]) ||
			parser_read_uint16(&dport, tokens[4]) ||
			parser_read_uint8(&proto, tokens[5]) ||
			strcmp(tokens[6], "port") ||
			parser_read_uint32(&portid, tokens[7]) ||
			strcmp(tokens[8], "id") ||
			parser_read_uint32(&flowid, tokens[9]))
			goto error2;

		keys[i].type = FLOW_KEY_IPV4_5TUPLE;
		keys[i].key.ipv4_5tuple.ip_src = rte_be_to_cpu_32(sipaddr.s_addr);
		keys[i].key.ipv4_5tuple.ip_dst = rte_be_to_cpu_32(dipaddr.s_addr);
		keys[i].key.ipv4_5tuple.port_src = sport;
		keys[i].key.ipv4_5tuple.port_dst = dport;
		keys[i].key.ipv4_5tuple.proto = proto;

		port_ids[i] = portid;
		flow_ids[i] = flowid;

		if (app_pipeline_fc_key_check(&keys[i]))
			goto error2;

		i++;
	}

	/* Close file */
	*n_keys = i;
	fclose(f);
	return 0;

error2:
	*line = l;
	fclose(f);
	return -1;
}
Example #12
static int
route4(__rte_unused struct rtmsg* route,
       route_action_t action,
       struct in_addr* addr,
       uint8_t depth,
       struct in_addr* nexthop,
       uint8_t type,
       void* args)
{
	// If route add
	//   lookup next hop in neighbor table ipv4
	//   if not lookup
	//     create next hop, with flag invalid and addr = nexthop
	//   nexthopid = last id
	//
	//   register new route in lpm, with nexthop id
	//   increment refcount in neighbor
	// If route delete
	//   lookup next hop in neighbor table ipv4
	//   if not lookup
	//     then WTF TABLE CORRUPTED
	//   remove route from lpm
	//   decrement refcount in neighbor
	//   if refcount reached 0
	//     then flag entry empty

	struct control_handle* handle = args;
	assert(handle != NULL);
	uint16_t nexthop_id;
	int s;
	int32_t socket_id = handle->socket_id;
	struct in_addr blackhole_addr4 = {rte_be_to_cpu_32(INADDR_ANY)};

	if (type == RTN_BLACKHOLE) {
		nexthop = &blackhole_addr4;
	}

	if (action == ROUTE_ADD) {
		RTE_LOG(DEBUG, PKTJ_CTRL1, "adding an ipv4 route...\n");
		// lookup nexthop
		s = neighbor4_lookup_nexthop(neighbor4_struct[socket_id],
					     nexthop, &nexthop_id);
		if (s < 0) {
			s = neighbor4_add_nexthop(neighbor4_struct[socket_id],
						  nexthop, &nexthop_id,
						  NEI_ACTION_FWD);
			if (s < 0) {
				RTE_LOG(ERR, PKTJ_CTRL1,
					"failed to add a "
					"nexthop during "
					"route adding...\n");
				return -1;
			}
		}
		s = rte_lpm_add(ipv4_pktj_lookup_struct[socket_id],
				rte_be_to_cpu_32(addr->s_addr), depth,
				nexthop_id);
		if (s < 0) {
			lpm4_stats[socket_id].nb_add_ko++;
			RTE_LOG(ERR, PKTJ_CTRL1,
				"failed to add a route in "
				"lpm during route "
				"adding...\n");
			return -1;
		}
		neighbor4_refcount_incr(neighbor4_struct[socket_id],
					nexthop_id);
		lpm4_stats[socket_id].nb_add_ok++;
	}

	if (action == ROUTE_DELETE) {
		RTE_LOG(DEBUG, PKTJ_CTRL1, "deleting an ipv4 route...\n");
		// lookup nexthop
		s = neighbor4_lookup_nexthop(neighbor4_struct[socket_id],
					     nexthop, &nexthop_id);
		if (s < 0) {
			RTE_LOG(ERR, PKTJ_CTRL1,
				"failed to find nexthop "
				"during route deletion...\n");
			return -1;
		}

		s = rte_lpm_delete(ipv4_pktj_lookup_struct[socket_id],
				   rte_be_to_cpu_32(addr->s_addr), depth);
		if (s < 0) {
			lpm4_stats[socket_id].nb_del_ko++;
			RTE_LOG(ERR, PKTJ_CTRL1, "failed to delete route...\n");
			return -1;
		}
		neighbor4_refcount_decr(neighbor4_struct[socket_id],
					nexthop_id);
		lpm4_stats[socket_id].nb_del_ok++;
	}
	RTE_LOG(DEBUG, PKTJ_CTRL1, "route operation succeeded\n");
	return 0;
}
Example #13
			if (socket_id == SOCKET_ID_ANY) {
				for (i = 0; i < NB_SOCKETS; i++) {
					for (j = netaddr; j < maxhost; j++) {
						rate_limit_ipv4(
						    (union rlimit_addr*)&j, num,
						    i);
					}
				}
			} else {
				for (j = netaddr; j < maxhost; j++) {
					rate_limit_ipv4((union rlimit_addr*)&j,
							num, socket_id);
				}
			}
		} else {
			netaddr = rte_be_to_cpu_32(ip->addr.ipv4.s_addr);
			if (socket_id == SOCKET_ID_ANY) {
				for (i = 0; i < NB_SOCKETS; i++) {
					res += rate_limit_ipv4(
					    (union rlimit_addr*)&netaddr, num,
					    i);
				}
			} else {
				res = rate_limit_ipv4(
				    (union rlimit_addr*)&netaddr, num,
				    socket_id);
			}
		}
	} else if (ip->family == AF_INET6) {
		if (socket_id == SOCKET_ID_ANY) {  // rate limit for all sockets
			for (i = 0; i < NB_SOCKETS; i++) {
Example #14
void app_main_loop_rx_flow(void)
{
	const unsigned lcore_id = rte_lcore_id();
	struct rte_mbuf *bufs[RX_BURST_SIZE];
	struct rte_mbuf *buf;
	struct ether_hdr *eth_hdr;
	struct ipv4_hdr *ipv4_hdr;
	struct ipv6_hdr *ipv6_hdr;
	struct tcp_hdr *tcp_hdr;
	struct udp_hdr *udp_hdr;
	struct pkt_info pktinfo;
	int32_t ret;
	uint16_t i, n_rx, queueid;
	uint8_t port;

	port = 0;
	queueid = (uint16_t) app.lcore_conf[lcore_id].queue_id;
	RTE_LOG(INFO, FLOWATCHER, "[core %u] packet RX & update flow_table Ready\n", lcore_id);

	while (!app_quit_signal) {

		n_rx = rte_eth_rx_burst(port, queueid, bufs, RX_BURST_SIZE);
		if (unlikely(n_rx == 0)) {
			port++;
			if (port >= app.n_ports)
				port = 0;
			continue;
		}
		app_stat[queueid].rx_count += n_rx;

		for (i = 0; i < n_rx; i++) {
			buf = bufs[i];

			pktinfo.timestamp = rte_rdtsc();
			pktinfo.pktlen = rte_pktmbuf_pkt_len(buf);

			eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);

			/* strip vlan_hdr */
			if (eth_hdr->ether_type == rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
				/* struct vlan_hdr *vh = (struct vlan_hdr *) &eth_hdr[1]; */
				/* buf->ol_flags |= PKT_RX_VLAN_PKT; */
				/* buf->vlan_tci = rte_be_to_cpu_16(vh->vlan_tci); */
				/* memmove(rte_pktmbuf_adj(buf, sizeof(struct vlan_hdr)), */
				/* 		eth_hdr, 2 * ETHER_ADDR_LEN); */
				/* eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *); */
				eth_hdr = (struct ether_hdr *) rte_pktmbuf_adj(buf, sizeof(struct vlan_hdr));
			}

			if (eth_hdr->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
				/* IPv4 */
				pktinfo.type = PKT_IP_TYPE_IPV4;
				ipv4_hdr = (struct ipv4_hdr *) &eth_hdr[1];

				pktinfo.key.v4.src_ip = rte_be_to_cpu_32(ipv4_hdr->src_addr);
				pktinfo.key.v4.dst_ip = rte_be_to_cpu_32(ipv4_hdr->dst_addr);
				pktinfo.key.v4.proto = ipv4_hdr->next_proto_id;

				switch (ipv4_hdr->next_proto_id) {
					case IPPROTO_TCP:
						tcp_hdr = (struct tcp_hdr *) &ipv4_hdr[1];
						pktinfo.key.v4.src_port = rte_be_to_cpu_16(tcp_hdr->src_port);
						pktinfo.key.v4.dst_port = rte_be_to_cpu_16(tcp_hdr->dst_port);
						break;
					case IPPROTO_UDP:
						udp_hdr = (struct udp_hdr *) &ipv4_hdr[1];
						pktinfo.key.v4.src_port = rte_be_to_cpu_16(udp_hdr->src_port);
						pktinfo.key.v4.dst_port = rte_be_to_cpu_16(udp_hdr->dst_port);
						break;
					default:
						pktinfo.key.v4.src_port = 0;
						pktinfo.key.v4.dst_port = 0;
						break;
				}

				rte_pktmbuf_free(buf);

				/* update flow_table_v4 */
				ret = update_flow_entry(app.flow_table_v4[queueid], &pktinfo);
				if (ret == 0)
					app_stat[queueid].updated_tbl_v4_count++;
				else
					app_stat[queueid].miss_updated_tbl_v4_count++;

			} else if (eth_hdr->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6)) {
				/* IPv6 */
				pktinfo.type = PKT_IP_TYPE_IPV6;
				ipv6_hdr = (struct ipv6_hdr *) &eth_hdr[1];

				rte_memcpy(pktinfo.key.v6.src_ip, ipv6_hdr->src_addr, 16);
				rte_memcpy(pktinfo.key.v6.dst_ip, ipv6_hdr->dst_addr, 16);
				pktinfo.key.v6.proto = ipv6_hdr->proto;

				switch (ipv6_hdr->proto) {
					case IPPROTO_TCP:
						tcp_hdr = (struct tcp_hdr *) &ipv6_hdr[1];
						pktinfo.key.v6.src_port = rte_be_to_cpu_16(tcp_hdr->src_port);
						pktinfo.key.v6.dst_port = rte_be_to_cpu_16(tcp_hdr->dst_port);
						break;
					case IPPROTO_UDP:
						udp_hdr = (struct udp_hdr *) &ipv6_hdr[1];
						pktinfo.key.v6.src_port = rte_be_to_cpu_16(udp_hdr->src_port);
						pktinfo.key.v6.dst_port = rte_be_to_cpu_16(udp_hdr->dst_port);
						break;
					default:
						pktinfo.key.v6.src_port = 0;
						pktinfo.key.v6.dst_port = 0;
						break;
				}

				rte_pktmbuf_free(buf);

				/* update flow_table_v6 */
				ret = update_flow_entry(app.flow_table_v6[queueid], &pktinfo);
				if (ret == 0)
					app_stat[queueid].updated_tbl_v6_count++;
				else
					app_stat[queueid].miss_updated_tbl_v6_count++;

			} else {
				/* others */
				app_stat[queueid].unknown_pkt_count++;
				rte_pktmbuf_free(buf);
				continue;
			}
		}

		port++;
		if (port >= app.n_ports)
			port = 0;
	}

	RTE_LOG(INFO, FLOWATCHER, "[core %u] packet RX & update flow_table finished\n", lcore_id);
}
Example #15
int
sfc_efx_tso_do(struct sfc_efx_txq *txq, unsigned int idx,
	       struct rte_mbuf **in_seg, size_t *in_off, efx_desc_t **pend,
	       unsigned int *pkt_descs, size_t *pkt_len)
{
	uint8_t *tsoh;
	const struct tcp_hdr *th;
	efsys_dma_addr_t header_paddr;
	uint16_t packet_id;
	uint32_t sent_seq;
	struct rte_mbuf *m = *in_seg;
	size_t nh_off = m->l2_len; /* IP header offset */
	size_t tcph_off = m->l2_len + m->l3_len; /* TCP header offset */
	size_t header_len = m->l2_len + m->l3_len + m->l4_len;
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(txq->evq->sa->nic);

	idx += SFC_TSO_OPT_DESCS_NUM;

	/* Packets which have too big headers should be discarded */
	if (unlikely(header_len > SFC_TSOH_STD_LEN))
		return EMSGSIZE;

	/*
	 * The TCP header must start at most 208 bytes into the frame.
	 * If it starts later than this then the NIC won't realise
	 * it's a TCP packet and TSO edits won't be applied
	 */
	if (unlikely(tcph_off > encp->enc_tx_tso_tcp_header_offset_limit))
		return EMSGSIZE;

	header_paddr = rte_pktmbuf_iova(m);

	/*
	 * Sometimes headers may be split across multiple mbufs. In such cases
	 * we need to glue those pieces and store them in some temporary place.
	 * Also, packet headers must be contiguous in memory, so that
	 * they can be referred to with a single DMA descriptor. EF10 has no
	 * limitations on address boundaries crossing by DMA descriptor data.
	 */
	if (m->data_len < header_len) {
		tsoh = txq->sw_ring[idx & txq->ptr_mask].tsoh;
		sfc_tso_prepare_header(tsoh, header_len, in_seg, in_off);

		header_paddr = rte_malloc_virt2iova((void *)tsoh);
	} else {
		if (m->data_len == header_len) {
			*in_off = 0;
			*in_seg = m->next;
		} else {
			*in_off = header_len;
		}

		tsoh = rte_pktmbuf_mtod(m, uint8_t *);
	}

	/* Handle IP header */
	if (m->ol_flags & PKT_TX_IPV4) {
		const struct ipv4_hdr *iphe4;

		iphe4 = (const struct ipv4_hdr *)(tsoh + nh_off);
		rte_memcpy(&packet_id, &iphe4->packet_id, sizeof(uint16_t));
		packet_id = rte_be_to_cpu_16(packet_id);
	} else if (m->ol_flags & PKT_TX_IPV6) {
		packet_id = 0;
	} else {
		return EINVAL;
	}

	/* Handle TCP header */
	th = (const struct tcp_hdr *)(tsoh + tcph_off);

	rte_memcpy(&sent_seq, &th->sent_seq, sizeof(uint32_t));
	sent_seq = rte_be_to_cpu_32(sent_seq);

	efx_tx_qdesc_tso2_create(txq->common, packet_id, 0, sent_seq,
				 m->tso_segsz,
				 *pend, EFX_TX_FATSOV2_OPT_NDESCS);

	*pend += EFX_TX_FATSOV2_OPT_NDESCS;
	*pkt_descs += EFX_TX_FATSOV2_OPT_NDESCS;

	efx_tx_qdesc_dma_create(txq->common, header_paddr, header_len,
				B_FALSE, (*pend)++);
	(*pkt_descs)++;
	*pkt_len -= header_len;

	return 0;
}
Example #16
File: scsi.c Project: emmericp/dpdk
static int
vhost_bdev_scsi_process_block(struct vhost_block_dev *bdev,
			      struct vhost_scsi_task *task)
{
	uint64_t lba, *temp64;
	uint32_t xfer_len, *temp32;
	uint16_t *temp16;
	uint8_t *cdb = (uint8_t *)task->req->cdb;

	switch (cdb[0]) {
	case SBC_READ_6:
	case SBC_WRITE_6:
		lba = (uint64_t)cdb[1] << 16;
		lba |= (uint64_t)cdb[2] << 8;
		lba |= (uint64_t)cdb[3];
		xfer_len = cdb[4];
		if (xfer_len == 0)
			xfer_len = 256;
		return vhost_bdev_scsi_readwrite(bdev, task, lba, xfer_len);

	case SBC_READ_10:
	case SBC_WRITE_10:
		temp32 = (uint32_t *)&cdb[2];
		lba = rte_be_to_cpu_32(*temp32);
		temp16 = (uint16_t *)&cdb[7];
		xfer_len = rte_be_to_cpu_16(*temp16);
		return vhost_bdev_scsi_readwrite(bdev, task, lba, xfer_len);

	case SBC_READ_12:
	case SBC_WRITE_12:
		temp32 = (uint32_t *)&cdb[2];
		lba = rte_be_to_cpu_32(*temp32);
		temp32 = (uint32_t *)&cdb[6];
		xfer_len = rte_be_to_cpu_32(*temp32);
		return vhost_bdev_scsi_readwrite(bdev, task, lba, xfer_len);

	case SBC_READ_16:
	case SBC_WRITE_16:
		temp64 = (uint64_t *)&cdb[2];
		lba = rte_be_to_cpu_64(*temp64);
		temp32 = (uint32_t *)&cdb[10];
		xfer_len = rte_be_to_cpu_32(*temp32);
		return vhost_bdev_scsi_readwrite(bdev, task, lba, xfer_len);

	case SBC_READ_CAPACITY_10: {
		uint8_t buffer[8];

		if (bdev->blockcnt - 1 > 0xffffffffULL)
			memset(buffer, 0xff, 4);
		else {
			temp32 = (uint32_t *)buffer;
			*temp32 = rte_cpu_to_be_32(bdev->blockcnt - 1);
		}
		temp32 = (uint32_t *)&buffer[4];
		*temp32 = rte_cpu_to_be_32(bdev->blocklen);
		memcpy(task->iovs[0].iov_base, buffer, sizeof(buffer));
		task->resp->status = SCSI_STATUS_GOOD;
		return sizeof(buffer);
	}

	case SBC_SYNCHRONIZE_CACHE_10:
	case SBC_SYNCHRONIZE_CACHE_16:
		task->resp->status = SCSI_STATUS_GOOD;
		return 0;
	}

	scsi_task_set_status(task, SCSI_STATUS_CHECK_CONDITION,
			     SCSI_SENSE_ILLEGAL_REQUEST,
			     SCSI_ASC_INVALID_FIELD_IN_CDB,
			     SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
	return 0;
}
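As a worked example of the SBC_READ_10 branch above: the LBA occupies big-endian CDB bytes 2..5 and the transfer length bytes 7..8. The sketch below uses memcpy (from <string.h>) to sidestep the unaligned casts; it is illustrative, not part of the original driver.

/* Decoding { 0x28, 0, 0x00, 0x00, 0x00, 0x10, 0, 0x00, 0x08, 0 } yields
 * lba = 16 and xfer_len = 8 once the big-endian fields are swapped.
 */
static void
example_read10_decode(const uint8_t *cdb)
{
	uint32_t lba_be;
	uint16_t len_be;
	uint64_t lba;
	uint32_t xfer_len;

	memcpy(&lba_be, &cdb[2], sizeof(lba_be));	/* bytes 2..5: LBA */
	memcpy(&len_be, &cdb[7], sizeof(len_be));	/* bytes 7..8: length */
	lba = rte_be_to_cpu_32(lba_be);
	xfer_len = rte_be_to_cpu_16(len_be);
	(void)lba;
	(void)xfer_len;
}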
Example #17
int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
{
	struct enic_fdir_node *key;
	struct filter fltr = {0};
	int32_t pos;
	u8 do_free = 0;
	u16 old_fltr_id = 0;
	u32 flowtype_supported;
	u16 flex_bytes;
	u16 queue;

	flowtype_supported = (
		(RTE_ETH_FLOW_NONFRAG_IPV4_TCP == params->input.flow_type) ||
		(RTE_ETH_FLOW_NONFRAG_IPV4_UDP == params->input.flow_type));

	flex_bytes = ((params->input.flow_ext.flexbytes[1] << 8 & 0xFF00) |
		(params->input.flow_ext.flexbytes[0] & 0xFF));

	if (!enic->fdir.hash ||
		(params->input.flow_ext.vlan_tci & 0xFFF) ||
		!flowtype_supported || flex_bytes ||
		params->action.behavior /* drop */) {
		enic->fdir.stats.f_add++;
		return -ENOTSUP;
	}

	queue = params->action.rx_queue;
	/* See if the key is already there in the table */
	pos = rte_hash_del_key(enic->fdir.hash, params);
	switch (pos) {
	case -EINVAL:
		enic->fdir.stats.f_add++;
		return -EINVAL;
	case -ENOENT:
		/* Add a new classifier entry */
		if (!enic->fdir.stats.free) {
			enic->fdir.stats.f_add++;
			return -ENOSPC;
		}
		key = rte_zmalloc("enic_fdir_node",
				  sizeof(struct enic_fdir_node), 0);
		if (!key) {
			enic->fdir.stats.f_add++;
			return -ENOMEM;
		}
		break;
	default:
		/* The entry is already present in the table.
		 * Check if there is a change in queue
		 */
		key = enic->fdir.nodes[pos];
		enic->fdir.nodes[pos] = NULL;
		if (unlikely(key->rq_index == queue)) {
			/* Nothing to be done */
			enic->fdir.stats.f_add++;
			pos = rte_hash_add_key(enic->fdir.hash, params);
			if (pos < 0) {
				dev_err(enic, "Add hash key failed\n");
				return pos;
			}
			enic->fdir.nodes[pos] = key;
			dev_warning(enic,
				"FDIR rule is already present\n");
			return 0;
		}

		if (likely(enic->fdir.stats.free)) {
			/* Add the filter and then delete the old one.
			 * This is to avoid packets from going into the
			 * default queue during the window between
			 * delete and add
			 */
			do_free = 1;
			old_fltr_id = key->fltr_id;
		} else {
			/* No free slots in the classifier.
			 * Delete the filter and add the modified one later
			 */
			vnic_dev_classifier(enic->vdev, CLSF_DEL,
				&key->fltr_id, NULL);
			enic->fdir.stats.free++;
		}

		break;
	}

	key->filter = *params;
	key->rq_index = queue;

	fltr.type = FILTER_IPV4_5TUPLE;
	fltr.u.ipv4.src_addr = rte_be_to_cpu_32(
		params->input.flow.ip4_flow.src_ip);
	fltr.u.ipv4.dst_addr = rte_be_to_cpu_32(
		params->input.flow.ip4_flow.dst_ip);
	fltr.u.ipv4.src_port = rte_be_to_cpu_16(
		params->input.flow.udp4_flow.src_port);
	fltr.u.ipv4.dst_port = rte_be_to_cpu_16(
		params->input.flow.udp4_flow.dst_port);

	if (RTE_ETH_FLOW_NONFRAG_IPV4_TCP == params->input.flow_type)
		fltr.u.ipv4.protocol = PROTO_TCP;
	else
		fltr.u.ipv4.protocol = PROTO_UDP;

	fltr.u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;

	if (!vnic_dev_classifier(enic->vdev, CLSF_ADD, &queue, &fltr)) {
		key->fltr_id = queue;
	} else {
		dev_err(enic, "Add classifier entry failed\n");
		enic->fdir.stats.f_add++;
		rte_free(key);
		return -1;
	}

	if (do_free)
		vnic_dev_classifier(enic->vdev, CLSF_DEL, &old_fltr_id, NULL);
	else {
		enic->fdir.stats.free--;
		enic->fdir.stats.add++;
	}

	pos = rte_hash_add_key(enic->fdir.hash, params);
	if (pos < 0) {
		dev_err(enic, "Add hash key failed\n");
		return pos;
	}

	enic->fdir.nodes[pos] = key;
	return 0;
}