Example #1
/**
 * Packet IO worker thread using bursts from/to IO resources
 *
 * @param arg  thread arguments of type 'thread_args_t *'
 */
static void *pktio_ifburst_thread(void *arg)
{
	int thr;
	thread_args_t *thr_args;
	int pkts, pkts_ok;
	odp_packet_t pkt_tbl[MAX_PKT_BURST];
	int src_idx, dst_idx;
	odp_pktio_t pktio_src, pktio_dst;

	thr = odp_thread_id();
	thr_args = arg;

	stats_t *stats = calloc(1, sizeof(stats_t));
	*thr_args->stats = stats;

	src_idx = thr_args->src_idx;
	dst_idx = (src_idx % 2 == 0) ? src_idx+1 : src_idx-1;
	pktio_src = gbl_args->pktios[src_idx];
	pktio_dst = gbl_args->pktios[dst_idx];

	printf("[%02i] srcif:%s dstif:%s spktio:%02" PRIu64
	       " dpktio:%02" PRIu64 " BURST mode\n",
	       thr,
	       gbl_args->appl.if_names[src_idx],
	       gbl_args->appl.if_names[dst_idx],
	       odp_pktio_to_u64(pktio_src), odp_pktio_to_u64(pktio_dst));
	odp_barrier_wait(&barrier);

	/* Loop packets */
	while (!exit_threads) {
		pkts = odp_pktio_recv(pktio_src, pkt_tbl, MAX_PKT_BURST);
		if (pkts <= 0)
			continue;

		/* Drop packets with errors */
		pkts_ok = drop_err_pkts(pkt_tbl, pkts);
		if (pkts_ok > 0) {
			int sent = odp_pktio_send(pktio_dst, pkt_tbl, pkts_ok);

			sent = sent > 0 ? sent : 0;
			if (odp_unlikely(sent < pkts_ok)) {
				stats->drops += pkts_ok - sent;
				do
					odp_packet_free(pkt_tbl[sent]);
				while (++sent < pkts_ok);
			}
		}

		if (odp_unlikely(pkts_ok != pkts))
			stats->drops += pkts - pkts_ok;

		if (pkts_ok == 0)
			continue;

		stats->packets += pkts_ok;
	}

	free(stats);
	return NULL;
}
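The destination interface is chosen by pairing even and odd indices (0<->1, 2<->3, ...). A minimal standalone sketch, not part of the original example, showing that the ternary above is equivalent to toggling the lowest bit:

/* Pair port indices 0<->1, 2<->3, ...; same result as the even/odd ternary. */
static int pair_port_idx(int src_idx)
{
	return src_idx ^ 1;
}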
Example #2
int socket(int domain, int type, int protocol)
{
	int sockfd = -1, ret_val;
	static int init_socket = 0;

	if (odp_unlikely(init_socket == 0)) {
		ret_val = ofp_libc_init();
		if (ret_val == EXIT_FAILURE)
			return sockfd;

		init_socket = 1;

		ret_val = ofp_lib_start();
		if (ret_val == EXIT_SUCCESS)
			init_socket = 2;
		else
			init_socket = 3;
	}

	if (odp_unlikely(domain != AF_INET || init_socket != 2))
		sockfd = (*libc_socket)(domain, type, protocol);
	else
		sockfd = ofp_socket(domain, type, protocol);

	OFP_DBG("Created socket '%d' with domain:%d, type:%d, protocol:%d.",
		sockfd, domain, type, protocol);

	return sockfd;
}
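When OFP cannot handle the request, the call falls through to the C library via the libc_socket pointer. A minimal sketch, assuming (as is typical for LD_PRELOAD-style libc hooks, and presumably what ofp_libc_init() does) that the pointer is resolved with dlsym(); resolve_libc_socket() below is a hypothetical helper and needs linking with -ldl:

#define _GNU_SOURCE
#include <dlfcn.h>
#include <stdlib.h>

static int (*libc_socket)(int domain, int type, int protocol);

/* Hypothetical resolver: bind libc_socket to the next "socket" symbol
 * after this wrapper library in the symbol lookup order. */
static int resolve_libc_socket(void)
{
	libc_socket = (int (*)(int, int, int))dlsym(RTLD_NEXT, "socket");

	return libc_socket != NULL ? EXIT_SUCCESS : EXIT_FAILURE;
}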
Example #3
int odp_eth_rx_burst(pktio_entry_t *pktio_entry, odp_packet_t *rx_pkts,
		     unsigned int nb_pkts)
{
	int nb_rx;
	struct odp_eth_dev *dev;
	uint8_t	port_id	= pktio_entry->s.pkt_odp.portid;
	uint16_t queue_id = pktio_entry->s.pkt_odp.queueid;

	dev = &odp_eth_devices[port_id];

	if (pktio_cls_enabled(pktio_entry, queue_id)) {
		odp_packet_t tmpbuf[64];
		odp_packet_t onepkt;
		odp_packet_hdr_t *pkt_hdr;
		odp_pktio_t id;
		int i, j;

		if (nb_pkts > 64)
			nb_pkts = 64;

		nb_rx = (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
					     (void **)tmpbuf, nb_pkts);
		if (odp_unlikely(nb_rx <= 0))
			return nb_rx;

		id = pktio_entry->s.handle;
		for (i = 0, j = 0; i < nb_rx; i++) {
			onepkt = tmpbuf[i];
			pkt_hdr = odp_packet_hdr(onepkt);
			pkt_hdr->input = id;
			packet_parse_reset(pkt_hdr);
			packet_parse_l2(pkt_hdr);
			if (0 > _odp_packet_classifier(pktio_entry,
						       queue_id, onepkt)) {
				rx_pkts[j++] = onepkt;
			}
		}

		nb_rx = j;
	} else {
		nb_rx = (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
					     (void **)rx_pkts, nb_pkts);
	}

#ifdef ODP_ETHDEV_RXTX_CALLBACKS
	struct odp_eth_rxtx_callback *cb = dev->post_rx_burst_cbs[queue_id];

	if (odp_unlikely(cb)) {
		do {
			nb_rx = cb->fn.rx(port_id, queue_id,
					  (void **)rx_pkts, nb_rx, nb_pkts,
					  cb->param);
			cb = cb->next;
		} while (cb);
	}
#endif

	return nb_rx;
}
Example #4
odp_buffer_t buffer_alloc(void *pool, size_t size)
{
	struct pool_entry_s *pool_s = &((pool_entry_t *)pool)->s;
	uintmax_t totsize = size + pool_s->room_size;
	odp_buffer_hdr_t *buf;

	/* Reject oversized allocation requests */
	if (odp_unlikely(size > pool_s->max_size))
		return ODP_BUFFER_INVALID;

	/* Try to satisfy request from the local cache */
	buf = get_local_buf(&pool_s->local_cache[local_id], pool_s, totsize);

	/* If cache is empty, satisfy request from the pool */
	if (odp_unlikely(buf == NULL)) {
		buf = get_buf(pool_s);

		if (odp_unlikely(buf == NULL))
			return ODP_BUFFER_INVALID;

		/* Get blocks for this buffer, if pool uses application data */
		if (buf->size < totsize) {
			intmax_t needed = totsize - buf->size;

			do {
				uint8_t *blk = get_blk(pool_s);

				if (blk == NULL) {
					ret_buf(pool_s, buf);
					return ODP_BUFFER_INVALID;
				}
				buf->addr[buf->segcount++] = blk;
				needed -= pool_s->seg_size;
			} while (needed > 0);
			buf->size = buf->segcount * pool_s->seg_size;

			/* Record the hdr before the buffer head */
			*(unsigned long *)(buf->addr[0] -
			      ODP_HDR_BACK_PTR_SIZE) = (unsigned long)buf;
		}
	}

	/* Mark buffer as allocated */
	buf->allocator = local_id;

	/* By default, buffers inherit their pool's zeroization setting */
	buf->flags.zeroized = pool_s->flags.zeroized;

	/* By default, buffers are not associated with an ordered queue */
	buf->origin_qe = NULL;

	return (odp_buffer_t)buf;
}
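The do/while block-fetch loop above keeps adding seg_size-sized blocks until the shortfall is covered, i.e. it fetches ceil((totsize - buf->size) / seg_size) blocks. An illustrative helper, not part of the original code, expressing the same count directly:

#include <stdint.h>

/* Number of extra seg_size blocks the do/while loop above will fetch;
 * caller ensures totsize > buf_size and seg_size > 0. */
static inline uint32_t extra_segments(uintmax_t totsize, uintmax_t buf_size,
				      uint32_t seg_size)
{
	uintmax_t needed = totsize - buf_size;

	return (uint32_t)((needed + seg_size - 1) / seg_size); /* ceil division */
}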
Example #5
odp_buffer_t buffer_alloc(odp_pool_t pool_hdl, size_t size)
{
	uint32_t pool_id = pool_handle_to_index(pool_hdl);
	pool_entry_t *pool = get_pool_entry(pool_id);
	uintmax_t totsize = pool->s.headroom + size + pool->s.tailroom;
	odp_anybuf_t *buf;

	/* Reject oversized allocation requests */
	if ((pool->s.flags.unsegmented && totsize > pool->s.seg_size) ||
	    (!pool->s.flags.unsegmented &&
	     totsize > pool->s.seg_size * ODP_BUFFER_MAX_SEG))
		return ODP_BUFFER_INVALID;

	/* Try to satisfy request from the local cache */
	buf = (odp_anybuf_t *)(void *)get_local_buf(&local_cache[pool_id],
						    &pool->s, totsize);

	/* If cache is empty, satisfy request from the pool */
	if (odp_unlikely(buf == NULL)) {
		buf = (odp_anybuf_t *)(void *)get_buf(&pool->s);

		if (odp_unlikely(buf == NULL))
			return ODP_BUFFER_INVALID;

		/* Get blocks for this buffer, if pool uses application data */
		if (buf->buf.size < totsize) {
			intmax_t needed = totsize - buf->buf.size;
			do {
				uint8_t *blk = get_blk(&pool->s);
				if (blk == NULL) {
					ret_buf(&pool->s, &buf->buf);
					return ODP_BUFFER_INVALID;
				}
				buf->buf.addr[buf->buf.segcount++] = blk;
				needed -= pool->s.seg_size;
			} while (needed > 0);
			buf->buf.size = buf->buf.segcount * pool->s.seg_size;
		}
	}

	/* By default, buffers inherit their pool's zeroization setting */
	buf->buf.flags.zeroized = pool->s.flags.zeroized;

	if (buf->buf.type == ODP_EVENT_PACKET)
		packet_init(pool, &buf->pkt, size);

	return odp_hdr_to_buf(&buf->buf);
}
Example #6
enum ofp_return_code ofp_ipv6_processing(odp_packet_t *pkt)
{
	int res;
	int protocol = IS_IPV6;
	uint32_t flags;
	struct ofp_ip6_hdr *ipv6;
	struct ofp_nh6_entry *nh;
	struct ofp_ifnet *dev = odp_packet_user_ptr(*pkt);
	int is_ours = 0;

	ipv6 = (struct ofp_ip6_hdr *)odp_packet_l3_ptr(*pkt, NULL);

	if (odp_unlikely(ipv6 == NULL))
		return OFP_PKT_DROP;

	/* Is ipv6->dst_addr one of my IPv6 addresses from this interface? */
	if (ofp_ip6_equal(dev->ip6_addr, ipv6->ip6_dst.ofp_s6_addr) ||
		OFP_IN6_IS_SOLICITED_NODE_MC(ipv6->ip6_dst, dev->ip6_addr) ||
		(memcmp((const void *)((uintptr_t)dev->link_local + 8),
		(const void *)((uintptr_t)ipv6->ip6_dst.ofp_s6_addr + 8),
			2 * sizeof(uint32_t)) == 0)) {
		is_ours = 1;
	}
	/* check if it's ours for another ipv6 address */
	if (!is_ours) {
		nh = ofp_get_next_hop6(dev->vrf, ipv6->ip6_dst.ofp_s6_addr, &flags);
		if (nh && (nh->flags & OFP_RTF_LOCAL))
			is_ours = 1;
	}

	if (is_ours) {
		OFP_HOOK(OFP_HOOK_LOCAL, *pkt, &protocol, &res);
		if (res != OFP_PKT_CONTINUE) {
			OFP_DBG("OFP_HOOK_LOCAL returned %d", res);
			return res;
		}

		OFP_HOOK(OFP_HOOK_LOCAL_IPv6, *pkt, NULL, &res);
		if (res != OFP_PKT_CONTINUE) {
			OFP_DBG("OFP_HOOK_LOCAL_IPv6 returned %d", res);
			return res;
		}

		return ipv6_transport_classifier(pkt, ipv6->ofp_ip6_nxt);
	}

	OFP_HOOK(OFP_HOOK_FWD_IPv6, *pkt, NULL, &res);
	if (res != OFP_PKT_CONTINUE) {
		OFP_DBG("OFP_HOOK_FWD_IPv6 returned %d", res);
		return res;
	}

	nh = ofp_get_next_hop6(dev->vrf, ipv6->ip6_dst.ofp_s6_addr, &flags);
	if (nh == NULL)
		return OFP_PKT_CONTINUE;

	return ofp_ip6_output(*pkt, nh);
}
Example #7
static int send_packets(odp_pktout_queue_t pktout,
			odp_packet_t *pkt_tbl, unsigned pkts)
{
	unsigned tx_drops;
	unsigned sent = 0;

	if (pkts == 0)
		return 0;

	while (sent < pkts) {
		int ret;

		ret = odp_pktout_send(pktout, &pkt_tbl[sent], pkts - sent);

		if (odp_likely(ret > 0))
			sent += ret;
		else
			break;
	}

	tx_drops = pkts - sent;

	if (odp_unlikely(tx_drops))
		odp_packet_free_multi(&pkt_tbl[sent], tx_drops);

	return sent;
}
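A minimal usage sketch, assuming pktin/pktout queues configured elsewhere: receive one burst and pass it to send_packets(), which frees whatever could not be transmitted. forward_one_burst() is a hypothetical caller, not part of the original code:

/* Hypothetical caller; pktin/pktout setup is assumed to exist elsewhere. */
static void forward_one_burst(odp_pktin_queue_t pktin, odp_pktout_queue_t pktout)
{
	odp_packet_t pkt_tbl[32];
	int pkts = odp_pktin_recv(pktin, pkt_tbl, 32);

	if (pkts > 0)
		(void)send_packets(pktout, pkt_tbl, (unsigned)pkts);
}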
Example #8
int queue_enq(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr)
{
	int sched = 0;

	LOCK(&queue->s.lock);
	if (odp_unlikely(queue->s.status < QUEUE_STATUS_READY)) {
		UNLOCK(&queue->s.lock);
		ODP_ERR("Bad queue status\n");
		return -1;
	}

	if (queue->s.head == NULL) {
		/* Empty queue */
		queue->s.head = buf_hdr;
		queue->s.tail = buf_hdr;
		buf_hdr->next = NULL;
	} else {
		queue->s.tail->next = buf_hdr;
		queue->s.tail = buf_hdr;
		buf_hdr->next = NULL;
	}

	if (queue->s.status == QUEUE_STATUS_NOTSCHED) {
		queue->s.status = QUEUE_STATUS_SCHED;
		sched = 1; /* retval: schedule queue */
	}
	UNLOCK(&queue->s.lock);

	/* Add queue to scheduling */
	if (sched && schedule_queue(queue))
		ODP_ABORT("schedule_queue failed\n");

	return 0;
}
Example #9
int queue_enq(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr)
{
	int sched = 0;

	LOCK(queue);
	int status = LOAD_S32(queue->s.status);
	if (odp_unlikely(status < QUEUE_STATUS_READY)) {
		UNLOCK(queue);
		ODP_ERR("Bad queue status\n");
		return -1;
	}

	if (LOAD_PTR(queue->s.head) == NULL) {
		/* Empty queue */
		STORE_PTR(queue->s.head, buf_hdr);
		STORE_PTR(queue->s.tail, buf_hdr);
		buf_hdr->next = NULL;
	} else {
		STORE_PTR(((typeof(queue->s.tail))LOAD_PTR(queue->s.tail))->next, buf_hdr);
		STORE_PTR(queue->s.tail, buf_hdr);
		buf_hdr->next = NULL;
	}

	if (status == QUEUE_STATUS_NOTSCHED) {
		STORE_S32(queue->s.status, QUEUE_STATUS_SCHED);
		sched = 1; /* retval: schedule queue */
	}
	UNLOCK(queue);

	/* Add queue to scheduling */
	if (sched)
		schedule_queue(queue);

	return 0;
}
Example #10
static pktio_entry_t *get_entry(odp_pktio_t id)
{
	if (odp_unlikely(id == ODP_PKTIO_INVALID ||
			 id > ODP_CONFIG_PKTIO_ENTRIES))
		return NULL;

	return &pktio_tbl->entries[id - 1];
}
Example #11
static void send_arp_request(struct ofp_ifnet *dev, uint32_t gw)
{
	char buf[sizeof(struct ofp_ether_vlan_header) +
		sizeof(struct ofp_arphdr)];
	struct ofp_arphdr *arp;
	struct ofp_ether_header *e1 = (struct ofp_ether_header *)buf;
	struct ofp_ether_vlan_header *e2 =
					(struct ofp_ether_vlan_header *)buf;
	size_t size;
	odp_packet_t pkt;

	memset(buf, 0, sizeof(buf));
	memset(e1->ether_dhost, 0xff, OFP_ETHER_ADDR_LEN);
	memcpy(e1->ether_shost, dev->mac, OFP_ETHER_ADDR_LEN);

	if (ETH_WITH_VLAN(dev)) {
		arp = (struct ofp_arphdr *) (e2 + 1);
		e2->evl_encap_proto = odp_cpu_to_be_16(OFP_ETHERTYPE_VLAN);
		e2->evl_tag = odp_cpu_to_be_16(dev->vlan);
		e2->evl_proto = odp_cpu_to_be_16(OFP_ETHERTYPE_ARP);
		size = sizeof(*arp) + sizeof(*e2);
	} else {
		arp = (struct ofp_arphdr *) (e1 + 1);
		e1->ether_type = odp_cpu_to_be_16(OFP_ETHERTYPE_ARP);
		size = sizeof(*arp) + sizeof(*e1);
	}

	arp->hrd = odp_cpu_to_be_16(OFP_ARPHDR_ETHER);
	arp->pro = odp_cpu_to_be_16(OFP_ETHERTYPE_IP);
	arp->hln = OFP_ETHER_ADDR_LEN;
	arp->pln = sizeof(struct ofp_in_addr);
	arp->op  = odp_cpu_to_be_16(OFP_ARPOP_REQUEST);
	memcpy(arp->eth_src, e1->ether_shost, OFP_ETHER_ADDR_LEN);
	arp->ip_src = dev->ip_addr;
	memcpy(arp->eth_dst, e1->ether_dhost, OFP_ETHER_ADDR_LEN);
	arp->ip_dst = gw;

	pkt = ofp_packet_alloc(size);
	if (pkt == ODP_PACKET_INVALID) {
		OFP_ERR("ofp_packet_alloc failed");
		return;
	}

	memcpy(odp_packet_data(pkt), buf, size);

	if (odp_unlikely(ofp_if_type(dev) == OFP_IFT_VXLAN)) {
		ofp_vxlan_send_arp_request(pkt, dev);
		return;
	}

	odp_packet_has_eth_set(pkt, 1);
	odp_packet_has_arp_set(pkt, 1);
	odp_packet_l2_offset_set(pkt, 0);
	odp_packet_l3_offset_set(pkt, size - sizeof(struct ofp_arphdr));

	if (send_pkt_out(dev, pkt) == OFP_PKT_DROP)
		odp_packet_free(pkt);
}
Example #12
static enum ofp_return_code ofp_ip_output_add_eth(odp_packet_t pkt,
						  struct ip_out *odata)
{
	uint8_t l2_size = 0;
	void *l2_addr;

	if (!odata->gw) /* link local */
		odata->gw = odata->ip->ip_dst.s_addr;

	if (ETH_WITHOUT_VLAN(odata->vlan, odata->out_port))
		l2_size = sizeof(struct ofp_ether_header);
	else
		l2_size = sizeof(struct ofp_ether_vlan_header);

	if (odp_packet_l2_offset(pkt) + l2_size == odp_packet_l3_offset(pkt)) {
		l2_addr = odp_packet_l2_ptr(pkt, NULL);
	} else if (odp_packet_l3_offset(pkt) >= l2_size) {
		odp_packet_l2_offset_set(pkt,
					odp_packet_l3_offset(pkt) - l2_size);
		l2_addr = odp_packet_l2_ptr(pkt, NULL);
	} else {
		l2_addr = odp_packet_push_head(pkt,
					l2_size - odp_packet_l3_offset(pkt));
		odp_packet_l2_offset_set(pkt, 0);
		odp_packet_l3_offset_set(pkt, l2_size);
		odp_packet_l4_offset_set(pkt, l2_size + (odata->ip->ip_hl<<2));
	}

	if (odp_unlikely(l2_addr == NULL)) {
		OFP_DBG("l2_addr == NULL");
		return OFP_PKT_DROP;
	}

	if (ETH_WITHOUT_VLAN(odata->vlan, odata->out_port)) {
		struct ofp_ether_header *eth =
				(struct ofp_ether_header *)l2_addr;
		uint32_t addr = odp_be_to_cpu_32(odata->ip->ip_dst.s_addr);

		if (OFP_IN_MULTICAST(addr)) {
			eth->ether_dhost[0] = 0x01;
			eth->ether_dhost[1] = 0x00;
			eth->ether_dhost[2] = 0x5e;
			eth->ether_dhost[3] = (addr >> 16) & 0x7f;
			eth->ether_dhost[4] = (addr >> 8) & 0xff;
			eth->ether_dhost[5] = addr & 0xff;
		} else if (odata->dev_out->ip_addr == odata->ip->ip_dst.s_addr) {
			odata->is_local_address = 1;
			ofp_copy_mac(eth->ether_dhost, &(odata->dev_out->mac[0]));
		} else if (ofp_get_mac(odata->dev_out, odata->gw, eth->ether_dhost) < 0) {
			send_arp_request(odata->dev_out, odata->gw);
			return ofp_arp_save_ipv4_pkt(pkt, odata->nh,
						     odata->gw, odata->dev_out);
		}

		ofp_copy_mac(eth->ether_shost, odata->dev_out->mac);
		eth->ether_type = odp_cpu_to_be_16(OFP_ETHERTYPE_IP);
	} else {
Example #13
uint8_t *odp_packet_l3(odp_packet_t pkt)
{
	const size_t offset = odp_packet_l3_offset(pkt);

	if (odp_unlikely(offset == ODP_PACKET_OFFSET_INVALID))
		return NULL;

	return odp_packet_buf_addr(pkt) + offset;
}
Example #14
void odp_buffer_free(odp_buffer_t buf)
{
	odp_buffer_hdr_t *buf_hdr = odp_buf_to_hdr(buf);
	pool_entry_t *pool = odp_buf_to_pool(buf_hdr);

	if (odp_unlikely(pool->s.low_wm_assert))
		ret_buf(&pool->s, buf_hdr);
	else
		ret_local_buf(&local_cache[pool->s.pool_id], buf_hdr);
}
Example #15
/**
 * Drop packets which input parsing marked as containing errors.
 *
 * Frees packets with error and modifies pkt_tbl[] to only contain packets with
 * no detected errors.
 *
 * @param pkt_tbl  Array of packets
 * @param num      Number of packets in pkt_tbl[]
 *
 * @return Number of packets dropped
 */
static int drop_err_pkts(odp_packet_t pkt_tbl[], unsigned num)
{
	odp_packet_t pkt;
	unsigned dropped = 0;
	unsigned i, j;

	for (i = 0, j = 0; i < num; ++i) {
		pkt = pkt_tbl[i];

		if (odp_unlikely(odp_packet_has_error(pkt))) {
			odp_packet_free(pkt); /* Drop */
			dropped++;
		} else if (odp_unlikely(i != j++)) {
			pkt_tbl[j-1] = pkt;
		}
	}

	return dropped;
}
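Since this variant returns the drop count, a caller derives the number of surviving (compacted) packets by subtraction. A minimal sketch of such a caller; keep_good_pkts() is hypothetical:

/* Hypothetical caller: pkt_tbl[] holds num freshly received packets. */
static unsigned keep_good_pkts(odp_packet_t pkt_tbl[], unsigned num)
{
	unsigned dropped = drop_err_pkts(pkt_tbl, num);

	/* Survivors are compacted into pkt_tbl[0 .. num - dropped - 1]. */
	return num - dropped;
}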
Example #16
/**
 * Drop packets which input parsing marked as containing errors.
 *
 * Frees packets with error and modifies pkt_tbl[] to only contain packets with
 * no detected errors.
 *
 * @param pkt_tbl  Array of packets
 * @param len      Length of pkt_tbl[]
 *
 * @return Number of packets with no detected error
 */
static int drop_err_pkts(odp_packet_t pkt_tbl[], unsigned len)
{
	odp_packet_t pkt;
	unsigned pkt_cnt = len;
	unsigned i, j;

	for (i = 0, j = 0; i < len; ++i) {
		pkt = pkt_tbl[i];

		if (odp_unlikely(odp_packet_error(pkt))) {
			odp_packet_free(pkt); /* Drop */
			pkt_cnt--;
		} else if (odp_unlikely(i != j++)) {
			pkt_tbl[j-1] = pkt;
		}
	}

	return pkt_cnt;
}
Example #17
void odp_buffer_free(odp_buffer_t buf)
{
	odp_buffer_hdr_t *buf_hdr = odp_buf_to_hdr(buf);
	pool_entry_t *pool = odp_buf_to_pool(buf_hdr);

	ODP_ASSERT(buf_hdr->allocator != ODP_FREEBUF);

	if (odp_unlikely(pool->s.low_wm_assert))
		ret_buf(&pool->s, buf_hdr);
	else
		ret_local_buf(&pool->s.local_cache[local_id], buf_hdr);
}
Example #18
enum ofp_return_code ofp_eth_vlan_processing(odp_packet_t *pkt)
{
	uint16_t vlan = 0, ethtype;
	struct ofp_ether_header *eth;
	struct ofp_ifnet *ifnet = odp_packet_user_ptr(*pkt);

	eth = (struct ofp_ether_header *)odp_packet_l2_ptr(*pkt, NULL);

	if (odp_unlikely(eth == NULL)) {
		OFP_DBG("eth is NULL");
		return OFP_PKT_DROP;
	}

	ethtype = odp_be_to_cpu_16(eth->ether_type);

	if (ethtype == OFP_ETHERTYPE_VLAN) {
		struct ofp_ether_vlan_header *vlan_hdr;

		vlan_hdr = (struct ofp_ether_vlan_header *)eth;
		vlan = OFP_EVL_VLANOFTAG(odp_be_to_cpu_16(vlan_hdr->evl_tag));
		ethtype = odp_be_to_cpu_16(vlan_hdr->evl_proto);
		ifnet = ofp_get_ifnet(ifnet->port, vlan);
		if (!ifnet)
			return OFP_PKT_DROP;
		if (odp_likely(ofp_if_type(ifnet) != OFP_IFT_VXLAN))
			odp_packet_user_ptr_set(*pkt, ifnet);
	}

	OFP_DBG("ETH TYPE = %04x", ethtype);

	/* network layer classifier */
	switch (ethtype) {
	/* STUB: except for ARP, just terminate all traffic to slowpath.
	 * FIXME: test/implement other cases */
#ifdef INET
	case OFP_ETHERTYPE_IP:
		return ofp_ipv4_processing(pkt);
#endif /* INET */
#ifdef INET6
	case OFP_ETHERTYPE_IPV6:
		return ofp_ipv6_processing(pkt);
#endif /* INET6 */
#if 0
	case OFP_ETHERTYPE_MPLS:
		return OFP_PKT_DROP;
#endif
	case OFP_ETHERTYPE_ARP:
		return ofp_arp_processing(pkt);
	default:
		return OFP_PKT_CONTINUE;
	}
}
Example #19
static inline uint8_t parse_ipv4(odp_packet_hdr_t *pkt_hdr, odp_ipv4hdr_t *ipv4,
				size_t *offset_out)
{
	uint8_t ihl;
	uint16_t frag_offset;

	ihl = ODP_IPV4HDR_IHL(ipv4->ver_ihl);
	if (odp_unlikely(ihl < ODP_IPV4HDR_IHL_MIN)) {
		pkt_hdr->error_flags.ip_err = 1;
		return 0;
	}

	if (odp_unlikely(ihl > ODP_IPV4HDR_IHL_MIN)) {
		pkt_hdr->input_flags.ipopt = 1;
		return 0;
	}

	/* A packet is a fragment if:
	*  "more fragments" flag is set (all fragments except the last)
	*     OR
	*  "fragment offset" field is nonzero (all fragments except the first)
	*/
	frag_offset = odp_be_to_cpu_16(ipv4->frag_offset);
	if (odp_unlikely(ODP_IPV4HDR_IS_FRAGMENT(frag_offset))) {
		pkt_hdr->input_flags.ipfrag = 1;
		return 0;
	}

	if (ipv4->proto == ODP_IPPROTO_ESP ||
	    ipv4->proto == ODP_IPPROTO_AH) {
		pkt_hdr->input_flags.ipsec = 1;
		return 0;
	}

	/* Set pkt_hdr->input_flags.ipopt when checking L4 hdrs after return */

	*offset_out = sizeof(uint32_t) * ihl;
	return ipv4->proto;
}
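The fragment test combines the more-fragments flag with the 13-bit fragment offset. A sketch of the check that ODP_IPV4HDR_IS_FRAGMENT presumably performs; the same 0x3fff mask appears in the GRE examples further down:

#include <stdint.h>

/* Fragment if the MF flag (0x2000) is set or the 13-bit offset (0x1fff)
 * is nonzero; frag_offset must already be in host byte order. */
static inline int ipv4_is_fragment(uint16_t frag_offset)
{
	return (frag_offset & 0x3fff) != 0;
}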
Example #20
/**
 * Allocate per packet processing context and associate it with
 * packet buffer
 *
 * @param pkt  Packet
 *
 * @return pointer to context area
 */
static
pkt_ctx_t *alloc_pkt_ctx(odp_packet_t pkt)
{
	odp_buffer_t ctx_buf = odp_buffer_alloc(ctx_pool);
	pkt_ctx_t *ctx;

	if (odp_unlikely(ODP_BUFFER_INVALID == ctx_buf))
		return NULL;

	ctx = odp_buffer_addr(ctx_buf);
	memset(ctx, 0, sizeof(*ctx));
	ctx->buffer = ctx_buf;
	odp_packet_user_ptr_set(pkt, ctx);

	return ctx;
}
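A minimal usage sketch built on alloc_pkt_ctx(): allocate a context for a received packet and return its backing buffer when processing ends. process_pkt() is a hypothetical caller, not part of the original code:

/* Hypothetical per-packet processing step using alloc_pkt_ctx(). */
static int process_pkt(odp_packet_t pkt)
{
	pkt_ctx_t *ctx = alloc_pkt_ctx(pkt);

	if (ctx == NULL)
		return -1;

	/* ... per-packet work using ctx ... */

	odp_buffer_free(ctx->buffer); /* release the context buffer when done */
	return 0;
}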
Example #21
static void *pkt_io_recv(void *arg)
{
	odp_pktio_t pktio;
	odp_packet_t pkt, pkt_tbl[OFP_PKT_BURST_SIZE];
	int pkt_idx, pkt_cnt;
	struct pktio_thr_arg *thr_args;
	ofp_pkt_processing_func pkt_func;

	thr_args = arg;
	pkt_func = thr_args->pkt_func;

	if (odp_init_local(ODP_THREAD_WORKER)) {
		OFP_ERR("Error: ODP local init failed.\n");
		return NULL;
	}
	if (ofp_init_local()) {
		OFP_ERR("Error: OFP local init failed.\n");
		return NULL;
	}

	pktio = ofp_port_pktio_get(thr_args->port);

	OFP_DBG("PKT-IO receive starting on port: %d, pktio-id: %"PRIX64"\n",
		  thr_args->port, odp_pktio_to_u64(pktio));

	while (1) {
		pkt_cnt = odp_pktio_recv(pktio, pkt_tbl, OFP_PKT_BURST_SIZE);

		for (pkt_idx = 0; pkt_idx < pkt_cnt; pkt_idx++) {
			pkt = pkt_tbl[pkt_idx];

			if (odp_unlikely(odp_packet_has_error(pkt))) {
				OFP_DBG("Packet with error dropped.\n");
				odp_packet_free(pkt);
				continue;
			}

			ofp_packet_input(pkt, ODP_QUEUE_INVALID, pkt_func);
		}
#ifdef OFP_SEND_PKT_BURST
		ofp_send_pending_pkt_burst();
#endif /*OFP_SEND_PKT_BURST*/
	}

	/* Never reached */
	return NULL;
}
Example #22
int odp_pktio_restart(odp_pktio_t id)
{
	pktio_entry_t *entry;
	uint8_t port_id;
	int ret;

	entry = get_pktio_entry(id);
	if (entry == NULL) {
		ODP_DBG("pktio entry %d does not exist\n",
			id->unused_dummy_var);
		return -1;
	}

	if (odp_unlikely(is_free(entry))) {
		ODP_DBG("already freed pktio\n");
		return -1;
	}

	if (odp_pktio_is_not_hns_eth(entry)) {
		ODP_DBG("pktio entry %d is not ODP UMD pktio\n",
			id->unused_dummy_var);
		return -1;
	}

	port_id = entry->s.pkt_odp.portid;

	if (!odp_eth_dev_is_valid_port(port_id)) {
		ODP_DBG("pktio entry %d ODP UMD Invalid port_id=%d\n",
			id->unused_dummy_var, port_id);
		return -1;
	}

	/* Stop device */
	odp_eth_dev_stop(port_id);

	/* Start device */
	ret = odp_eth_dev_start(port_id);
	if (ret < 0) {
		ODP_ERR("odp_eth_dev_start:err=%d, port=%u\n",
			ret, (unsigned)port_id);
		return -1;
	}

	ODP_DBG("odp pmd restart done\n\n");

	return 0;
}
Example #23
enum ofp_return_code ofp_gre_processing(odp_packet_t *pkt)
{
	int frag_res = 0;
	struct ofp_ip *ip = (struct ofp_ip *)odp_packet_l3_ptr(*pkt, NULL);

	if (odp_unlikely(ofp_cksum_iph(ip, ip->ip_hl)))
		return OFP_PKT_DROP;

	if (odp_be_to_cpu_16(ip->ip_off) & 0x3fff) {
		frag_res = pkt_reassembly(pkt);
		if (frag_res != OFP_PKT_CONTINUE)
			return frag_res;

		ip = (struct ofp_ip *)odp_packet_l3_ptr(*pkt, NULL);
	}

	return ofp_inetsw[ofp_ip_protox_gre].pr_input(pkt, ip->ip_hl << 2);
}
Example #24
/**
 * Main receive function
 *
 * @param arg  thread arguments of type 'thread_args_t *'
 */
static void *gen_recv_thread(void *arg)
{
	int thr;
	odp_pktio_t pktio;
	thread_args_t *thr_args;
	odp_packet_t pkt;
	odp_event_t ev;

	thr = odp_thread_id();
	thr_args = arg;

	pktio = odp_pktio_lookup(thr_args->pktio_dev);
	if (pktio == ODP_PKTIO_INVALID) {
		EXAMPLE_ERR("  [%02i] Error: lookup of pktio %s failed\n",
			    thr, thr_args->pktio_dev);
		return NULL;
	}

	printf("  [%02i] created mode: RECEIVE\n", thr);

	for (;;) {
		if (args->appl.number != -1 &&
		    odp_atomic_load_u64(&counters.icmp) >=
		    (unsigned int)args->appl.number) {
			break;
		}

		/* Use schedule to get buf from any input queue */
		ev = odp_schedule(NULL, ODP_SCHED_WAIT);

		pkt = odp_packet_from_event(ev);
		/* Drop packets with errors */
		if (odp_unlikely(odp_packet_has_error(pkt))) {
			odp_packet_free(pkt);
			continue;
		}

		print_pkts(thr, &pkt, 1);

		odp_packet_free(pkt);
	}

	return arg;
}
Example #25
int queue_deq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[], int num)
{
	odp_buffer_hdr_t *hdr;
	int i;

	LOCK(queue);
	int status = LOAD_S32(queue->s.status);
	if (odp_unlikely(status < QUEUE_STATUS_READY)) {
		/* Bad queue, or queue has been destroyed.
		 * Scheduler finalizes queue destroy after this. */
		UNLOCK(queue);
		return -1;
	}

	hdr = LOAD_PTR(queue->s.head);

	if (hdr == NULL) {
		/* Already empty queue */
		if (status == QUEUE_STATUS_SCHED)
			STORE_S32(queue->s.status, QUEUE_STATUS_NOTSCHED);

		UNLOCK(queue);
		return 0;
	}

	for (i = 0; i < num && hdr; i++) {
		INVALIDATE(hdr);
		buf_hdr[i]       = hdr;
		hdr              = hdr->next;
		buf_hdr[i]->next = NULL;
	}

	STORE_PTR(queue->s.head, hdr);

	if (hdr == NULL) {
		/* Queue is now empty */
		STORE_PTR(queue->s.tail, NULL);
	}

	UNLOCK(queue);

	return i;
}
Example #26
int odp_hisi_timer_reset(struct odp_hisi_timer *tim, uint64_t ticks,
			 enum odp_hisi_timer_type type, unsigned tim_core,
			 odp_timer_cb_t fct, void *arg)
{
	uint64_t cur_time = odp_get_tsc_cycles();
	uint64_t period;

	if (odp_unlikely((tim_core != (unsigned)CORE_ID_ANY) &&
			 (!odp_core_is_enabled(tim_core))))
		return -1;

	if (type == PERIODICAL)
		period = ticks;
	else
		period = 0;

	return __odp_hisi_timer_reset(tim, cur_time + ticks, period, tim_core,
				      fct, arg, 0);
}
Example #27
/**
 * Packet IO worker thread using ODP queues
 *
 * @param arg  thread arguments of type 'thread_args_t *'
 */
static void *pktio_queue_thread(void *arg)
{
	int thr;
	odp_queue_t outq_def;
	odp_packet_t pkt;
	odp_event_t ev;
	thread_args_t *thr_args = arg;

	stats_t *stats = calloc(1, sizeof(stats_t));
	*thr_args->stats = stats;

	thr = odp_thread_id();

	printf("[%02i] QUEUE mode\n", thr);
	odp_barrier_wait(&barrier);

	/* Loop packets */
	while (!exit_threads) {
		/* Use schedule to get buf from any input queue */
		ev  = odp_schedule(NULL, ODP_SCHED_WAIT);
		pkt = odp_packet_from_event(ev);

		/* Drop packets with errors */
		if (odp_unlikely(drop_err_pkts(&pkt, 1) == 0)) {
			stats->drops += 1;
			continue;
		}

		outq_def = lookup_dest_q(pkt);

		/* Enqueue the packet for output */
		if (odp_queue_enq(outq_def, ev)) {
			printf("  [%i] Queue enqueue failed.\n", thr);
			odp_packet_free(pkt);
			continue;
		}

		stats->packets += 1;
	}

	free(stats);
	return NULL;
}
Example #28
static inline uint8_t parse_ipv6(odp_packet_hdr_t *pkt_hdr, odp_ipv6hdr_t *ipv6,
				size_t *offset_out)
{
	if (ipv6->next_hdr == ODP_IPPROTO_ESP ||
	    ipv6->next_hdr == ODP_IPPROTO_AH) {
		pkt_hdr->input_flags.ipopt = 1;
		pkt_hdr->input_flags.ipsec = 1;
		return 0;
	}

	if (odp_unlikely(ipv6->next_hdr == ODP_IPPROTO_FRAG)) {
		pkt_hdr->input_flags.ipopt = 1;
		pkt_hdr->input_flags.ipfrag = 1;
		return 0;
	}

	/* Don't step through more extensions */
	*offset_out = ODP_IPV6HDR_LEN;
	return ipv6->next_hdr;
}
Example #29
int queue_deq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[], int num)
{
	odp_buffer_hdr_t *hdr;
	int i;

	LOCK(&queue->s.lock);
	if (odp_unlikely(queue->s.status < QUEUE_STATUS_READY)) {
		/* Bad queue, or queue has been destroyed.
		 * Scheduler finalizes queue destroy after this. */
		UNLOCK(&queue->s.lock);
		return -1;
	}

	hdr = queue->s.head;

	if (hdr == NULL) {
		/* Already empty queue */
		if (queue->s.status == QUEUE_STATUS_SCHED)
			queue->s.status = QUEUE_STATUS_NOTSCHED;

		UNLOCK(&queue->s.lock);
		return 0;
	}

	for (i = 0; i < num && hdr; i++) {
		buf_hdr[i]       = hdr;
		hdr              = hdr->next;
		buf_hdr[i]->next = NULL;
	}

	queue->s.head = hdr;

	if (hdr == NULL) {
		/* Queue is now empty */
		queue->s.tail = NULL;
	}

	UNLOCK(&queue->s.lock);

	return i;
}
Example #30
enum ofp_return_code ofp_gre_processing(odp_packet_t pkt)
{
	struct ofp_ip *ip = (struct ofp_ip *)odp_packet_l3_ptr(pkt, NULL);

	if (odp_unlikely(ofp_cksum_buffer((uint16_t *) ip, ip->ip_hl<<2)))
		return OFP_PKT_DROP;

	if (odp_be_to_cpu_16(ip->ip_off) & 0x3fff) {
		OFP_UPDATE_PACKET_STAT(rx_ip_frag, 1);

		pkt = ofp_ip_reass(pkt);
		if (pkt == ODP_PACKET_INVALID)
			return OFP_PKT_ON_HOLD;

		OFP_UPDATE_PACKET_STAT(rx_ip_reass, 1);

		ip = (struct ofp_ip *)odp_packet_l3_ptr(pkt, NULL);
	}

	return ofp_inetsw[ofp_ip_protox_gre].pr_input(pkt, ip->ip_hl << 2);
}