Example #1
static void
test_ofp_packet_input_local_UDPv4_hook(void)
{
    odp_packet_t pkt;
    int res;

    /* Call ofp_packet_input with a pkt with destination ip
     * that matches the local ip on ifnet.
     * The packet is terminated in local UDPv4 hook */
    my_test_val = TEST_LOCAL_UDPv4_HOOK;
    ifnet->ip_addr = dst_ipaddr;
    if (create_odp_packet_ip4(&pkt, test_frame, sizeof(test_frame),
                              dst_ipaddr, 0)) {
        CU_FAIL("Fail to create packet");
        return;
    }

    res = ofp_packet_input(pkt, interface_queue[port],
                           ofp_eth_vlan_processing);
    CU_ASSERT_EQUAL(res, OFP_TEST_LOCAL_UDPv4_HOOK);
#ifdef SP
    CU_ASSERT_EQUAL(odp_queue_deq(ifnet->spq_def), ODP_EVENT_INVALID);
#endif /* SP */
    CU_ASSERT_EQUAL(odp_queue_deq(ifnet->outq_def), ODP_EVENT_INVALID);
    ifnet->ip_addr = 0;
    CU_PASS("ofp_packet_input_local_UDPv4_hook");
}
Example #2
static void
test_ofp_packet_input_to_sp(void)
{
    odp_packet_t pkt;
    odp_event_t ev;
    int res;

    my_test_val = TEST_FORWARD_HOOK;
    /* Call ofp_packet_input using a pkt with destination ip
     * that does NOT match the local ip on ifnet and NO route is found.
     * The packet is forwarded to slow path queue. */
    if (create_odp_packet_ip4(&pkt, test_frame, sizeof(test_frame), 0, 0)) {
        CU_FAIL("Fail to create packet");
        return;
    }

    res = ofp_packet_input(pkt, interface_queue[port],
                           ofp_eth_vlan_processing);

    CU_ASSERT_EQUAL(res, OFP_PKT_PROCESSED);

    CU_ASSERT_NOT_EQUAL(ev = odp_queue_deq(ifnet->spq_def),
                        ODP_EVENT_INVALID);
    CU_ASSERT_EQUAL(odp_queue_deq(ifnet->spq_def), ODP_EVENT_INVALID);
    CU_ASSERT_EQUAL(odp_queue_deq(ifnet->outq_def), ODP_EVENT_INVALID);

    if (memcmp(odp_packet_data(odp_packet_from_event(ev)),
               in_pkt_data, sizeof(test_frame)))
        CU_FAIL("corrupt data sent to slow path");
    odp_packet_free(odp_packet_from_event(ev));
    CU_PASS("ofp_packet_input_to_sp");
}
Example #3
static void
test_send_frame_vlan_to_vlan(void)
{
	odp_packet_t pkt = ODP_PACKET_INVALID;
	odp_event_t ev;
	uint8_t check_buf[144];
	int res;

	if (create_odp_packet_ip4(&pkt, test_frame_vlan,
				  sizeof(test_frame_vlan), 0)) {
		CU_FAIL("Fail to create packet");
		return;
	}

	memcpy(check_buf, test_frame_vlan, sizeof(test_frame_vlan));
	check_buf[15] = dev_vlan->vlan;

	res = ofp_send_frame(dev_vlan, pkt);
	CU_ASSERT_EQUAL(res, OFP_PKT_PROCESSED);

	ev = odp_queue_deq(dev->outq_def);
	CU_ASSERT_NOT_EQUAL_FATAL(ev, ODP_EVENT_INVALID);

	pkt = odp_packet_from_event(ev);
	CU_ASSERT_EQUAL_FATAL(odp_packet_len(pkt), sizeof(test_frame_vlan));

	if (memcmp(odp_packet_l2_ptr(pkt, NULL), check_buf,
		   sizeof(test_frame_vlan)))
		CU_FAIL("Frame data mismatch.");
}
Example #4
static void
test_packet_output_gre_no_nexthop(void)
{
	odp_packet_t pkt = ODP_PACKET_INVALID;
	odp_event_t ev;
	int res;

	if (create_odp_packet_ip4(&pkt, test_frame, sizeof(test_frame),
				  tun_p2p + 1)) {
		CU_FAIL("Fail to create packet");
		return;
	}

	/*
	 * Packet's destination is GRE tunnel's p2p address, no next hop
	 * is found for tunnel destination address, packet is dropped.
	 */
	res = ofp_ip_output(pkt, NULL);
	CU_ASSERT_EQUAL(res, OFP_PKT_DROP);

	res = ofp_send_pending_pkt();
	CU_ASSERT_EQUAL(res, OFP_PKT_PROCESSED);

	ev = odp_queue_deq(dev->outq_def);
	CU_ASSERT_EQUAL_FATAL(ev, ODP_EVENT_INVALID);
}
Example #5
static int destroy_inq(odp_pktio_t pktio)
{
	odp_queue_t inq;
	odp_event_t ev;
	odp_queue_type_t q_type;

	inq = odp_pktio_inq_getdef(pktio);

	if (inq == ODP_QUEUE_INVALID) {
		CU_FAIL("attempting to destroy invalid inq");
		return -1;
	}

	CU_ASSERT(odp_pktio_inq_remdef(pktio) == 0);

	q_type = odp_queue_type(inq);

	/* flush any pending events */
	while (1) {
		if (q_type == ODP_QUEUE_TYPE_POLL)
			ev = odp_queue_deq(inq);
		else
			ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);

		if (ev != ODP_EVENT_INVALID)
			odp_event_free(ev);
		else
			break;
	}

	return odp_queue_destroy(inq);
}
Example #6
static int receive_packets(odp_queue_t pollq,
			   odp_event_t *event_tbl, unsigned num_pkts)
{
	int n_ev = 0;

	if (num_pkts == 0)
		return 0;

	if (pollq != ODP_QUEUE_INVALID) {
		if (num_pkts == 1) {
			event_tbl[0] = odp_queue_deq(pollq);
			n_ev = event_tbl[0] != ODP_EVENT_INVALID;
		} else {
			n_ev = odp_queue_deq_multi(pollq, event_tbl, num_pkts);
		}
	} else {
		if (num_pkts == 1) {
			event_tbl[0] = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
			n_ev = event_tbl[0] != ODP_EVENT_INVALID;
		} else {
			n_ev = odp_schedule_multi(NULL, ODP_SCHED_NO_WAIT,
						  event_tbl, num_pkts);
		}
	}
	return n_ev;
}
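A minimal usage sketch for a helper like receive_packets() above; the queue handle, the burst size of 32, and the freeing policy are illustrative assumptions rather than part of the original test:

/* Hypothetical call site: drain up to 32 events via receive_packets()
 * (from a plain queue, or from the scheduler when pollq is invalid)
 * and free whatever was returned. */
static void drain_events_example(odp_queue_t pollq)
{
	odp_event_t ev_tbl[32];
	int i, n;

	n = receive_packets(pollq, ev_tbl, 32);
	for (i = 0; i < n; i++)
		odp_event_free(ev_tbl[i]);
}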
Example #7
static int empty_inq(odp_pktio_t pktio)
{
	odp_queue_t queue;
	odp_event_t ev;
	odp_queue_type_t q_type;

	if (odp_pktin_event_queue(pktio, &queue, 1) != 1)
		return -1;

	q_type = odp_queue_type(queue);

	/* flush any pending events */
	while (1) {
		if (q_type == ODP_QUEUE_TYPE_PLAIN)
			ev = odp_queue_deq(queue);
		else
			ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);

		if (ev != ODP_EVENT_INVALID)
			odp_event_free(ev);
		else
			break;
	}

	return 0;
}
Example #8
static int destroy_inq(odp_pktio_t pktio)
{
	odp_queue_t inq;
	odp_event_t ev;
	odp_queue_type_t q_type;

	inq = odp_pktio_inq_getdef(pktio);

	if (inq == ODP_QUEUE_INVALID)
		return -1;

	odp_pktio_inq_remdef(pktio);

	q_type = odp_queue_type(inq);

	/* flush any pending events */
	while (1) {
		if (q_type == ODP_QUEUE_TYPE_POLL)
			ev = odp_queue_deq(inq);
		else
			ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);

		if (ev != ODP_EVENT_INVALID)
			odp_buffer_free(odp_buffer_from_event(ev));
		else
			break;
	}

	return odp_queue_destroy(inq);
}
Example #9
int odp_schedule_term_global(void)
{
	int ret = 0;
	int rc = 0;
	int i, j;

	for (i = 0; i < ODP_CONFIG_SCHED_PRIOS; i++) {
		for (j = 0; j < QUEUES_PER_PRIO; j++) {
			odp_queue_t  pri_q;
			odp_event_t  ev;

			pri_q = sched->pri_queue[i][j];

			while ((ev = odp_queue_deq(pri_q)) !=
			      ODP_EVENT_INVALID) {
				odp_buffer_t buf;
				sched_cmd_t *sched_cmd;

				buf = odp_buffer_from_event(ev);
				sched_cmd = odp_buffer_addr(buf);

				if (sched_cmd->cmd == SCHED_CMD_DEQUEUE) {
					queue_entry_t *qe;
					odp_buffer_hdr_t *buf_hdr[1];
					int num;

					qe  = sched_cmd->qe;
					num = queue_deq_multi(qe, buf_hdr, 1);

					if (num < 0)
						queue_destroy_finalize(qe);

					if (num > 0)
						ODP_ERR("Queue not empty\n");
				} else
					odp_buffer_free(buf);
			}

			if (odp_queue_destroy(pri_q)) {
				ODP_ERR("Pri queue destroy fail.\n");
				rc = -1;
			}
		}
	}

	if (odp_pool_destroy(sched->pool) != 0) {
		ODP_ERR("Pool destroy fail.\n");
		rc = -1;
	}

	ret = odp_shm_free(sched->shm);
	if (ret < 0) {
		ODP_ERR("Shm free failed for odp_scheduler");
		rc = -1;
	}

	return rc;
}
Example #10
/*
 * Tests
 */
static void
test_packet_output_gre(void)
{
	odp_packet_t pkt = ODP_PACKET_INVALID;
	odp_event_t ev;
	int res;
	struct ofp_ether_header *eth;
	struct ofp_ip *ip;
	struct ofp_ip *ip_orig;
	struct ofp_greip *greip;

	if (create_odp_packet_ip4(&pkt, test_frame, sizeof(test_frame),
				  tun_p2p)) {
		CU_FAIL("Fail to create packet");
		return;
	}

	/*
	 * Packet's destination is GRE tunnel's p2p address, next hop is GRE
	 * interface. GRE+IP header is prepended. Packet's new destination is
	 * link local. Packet is put into output queue.
	 */
	res = ofp_ip_output(pkt, NULL);
	CU_ASSERT_EQUAL(res, OFP_PKT_PROCESSED);

	res = ofp_send_pending_pkt();
	CU_ASSERT_EQUAL(res, OFP_PKT_PROCESSED);

	ev = odp_queue_deq(dev->outq_def);
	CU_ASSERT_NOT_EQUAL_FATAL(ev, ODP_EVENT_INVALID);

	pkt = odp_packet_from_event(ev);
	CU_ASSERT_EQUAL_FATAL(odp_packet_len(pkt),
			      sizeof(test_frame) + 20 + 4);

	eth = odp_packet_l2_ptr(pkt, NULL);
	if (memcmp(eth->ether_dhost, tun_rem_mac, OFP_ETHER_ADDR_LEN))
		CU_FAIL("Bad destination mac address.");
	if (memcmp(eth->ether_shost, dev->mac, OFP_ETHER_ADDR_LEN))
		CU_FAIL("Bad source mac address.");

	ip = odp_packet_l3_ptr(pkt, NULL);
	CU_ASSERT_EQUAL(ip->ip_src.s_addr, dev_ip);
	CU_ASSERT_EQUAL(ip->ip_dst.s_addr, tun_rem_ip);
	CU_ASSERT_EQUAL(ip->ip_p, OFP_IPPROTO_GRE);

	greip = (struct ofp_greip *)ip;
	CU_ASSERT_EQUAL(greip->gi_g.flags, 0);
	CU_ASSERT_EQUAL(greip->gi_g.ptype,
			odp_cpu_to_be_16(OFP_ETHERTYPE_IP));

	/* inner ip */
	ip = (struct ofp_ip *)(greip + 1);
	ip_orig = (struct ofp_ip *)(&orig_pkt_data[OFP_ETHER_HDR_LEN]);
	if (memcmp(ip, ip_orig, odp_be_to_cpu_16(ip_orig->ip_len)))
		CU_FAIL("Inner IP packet error.");
}
Example #11
static void
test_ofp_packet_input_send_arp(void)
{
    odp_packet_t pkt;
    odp_event_t ev;
    int res;

    /* Call ofp_packet_input using a pkt with destination ip
     * that does NOT match the local ip on ifnet and a route is found.
     * No ARP is found for gateway IP so an ARP req is sent.
     * Function returns OFP_PKT_DROP and packet can be reused.*/
    my_test_val = TEST_FORWARD_HOOK;

    test_ofp_add_route(port, vrf, vlan, dst_ipaddr, 24, 4,
                       dst_ipaddr + 1);

    if (create_odp_packet_ip4(&pkt, test_frame, sizeof(test_frame),
                              dst_ipaddr, 0)) {
        CU_FAIL("Fail to create packet");
        return;
    }

    res = ofp_packet_input(pkt, interface_queue[port],
                           ofp_eth_vlan_processing);
    CU_ASSERT_EQUAL(res, OFP_PKT_PROCESSED);
    odp_packet_free(pkt);

    CU_ASSERT_NOT_EQUAL(ev = odp_queue_deq(ifnet->outq_def),
                        ODP_EVENT_INVALID);
    CU_ASSERT_EQUAL(odp_queue_deq(ifnet->outq_def), ODP_EVENT_INVALID);
#ifdef SP
    CU_ASSERT_EQUAL(odp_queue_deq(ifnet->spq_def), ODP_EVENT_INVALID);
#endif /* SP */

    pkt = odp_packet_from_event(ev);
    CU_ASSERT_NOT_EQUAL(pkt, ODP_PACKET_INVALID);
    CU_ASSERT_EQUAL(odp_packet_has_arp(pkt), 1);
    CU_ASSERT_EQUAL(odp_packet_has_vlan(pkt), 0);
    CU_ASSERT_EQUAL(odp_packet_len(pkt), sizeof(struct ofp_arphdr) +
                    sizeof(struct ofp_ether_header));
    odp_packet_free(odp_packet_from_event(ev));
    ofp_arp_init_tables(); /* to clean saved packet */
    CU_PASS("ofp_packet_input_send_arp");
}
Example #12
static void test_ofp_packet_input_gre_orig_pkt_to_sp(void)
{
    odp_packet_t pkt;
    int res;
#ifdef SP
    odp_event_t ev;
#endif

    my_test_val = TEST_LOCAL_HOOK_GRE_APP;
    /* Call ofp_packet_input using a GRE pkt with destination ip
     * that matches the local ip on ifnet, tunnel not found,
     * packet offered to GRE hook, returns continue.
     * Full packet sent to slowpath */

    ifnet->ip_addr = local_ip;
    if (create_odp_packet_ip4(&pkt, gre_frame, sizeof(gre_frame),
                              local_ip, tun_rem_ip + 1)) {
        CU_FAIL("Fail to create packet");
        return;
    }

    res = ofp_packet_input(pkt, interface_queue[port],
                           ofp_eth_vlan_processing);

#ifdef SP
    CU_ASSERT_EQUAL(res, OFP_PKT_PROCESSED);

    CU_ASSERT_NOT_EQUAL_FATAL(ev = odp_queue_deq(ifnet->spq_def),
                              ODP_EVENT_INVALID);
    CU_ASSERT_EQUAL(odp_queue_deq(ifnet->spq_def), ODP_EVENT_INVALID);
    CU_ASSERT_EQUAL(odp_queue_deq(ifnet->outq_def), ODP_EVENT_INVALID);

    if (memcmp(odp_packet_data(odp_packet_from_event(ev)),
               in_pkt_data, sizeof(gre_frame)))
        CU_FAIL("corrupt data sent to slow path");

    odp_packet_free(odp_packet_from_event(ev));
    ifnet->ip_addr = 0;
    CU_PASS("ofp_packet_input_gre_orig_pkt_to_sp");
#else
    CU_ASSERT_EQUAL(res, OFP_PKT_DROP);
    CU_ASSERT_EQUAL(odp_queue_deq(ifnet->outq_def), ODP_EVENT_INVALID);
#endif
}
Example #13
static void cleanup_pkt_queue(odp_queue_t pkt_queue)
{
	odp_event_t evt;

	while (1) {
		evt = odp_queue_deq(pkt_queue);
		if (evt == ODP_EVENT_INVALID)
			break;
		odp_packet_free(odp_packet_from_event(evt));
	}
}
Example #14
odp_bool_t verify_stream_db_outputs(void)
{
	odp_bool_t done = TRUE;
	stream_db_entry_t *stream = NULL;
	const char *env;

	env = getenv("ODP_IPSEC_STREAM_VERIFY_MDEQ");
	/* For each stream look for output packets */
	for (stream = stream_db->list; NULL != stream; stream = stream->next) {
		int idx;
		int count;
		odp_queue_t queue;
		odp_event_t ev_tbl[LOOP_DEQ_COUNT];

		queue = query_loopback_db_outq(stream->output.loop);

		if (ODP_QUEUE_INVALID == queue)
			continue;

		for (;;) {
			if (env) {
				count = odp_queue_deq_multi(queue,
							    ev_tbl,
							    LOOP_DEQ_COUNT);
			} else {
				ev_tbl[0] = odp_queue_deq(queue);
				count = (ev_tbl[0] != ODP_EVENT_INVALID) ?
					1 : 0;
			}
			if (!count)
				break;
			for (idx = 0; idx < count; idx++) {
				odp_bool_t good;
				odp_packet_t pkt;

				pkt = odp_packet_from_event(ev_tbl[idx]);

				good = verify_ipv4_packet(stream, pkt);
				if (good)
					stream->verified++;
				odp_packet_free(pkt);
			}
		}

		printf("Stream %d %d\n", stream->created, stream->verified);

		if (stream->created != stream->verified)
			done = FALSE;
	}
	return done;
}
Example #15
static void test_packet_size_is_less_then_mtu(void)
{
    odp_packet_t pkt_orig, pkt_sent;
    odp_event_t ev;
    int res;
    struct ofp_ether_header *eth;

    if (create_odp_packet_ip4(&pkt_orig, pkt1_frag1,
                              sizeof(pkt1_frag1), 0)) {
        CU_FAIL("Fail to create packet");
        return;
    }

    res = ofp_ip_output(pkt_orig, &nexthop);
    CU_ASSERT_EQUAL(res, OFP_PKT_PROCESSED);
    res = ofp_send_pending_pkt();
    CU_ASSERT_EQUAL(res, OFP_PKT_PROCESSED);

    ev = odp_queue_deq(dev->outq_def);
    CU_ASSERT_NOT_EQUAL_FATAL(ev, ODP_EVENT_INVALID);
    CU_ASSERT_EQUAL_FATAL(odp_queue_deq(dev->outq_def), ODP_EVENT_INVALID);

    pkt_sent = odp_packet_from_event(ev);
    CU_ASSERT_EQUAL_FATAL(odp_packet_len(pkt_sent), sizeof(pkt1_frag1));

    eth = odp_packet_l2_ptr(pkt_sent, NULL);
    if (memcmp(eth->ether_dhost, dst_mac, OFP_ETHER_ADDR_LEN))
        CU_FAIL("Bad destination mac address.");
    if (memcmp(eth->ether_shost, dev->mac, OFP_ETHER_ADDR_LEN))
        CU_FAIL("Bad source mac address.");
    if (memcmp(odp_packet_l3_ptr(pkt_sent, NULL),
               &orig_pkt_data[OFP_ETHER_HDR_LEN],
               sizeof(pkt1_frag1) - OFP_ETHER_HDR_LEN))
        CU_FAIL("corrupt l3 + data forwarded");
    CU_PASS("Correct packet");

    odp_packet_free(pkt_sent);
}
Example #16
int odp_pktio_recv(odp_pktio_t id, odp_packet_t pkt_table[], unsigned len)
{
	pktio_entry_t *pktio_entry = get_entry(id);
	unsigned pkts = 0;
	odp_buffer_t buf;

	if (pktio_entry == NULL)
		return -1;

	lock_entry(pktio_entry);

	if (pktio_entry->s.inq_default == ODP_QUEUE_INVALID) {
		char name[ODP_QUEUE_NAME_LEN];
		odp_queue_param_t qparam;
		odp_queue_t inq_def;
		/*
		 * Create a default input queue.
		 * FIXME: This is a kind of workaround for the current ODP API usage.
		 * It should be revised.
		 */
		ODP_DBG("Creating default input queue\n");
		qparam.sched.prio  = ODP_SCHED_PRIO_DEFAULT;
		qparam.sched.sync  = ODP_SCHED_SYNC_NONE;
		qparam.sched.group = ODP_SCHED_GROUP_DEFAULT;
		snprintf(name, sizeof(name), "%i-pktio_inq_default", (int)id);
		name[ODP_QUEUE_NAME_LEN-1] = '\0';
		inq_def = odp_queue_create(name, ODP_QUEUE_TYPE_PKTIN, &qparam);
		if (inq_def == ODP_QUEUE_INVALID) {
			ODP_ERR("pktio queue creation failed\n");
			goto unlock;
		}

		if (odp_pktio_inq_setdef(id, inq_def)) {
			ODP_ERR("default input-Q setup\n");
			goto unlock;
		}
	}

	for (pkts = 0; pkts < len; pkts++) {
		buf = odp_queue_deq(pktio_entry->s.inq_default);
		if (!odp_buffer_is_valid(buf))
			break;

		pkt_table[pkts] = odp_packet_from_buffer(buf);
	}
unlock:
	unlock_entry(pktio_entry);
	return pkts;
}
Example #17
static odp_event_t queue_deq_wait_time(odp_queue_t queue, uint64_t ns)
{
	odp_time_t wait, end;
	odp_event_t ev;

	wait = odp_time_local_from_ns(ns);
	end = odp_time_sum(odp_time_local(), wait);
	do {
		ev = odp_queue_deq(queue);
		if (ev != ODP_EVENT_INVALID)
			return ev;
	} while (odp_time_cmp(end, odp_time_local()) > 0);

	return ODP_EVENT_INVALID;
}
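A brief usage sketch for the bounded-wait helper above; the queue handle and the 10 ms bound are assumptions for illustration only:

/* Hypothetical caller: wait up to 10 ms for an event on tmo_queue,
 * then release it. ODP_TIME_MSEC_IN_NS is the standard ODP constant. */
static void bounded_deq_example(odp_queue_t tmo_queue)
{
	odp_event_t ev;

	ev = queue_deq_wait_time(tmo_queue, 10 * ODP_TIME_MSEC_IN_NS);
	if (ev != ODP_EVENT_INVALID)
		odp_event_free(ev);
}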
Example #18
/**
 * Sleep for the specified number of milliseconds.
 * Uses an ODP timer and busy-waits until the timer expires and the
 * timeout event is received.
 */
static void millisleep(uint32_t ms,
		       odp_timer_pool_t tp,
		       odp_timer_t tim,
		       odp_queue_t q,
		       odp_timeout_t tmo)
{
	uint64_t ticks = odp_timer_ns_to_tick(tp, 1000000ULL * ms);
	odp_event_t ev = odp_timeout_to_event(tmo);
	int rc = odp_timer_set_rel(tim, ticks, &ev);
	if (rc != ODP_TIMER_SUCCESS)
		EXAMPLE_ABORT("odp_timer_set_rel() failed\n");
	/* Spin waiting for timeout event */
	while ((ev = odp_queue_deq(q)) == ODP_EVENT_INVALID)
		(void)0;
}
Example #19
/**
 * odp_schedule() replacement that polls queues instead of using the ODP scheduler
 */
static
odp_event_t polled_odp_schedule_cb(odp_queue_t *from)
{
	int idx = 0;

	while (1) {
		if (idx >= num_polled_queues)
			idx = 0;

		odp_queue_t queue = poll_queues[idx++];
		odp_event_t buf;

		buf = odp_queue_deq(queue);

		if (ODP_EVENT_INVALID != buf) {
			*from = queue;
			return buf;
		}
	}

	*from = ODP_QUEUE_INVALID;
	return ODP_EVENT_INVALID;
}
Example #20
static void
test_send_frame_novlan_to_vlan(void)
{
	odp_packet_t pkt = ODP_PACKET_INVALID;
	odp_event_t ev;
	struct ofp_ether_vlan_header *eth_vlan;
	uint8_t *buf;
	int res;

	if (create_odp_packet_ip4(&pkt, test_frame, sizeof(test_frame), 0)) {
		CU_FAIL("Fail to create packet");
		return;
	}

	res = ofp_send_frame(dev_vlan, pkt);
	CU_ASSERT_EQUAL(res, OFP_PKT_PROCESSED);

	ev = odp_queue_deq(dev->outq_def);
	CU_ASSERT_NOT_EQUAL_FATAL(ev, ODP_EVENT_INVALID);

	pkt = odp_packet_from_event(ev);
	CU_ASSERT_EQUAL_FATAL(odp_packet_len(pkt), sizeof(test_frame) + 4);

	eth_vlan = odp_packet_l2_ptr(pkt, NULL);
	if (memcmp(eth_vlan, test_frame, 2 * OFP_ETHER_ADDR_LEN))
		CU_FAIL("Frame data mismatch.");

	CU_ASSERT_EQUAL(eth_vlan->evl_encap_proto,
			odp_cpu_to_be_16(OFP_ETHERTYPE_VLAN));
	CU_ASSERT_EQUAL(eth_vlan->evl_tag,
			odp_cpu_to_be_16(dev_vlan->vlan));

	buf = (uint8_t *)eth_vlan;
	if (memcmp(&buf[16], &test_frame[12],
		   sizeof(test_frame) - 2 * OFP_ETHER_ADDR_LEN))
		CU_FAIL("Frame data mismatch.");
}
Example #21
static void
test_send_frame_packet_len_bigger_than_mtu(void)
{
	odp_packet_t pkt = ODP_PACKET_INVALID;
	odp_event_t ev;
	uint32_t old_mtu;
	int res;

	if (create_odp_packet_ip4(&pkt, test_frame, sizeof(test_frame), 0)) {
		CU_FAIL("Fail to create packet");
		return;
	}

	old_mtu = dev->if_mtu;
	dev->if_mtu = 120;

	res = ofp_send_frame(dev, pkt);
	CU_ASSERT_EQUAL(res, OFP_PKT_DROP);

	dev->if_mtu = old_mtu;

	ev = odp_queue_deq(dev->outq_def);
	CU_ASSERT_EQUAL_FATAL(ev, ODP_EVENT_INVALID);
}
Example #22
static void test_dont_fragment_set_pkt_dropped(void)
{
    odp_packet_t pkt;
    odp_event_t ev;
    int res;
    struct ofp_ip *ip;

    if (create_odp_packet_ip4(&pkt, pkt1_full,
                              sizeof(pkt1_full), 0)) {
        CU_FAIL("Fail to create packet");
        return;
    }

    ip = odp_packet_l3_ptr(pkt, NULL);
    ip->ip_off |= odp_cpu_to_be_16(OFP_IP_DF);

    res = ofp_ip_output(pkt, &nexthop);
    CU_ASSERT_EQUAL(res, OFP_PKT_DROP);

    ev = odp_queue_deq(dev->outq_def);
    CU_ASSERT_EQUAL(ev, ODP_EVENT_INVALID);

    odp_packet_free(pkt);
}
Example #23
static void test_fragment_fragmented_to_two(void)
{
    odp_packet_t pkt_orig, pkt_sent;
    odp_event_t ev;
    int res;
    struct ofp_ether_header *eth;
    struct ofp_ip *ip;
    struct ofp_ip *ip_orig;
    uint16_t pl_pos, pl_len, orig_pl_len, pktlen, start_offset;

    dev->if_mtu = 620;

    if (create_odp_packet_ip4(&pkt_orig, pkt1_frag2,
                              sizeof(pkt1_frag2), 0)) {
        CU_FAIL("Fail to create packet");
        return;
    }

    res = ofp_ip_output(pkt_orig, &nexthop);
    CU_ASSERT_EQUAL(res, OFP_PKT_PROCESSED);

    res = ofp_send_pending_pkt();
    CU_ASSERT_EQUAL(res, OFP_PKT_PROCESSED);

    /* ASSERT 1st fragment */
    ev = odp_queue_deq(dev->outq_def);
    CU_ASSERT_NOT_EQUAL_FATAL(ev, ODP_EVENT_INVALID);

    pkt_sent = odp_packet_from_event(ev);
    CU_ASSERT_EQUAL_FATAL(odp_packet_len(pkt_sent),
                          dev->if_mtu + OFP_ETHER_HDR_LEN);

    eth = odp_packet_l2_ptr(pkt_sent, NULL);
    if (memcmp(eth->ether_dhost, dst_mac, OFP_ETHER_ADDR_LEN))
        CU_FAIL("Bad destination mac address.");
    if (memcmp(eth->ether_shost, dev->mac, OFP_ETHER_ADDR_LEN))
        CU_FAIL("Bad source mac address.");

    ip = odp_packet_l3_ptr(pkt_sent, NULL);
    ip_orig = (struct ofp_ip *)(&orig_pkt_data[OFP_ETHER_HDR_LEN]);
    orig_pl_len = odp_be_to_cpu_16(ip_orig->ip_len) - (ip_orig->ip_hl<<2);
    start_offset = odp_be_to_cpu_16(ip_orig->ip_off) & OFP_IP_OFFMASK;

    assert_ip_header(ip, ip_orig, dev->if_mtu, 1, start_offset);

    pl_len = dev->if_mtu - (ip->ip_hl<<2);
    if (memcmp((uint8_t *)ip + (ip->ip_hl<<2),
               (uint8_t *)ip_orig + (ip_orig->ip_hl<<2),
               pl_len))
        CU_FAIL("corrupt l3 + data forwarded");
    pl_pos = pl_len;
    CU_PASS("Correct packet");

    odp_packet_free(pkt_sent);

    /* ASSERT 2nd fragment */
    ev = odp_queue_deq(dev->outq_def);
    CU_ASSERT_NOT_EQUAL_FATAL(ev, ODP_EVENT_INVALID);

    pkt_sent = odp_packet_from_event(ev);
    pl_len = orig_pl_len - pl_pos;
    pktlen = pl_len + OFP_ETHER_HDR_LEN + sizeof(struct ofp_ip);
    CU_ASSERT_EQUAL_FATAL(odp_packet_len(pkt_sent), pktlen);

    eth = odp_packet_l2_ptr(pkt_sent, NULL);
    if (memcmp(eth->ether_dhost, dst_mac, OFP_ETHER_ADDR_LEN))
        CU_FAIL("Bad destination mac address.");
    if (memcmp(eth->ether_shost, dev->mac, OFP_ETHER_ADDR_LEN))
        CU_FAIL("Bad source mac address.");

    ip = odp_packet_l3_ptr(pkt_sent, NULL);

    assert_ip_header(ip, ip_orig, pl_len + sizeof(struct ofp_ip),
                     0, start_offset + pl_pos/8);

    if (memcmp((uint8_t *)ip + (ip->ip_hl<<2),
               (uint8_t *)ip_orig + (ip_orig->ip_hl<<2) + pl_pos,
               pl_len))
        CU_FAIL("corrupt l3 + data forwarded");
    CU_PASS("Correct packet");

    odp_packet_free(pkt_sent);

    /* no more fragments */
    ev = odp_queue_deq(dev->outq_def);
    CU_ASSERT_EQUAL(ev, ODP_EVENT_INVALID);

    dev->if_mtu = def_mtu;
}
Example #24
/**
 * Packet IO loopback worker thread using ODP queues
 *
 * @param arg  thread arguments of type 'thread_args_t *'
 */
static void *pktio_queue_thread(void *arg)
{
	int thr;
	odp_buffer_pool_t pkt_pool;
	odp_pktio_t pktio;
	thread_args_t *thr_args;
	odp_queue_t outq_def;
	odp_queue_t inq_def;
	char inq_name[ODP_QUEUE_NAME_LEN];
	odp_queue_param_t qparam;
	odp_packet_t pkt;
	odp_buffer_t buf;
	int ret;
	unsigned long pkt_cnt = 0;
	unsigned long err_cnt = 0;
	odp_pktio_params_t params;
	socket_params_t *sock_params = &params.sock_params;

	thr = odp_thread_id();
	thr_args = arg;

	printf("Pktio thread [%02i] starts, pktio_dev:%s\n", thr,
	       thr_args->pktio_dev);

	/* Lookup the packet pool */
	pkt_pool = odp_buffer_pool_lookup("packet_pool");
	if (pkt_pool == ODP_BUFFER_POOL_INVALID || pkt_pool != thr_args->pool) {
		ODP_ERR("  [%02i] Error: pkt_pool not found\n", thr);
		return NULL;
	}

	/* Open a packet IO instance for this thread */
	sock_params->type = thr_args->type;
	sock_params->fanout = thr_args->fanout;
	pktio = odp_pktio_open(thr_args->pktio_dev, pkt_pool, &params);
	if (pktio == ODP_PKTIO_INVALID) {
		ODP_ERR("  [%02i] Error: pktio create failed\n", thr);
		return NULL;
	}

	/*
	 * Create and set the default INPUT queue associated with the 'pktio'
	 * resource
	 */
	qparam.sched.prio  = ODP_SCHED_PRIO_DEFAULT;
	qparam.sched.sync  = ODP_SCHED_SYNC_ATOMIC;
	qparam.sched.group = ODP_SCHED_GROUP_DEFAULT;
	snprintf(inq_name, sizeof(inq_name), "%i-pktio_inq_def", (int)pktio);
	inq_name[ODP_QUEUE_NAME_LEN - 1] = '\0';

	inq_def = odp_queue_create(inq_name, ODP_QUEUE_TYPE_PKTIN, &qparam);
	if (inq_def == ODP_QUEUE_INVALID) {
		ODP_ERR("  [%02i] Error: pktio queue creation failed\n", thr);
		return NULL;
	}

	ret = odp_pktio_inq_setdef(pktio, inq_def);
	if (ret != 0) {
		ODP_ERR("  [%02i] Error: default input-Q setup\n", thr);
		return NULL;
	}

	printf("  [%02i] created pktio:%02i, queue mode (ATOMIC queues)\n"
	       "          default pktio%02i-INPUT queue:%u\n",
		thr, pktio, pktio, inq_def);

	/* Loop packets */
	for (;;) {
		odp_pktio_t pktio_tmp;

#if 1
		/* Use schedule to get buf from any input queue */
		buf = odp_schedule(NULL, ODP_SCHED_WAIT);
#else
		/* Always dequeue from the same input queue */
		buf = odp_queue_deq(inq_def);
		if (!odp_buffer_is_valid(buf))
			continue;
#endif

		pkt = odp_packet_from_buffer(buf);

		/* Drop packets with errors */
		if (odp_unlikely(drop_err_pkts(&pkt, 1) == 0)) {
			ODP_ERR("Drop frame - err_cnt:%lu\n", ++err_cnt);
			continue;
		}

		pktio_tmp = odp_pktio_get_input(pkt);
		outq_def = odp_pktio_outq_getdef(pktio_tmp);

		if (outq_def == ODP_QUEUE_INVALID) {
			ODP_ERR("  [%02i] Error: def output-Q query\n", thr);
			return NULL;
		}

		/* Swap Eth MACs and possibly IP-addrs before sending back */
		swap_pkt_addrs(&pkt, 1);

		/* Enqueue the packet for output */
		odp_queue_enq(outq_def, buf);

		/* Print packet counts every once in a while */
		if (odp_unlikely(pkt_cnt++ % 100000 == 0)) {
			printf("  [%02i] pkt_cnt:%lu\n", thr, pkt_cnt);
			fflush(NULL);
		}
	}

/* unreachable */
}
Example #25
static ngx_int_t
ngx_select_process_events(ngx_cycle_t *cycle, ngx_msec_t timer,
    ngx_uint_t flags)
{
	odp_packet_t pkts[OFP_PKT_RX_BURST_SIZE];
	odp_packet_t odp_pkt;
	int pkt_cnt = 0;
	int pkt_idx = 0;

	pkt_cnt = odp_pktin_recv(in_queue, pkts, OFP_PKT_RX_BURST_SIZE);

	for (pkt_idx = 0; pkt_idx < pkt_cnt; pkt_idx++) {
		odp_pkt = pkts[pkt_idx];
		ofp_packet_input(odp_pkt, ODP_QUEUE_INVALID,
				 ofp_eth_vlan_processing);
	}

	if (pkt_cnt < 1)
		ofp_send_pending_pkt();

	static uint32_t count = 0;
	/*odp_queue_deq has a lock that impacts performance*/
	if (count ++ > 50) {
		count = 0;

		odp_queue_t timer_queue = ofp_timer_queue_cpu(-1);
		odp_event_t event = odp_queue_deq(timer_queue);

		if (event != ODP_EVENT_INVALID) {
			if (odp_event_type(event) == ODP_EVENT_TIMEOUT) {
				ofp_timer_handle(event);
			} else {
				odp_buffer_free(odp_buffer_from_event(event));
			}
		}
	}

#if OFP_NOTIFY
    return NGX_OK;
#endif
    int                ready, nready;
    ngx_err_t          err;
    ngx_uint_t         i, found;
    ngx_event_t       *ev;
    ngx_queue_t       *queue;
    struct timeval     tv, *tp;
    ngx_connection_t  *c;

    if (max_fd == -1) {
        for (i = 0; i < nevents; i++) {
            c = event_index[i]->data;
            ngx_log_debug2(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                           "change max_fd: %i, c->fd : %d", max_fd, c->fd);
            if (max_fd < c->fd) {
                max_fd = c->fd;
            }
        }

        ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                       "change max_fd: %i", max_fd);
    }

#if (NGX_DEBUG)
    if (cycle->log->log_level & NGX_LOG_DEBUG_ALL) {
        for (i = 0; i < nevents; i++) {
            ev = event_index[i];
            c = ev->data;
            ngx_log_debug2(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                           "select event: fd:%d wr:%d", c->fd, ev->write);
        }

        ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                       "max_fd: %i", max_fd);
    }
#endif

    if (timer == NGX_TIMER_INFINITE) {
        tp = NULL;

    } else {
        tv.tv_sec = (long) (timer / 1000);
        tv.tv_usec = (long) ((timer % 1000) * 1000);
        tp = &tv;
    }

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "select timer: %M", timer);

    ngx_log_debug2(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "select max_fd %d, tp %p ", max_fd, tp);
    ngx_log_debug2(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   " master %p : work %p", (ofp_fd_set *)&master_read_fd_set,
		   (ofp_fd_set *)&work_read_fd_set);
    ready = select(max_fd + 1, &master_read_fd_set, &master_write_fd_set, NULL, tp);

    err = (ready == -1) ? ngx_errno : 0;

    if (flags & NGX_UPDATE_TIME || ngx_event_timer_alarm) {
        ngx_time_update();
    }

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "select ready %d", ready);

    if (err) {
        ngx_uint_t  level;

        if (err == NGX_EINTR) {

            if (ngx_event_timer_alarm) {
                ngx_event_timer_alarm = 0;
                return NGX_OK;
            }

            level = NGX_LOG_INFO;

        } else {
            level = NGX_LOG_ALERT;
        }

        ngx_log_error(level, cycle->log, err, "select() failed");

        if (err == NGX_EBADF) {
            /*ngx_select_repair_fd_sets(cycle);*/
        }

        return NGX_ERROR;
    }

    if (ready == 0) {
        return NGX_OK;
        if (timer != NGX_TIMER_INFINITE) {
            return NGX_OK;
        }

        ngx_log_error(NGX_LOG_ALERT, cycle->log, 0,
                      "select() returned no events without timeout");
        return NGX_ERROR;
    }

    nready = 0;

    for (i = 0; i < nevents; i++) {
        ev = event_index[i];
        c = ev->data;
        found = 0;

        if (ev->write) {
            if (FD_ISSET(c->fd, &master_write_fd_set)) {
                found = 1;
                ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                               "select write %d", c->fd);
            }

        } else {
            if (FD_ISSET(c->fd, &master_read_fd_set)) {
                found = 1;
                ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                               "select read %d", c->fd);
            }
        }

        if (found) {
            ev->ready = 1;

            queue = ev->accept ? &ngx_posted_accept_events
                               : &ngx_posted_events;

            ngx_post_event(ev, queue);

            nready++;
        }
    }

    if (ready != nready) {
        /*ngx_log_error(NGX_LOG_ALERT, cycle->log, 0,
                      "select ready != events: %d:%d", ready, nready);

        ngx_select_repair_fd_sets(cycle);*/
    }

    return NGX_OK;
}
Example #26
static int schedule_common_(void *arg)
{
	thread_args_t *args = (thread_args_t *)arg;
	odp_schedule_sync_t sync;
	test_globals_t *globals;
	queue_context *qctx;
	buf_contents *bctx, *bctx_cpy;
	odp_pool_t pool;
	int locked;
	int num;
	odp_event_t ev;
	odp_buffer_t buf, buf_cpy;
	odp_queue_t from;

	globals = args->globals;
	sync = args->sync;

	pool = odp_pool_lookup(MSG_POOL_NAME);
	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);

	if (args->num_workers > 1)
		odp_barrier_wait(&globals->barrier);

	while (1) {
		from = ODP_QUEUE_INVALID;
		num = 0;

		odp_ticketlock_lock(&globals->lock);
		if (globals->buf_count == 0) {
			odp_ticketlock_unlock(&globals->lock);
			break;
		}
		odp_ticketlock_unlock(&globals->lock);

		if (args->enable_schd_multi) {
			odp_event_t events[BURST_BUF_SIZE],
				ev_cpy[BURST_BUF_SIZE];
			odp_buffer_t buf_cpy[BURST_BUF_SIZE];
			int j;

			num = odp_schedule_multi(&from, ODP_SCHED_NO_WAIT,
						 events, BURST_BUF_SIZE);
			CU_ASSERT(num >= 0);
			CU_ASSERT(num <= BURST_BUF_SIZE);
			if (num == 0)
				continue;

			if (sync == ODP_SCHED_SYNC_ORDERED) {
				int ndx;
				int ndx_max;
				int rc;

				ndx_max = odp_queue_lock_count(from);
				CU_ASSERT_FATAL(ndx_max >= 0);

				qctx = odp_queue_context(from);

				for (j = 0; j < num; j++) {
					bctx = odp_buffer_addr(
						odp_buffer_from_event
						(events[j]));

					buf_cpy[j] = odp_buffer_alloc(pool);
					CU_ASSERT_FATAL(buf_cpy[j] !=
							ODP_BUFFER_INVALID);
					bctx_cpy = odp_buffer_addr(buf_cpy[j]);
					memcpy(bctx_cpy, bctx,
					       sizeof(buf_contents));
					bctx_cpy->output_sequence =
						bctx_cpy->sequence;
					ev_cpy[j] =
						odp_buffer_to_event(buf_cpy[j]);
				}

				rc = odp_queue_enq_multi(qctx->pq_handle,
							 ev_cpy, num);
				CU_ASSERT(rc == num);

				bctx = odp_buffer_addr(
					odp_buffer_from_event(events[0]));
				for (ndx = 0; ndx < ndx_max; ndx++) {
					odp_schedule_order_lock(ndx);
					CU_ASSERT(bctx->sequence ==
						  qctx->lock_sequence[ndx]);
					qctx->lock_sequence[ndx] += num;
					odp_schedule_order_unlock(ndx);
				}
			}

			for (j = 0; j < num; j++)
				odp_event_free(events[j]);
		} else {
			ev  = odp_schedule(&from, ODP_SCHED_NO_WAIT);
			if (ev == ODP_EVENT_INVALID)
				continue;

			buf = odp_buffer_from_event(ev);
			num = 1;
			if (sync == ODP_SCHED_SYNC_ORDERED) {
				int ndx;
				int ndx_max;
				int rc;

				ndx_max = odp_queue_lock_count(from);
				CU_ASSERT_FATAL(ndx_max >= 0);

				qctx = odp_queue_context(from);
				bctx = odp_buffer_addr(buf);
				buf_cpy = odp_buffer_alloc(pool);
				CU_ASSERT_FATAL(buf_cpy != ODP_BUFFER_INVALID);
				bctx_cpy = odp_buffer_addr(buf_cpy);
				memcpy(bctx_cpy, bctx, sizeof(buf_contents));
				bctx_cpy->output_sequence = bctx_cpy->sequence;

				rc = odp_queue_enq(qctx->pq_handle,
						   odp_buffer_to_event
						   (buf_cpy));
				CU_ASSERT(rc == 0);

				for (ndx = 0; ndx < ndx_max; ndx++) {
					odp_schedule_order_lock(ndx);
					CU_ASSERT(bctx->sequence ==
						  qctx->lock_sequence[ndx]);
					qctx->lock_sequence[ndx] += num;
					odp_schedule_order_unlock(ndx);
				}
			}

			odp_buffer_free(buf);
		}

		if (args->enable_excl_atomic) {
			locked = odp_spinlock_trylock(&globals->atomic_lock);
			CU_ASSERT(locked != 0);
			CU_ASSERT(from != ODP_QUEUE_INVALID);
			if (locked) {
				int cnt;
				odp_time_t time = ODP_TIME_NULL;
				/* Do some work here to keep the thread busy */
				for (cnt = 0; cnt < 1000; cnt++)
					time = odp_time_sum(time,
							    odp_time_local());

				odp_spinlock_unlock(&globals->atomic_lock);
			}
		}

		if (sync == ODP_SCHED_SYNC_ATOMIC)
			odp_schedule_release_atomic();

		if (sync == ODP_SCHED_SYNC_ORDERED)
			odp_schedule_release_ordered();

		odp_ticketlock_lock(&globals->lock);

		globals->buf_count -= num;

		if (globals->buf_count < 0) {
			odp_ticketlock_unlock(&globals->lock);
			CU_FAIL_FATAL("Buffer counting failed");
		}

		odp_ticketlock_unlock(&globals->lock);
	}

	if (args->num_workers > 1)
		odp_barrier_wait(&globals->barrier);

	if (sync == ODP_SCHED_SYNC_ORDERED)
		locked = odp_ticketlock_trylock(&globals->lock);
	else
		locked = 0;

	if (locked && globals->buf_count_cpy > 0) {
		odp_event_t ev;
		odp_queue_t pq;
		uint64_t seq;
		uint64_t bcount = 0;
		int i, j;
		char name[32];
		uint64_t num_bufs = args->num_bufs;
		uint64_t buf_count = globals->buf_count_cpy;

		for (i = 0; i < args->num_prio; i++) {
			for (j = 0; j < args->num_queues; j++) {
				snprintf(name, sizeof(name),
					 "plain_%d_%d_o", i, j);
				pq = odp_queue_lookup(name);
				CU_ASSERT_FATAL(pq != ODP_QUEUE_INVALID);

				seq = 0;
				while (1) {
					ev = odp_queue_deq(pq);

					if (ev == ODP_EVENT_INVALID) {
						CU_ASSERT(seq == num_bufs);
						break;
					}

					bctx = odp_buffer_addr(
						odp_buffer_from_event(ev));

					CU_ASSERT(bctx->sequence == seq);
					seq++;
					bcount++;
					odp_event_free(ev);
				}
			}
		}
		CU_ASSERT(bcount == buf_count);
		globals->buf_count_cpy = 0;
	}

	if (locked)
		odp_ticketlock_unlock(&globals->lock);

	/* Clear scheduler atomic / ordered context between tests */
	num = exit_schedule_loop();

	CU_ASSERT(num == 0);

	if (num)
		printf("\nDROPPED %i events\n\n", num);

	return 0;
}
Example #27
File: queue.c Project: nmorey/odp
void queue_test_param(void)
{
	odp_queue_t queue;
	odp_event_t enev[MAX_BUFFER_QUEUE];
	odp_event_t deev[MAX_BUFFER_QUEUE];
	odp_buffer_t buf;
	odp_event_t ev;
	odp_pool_t msg_pool;
	odp_event_t *pev_tmp;
	int i, deq_ret, ret;
	int nr_deq_entries = 0;
	int max_iteration = CONFIG_MAX_ITERATION;
	odp_queue_param_t qparams;
	odp_buffer_t enbuf;

	/* Schedule type queue */
	odp_queue_param_init(&qparams);
	qparams.type       = ODP_QUEUE_TYPE_SCHED;
	qparams.sched.prio = ODP_SCHED_PRIO_LOWEST;
	qparams.sched.sync = ODP_SCHED_SYNC_PARALLEL;
	qparams.sched.group = ODP_SCHED_GROUP_WORKER;

	queue = odp_queue_create("test_queue", &qparams);
	CU_ASSERT(ODP_QUEUE_INVALID != queue);
	CU_ASSERT(odp_queue_to_u64(queue) !=
		  odp_queue_to_u64(ODP_QUEUE_INVALID));
	CU_ASSERT(queue == odp_queue_lookup("test_queue"));
	CU_ASSERT(ODP_QUEUE_TYPE_SCHED    == odp_queue_type(queue));
	CU_ASSERT(ODP_SCHED_PRIO_LOWEST   == odp_queue_sched_prio(queue));
	CU_ASSERT(ODP_SCHED_SYNC_PARALLEL == odp_queue_sched_type(queue));
	CU_ASSERT(ODP_SCHED_GROUP_WORKER  == odp_queue_sched_group(queue));

	CU_ASSERT(0 == odp_queue_context_set(queue, &queue_context,
					     sizeof(queue_context)));

	CU_ASSERT(&queue_context == odp_queue_context(queue));
	CU_ASSERT(odp_queue_destroy(queue) == 0);

	/* Plain type queue */
	odp_queue_param_init(&qparams);
	qparams.type        = ODP_QUEUE_TYPE_PLAIN;
	qparams.context     = &queue_context;
	qparams.context_len = sizeof(queue_context);

	queue = odp_queue_create("test_queue", &qparams);
	CU_ASSERT(ODP_QUEUE_INVALID != queue);
	CU_ASSERT(queue == odp_queue_lookup("test_queue"));
	CU_ASSERT(ODP_QUEUE_TYPE_PLAIN == odp_queue_type(queue));
	CU_ASSERT(&queue_context == odp_queue_context(queue));

	msg_pool = odp_pool_lookup("msg_pool");
	buf = odp_buffer_alloc(msg_pool);
	CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
	ev  = odp_buffer_to_event(buf);

	if (!(CU_ASSERT(odp_queue_enq(queue, ev) == 0))) {
		odp_buffer_free(buf);
	} else {
		CU_ASSERT(ev == odp_queue_deq(queue));
		odp_buffer_free(buf);
	}

	for (i = 0; i < MAX_BUFFER_QUEUE; i++) {
		buf = odp_buffer_alloc(msg_pool);
		enev[i] = odp_buffer_to_event(buf);
	}

	/*
	 * odp_queue_enq_multi may return 0..n buffers due to the resource
	 * constraints in the implementation at that given point of time.
	 * But here we assume that we succeed in enqueuing all buffers.
	 */
	ret = odp_queue_enq_multi(queue, enev, MAX_BUFFER_QUEUE);
	CU_ASSERT(MAX_BUFFER_QUEUE == ret);
	i = ret < 0 ? 0 : ret;
	for ( ; i < MAX_BUFFER_QUEUE; i++)
		odp_event_free(enev[i]);

	pev_tmp = deev;
	do {
		deq_ret  = odp_queue_deq_multi(queue, pev_tmp,
					       MAX_BUFFER_QUEUE);
		nr_deq_entries += deq_ret;
		max_iteration--;
		pev_tmp += deq_ret;
		CU_ASSERT(max_iteration >= 0);
	} while (nr_deq_entries < MAX_BUFFER_QUEUE);

	for (i = 0; i < MAX_BUFFER_QUEUE; i++) {
		enbuf = odp_buffer_from_event(enev[i]);
		CU_ASSERT(enev[i] == deev[i]);
		odp_buffer_free(enbuf);
	}

	CU_ASSERT(odp_queue_destroy(queue) == 0);
}
Example #28
void queue_test_sunnydays(void)
{
	odp_queue_t queue_creat_id, queue_id;
	odp_event_t enev[MAX_BUFFER_QUEUE];
	odp_event_t deev[MAX_BUFFER_QUEUE];
	odp_buffer_t buf;
	odp_event_t ev;
	odp_pool_t msg_pool;
	odp_event_t *pev_tmp;
	int i, deq_ret, ret;
	int nr_deq_entries = 0;
	int max_iteration = CONFIG_MAX_ITERATION;
	void *prtn = NULL;
	odp_queue_param_t qparams;

	odp_queue_param_init(&qparams);
	qparams.sched.prio = ODP_SCHED_PRIO_LOWEST;
	qparams.sched.sync = ODP_SCHED_SYNC_NONE;
	qparams.sched.group = ODP_SCHED_GROUP_WORKER;

	queue_creat_id = odp_queue_create("test_queue",
					  ODP_QUEUE_TYPE_POLL, &qparams);
	CU_ASSERT(ODP_QUEUE_INVALID != queue_creat_id);

	CU_ASSERT_EQUAL(ODP_QUEUE_TYPE_POLL,
			odp_queue_type(queue_creat_id));

	queue_id = odp_queue_lookup("test_queue");
	CU_ASSERT_EQUAL(queue_creat_id, queue_id);

	CU_ASSERT_EQUAL(ODP_SCHED_GROUP_WORKER,
			odp_queue_sched_group(queue_id));
	CU_ASSERT_EQUAL(ODP_SCHED_PRIO_LOWEST, odp_queue_sched_prio(queue_id));
	CU_ASSERT_EQUAL(ODP_SCHED_SYNC_NONE, odp_queue_sched_type(queue_id));

	CU_ASSERT(0 == odp_queue_context_set(queue_id, &queue_contest));

	prtn = odp_queue_context(queue_id);
	CU_ASSERT(&queue_contest == (int *)prtn);

	msg_pool = odp_pool_lookup("msg_pool");
	buf = odp_buffer_alloc(msg_pool);
	CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
	ev  = odp_buffer_to_event(buf);

	if (!(CU_ASSERT(odp_queue_enq(queue_id, ev) == 0))) {
		odp_buffer_free(buf);
	} else {
		CU_ASSERT_EQUAL(ev, odp_queue_deq(queue_id));
		odp_buffer_free(buf);
	}

	for (i = 0; i < MAX_BUFFER_QUEUE; i++) {
		odp_buffer_t buf = odp_buffer_alloc(msg_pool);
		enev[i] = odp_buffer_to_event(buf);
	}

	/*
	 * odp_queue_enq_multi may return 0..n buffers due to the resource
	 * constraints in the implementation at that given point of time.
	 * But here we assume that we succeed in enqueuing all buffers.
	 */
	ret = odp_queue_enq_multi(queue_id, enev, MAX_BUFFER_QUEUE);
	CU_ASSERT(MAX_BUFFER_QUEUE == ret);
	i = ret < 0 ? 0 : ret;
	for ( ; i < MAX_BUFFER_QUEUE; i++)
		odp_event_free(enev[i]);

	pev_tmp = deev;
	do {
		deq_ret  = odp_queue_deq_multi(queue_id, pev_tmp,
					       MAX_BUFFER_QUEUE);
		nr_deq_entries += deq_ret;
		max_iteration--;
		pev_tmp += deq_ret;
		CU_ASSERT(max_iteration >= 0);
	} while (nr_deq_entries < MAX_BUFFER_QUEUE);

	for (i = 0; i < MAX_BUFFER_QUEUE; i++) {
		odp_buffer_t enbuf = odp_buffer_from_event(enev[i]);
		CU_ASSERT_EQUAL(enev[i], deev[i]);
		odp_buffer_free(enbuf);
	}

	CU_ASSERT(odp_queue_destroy(queue_id) == 0);
}
Example #29
/* Basic algorithm run function for async inplace mode.
 * Creates a session from input parameters and runs one operation
 * on input_vec. Checks the output of the crypto operation against
 * output_vec. Operation completion event is dequeued polling the
 * session output queue. Completion context pointer is retrieved
 * and checked against the one set before the operation.
 * Completion event can be a separate buffer or the input packet
 * buffer can be used.
 * */
static void alg_test(enum odp_crypto_op op,
		     enum odp_cipher_alg cipher_alg,
		     odp_crypto_iv_t ses_iv,
		     uint8_t *op_iv_ptr,
		     odp_crypto_key_t cipher_key,
		     enum odp_auth_alg auth_alg,
		     odp_crypto_key_t auth_key,
		     uint8_t *input_vec,
		     unsigned int input_vec_len,
		     uint8_t *output_vec,
		     unsigned int output_vec_len)
{
	odp_crypto_session_t session;
	int rc;
	enum odp_crypto_ses_create_err status;
	odp_bool_t posted;
	odp_event_t event;
	odp_crypto_compl_t compl_event;
	odp_crypto_op_result_t result;

	/* Create a crypto session */
	odp_crypto_session_params_t ses_params;
	memset(&ses_params, 0, sizeof(ses_params));
	ses_params.op = op;
	ses_params.auth_cipher_text = false;
	ses_params.pref_mode = suite_context.pref_mode;
	ses_params.cipher_alg = cipher_alg;
	ses_params.auth_alg = auth_alg;
	ses_params.compl_queue = suite_context.queue;
	ses_params.output_pool = suite_context.pool;
	ses_params.cipher_key = cipher_key;
	ses_params.iv = ses_iv;
	ses_params.auth_key = auth_key;

	rc = odp_crypto_session_create(&ses_params, &session, &status);
	CU_ASSERT(!rc);
	CU_ASSERT(status == ODP_CRYPTO_SES_CREATE_ERR_NONE);
	CU_ASSERT(odp_crypto_session_to_u64(session) !=
		  odp_crypto_session_to_u64(ODP_CRYPTO_SESSION_INVALID));

	/* Prepare input data */
	odp_packet_t pkt = odp_packet_alloc(suite_context.pool, input_vec_len);
	CU_ASSERT(pkt != ODP_PACKET_INVALID);
	uint8_t *data_addr = odp_packet_data(pkt);
	memcpy(data_addr, input_vec, input_vec_len);
	const int data_off = 0;

	/* Prepare input/output params */
	odp_crypto_op_params_t op_params;
	memset(&op_params, 0, sizeof(op_params));
	op_params.session = session;
	op_params.pkt = pkt;
	op_params.out_pkt = pkt;
	op_params.ctx = (void *)0xdeadbeef;
	if (cipher_alg != ODP_CIPHER_ALG_NULL &&
	    auth_alg == ODP_AUTH_ALG_NULL) {
		op_params.cipher_range.offset = data_off;
		op_params.cipher_range.length = input_vec_len;
		if (op_iv_ptr)
			op_params.override_iv_ptr = op_iv_ptr;
	} else if (cipher_alg == ODP_CIPHER_ALG_NULL &&
		 auth_alg != ODP_AUTH_ALG_NULL) {
		op_params.auth_range.offset = data_off;
		op_params.auth_range.length = input_vec_len;
		op_params.hash_result_offset = data_off;
	} else {
		CU_FAIL("%s : not implemented for combined alg mode\n");
	}

	rc = odp_crypto_operation(&op_params, &posted, &result);
	if (rc < 0) {
		CU_FAIL("Failed odp_crypto_operation()");
		goto cleanup;
	}

	if (posted) {
		/* Poll completion queue for results */
		do {
			event = odp_queue_deq(suite_context.queue);
		} while (event == ODP_EVENT_INVALID);

		compl_event = odp_crypto_compl_from_event(event);
		CU_ASSERT(odp_crypto_compl_to_u64(compl_event) ==
			  odp_crypto_compl_to_u64(odp_crypto_compl_from_event(event)));
		odp_crypto_compl_result(compl_event, &result);
		odp_crypto_compl_free(compl_event);
	}

	CU_ASSERT(result.ok);
	CU_ASSERT(result.pkt == pkt);

	CU_ASSERT(!memcmp(data_addr, output_vec, output_vec_len));

	CU_ASSERT(result.ctx == (void *)0xdeadbeef);
cleanup:
	rc = odp_crypto_session_destroy(session);
	CU_ASSERT(!rc);

	odp_packet_free(pkt);
}
Example #30
static void
test_packet_output_ipv6_to_gre(void)
{
	odp_packet_t pkt = ODP_PACKET_INVALID;
	odp_event_t ev;
	int res;
	struct ofp_ether_header *eth;
	struct ofp_ip6_hdr *ip6, *ip6_orig;
	struct ofp_ip *ip;
	struct ofp_greip *greip;

	(void)tcp_frame;
	(void)icmp_frame;
	(void)arp_frame;
	(void)icmp6_frame;

	if (create_odp_packet_ip6(&pkt, ip6udp_frame, sizeof(ip6udp_frame))) {
		CU_FAIL("Fail to create packet");
		return;
	}

	ip6 = odp_packet_l3_ptr(pkt, NULL);

	ofp_set_route6_params(OFP_ROUTE6_ADD, 0 /*vrf*/, 100 /*vlan*/, GRE_PORTS,
			      ip6->ip6_dst.__u6_addr.__u6_addr8, 64 /*masklen*/,
			      0 /*gw*/, OFP_RTF_NET /* flags */);

	res = ofp_ip6_output(pkt, NULL);
	CU_ASSERT_EQUAL(res, OFP_PKT_PROCESSED);

	res = ofp_send_pending_pkt();
	CU_ASSERT_EQUAL(res, OFP_PKT_PROCESSED);

	ev = odp_queue_deq(dev->outq_def);
	CU_ASSERT_NOT_EQUAL_FATAL(ev, ODP_EVENT_INVALID);

	pkt = odp_packet_from_event(ev);
	CU_ASSERT_EQUAL_FATAL(odp_packet_len(pkt),
			      sizeof(ip6udp_frame) + 20 + 4);

	eth = odp_packet_l2_ptr(pkt, NULL);
	if (memcmp(eth->ether_dhost, tun_rem_mac, OFP_ETHER_ADDR_LEN))
		CU_FAIL("Bad destination mac address.");
	if (memcmp(eth->ether_shost, dev->mac, OFP_ETHER_ADDR_LEN))
		CU_FAIL("Bad source mac address.");

	ip = odp_packet_l3_ptr(pkt, NULL);
	CU_ASSERT_EQUAL(ip->ip_src.s_addr, dev_ip);
	CU_ASSERT_EQUAL(ip->ip_dst.s_addr, tun_rem_ip);
	CU_ASSERT_EQUAL(ip->ip_p, OFP_IPPROTO_GRE);

	greip = (struct ofp_greip *)ip;
	CU_ASSERT_EQUAL(greip->gi_g.flags, 0);
	CU_ASSERT_EQUAL(greip->gi_g.ptype,
			odp_cpu_to_be_16(OFP_ETHERTYPE_IPV6));

	/* inner ip */
	ip6 = (struct ofp_ip6_hdr *)(greip + 1);
	ip6_orig = (struct ofp_ip6_hdr *)
		(&orig_pkt_data[OFP_ETHER_HDR_LEN]);
	if (memcmp(ip6, ip6_orig,
		   odp_be_to_cpu_16(ip6_orig->ofp_ip6_plen) + sizeof(*ip6)))
		CU_FAIL("Inner IP packet error.");
}