/*
 * Dequeue up to 'num_pkts' events, either from the plain queue 'pollq' or,
 * when 'pollq' is ODP_QUEUE_INVALID, from the scheduler. Returns the number
 * of events stored in 'event_tbl'.
 */
static int receive_packets(odp_queue_t pollq, odp_event_t *event_tbl,
			   unsigned num_pkts)
{
	int n_ev = 0;

	if (num_pkts == 0)
		return 0;

	if (pollq != ODP_QUEUE_INVALID) {
		if (num_pkts == 1) {
			event_tbl[0] = odp_queue_deq(pollq);
			n_ev = event_tbl[0] != ODP_EVENT_INVALID;
		} else {
			n_ev = odp_queue_deq_multi(pollq, event_tbl, num_pkts);
		}
	} else {
		if (num_pkts == 1) {
			event_tbl[0] = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
			n_ev = event_tbl[0] != ODP_EVENT_INVALID;
		} else {
			n_ev = odp_schedule_multi(NULL, ODP_SCHED_NO_WAIT,
						  event_tbl, num_pkts);
		}
	}

	return n_ev;
}
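/*
 * Usage sketch (not part of the original source): a minimal drain loop that
 * calls receive_packets() either against a dedicated poll queue or, with
 * ODP_QUEUE_INVALID, against the scheduler, and frees whatever comes back.
 * The queue handle and the burst size of 32 are assumptions for illustration.
 */
static void drain_events_example(odp_queue_t pollq)
{
	odp_event_t ev_tbl[32];
	int i, n;

	/* Pass ODP_QUEUE_INVALID as 'pollq' to pull from the scheduler */
	n = receive_packets(pollq, ev_tbl, 32);

	for (i = 0; i < n; i++)
		odp_event_free(ev_tbl[i]);
}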
static int schedule_common_(void *arg)
{
	thread_args_t *args = (thread_args_t *)arg;
	odp_schedule_sync_t sync;
	test_globals_t *globals;
	queue_context *qctx;
	buf_contents *bctx, *bctx_cpy;
	odp_pool_t pool;
	int locked;
	int num;
	odp_event_t ev;
	odp_buffer_t buf, buf_cpy;
	odp_queue_t from;

	globals = args->globals;
	sync = args->sync;

	pool = odp_pool_lookup(MSG_POOL_NAME);
	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);

	if (args->num_workers > 1)
		odp_barrier_wait(&globals->barrier);

	while (1) {
		from = ODP_QUEUE_INVALID;
		num = 0;

		odp_ticketlock_lock(&globals->lock);
		if (globals->buf_count == 0) {
			odp_ticketlock_unlock(&globals->lock);
			break;
		}
		odp_ticketlock_unlock(&globals->lock);

		if (args->enable_schd_multi) {
			odp_event_t events[BURST_BUF_SIZE],
				ev_cpy[BURST_BUF_SIZE];
			odp_buffer_t buf_cpy[BURST_BUF_SIZE];
			int j;

			num = odp_schedule_multi(&from, ODP_SCHED_NO_WAIT,
						 events, BURST_BUF_SIZE);
			CU_ASSERT(num >= 0);
			CU_ASSERT(num <= BURST_BUF_SIZE);
			if (num == 0)
				continue;

			if (sync == ODP_SCHED_SYNC_ORDERED) {
				int ndx;
				int ndx_max;
				int rc;

				ndx_max = odp_queue_lock_count(from);
				CU_ASSERT_FATAL(ndx_max >= 0);

				qctx = odp_queue_context(from);

				for (j = 0; j < num; j++) {
					bctx = odp_buffer_addr(
						odp_buffer_from_event
						(events[j]));

					buf_cpy[j] = odp_buffer_alloc(pool);
					CU_ASSERT_FATAL(buf_cpy[j] !=
							ODP_BUFFER_INVALID);
					bctx_cpy = odp_buffer_addr(buf_cpy[j]);
					memcpy(bctx_cpy, bctx,
					       sizeof(buf_contents));
					bctx_cpy->output_sequence =
						bctx_cpy->sequence;
					ev_cpy[j] =
						odp_buffer_to_event(buf_cpy[j]);
				}

				rc = odp_queue_enq_multi(qctx->pq_handle,
							 ev_cpy, num);
				CU_ASSERT(rc == num);

				bctx = odp_buffer_addr(
					odp_buffer_from_event(events[0]));
				for (ndx = 0; ndx < ndx_max; ndx++) {
					odp_schedule_order_lock(ndx);
					CU_ASSERT(bctx->sequence ==
						  qctx->lock_sequence[ndx]);
					qctx->lock_sequence[ndx] += num;
					odp_schedule_order_unlock(ndx);
				}
			}

			for (j = 0; j < num; j++)
				odp_event_free(events[j]);
		} else {
			ev = odp_schedule(&from, ODP_SCHED_NO_WAIT);
			if (ev == ODP_EVENT_INVALID)
				continue;

			buf = odp_buffer_from_event(ev);
			num = 1;

			if (sync == ODP_SCHED_SYNC_ORDERED) {
				int ndx;
				int ndx_max;
				int rc;

				ndx_max = odp_queue_lock_count(from);
				CU_ASSERT_FATAL(ndx_max >= 0);

				qctx = odp_queue_context(from);
				bctx = odp_buffer_addr(buf);
				buf_cpy = odp_buffer_alloc(pool);
				CU_ASSERT_FATAL(buf_cpy != ODP_BUFFER_INVALID);
				bctx_cpy = odp_buffer_addr(buf_cpy);
				memcpy(bctx_cpy, bctx, sizeof(buf_contents));
				bctx_cpy->output_sequence = bctx_cpy->sequence;

				rc = odp_queue_enq(qctx->pq_handle,
						   odp_buffer_to_event
						   (buf_cpy));
				CU_ASSERT(rc == 0);

				for (ndx = 0; ndx < ndx_max; ndx++) {
					odp_schedule_order_lock(ndx);
					CU_ASSERT(bctx->sequence ==
						  qctx->lock_sequence[ndx]);
					qctx->lock_sequence[ndx] += num;
					odp_schedule_order_unlock(ndx);
				}
			}

			odp_buffer_free(buf);
		}

		if (args->enable_excl_atomic) {
			locked = odp_spinlock_trylock(&globals->atomic_lock);
			CU_ASSERT(locked != 0);
			CU_ASSERT(from != ODP_QUEUE_INVALID);
			if (locked) {
				int cnt;
				odp_time_t time = ODP_TIME_NULL;

				/* Do some work here to keep the thread busy */
				for (cnt = 0; cnt < 1000; cnt++)
					time = odp_time_sum(time,
							    odp_time_local());

				odp_spinlock_unlock(&globals->atomic_lock);
			}
		}

		if (sync == ODP_SCHED_SYNC_ATOMIC)
			odp_schedule_release_atomic();

		if (sync == ODP_SCHED_SYNC_ORDERED)
			odp_schedule_release_ordered();

		odp_ticketlock_lock(&globals->lock);

		globals->buf_count -= num;

		if (globals->buf_count < 0) {
			odp_ticketlock_unlock(&globals->lock);
			CU_FAIL_FATAL("Buffer counting failed");
		}

		odp_ticketlock_unlock(&globals->lock);
	}

	if (args->num_workers > 1)
		odp_barrier_wait(&globals->barrier);

	if (sync == ODP_SCHED_SYNC_ORDERED)
		locked = odp_ticketlock_trylock(&globals->lock);
	else
		locked = 0;

	if (locked && globals->buf_count_cpy > 0) {
		odp_event_t ev;
		odp_queue_t pq;
		uint64_t seq;
		uint64_t bcount = 0;
		int i, j;
		char name[32];
		uint64_t num_bufs = args->num_bufs;
		uint64_t buf_count = globals->buf_count_cpy;

		for (i = 0; i < args->num_prio; i++) {
			for (j = 0; j < args->num_queues; j++) {
				snprintf(name, sizeof(name),
					 "plain_%d_%d_o", i, j);
				pq = odp_queue_lookup(name);
				CU_ASSERT_FATAL(pq != ODP_QUEUE_INVALID);

				seq = 0;
				while (1) {
					ev = odp_queue_deq(pq);

					if (ev == ODP_EVENT_INVALID) {
						CU_ASSERT(seq == num_bufs);
						break;
					}

					bctx = odp_buffer_addr(
						odp_buffer_from_event(ev));

					CU_ASSERT(bctx->sequence == seq);
					seq++;
					bcount++;
					odp_event_free(ev);
				}
			}
		}
		CU_ASSERT(bcount == buf_count);
		globals->buf_count_cpy = 0;
	}

	if (locked)
		odp_ticketlock_unlock(&globals->lock);

	/* Clear scheduler atomic / ordered context between tests */
	num = exit_schedule_loop();

	CU_ASSERT(num == 0);
	if (num)
		printf("\nDROPPED %i events\n\n", num);

	return 0;
}
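/*
 * exit_schedule_loop() is called above but not shown here. A minimal sketch
 * of what such a helper typically does, assuming only the ODP scheduler API
 * (odp_schedule_pause/odp_schedule_resume): pause scheduling, drain and free
 * any events still queued to this thread, resume, and report how many events
 * had to be dropped.
 */
static int exit_schedule_loop(void)
{
	odp_event_t ev;
	int ret = 0;

	odp_schedule_pause();

	while ((ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT))
	       != ODP_EVENT_INVALID) {
		odp_event_free(ev);
		ret++;
	}

	odp_schedule_resume();

	return ret;
}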
int default_event_dispatcher(void *arg)
{
	odp_event_t ev;
	odp_packet_t pkt;
	odp_queue_t in_queue;
	int event_idx = 0;
	int event_cnt = 0;
	ofp_pkt_processing_func pkt_func = (ofp_pkt_processing_func)arg;
	odp_bool_t *is_running = NULL;

	if (ofp_init_local()) {
		OFP_ERR("ofp_init_local failed");
		return -1;
	}

	int rx_burst = global_param->evt_rx_burst_size;
	odp_event_t events[rx_burst];

	is_running = ofp_get_processing_state();
	if (is_running == NULL) {
		OFP_ERR("ofp_get_processing_state failed");
		ofp_term_local();
		return -1;
	}

	/* PER CORE DISPATCHER */
	while (*is_running) {
		event_cnt = odp_schedule_multi(&in_queue, ODP_SCHED_WAIT,
					       events, rx_burst);
		for (event_idx = 0; event_idx < event_cnt; event_idx++) {
			odp_event_type_t ev_type;

			ev = events[event_idx];

			if (ev == ODP_EVENT_INVALID)
				continue;

			ev_type = odp_event_type(ev);

			if (odp_likely(ev_type == ODP_EVENT_PACKET)) {
				pkt = odp_packet_from_event(ev);
#if 0
				if (odp_unlikely(odp_packet_has_error(pkt))) {
					OFP_DBG("Dropping packet with error");
					odp_packet_free(pkt);
					continue;
				}
#endif
				ofp_packet_input(pkt, in_queue, pkt_func);
				continue;
			}

			if (ev_type == ODP_EVENT_TIMEOUT) {
				ofp_timer_handle(ev);
				continue;
			}

			OFP_ERR("Unexpected event type: %u", ev_type);
			odp_event_free(ev);
		}
		ofp_send_pending_pkt();
	}

	if (ofp_term_local())
		OFP_ERR("ofp_term_local failed");

	return 0;
}
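/*
 * Launch sketch (an assumption, not from the original source): this variant
 * of the dispatcher returns int, so a plain pthread launcher needs a small
 * void * wrapper. 'ofp_eth_vlan_processing' is assumed here as the packet
 * processing callback; production code would normally start workers through
 * the ODP/OFP thread helpers with CPU pinning rather than raw pthreads.
 */
#include <pthread.h>

static void *dispatcher_wrapper(void *arg)
{
	/* Run the per-core dispatcher and discard its return value */
	(void)default_event_dispatcher(arg);
	return NULL;
}

static int start_dispatcher_thread(pthread_t *tid)
{
	return pthread_create(tid, NULL, dispatcher_wrapper,
			      (void *)ofp_eth_vlan_processing);
}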
/**
 * Packet IO worker thread using ODP queues
 *
 * @param arg  thread arguments of type 'thread_args_t *'
 */
static void *pktio_queue_thread(void *arg)
{
	odp_event_t ev_tbl[MAX_PKT_BURST];
	odp_packet_t pkt_tbl[MAX_PKT_BURST];
	int pkts;
	int thr;
	uint64_t wait;
	int dst_idx;
	odp_pktio_t pktio_dst;
	thread_args_t *thr_args = arg;
	stats_t *stats = thr_args->stats;

	thr = odp_thread_id();

	printf("[%02i] QUEUE mode\n", thr);

	odp_barrier_wait(&barrier);

	wait = odp_schedule_wait_time(ODP_TIME_MSEC_IN_NS * 100);

	/* Loop packets */
	while (!exit_threads) {
		int sent, i;
		unsigned tx_drops;

		pkts = odp_schedule_multi(NULL, wait, ev_tbl, MAX_PKT_BURST);

		if (pkts <= 0)
			continue;

		for (i = 0; i < pkts; i++)
			pkt_tbl[i] = odp_packet_from_event(ev_tbl[i]);

		if (gbl_args->appl.error_check) {
			int rx_drops;

			/* Drop packets with errors */
			rx_drops = drop_err_pkts(pkt_tbl, pkts);

			if (odp_unlikely(rx_drops)) {
				stats->s.rx_drops += rx_drops;
				if (pkts == rx_drops)
					continue;

				pkts -= rx_drops;
			}
		}

		/* packets from the same queue are from the same interface */
		dst_idx = lookup_dest_port(pkt_tbl[0]);
		fill_eth_addrs(pkt_tbl, pkts, dst_idx);

		pktio_dst = gbl_args->pktios[dst_idx];

		sent = odp_pktio_send(pktio_dst, pkt_tbl, pkts);

		sent = odp_unlikely(sent < 0) ? 0 : sent;
		tx_drops = pkts - sent;

		if (odp_unlikely(tx_drops)) {
			stats->s.tx_drops += tx_drops;

			/* Drop rejected packets */
			for (i = sent; i < pkts; i++)
				odp_packet_free(pkt_tbl[i]);
		}

		stats->s.packets += pkts;
	}

	/* Make sure that latest stat writes are visible to other threads */
	odp_mb_full();

	return NULL;
}
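/*
 * drop_err_pkts() is used above but not shown. A minimal sketch of the usual
 * implementation, assuming only the ODP packet API: free packets flagged
 * with errors, compact the remaining ones to the front of the table, and
 * return how many were dropped (matching the contract at the call site).
 */
static int drop_err_pkts(odp_packet_t pkt_tbl[], unsigned num)
{
	unsigned i, good = 0;
	int dropped = 0;

	for (i = 0; i < num; i++) {
		if (odp_unlikely(odp_packet_has_error(pkt_tbl[i]))) {
			odp_packet_free(pkt_tbl[i]); /* Drop the bad packet */
			dropped++;
		} else {
			pkt_tbl[good++] = pkt_tbl[i]; /* Keep, compact left */
		}
	}

	return dropped;
}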
void *default_event_dispatcher(void *arg)
{
	odp_event_t ev;
	odp_packet_t pkt;
	odp_queue_t in_queue;
	odp_event_t events[OFP_EVT_RX_BURST_SIZE];
	int event_idx = 0;
	int event_cnt = 0;
	ofp_pkt_processing_func pkt_func = (ofp_pkt_processing_func)arg;
	odp_bool_t *is_running = NULL;

#if ODP_VERSION < 106
	if (odp_init_local(ODP_THREAD_WORKER)) {
		OFP_ERR("odp_init_local failed");
		return NULL;
	}
#endif
	if (ofp_init_local()) {
		OFP_ERR("ofp_init_local failed");
		return NULL;
	}

	is_running = ofp_get_processing_state();
	if (is_running == NULL) {
		OFP_ERR("ofp_get_processing_state failed");
		ofp_term_local();
		return NULL;
	}

	/* PER CORE DISPATCHER */
	while (*is_running) {
		event_cnt = odp_schedule_multi(&in_queue, ODP_SCHED_WAIT,
					       events,
					       OFP_EVT_RX_BURST_SIZE);
		for (event_idx = 0; event_idx < event_cnt; event_idx++) {
			ev = events[event_idx];

			if (ev == ODP_EVENT_INVALID)
				continue;

			if (odp_event_type(ev) == ODP_EVENT_TIMEOUT) {
				ofp_timer_handle(ev);
				continue;
			}

			if (odp_event_type(ev) == ODP_EVENT_PACKET) {
				pkt = odp_packet_from_event(ev);
#if 0
				if (odp_unlikely(odp_packet_has_error(pkt))) {
					OFP_DBG("Dropping packet with error");
					odp_packet_free(pkt);
					continue;
				}
#endif
				ofp_packet_input(pkt, in_queue, pkt_func);
				continue;
			}

			OFP_ERR("Unexpected event type: %u",
				odp_event_type(ev));

			/* Free events by type */
			if (odp_event_type(ev) == ODP_EVENT_BUFFER) {
				odp_buffer_free(odp_buffer_from_event(ev));
				continue;
			}

			if (odp_event_type(ev) == ODP_EVENT_CRYPTO_COMPL) {
				odp_crypto_compl_free(
					odp_crypto_compl_from_event(ev));
				continue;
			}
		}
		ofp_send_pending_pkt();
	}

	if (ofp_term_local())
		OFP_ERR("ofp_term_local failed");

	return NULL;
}
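/*
 * Launch sketch (an assumption, not from the original source): unlike the
 * int-returning variant above, this older dispatcher already matches the
 * pthread start-routine signature, so it can be started directly. The plain
 * pthread launch and the 'ofp_eth_vlan_processing' callback are illustrative
 * assumptions; real deployments pin workers to cores via the ODP/OFP thread
 * helpers.
 */
#include <pthread.h>

static int start_legacy_dispatcher(pthread_t *tid)
{
	return pthread_create(tid, NULL, default_event_dispatcher,
			      (void *)ofp_eth_vlan_processing);
}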