static void *pkt_io_recv(void *arg) { odp_pktin_queue_t pktin; odp_packet_t pkt, pkt_tbl[PKT_BURST_SIZE]; int pkt_idx, pkt_cnt; struct pktio_thr_arg *thr_args; ofp_pkt_processing_func pkt_func; thr_args = arg; pkt_func = thr_args->pkt_func; pktin = thr_args->pktin; if (ofp_init_local()) { OFP_ERR("Error: OFP local init failed.\n"); return NULL; } OFP_DBG("PKT-IO receive starting on cpu: %d", odp_cpu_id()); while (1) { pkt_cnt = odp_pktin_recv(pktin, pkt_tbl, PKT_BURST_SIZE); for (pkt_idx = 0; pkt_idx < pkt_cnt; pkt_idx++) { pkt = pkt_tbl[pkt_idx]; if (odp_unlikely(odp_packet_has_error(pkt))) { OFP_DBG("Packet with error dropped.\n"); odp_packet_free(pkt); continue; } ofp_packet_input(pkt, ODP_QUEUE_INVALID, pkt_func); } ofp_send_pending_pkt(); } /* Never reached */ return NULL; }
static int ipc_second_process(void) { odp_pktio_t ipc_pktio; odp_pool_param_t params; odp_pool_t pool; odp_packet_t pkt_tbl[MAX_PKT_BURST]; odp_packet_t alloc_pkt; int pkts; int ret; int i; odp_time_t start_cycle; odp_time_t cycle; odp_time_t diff; odp_time_t wait; uint64_t stat_pkts = 0; odp_pktin_queue_t pktin; /* Create packet pool */ memset(¶ms, 0, sizeof(params)); params.pkt.seg_len = SHM_PKT_POOL_BUF_SIZE; params.pkt.len = SHM_PKT_POOL_BUF_SIZE; params.pkt.num = SHM_PKT_POOL_SIZE; params.type = ODP_POOL_PACKET; pool = odp_pool_create("packet_pool2", ¶ms); if (pool == ODP_POOL_INVALID) { EXAMPLE_ERR("Error: packet pool create failed.\n"); exit(EXIT_FAILURE); } ipc_pktio = create_pktio(pool); wait = odp_time_local_from_ns(run_time_sec * ODP_TIME_SEC_IN_NS); start_cycle = odp_time_local(); if (odp_pktin_queue(ipc_pktio, &pktin, 1) != 1) { EXAMPLE_ERR("no input queue\n"); return -1; } /* start ipc pktio, i.e. wait until other process connects */ for (;;) { /* 1. exit loop if time specified */ if (run_time_sec) { cycle = odp_time_local(); diff = odp_time_diff(cycle, start_cycle); if (odp_time_cmp(wait, diff) < 0) { printf("timeout exit, run_time_sec %d\n", run_time_sec); goto not_started; } } ret = odp_pktio_start(ipc_pktio); if (!ret) break; } for (;;) { /* exit loop if time specified */ if (run_time_sec) { cycle = odp_time_local(); diff = odp_time_diff(cycle, start_cycle); if (odp_time_cmp(wait, diff) < 0) { EXAMPLE_DBG("exit after %d seconds\n", run_time_sec); break; } } /* recv some packets and change MAGIC to MAGIC_2 */ pkts = odp_pktin_recv(pktin, pkt_tbl, MAX_PKT_BURST); if (pkts <= 0) continue; for (i = 0; i < pkts; i++) { odp_packet_t pkt = pkt_tbl[i]; pkt_head_t head; size_t off; off = odp_packet_l4_offset(pkt); if (off == ODP_PACKET_OFFSET_INVALID) EXAMPLE_ABORT("invalid l4 offset\n"); off += ODPH_UDPHDR_LEN; ret = odp_packet_copy_to_mem(pkt, off, sizeof(head), &head); if (ret) EXAMPLE_ABORT("unable copy out head data"); if (head.magic != 
TEST_SEQ_MAGIC) EXAMPLE_ABORT("Wrong head magic!"); /* Modify magic number in packet */ head.magic = TEST_SEQ_MAGIC_2; ret = odp_packet_copy_from_mem(pkt, off, sizeof(head), &head); if (ret) EXAMPLE_ABORT("unable to copy in head data"); } /* send all packets back */ ret = ipc_odp_packet_send_or_free(ipc_pktio, pkt_tbl, pkts); if (ret < 0) EXAMPLE_ABORT("can not send packets\n"); stat_pkts += ret; /* alloc packet from local pool, set magic to ALLOC_MAGIC, * and send it.*/ alloc_pkt = odp_packet_alloc(pool, SHM_PKT_POOL_BUF_SIZE); if (alloc_pkt != ODP_PACKET_INVALID) { pkt_head_t head; size_t off; odp_packet_l4_offset_set(alloc_pkt, 30); head.magic = TEST_ALLOC_MAGIC; off = odp_packet_l4_offset(alloc_pkt); off += ODPH_UDPHDR_LEN; ret = odp_packet_copy_from_mem(alloc_pkt, off, sizeof(head), &head); if (ret) EXAMPLE_ABORT("unable to copy in head data"); pkt_tbl[0] = alloc_pkt; ret = ipc_odp_packet_send_or_free(ipc_pktio, pkt_tbl, 1); if (ret < 0) EXAMPLE_ABORT("can not send packets\n"); stat_pkts += 1; } } /* cleanup and exit */ ret = odp_pktio_stop(ipc_pktio); if (ret) { EXAMPLE_DBG("ipc2: odp_pktio_stop error %d\n", ret); return -1; } not_started: ret = odp_pktio_close(ipc_pktio); if (ret) { EXAMPLE_DBG("ipc2: odp_pktio_close error %d\n", ret); return -1; } ret = odp_pool_destroy(pool); if (ret) EXAMPLE_DBG("ipc2: pool_destroy error %d\n", ret); return stat_pkts > 1000 ? 0 : -1; }
/**
 * OFP-aware replacement for nginx's select event processing.
 *
 * First services the OFP fast path: receives a burst from the global
 * 'in_queue', pushes packets through ofp_eth_vlan_processing, and every
 * ~50th call polls the OFP timer queue.  When OFP_NOTIFY is set the
 * function returns here; otherwise it falls through to a classic
 * select()-based nginx event loop over the module's fd sets.
 *
 * @param cycle  nginx cycle (used for logging)
 * @param timer  timeout in ms, or NGX_TIMER_INFINITE to block
 * @param flags  NGX_UPDATE_TIME etc.
 * @return NGX_OK or NGX_ERROR
 */
static ngx_int_t
ngx_select_process_events(ngx_cycle_t *cycle, ngx_msec_t timer,
    ngx_uint_t flags)
{
    odp_packet_t pkts[OFP_PKT_RX_BURST_SIZE];
    odp_packet_t odp_pkt;
    int pkt_cnt = 0;
    int pkt_idx = 0;

    /* OFP fast path: drain one burst from the global input queue */
    pkt_cnt = odp_pktin_recv(in_queue, pkts, OFP_PKT_RX_BURST_SIZE);

    for (pkt_idx = 0; pkt_idx < pkt_cnt; pkt_idx++) {
        odp_pkt = pkts[pkt_idx];
        ofp_packet_input(odp_pkt, ODP_QUEUE_INVALID,
                         ofp_eth_vlan_processing);
    }

    /* Flush pending tx only on idle iterations (no packets received) */
    if (pkt_cnt < 1)
        ofp_send_pending_pkt();

    /* Poll the OFP timer queue only every ~50th call. */
    static uint32_t count = 0;
    /*odp_queue_deq has a lock that impacts performance*/
    if (count ++ > 50) {
        count = 0;
        odp_queue_t timer_queue = ofp_timer_queue_cpu(-1);
        odp_event_t event = odp_queue_deq(timer_queue);
        if (event != ODP_EVENT_INVALID) {
            if (odp_event_type(event) == ODP_EVENT_TIMEOUT) {
                ofp_timer_handle(event);
            } else {
                /* Unexpected event type: reclaim the buffer */
                odp_buffer_free(odp_buffer_from_event(event));
            }
        }
    }

#if OFP_NOTIFY
    /* In notify mode the select() path below is compiled out of use. */
    return NGX_OK;
#endif

    int ready, nready;
    ngx_err_t err;
    ngx_uint_t i, found;
    ngx_event_t *ev;
    ngx_queue_t *queue;
    struct timeval tv, *tp;
    ngx_connection_t *c;

    /* Lazily recompute max_fd after an fd was removed (-1 marks dirty) */
    if (max_fd == -1) {
        for (i = 0; i < nevents; i++) {
            c = event_index[i]->data;
            ngx_log_debug(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                          "change max_fd: %i, c->fd : %d",
                          max_fd, c->fd);
            if (max_fd < c->fd) {
                max_fd = c->fd;
            }
        }
        ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                       "change max_fd: %i", max_fd);
    }

#if (NGX_DEBUG)
    if (cycle->log->log_level & NGX_LOG_DEBUG_ALL) {
        for (i = 0; i < nevents; i++) {
            ev = event_index[i];
            c = ev->data;
            ngx_log_debug2(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                           "select event: fd:%d wr:%d",
                           c->fd, ev->write);
        }
        ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                       "max_fd: %i", max_fd);
    }
#endif

    /* Convert the ms timer to a timeval; NULL means block forever */
    if (timer == NGX_TIMER_INFINITE) {
        tp = NULL;
    } else {
        tv.tv_sec = (long) (timer / 1000);
        tv.tv_usec = (long) ((timer % 1000) * 1000);
        tp = &tv;
    }

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "select timer: %M", timer);
    ngx_log_debug2(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "select max_fd %d, tp %p ", max_fd, tp);
    ngx_log_debug2(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   " master %p : work %p",
                   (ofp_fd_set *)&master_read_fd_set,
                   (ofp_fd_set *)&work_read_fd_set);

    /* NOTE(review): select() mutates the master fd sets in place here;
     * presumably they are rebuilt elsewhere — confirm against the add/del
     * event handlers of this module. */
    ready = select(max_fd + 1, &master_read_fd_set,
                   &master_write_fd_set, NULL, tp);

    /* Capture errno before any call below can overwrite it */
    err = (ready == -1) ? ngx_errno : 0;

    if (flags & NGX_UPDATE_TIME || ngx_event_timer_alarm) {
        ngx_time_update();
    }

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "select ready %d", ready);

    if (err) {
        ngx_uint_t level;

        if (err == NGX_EINTR) {
            /* Interrupted by the timer signal: not an error */
            if (ngx_event_timer_alarm) {
                ngx_event_timer_alarm = 0;
                return NGX_OK;
            }
            level = NGX_LOG_INFO;
        } else {
            level = NGX_LOG_ALERT;
        }

        ngx_log_error(level, cycle->log, err, "select() failed");

        if (err == NGX_EBADF) {
            /*ngx_select_repair_fd_sets(cycle);*/
        }
        return NGX_ERROR;
    }

    if (ready == 0) {
        /* NOTE(review): this unconditional return makes everything below
         * in this branch unreachable — looks like the OFP port
         * deliberately tolerates ready==0 without a timeout, but the
         * stock nginx error path was left behind; confirm and remove
         * the dead code. */
        return NGX_OK;
        if (timer != NGX_TIMER_INFINITE) {
            return NGX_OK;
        }
        ngx_log_error(NGX_LOG_ALERT, cycle->log, 0,
                      "select() returned no events without timeout");
        return NGX_ERROR;
    }

    /* Post every ready event to the accept or regular posted queue */
    nready = 0;
    for (i = 0; i < nevents; i++) {
        ev = event_index[i];
        c = ev->data;
        found = 0;

        if (ev->write) {
            if (FD_ISSET(c->fd, &master_write_fd_set)) {
                found = 1;
                ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                               "select write %d", c->fd);
            }
        } else {
            if (FD_ISSET(c->fd, &master_read_fd_set)) {
                found = 1;
                ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                               "select read %d", c->fd);
            }
        }

        if (found) {
            ev->ready = 1;
            queue = ev->accept ? &ngx_posted_accept_events
                               : &ngx_posted_events;
            ngx_post_event(ev, queue);
            nready++;
        }
    }

    if (ready != nready) {
        /*ngx_log_error(NGX_LOG_ALERT, cycle->log, 0,
                        "select ready != events: %d:%d", ready, nready);
        ngx_select_repair_fd_sets(cycle);*/
    }

    return NGX_OK;
}
/** * Switch worker thread * * @param arg Thread arguments of type 'thread_args_t *' */ static int run_worker(void *arg) { thread_args_t *thr_args = arg; odp_packet_t pkt_tbl[MAX_PKT_BURST]; odp_pktin_queue_t pktin; odp_pktout_queue_t pktout; unsigned num_pktio; unsigned pktio = 0; uint8_t port_in; uint8_t port_out; int pkts; num_pktio = thr_args->num_rx_pktio; pktin = thr_args->rx_pktio[pktio].pktin; port_in = thr_args->rx_pktio[pktio].port_idx; odp_barrier_wait(&barrier); while (!exit_threads) { int sent; unsigned drops; if (num_pktio > 1) { pktin = thr_args->rx_pktio[pktio].pktin; port_in = thr_args->rx_pktio[pktio].port_idx; pktio++; if (pktio == num_pktio) pktio = 0; } pkts = odp_pktin_recv(pktin, pkt_tbl, MAX_PKT_BURST); if (odp_unlikely(pkts <= 0)) continue; thr_args->stats[port_in]->s.rx_packets += pkts; /* Sort packets to thread local tx buffers */ forward_packets(pkt_tbl, pkts, thr_args, port_in); /* Empty all thread local tx buffers */ for (port_out = 0; port_out < gbl_args->appl.if_count; port_out++) { unsigned tx_pkts; odp_packet_t *tx_pkt_tbl; if (port_out == port_in || thr_args->tx_pktio[port_out].buf.len == 0) continue; tx_pkts = thr_args->tx_pktio[port_out].buf.len; thr_args->tx_pktio[port_out].buf.len = 0; tx_pkt_tbl = thr_args->tx_pktio[port_out].buf.pkt; pktout = thr_args->tx_pktio[port_out].pktout; sent = odp_pktout_send(pktout, tx_pkt_tbl, tx_pkts); sent = odp_unlikely(sent < 0) ? 0 : sent; thr_args->stats[port_out]->s.tx_packets += sent; drops = tx_pkts - sent; if (odp_unlikely(drops)) { unsigned i; thr_args->stats[port_out]->s.tx_drops += drops; /* Drop rejected packets */ for (i = sent; i < tx_pkts; i++) odp_packet_free(tx_pkt_tbl[i]); } } } /* Make sure that latest stat writes are visible to other threads */ odp_mb_full(); return 0; }