/* Allocate and initialize a VI. */
static int init(const char* intf, int vi_i)
{
  struct vi* vi = &vis[vi_i];
  int i;
  unsigned vi_flags = EF_VI_FLAGS_DEFAULT;

  TRY(ef_driver_open(&vi->dh));

  /* check that RX merge is supported */
  if( cfg_rx_merge ) {
    unsigned long value;
    int ifindex = if_nametoindex(intf);
    TEST(ifindex > 0);
    int rc = ef_vi_capabilities_get(vi->dh, ifindex, EF_VI_CAP_RX_MERGE,
                                    &value);
    if( rc < 0 || ! value ) {
      fprintf(stderr, "WARNING: RX merge not supported on %s. Use '-c' "
              "option instead.\n", intf);
      exit(EXIT_FAILURE);
    }
    else {
      vi_flags |= EF_VI_RX_EVENT_MERGE;
    }
  }

  TRY(ef_pd_alloc_by_name(&vi->pd, vi->dh, intf, EF_PD_DEFAULT));
  TRY(ef_vi_alloc_from_pd(&vi->vi, vi->dh, &vi->pd, vi->dh, -1,
                          RX_RING_SIZE, TX_RING_SIZE, NULL, -1, vi_flags));

  /* Memory for pkt buffers has already been allocated.  Map it into
   * the VI. */
  TRY(ef_memreg_alloc(&vi->memreg, vi->dh, &vi->pd, vi->dh,
                      pbs.mem, pbs.mem_size));
  for( i = 0; i < pbs.num; ++i ) {
    struct pkt_buf* pkt_buf = pkt_buf_from_id(i);
    pkt_buf->rx_ef_addr[vi_i] =
      ef_memreg_dma_addr(&vi->memreg, i * PKT_BUF_SIZE) + RX_DMA_OFF +
      addr_offset_from_id(i);
    pkt_buf->tx_ef_addr[vi_i] =
      ef_memreg_dma_addr(&vi->memreg, i * PKT_BUF_SIZE) + RX_DMA_OFF +
      ef_vi_receive_prefix_len(&vi->vi) + addr_offset_from_id(i);
  }

  /* Our pkt buffer allocation function makes assumptions on queue sizes */
  assert(ef_vi_receive_capacity(&vi->vi) == RX_RING_SIZE - 1);
  assert(ef_vi_transmit_capacity(&vi->vi) == TX_RING_SIZE - 1);

  if( cfg_unidirectional && vi_i == 1 )
    return 0;  /* only need filter and RX fill for ingress VI */

  while( ef_vi_receive_space(&vi->vi) > REFILL_BATCH_SIZE )
    vi_refill_rx_ring(vi_i);

  ef_filter_spec fs;
  ef_filter_spec_init(&fs, EF_FILTER_FLAG_NONE);
  TRY(ef_filter_spec_set_unicast_all(&fs));
  TRY(ef_vi_filter_add(&vi->vi, vi->dh, &fs, NULL));

  ef_filter_spec_init(&fs, EF_FILTER_FLAG_NONE);
  TRY(ef_filter_spec_set_multicast_all(&fs));
  TRY(ef_vi_filter_add(&vi->vi, vi->dh, &fs, NULL));
  return 0;
}
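/* Both this listing and the one below pre-fill the RX ring by calling
 * vi_refill_rx_ring(), which is not shown here.  The following is a minimal
 * sketch of what such a refill routine could look like, assuming a simple
 * free list of packet buffers (pbs.free_pool / pbs.free_pool_n) and
 * per-buffer 'id' and 'next' fields; those names are illustrative
 * assumptions, not part of the ef_vi API or of the listings in this
 * section. */
static void vi_refill_rx_ring(int vi_i)
{
  struct vi* vi = &vis[vi_i];
  int i;

  if( ef_vi_receive_space(&vi->vi) < REFILL_BATCH_SIZE ||
      pbs.free_pool_n < REFILL_BATCH_SIZE )
    return;

  for( i = 0; i < REFILL_BATCH_SIZE; ++i ) {
    /* Pop a buffer off the (assumed) free list and queue its DMA address.
     * The buffer id is used as the request id so that the RX completion
     * event can later be mapped back to the buffer. */
    struct pkt_buf* pkt_buf = pbs.free_pool;
    pbs.free_pool = pkt_buf->next;
    --pbs.free_pool_n;
    ef_vi_receive_init(&vi->vi, pkt_buf->rx_ef_addr[vi_i], pkt_buf->id);
  }
  /* Publish the newly initialised descriptors to the adapter in one go. */
  ef_vi_receive_push(&vi->vi);
}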
/* Allocate and initialize a VI. */
static int init(const char* intf, int vi_i)
{
  struct vi* vi = &vis[vi_i];
  int i;

  TRY(ef_driver_open(&vi->dh));
  TRY(ef_pd_alloc_by_name(&vi->pd, vi->dh, intf, EF_PD_DEFAULT));
  TRY(ef_vi_alloc_from_pd(&vi->vi, vi->dh, &vi->pd, vi->dh,
                          -1, -1, -1, NULL, -1, EF_VI_FLAGS_DEFAULT));

  /* Memory for pkt buffers has already been allocated.  Map it into
   * the VI. */
  TRY(ef_memreg_alloc(&vi->memreg, vi->dh, &vi->pd, vi->dh,
                      pbs.mem, pbs.mem_size));
  for( i = 0; i < pbs.num; ++i ) {
    struct pkt_buf* pkt_buf = pkt_buf_from_id(i);
    pkt_buf->rx_ef_addr[vi_i] =
      ef_memreg_dma_addr(&vi->memreg, i * PKT_BUF_SIZE) + RX_DMA_OFF;
    pkt_buf->tx_ef_addr[vi_i] =
      ef_memreg_dma_addr(&vi->memreg, i * PKT_BUF_SIZE) + RX_DMA_OFF +
      ef_vi_receive_prefix_len(&vi->vi);
    pkt_buf->rx_ptr[vi_i] = (char*) pkt_buf + RX_DMA_OFF +
      ef_vi_receive_prefix_len(&vi->vi);
  }

  /* Our pkt buffer allocation function makes assumptions on queue sizes */
  assert(ef_vi_receive_capacity(&vi->vi) == 511);
  assert(ef_vi_transmit_capacity(&vi->vi) == 511);

  while( ef_vi_receive_space(&vi->vi) > REFILL_BATCH_SIZE )
    vi_refill_rx_ring(vi_i);

  ef_filter_spec fs;
  ef_filter_spec_init(&fs, EF_FILTER_FLAG_NONE);
  TRY(ef_filter_spec_set_unicast_all(&fs));
  TRY(ef_vi_filter_add(&vi->vi, vi->dh, &fs, NULL));

  ef_filter_spec_init(&fs, EF_FILTER_FLAG_NONE);
  TRY(ef_filter_spec_set_multicast_all(&fs));
  TRY(ef_vi_filter_add(&vi->vi, vi->dh, &fs, NULL));
  return 0;
}
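/* The two listings above assume a contiguous pool of fixed-size packet
 * buffers, described by a global 'pbs' and addressed by buffer id.  The
 * fragment below sketches one plausible layout consistent with that usage;
 * the constant values and any names not referenced above (the PKT_BUF_SIZE
 * value, N_VIS, 'id', 'next', 'free_pool', 'free_pool_n') are illustrative
 * assumptions, not taken from the original source.  addr_offset_from_id()
 * in the first listing is likewise assumed to be part of the application's
 * buffer-pool code and is not sketched here. */
#define N_VIS          2       /* one VI per interface in a forwarding app */
#define PKT_BUF_SIZE   2048    /* assumed per-buffer stride in the pool    */

struct pkt_buf {
  int             id;                  /* index of this buffer in the pool */
  struct pkt_buf* next;                /* free-list link                   */
  ef_addr         rx_ef_addr[N_VIS];   /* DMA address to post for RX       */
  ef_addr         tx_ef_addr[N_VIS];   /* DMA address to post for TX       */
  char*           rx_ptr[N_VIS];       /* CPU pointer to received payload  */
};

/* Offset of the DMA area within each buffer: the metadata header above is
 * assumed to be padded out to a 64-byte boundary. */
#define RX_DMA_OFF  ((sizeof(struct pkt_buf) + 63) & ~(size_t) 63)

struct pkt_bufs {
  void*           mem;         /* base of the region given to ef_memreg_alloc */
  size_t          mem_size;    /* total size of that region                   */
  int             num;         /* number of buffers in the pool               */
  struct pkt_buf* free_pool;   /* head of the free list                       */
  int             free_pool_n; /* number of buffers on the free list          */
};
static struct pkt_bufs pbs;

static inline struct pkt_buf* pkt_buf_from_id(int pkt_buf_i)
{
  assert((unsigned) pkt_buf_i < (unsigned) pbs.num);
  return (struct pkt_buf*)
    ((char*) pbs.mem + (size_t) pkt_buf_i * PKT_BUF_SIZE);
}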