static struct vi* __vi_alloc(int vi_id, struct net_if* net_if, int vi_set_instance, enum ef_vi_flags flags) { struct vi* vi; vi = malloc(sizeof(*vi)); vi->id = vi_id; vi->net_if = net_if; TRY(ef_driver_open(&vi->dh)); if( vi_set_instance < 0 ) { TRY(ef_vi_alloc_from_pd(&vi->vi, vi->dh, &net_if->pd, net_if->dh, -1, -1, -1, NULL, -1, flags)); } else { TEST(net_if->vi_set_size > 0); TEST(vi_set_instance < net_if->vi_set_size); TRY(ef_vi_alloc_from_set(&vi->vi, vi->dh, &net_if->vi_set, net_if->dh, vi_set_instance, -1, -1, -1, NULL, -1, flags)); } vi_init_pktbufs(vi); vi_init_layout(vi, flags); vi_refill_rx_ring(vi); return vi; }
/* Allocate and initialize a VI. */
static int init(const char* intf, int vi_i)
{
  struct vi* vi = &vis[vi_i];
  int i;
  unsigned vi_flags = EF_VI_FLAGS_DEFAULT;

  TRY(ef_driver_open(&vi->dh));
  /* check that RX merge is supported */
  if( cfg_rx_merge ) {
    unsigned long value;
    int ifindex = if_nametoindex(intf);
    TEST(ifindex > 0);
    int rc = ef_vi_capabilities_get(vi->dh, ifindex, EF_VI_CAP_RX_MERGE,
                                    &value);
    /* NOTE(review): message says "WARNING" but the process exits with
     * failure immediately afterwards — consider rewording to "ERROR". */
    if( rc < 0 || ! value ) {
      fprintf(stderr, "WARNING: RX merge not supported on %s. Use '-c' "
              "option instead.\n", intf);
      exit(EXIT_FAILURE);
    }
    else {
      vi_flags |= EF_VI_RX_EVENT_MERGE;
    }
  }
  TRY(ef_pd_alloc_by_name(&vi->pd, vi->dh, intf, EF_PD_DEFAULT));
  TRY(ef_vi_alloc_from_pd(&vi->vi, vi->dh, &vi->pd, vi->dh,
                          -1, RX_RING_SIZE, TX_RING_SIZE, NULL, -1,
                          vi_flags));

  /* Memory for pkt buffers has already been allocated.  Map it into
   * the VI. */
  TRY(ef_memreg_alloc(&vi->memreg, vi->dh, &vi->pd, vi->dh,
                      pbs.mem, pbs.mem_size));
  /* Precompute per-buffer DMA addresses for this VI.  RX addresses skip
   * RX_DMA_OFF; TX addresses additionally skip the RX prefix the adapter
   * prepends, so a forwarded frame can be sent from the same buffer
   * without copying.  addr_offset_from_id() presumably adds a per-buffer
   * stagger — confirm against the pkt-buf allocator in this file. */
  for( i = 0; i < pbs.num; ++i ) {
    struct pkt_buf* pkt_buf = pkt_buf_from_id(i);
    pkt_buf->rx_ef_addr[vi_i] =
      ef_memreg_dma_addr(&vi->memreg, i * PKT_BUF_SIZE) + RX_DMA_OFF
      + addr_offset_from_id(i);
    pkt_buf->tx_ef_addr[vi_i] =
      ef_memreg_dma_addr(&vi->memreg, i * PKT_BUF_SIZE) + RX_DMA_OFF
      + ef_vi_receive_prefix_len(&vi->vi) + addr_offset_from_id(i);
  }

  /* Our pkt buffer allocation function makes assumptions on queue sizes */
  assert(ef_vi_receive_capacity(&vi->vi) == RX_RING_SIZE - 1);
  assert(ef_vi_transmit_capacity(&vi->vi) == TX_RING_SIZE - 1);

  /* In unidirectional mode VI 1 only transmits, so it needs no filters
   * and no RX ring fill. */
  if( cfg_unidirectional && vi_i == 1 )
    return 0; /* only need filter and RX fill for ingress VI */

  /* Fill the RX ring in batches until fewer than a batch of slots remain. */
  while( ef_vi_receive_space(&vi->vi) > REFILL_BATCH_SIZE )
    vi_refill_rx_ring(vi_i);

  /* Steer all unicast and all multicast traffic on this port to the VI. */
  ef_filter_spec fs;
  ef_filter_spec_init(&fs, EF_FILTER_FLAG_NONE);
  TRY(ef_filter_spec_set_unicast_all(&fs));
  TRY(ef_vi_filter_add(&vi->vi, vi->dh, &fs, NULL));
  ef_filter_spec_init(&fs, EF_FILTER_FLAG_NONE);
  TRY(ef_filter_spec_set_multicast_all(&fs));
  TRY(ef_vi_filter_add(&vi->vi, vi->dh, &fs, NULL));
  return 0;
}
static void do_init(char const* interface) { enum ef_pd_flags pd_flags = EF_PD_DEFAULT; ef_filter_spec filter_spec; struct pkt_buf* pb; enum ef_vi_flags vi_flags = 0; int i; if( cfg_use_vf ) pd_flags |= EF_PD_VF; if( cfg_phys_mode ) pd_flags |= EF_PD_PHYS_MODE; if( cfg_disable_tx_push ) vi_flags |= EF_VI_TX_PUSH_DISABLE; /* Allocate virtual interface. */ TRY(ef_driver_open(&driver_handle)); if ( cfg_use_vport ) { TRY(ef_pd_alloc_with_vport(&pd, driver_handle, interface, pd_flags, EF_PD_VLAN_NONE)); } else { TRY(ef_pd_alloc_by_name(&pd, driver_handle, interface, pd_flags)); } TRY(ef_vi_alloc_from_pd(&vi, driver_handle, &pd, driver_handle, -1, -1, -1, NULL, -1, vi_flags)); ef_filter_spec_init(&filter_spec, EF_FILTER_FLAG_NONE); TRY(ef_filter_spec_set_ip4_local(&filter_spec, IPPROTO_UDP, sa_local.sin_addr.s_addr, sa_local.sin_port)); TRY(ef_vi_filter_add(&vi, driver_handle, &filter_spec, &filter_cookie)); { int bytes = N_BUFS * BUF_SIZE; TEST(posix_memalign(&pkt_buf_mem, CI_PAGE_SIZE, bytes) == 0); TRY(ef_memreg_alloc(&memreg, driver_handle, &pd, driver_handle, pkt_buf_mem, bytes)); for( i = 0; i < N_BUFS; ++i ) { pb = (void*) ((char*) pkt_buf_mem + i * BUF_SIZE); pb->id = i; pb->dma_buf_addr = ef_memreg_dma_addr(&memreg, i * BUF_SIZE); pb->dma_buf_addr += MEMBER_OFFSET(struct pkt_buf, dma_buf); pkt_bufs[i] = pb; } } for( i = 0; i < N_RX_BUFS; ++i ) pkt_bufs[i]->dma_buf_addr += cfg_rx_align; for( i = FIRST_TX_BUF; i < N_BUFS; ++i ) pkt_bufs[i]->dma_buf_addr += cfg_tx_align; pb = pkt_bufs[FIRST_TX_BUF]; tx_frame_len = init_udp_pkt(pb->dma_buf + cfg_tx_align, cfg_payload_len); }
static void ef_vi_init(struct client_state* cs, const char* interface) { cs->pio_pkt_len = 0; cs->pio_in_use = ! cfg_delegated; TRY( ef_driver_open(&(cs->dh)) ); TRY( ef_pd_alloc_by_name(&(cs->pd), cs->dh, interface, EF_PD_DEFAULT) ); TRY( ef_vi_alloc_from_pd(&(cs->vi), cs->dh, &(cs->pd), cs->dh, -1, 0,-1, NULL, -1, EF_VI_FLAGS_DEFAULT) ); TRY( ef_pio_alloc(&(cs->pio), cs->dh, &(cs->pd), -1, cs->dh)); TRY( ef_pio_link_vi(&(cs->pio), cs->dh, &(cs->vi), cs->dh)); }
/* One-time setup: allocate a PD + VI on [ifindex], link a PIO region,
 * install a UDP filter for sa_local, and set up N_RX_BUFS receive buffers
 * plus one extra buffer used to build the transmit frame. */
static void do_init(int ifindex)
{
  enum ef_pd_flags pd_flags = 0;
  ef_filter_spec filter_spec;
  struct pkt_buf* pb;
  enum ef_vi_flags vi_flags = 0;
  int i;

  /* Fold command-line options into allocation flags. */
  if( cfg_use_vf )
    pd_flags |= EF_PD_VF;
  if( cfg_phys_mode )
    pd_flags |= EF_PD_PHYS_MODE;
  if( cfg_disable_tx_push )
    vi_flags |= EF_VI_TX_PUSH_DISABLE;

  /* Allocate virtual interface. */
  TRY(ef_driver_open(&driver_handle));
  TRY(ef_pd_alloc(&pd, driver_handle, ifindex, pd_flags));
  TRY(ef_vi_alloc_from_pd(&vi, driver_handle, &pd, driver_handle,
                          -1, -1, -1, NULL, -1, vi_flags));
#ifdef __x86_64__
  TRY(ef_pio_alloc(&pio, driver_handle, &pd, -1, driver_handle));
  TRY(ef_pio_link_vi(&pio, driver_handle, &vi, driver_handle));
#else
  /* PIO is only available on x86_64 systems */
  TEST(0);
#endif

  /* Steer UDP traffic addressed to sa_local to this VI. */
  ef_filter_spec_init(&filter_spec, EF_FILTER_FLAG_NONE);
  TRY(ef_filter_spec_set_ip4_local(&filter_spec, IPPROTO_UDP,
                                   sa_local.sin_addr.s_addr,
                                   sa_local.sin_port));
  TRY(ef_vi_filter_add(&vi, driver_handle, &filter_spec, NULL));

  /* Allocate N_RX_BUFS + 1 buffers: indices 0..N_RX_BUFS-1 for receive,
   * index N_RX_BUFS for the transmit template (see the "<=" loops below).
   * NOTE(review): 4096 is presumably the page size — confirm whether the
   * project's CI_PAGE_SIZE macro should be used here instead. */
  {
    int bytes = (N_RX_BUFS + 1) * RX_BUF_SIZE;
    void* p;
    TEST(posix_memalign(&p, 4096, bytes) == 0);
    TRY(ef_memreg_alloc(&memreg, driver_handle, &pd, driver_handle,
                        p, bytes));
    for( i = 0; i <= N_RX_BUFS; ++i ) {
      pkt_bufs[i] = (void*) ((char*) p + i * RX_BUF_SIZE);
      pkt_bufs[i]->dma_buf_addr = ef_memreg_dma_addr(&memreg,
                                                     i * RX_BUF_SIZE);
    }
  }

  /* Point each DMA address at the payload area rather than the buffer
   * header. */
  for( i = 0; i <= N_RX_BUFS; ++i ) {
    pb = pkt_bufs[i];
    pb->id = i;
    pb->dma_buf_addr += MEMBER_OFFSET(struct pkt_buf, dma_buf);
  }

  /* Build the template UDP frame in the extra (TX) buffer. */
  init_udp_pkt(pkt_bufs[N_RX_BUFS]->dma_buf, cfg_payload_len);
  tx_frame_len = cfg_payload_len + header_size();
}
/* Allocate and initialize a VI. */
static int init(const char* intf, int vi_i)
{
  struct vi* vi = &vis[vi_i];
  int i;

  TRY(ef_driver_open(&vi->dh));
  TRY(ef_pd_alloc_by_name(&vi->pd, vi->dh, intf, EF_PD_DEFAULT));
  TRY(ef_vi_alloc_from_pd(&vi->vi, vi->dh, &vi->pd, vi->dh,
                          -1, -1, -1, NULL, -1, EF_VI_FLAGS_DEFAULT));

  /* Memory for pkt buffers has already been allocated.  Map it into
   * the VI. */
  TRY(ef_memreg_alloc(&vi->memreg, vi->dh, &vi->pd, vi->dh,
                      pbs.mem, pbs.mem_size));
  /* Precompute per-buffer addresses for this VI.  RX DMA addresses skip
   * RX_DMA_OFF; TX DMA addresses and the host-side rx_ptr additionally
   * skip the RX prefix the adapter prepends, so a received frame can be
   * retransmitted from the same buffer without copying. */
  for( i = 0; i < pbs.num; ++i ) {
    struct pkt_buf* pkt_buf = pkt_buf_from_id(i);
    pkt_buf->rx_ef_addr[vi_i] =
      ef_memreg_dma_addr(&vi->memreg, i * PKT_BUF_SIZE) + RX_DMA_OFF;
    pkt_buf->tx_ef_addr[vi_i] =
      ef_memreg_dma_addr(&vi->memreg, i * PKT_BUF_SIZE) + RX_DMA_OFF
      + ef_vi_receive_prefix_len(&vi->vi);
    pkt_buf->rx_ptr[vi_i] = (char*) pkt_buf + RX_DMA_OFF
      + ef_vi_receive_prefix_len(&vi->vi);
  }

  /* Our pkt buffer allocation function makes assumptions on queue sizes */
  /* NOTE(review): 511 is a magic number (presumably a ring size of 512
   * minus one reserved slot) — consider a named RING_SIZE constant. */
  assert(ef_vi_receive_capacity(&vi->vi) == 511);
  assert(ef_vi_transmit_capacity(&vi->vi) == 511);

  /* Fill the RX ring in batches until fewer than a batch of slots remain. */
  while( ef_vi_receive_space(&vi->vi) > REFILL_BATCH_SIZE )
    vi_refill_rx_ring(vi_i);

  /* Steer all unicast and all multicast traffic on this port to the VI. */
  ef_filter_spec fs;
  ef_filter_spec_init(&fs, EF_FILTER_FLAG_NONE);
  TRY(ef_filter_spec_set_unicast_all(&fs));
  TRY(ef_vi_filter_add(&vi->vi, vi->dh, &fs, NULL));
  ef_filter_spec_init(&fs, EF_FILTER_FLAG_NONE);
  TRY(ef_filter_spec_set_multicast_all(&fs));
  TRY(ef_vi_filter_add(&vi->vi, vi->dh, &fs, NULL));
  return 0;
}