static struct vi* __vi_alloc(int vi_id, struct net_if* net_if,
                             int vi_set_instance, enum ef_vi_flags flags)
{
  struct vi* vi;

  vi = malloc(sizeof(*vi));
  TEST(vi != NULL);
  vi->id = vi_id;
  vi->net_if = net_if;
  TRY(ef_driver_open(&vi->dh));
  if( vi_set_instance < 0 ) {
    /* Stand-alone VI: allocate directly from the protection domain. */
    TRY(ef_vi_alloc_from_pd(&vi->vi, vi->dh, &net_if->pd, net_if->dh,
                            -1, -1, -1, NULL, -1, flags));
  }
  else {
    /* VI belongs to a VI set: the instance index must be within the set. */
    TEST(net_if->vi_set_size > 0);
    TEST(vi_set_instance < net_if->vi_set_size);
    TRY(ef_vi_alloc_from_set(&vi->vi, vi->dh, &net_if->vi_set, net_if->dh,
                             vi_set_instance, -1, -1, -1, NULL, -1, flags));
  }
  vi_init_pktbufs(vi);
  vi_init_layout(vi, flags);
  vi_refill_rx_ring(vi);
  return vi;
}
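/* For context: the double-underscore prefix suggests __vi_alloc() is an
 * internal helper behind thin public wrappers. A minimal sketch of what
 * such wrappers might look like follows; the names vi_alloc() and
 * vi_alloc_from_set() are assumptions for illustration, not taken from
 * this file. */

struct vi* vi_alloc(int vi_id, struct net_if* net_if, enum ef_vi_flags flags)
{
  /* No set instance: allocate a stand-alone VI from the protection domain. */
  return __vi_alloc(vi_id, net_if, -1, flags);
}

struct vi* vi_alloc_from_set(int vi_id, struct net_if* net_if,
                             int vi_set_instance)
{
  /* Allocate the given instance from the interface's VI set. */
  return __vi_alloc(vi_id, net_if, vi_set_instance, EF_VI_FLAGS_DEFAULT);
}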
/* Allocate and initialize a VI. */
static int init_vi(struct vi_state* vi_state)
{
  int i;

  TRY(ef_vi_alloc_from_set(&vi_state->vi, dh, &vi_set, dh, -1, -1, -1, 0,
                           NULL, -1, EF_VI_FLAGS_DEFAULT));
  /* The VI has just an RXQ, with default capacity of 512. */
  vi_state->num = 512;
  vi_state->mem_size = vi_state->num * PKT_BUF_SIZE;
  vi_state->mem_size = ROUND_UP(vi_state->mem_size, huge_page_size);
  /* Allocate huge-page-aligned memory to give the best chance of getting
   * transparent huge pages. */
  TEST(posix_memalign(&vi_state->mem, huge_page_size, vi_state->mem_size)
       == 0);
  TRY(ef_memreg_alloc(&vi_state->memreg, dh, &pd, dh,
                      vi_state->mem, vi_state->mem_size));
  for( i = 0; i < vi_state->num; ++i ) {
    struct pkt_buf* pkt_buf = pkt_buf_from_id(vi_state, i);
    pkt_buf->rx_ef_addr =
      ef_memreg_dma_addr(&vi_state->memreg, i * PKT_BUF_SIZE) + RX_DMA_OFF;
    pkt_buf->rx_ptr = (char*) pkt_buf + RX_DMA_OFF +
      ef_vi_receive_prefix_len(&vi_state->vi);
    pkt_buf_free(vi_state, pkt_buf);
  }

  /* Our packet-buffer allocation makes assumptions about queue sizes: the
   * default 512-entry RX ring exposes 511 usable slots. */
  assert(ef_vi_receive_capacity(&vi_state->vi) == 511);

  while( ef_vi_receive_space(&vi_state->vi) > REFILL_BATCH_SIZE )
    vi_refill_rx_ring(vi_state);

  return 0;
}
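/* init_vi() relies on three helpers that are not shown above. The sketches
 * below illustrate one plausible shape for them (in a real file they would
 * be defined before init_vi). Assumptions, not taken from this file: the
 * free list is a singly-linked list threaded through the buffers via the
 * fields free_pkt_bufs, free_pkt_bufs_n and pkt_buf->next, and buffer IDs
 * are derived from the buffer's offset within the registered region.
 * ef_vi_receive_init() and ef_vi_receive_push() are the standard ef_vi
 * calls for initialising RX descriptors and posting them to the adapter. */

/* Map a buffer index to its address within the registered region. */
static inline struct pkt_buf* pkt_buf_from_id(struct vi_state* vi_state,
                                              int pkt_buf_i)
{
  assert((unsigned) pkt_buf_i < (unsigned) vi_state->num);
  return (void*) ((char*) vi_state->mem + (size_t) pkt_buf_i * PKT_BUF_SIZE);
}

/* Push a buffer onto the per-VI free list. */
static inline void pkt_buf_free(struct vi_state* vi_state,
                                struct pkt_buf* pkt_buf)
{
  pkt_buf->next = vi_state->free_pkt_bufs;
  vi_state->free_pkt_bufs = pkt_buf;
  ++vi_state->free_pkt_bufs_n;
}

/* Refill the RX ring in batches: initialise one descriptor per free buffer,
 * then make the batch visible to the adapter with a single push. */
static void vi_refill_rx_ring(struct vi_state* vi_state)
{
  ef_vi* vi = &vi_state->vi;
  struct pkt_buf* pkt_buf;
  int id, i;

  if( ef_vi_receive_space(vi) < REFILL_BATCH_SIZE ||
      vi_state->free_pkt_bufs_n < REFILL_BATCH_SIZE )
    return;
  for( i = 0; i < REFILL_BATCH_SIZE; ++i ) {
    pkt_buf = vi_state->free_pkt_bufs;
    vi_state->free_pkt_bufs = pkt_buf->next;
    --vi_state->free_pkt_bufs_n;
    /* Recover the buffer's ID from its offset in the contiguous region. */
    id = (int) (((char*) pkt_buf - (char*) vi_state->mem) / PKT_BUF_SIZE);
    ef_vi_receive_init(vi, pkt_buf->rx_ef_addr, id);
  }
  ef_vi_receive_push(vi);
}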