Example #1
/* Allocate and initialize a VI. */
static int init(const char* intf, int vi_i)
{
  struct vi* vi = &vis[vi_i];
  int i;
  unsigned vi_flags = EF_VI_FLAGS_DEFAULT;

  TRY(ef_driver_open(&vi->dh));
  /* check that RX merge is supported */
  if( cfg_rx_merge ) {
    unsigned long value;
    int ifindex = if_nametoindex(intf);
    TEST(ifindex > 0);
    int rc = ef_vi_capabilities_get(vi->dh, ifindex, EF_VI_CAP_RX_MERGE, &value);
    if( rc < 0 || ! value ) {
      fprintf(stderr, "WARNING: RX merge not supported on %s. Use '-c' "
              "option instead.\n", intf);
      exit(EXIT_FAILURE);
    }
    else {
      vi_flags |= EF_VI_RX_EVENT_MERGE;
    }
  }
  TRY(ef_pd_alloc_by_name(&vi->pd, vi->dh, intf, EF_PD_DEFAULT));
  TRY(ef_vi_alloc_from_pd(&vi->vi, vi->dh, &vi->pd, vi->dh, -1, RX_RING_SIZE,
                          TX_RING_SIZE, NULL, -1, vi_flags));


  /* Memory for pkt buffers has already been allocated.  Map it into
   * the VI. */
  TRY(ef_memreg_alloc(&vi->memreg, vi->dh, &vi->pd, vi->dh,
                      pbs.mem, pbs.mem_size));
  for( i = 0; i < pbs.num; ++i ) {
    struct pkt_buf* pkt_buf = pkt_buf_from_id(i);
    pkt_buf->rx_ef_addr[vi_i] =
      ef_memreg_dma_addr(&vi->memreg, i * PKT_BUF_SIZE) + RX_DMA_OFF
      + addr_offset_from_id(i);
    pkt_buf->tx_ef_addr[vi_i] =
      ef_memreg_dma_addr(&vi->memreg, i * PKT_BUF_SIZE) + RX_DMA_OFF +
      ef_vi_receive_prefix_len(&vi->vi) + addr_offset_from_id(i);
  }

  /* Our pkt buffer allocation function makes assumptions on queue sizes */
  assert(ef_vi_receive_capacity(&vi->vi) == RX_RING_SIZE - 1);
  assert(ef_vi_transmit_capacity(&vi->vi) == TX_RING_SIZE - 1);

  if( cfg_unidirectional && vi_i == 1 )
    return 0; /* only need filter and RX fill for ingress VI */

  while( ef_vi_receive_space(&vi->vi) > REFILL_BATCH_SIZE )
    vi_refill_rx_ring(vi_i);

  /* Accept all unicast and multicast traffic on this VI. */
  ef_filter_spec fs;
  ef_filter_spec_init(&fs, EF_FILTER_FLAG_NONE);
  TRY(ef_filter_spec_set_unicast_all(&fs));
  TRY(ef_vi_filter_add(&vi->vi, vi->dh, &fs, NULL));
  ef_filter_spec_init(&fs, EF_FILTER_FLAG_NONE);
  TRY(ef_filter_spec_set_multicast_all(&fs));
  TRY(ef_vi_filter_add(&vi->vi, vi->dh, &fs, NULL));
  return 0;
}
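
Example #1 fills its RX ring through vi_refill_rx_ring(), which is not shown above. Below is a minimal sketch of such a helper, assuming the packet buffers sit on a singly linked free list (pbs.free_pool, pbs.free_pool_n, pkt_buf->next and pkt_buf->id are assumptions based on the shape of the example, not part of the ef_vi API): descriptors are prepared with ef_vi_receive_init() and the doorbell is rung once per batch with ef_vi_receive_push().

/* Sketch only: refill one VI's RX ring from a free list, one batch at a time. */
static void vi_refill_rx_ring(int vi_i)
{
  ef_vi* vi = &vis[vi_i].vi;
  struct pkt_buf* pkt_buf;
  int i;

  if( ef_vi_receive_space(vi) < REFILL_BATCH_SIZE ||
      pbs.free_pool_n < REFILL_BATCH_SIZE )
    return;

  for( i = 0; i < REFILL_BATCH_SIZE; ++i ) {
    pkt_buf = pbs.free_pool;
    pbs.free_pool = pkt_buf->next;
    --pbs.free_pool_n;
    /* The buffer id is echoed back in the RX event, so the completion
     * handler can locate the buffer again. */
    ef_vi_receive_init(vi, pkt_buf->rx_ef_addr[vi_i], pkt_buf->id);
  }
  /* One doorbell for the whole batch. */
  ef_vi_receive_push(vi);
}
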
Example #2
/* Allocate and initialize a VI. */
static int init_vi(struct vi_state* vi_state)
{
  int i;
  TRY(ef_vi_alloc_from_set(&vi_state->vi, dh, &vi_set, dh, -1, -1, -1, 0, NULL,
                          -1, EF_VI_FLAGS_DEFAULT));

  /* The VI has just an RXQ with default capacity of 512 */
  vi_state->num = 512;
  vi_state->mem_size = vi_state->num * PKT_BUF_SIZE;
  vi_state->mem_size = ROUND_UP(vi_state->mem_size, huge_page_size);
  /* Allocate huge-page-aligned memory to give best chance of allocating
   * transparent huge-pages.
   */
  TEST(posix_memalign(&vi_state->mem, huge_page_size, vi_state->mem_size) == 0);
  TRY(ef_memreg_alloc(&vi_state->memreg, dh, &pd, dh, vi_state->mem,
                      vi_state->mem_size));

  for( i = 0; i < vi_state->num; ++i ) {
    struct pkt_buf* pkt_buf = pkt_buf_from_id(vi_state, i);
    pkt_buf->rx_ef_addr =
      ef_memreg_dma_addr(&vi_state->memreg, i * PKT_BUF_SIZE) + RX_DMA_OFF;
    pkt_buf->rx_ptr = (char*) pkt_buf + RX_DMA_OFF +
      ef_vi_receive_prefix_len(&vi_state->vi);
    pkt_buf_free(vi_state, pkt_buf);
  }

  /* Our pkt buffer allocation function makes assumptions on queue sizes */
  assert(ef_vi_receive_capacity(&vi_state->vi) == 511);

  while( ef_vi_receive_space(&vi_state->vi) > REFILL_BATCH_SIZE )
    vi_refill_rx_ring(vi_state);

  return 0;
}
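
pkt_buf_from_id() and pkt_buf_free() are not shown in Example #2. A minimal sketch is below; it assumes the buffers are laid out contiguously in vi_state->mem and kept on a singly linked free list (the next, free_pkt_bufs and free_pkt_bufs_n fields are assumptions, not part of the ef_vi API).

#include <assert.h>

/* Sketch only: map a packet-buffer id to its address within the VI's memory. */
static inline struct pkt_buf* pkt_buf_from_id(struct vi_state* vi_state, int id)
{
  assert((unsigned) id < (unsigned) vi_state->num);
  return (struct pkt_buf*) ((char*) vi_state->mem + (size_t) id * PKT_BUF_SIZE);
}

/* Sketch only: push a buffer back onto the VI's free list. */
static inline void pkt_buf_free(struct vi_state* vi_state, struct pkt_buf* pkt_buf)
{
  pkt_buf->next = vi_state->free_pkt_bufs;
  vi_state->free_pkt_bufs = pkt_buf;
  ++vi_state->free_pkt_bufs_n;
}
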
Example #3
static void do_init(char const* interface)
{
  enum ef_pd_flags pd_flags = EF_PD_DEFAULT;
  ef_filter_spec filter_spec;
  struct pkt_buf* pb;
  enum ef_vi_flags vi_flags = 0;
  int i;

  if( cfg_use_vf )
    pd_flags |= EF_PD_VF;
  if( cfg_phys_mode )
    pd_flags |= EF_PD_PHYS_MODE;
  if( cfg_disable_tx_push )
    vi_flags |= EF_VI_TX_PUSH_DISABLE;

  /* Allocate virtual interface. */
  TRY(ef_driver_open(&driver_handle));
  if( cfg_use_vport ) {
    TRY(ef_pd_alloc_with_vport(&pd, driver_handle, interface, pd_flags,
                               EF_PD_VLAN_NONE));
  } else {
    TRY(ef_pd_alloc_by_name(&pd, driver_handle, interface, pd_flags));
  }
  TRY(ef_vi_alloc_from_pd(&vi, driver_handle, &pd, driver_handle,
                          -1, -1, -1, NULL, -1, vi_flags));

  /* Steer the configured local UDP flow to this VI. */
  ef_filter_spec_init(&filter_spec, EF_FILTER_FLAG_NONE);
  TRY(ef_filter_spec_set_ip4_local(&filter_spec, IPPROTO_UDP,
                                   sa_local.sin_addr.s_addr,
                                   sa_local.sin_port));
  TRY(ef_vi_filter_add(&vi, driver_handle, &filter_spec, &filter_cookie));

  /* Register the packet-buffer memory with the adapter and record each
   * buffer's DMA address. */
  {
    int bytes = N_BUFS * BUF_SIZE;
    TEST(posix_memalign(&pkt_buf_mem, CI_PAGE_SIZE, bytes) == 0);
    TRY(ef_memreg_alloc(&memreg, driver_handle, &pd, driver_handle, pkt_buf_mem,
                        bytes));
    for( i = 0; i < N_BUFS; ++i ) {
      pb = (void*) ((char*) pkt_buf_mem + i * BUF_SIZE);
      pb->id = i;
      pb->dma_buf_addr = ef_memreg_dma_addr(&memreg, i * BUF_SIZE);
      pb->dma_buf_addr += MEMBER_OFFSET(struct pkt_buf, dma_buf);
      pkt_bufs[i] = pb;
    }
  }

  for( i = 0; i < N_RX_BUFS; ++i )
    pkt_bufs[i]->dma_buf_addr += cfg_rx_align;
  for( i = FIRST_TX_BUF; i < N_BUFS; ++i )
    pkt_bufs[i]->dma_buf_addr += cfg_tx_align;

  pb = pkt_bufs[FIRST_TX_BUF];
  tx_frame_len = init_udp_pkt(pb->dma_buf + cfg_tx_align, cfg_payload_len);
}
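
All of these examples rely on the TRY() and TEST() convenience macros from the openonload example utilities: TEST() aborts if an expression is false, TRY() aborts if a call returns a negative error code. Roughly equivalent definitions (a sketch; the exact messages in the real utils.h differ) are:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Abort if an expression evaluates to false. */
#define TEST(x)                                                   \
  do {                                                            \
    if( ! (x) ) {                                                 \
      fprintf(stderr, "ERROR: TEST(%s) failed at %s:%d\n",        \
              #x, __FILE__, __LINE__);                            \
      exit(EXIT_FAILURE);                                         \
    }                                                             \
  } while( 0 )

/* Abort if a call returns a negative error code. */
#define TRY(x)                                                    \
  do {                                                            \
    int rc_ = (x);                                                \
    if( rc_ < 0 ) {                                               \
      fprintf(stderr, "ERROR: TRY(%s) failed at %s:%d: rc=%d "    \
              "errno=%d (%s)\n", #x, __FILE__, __LINE__, rc_,     \
              errno, strerror(errno));                            \
      exit(EXIT_FAILURE);                                         \
    }                                                             \
  } while( 0 )
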
Example #4
File: efvi_sfw.c  Project: ido/openonload
static void pkt_buf_init(struct vi* vi, int pkt_buf_i)
{
  struct pkt_buf* pkt_buf;
  pkt_buf = pkt_buf_from_id(vi, pkt_buf_i);
  pkt_buf->vi_owner = vi;
  pkt_buf->addr[vi->net_if->id] =
    ef_memreg_dma_addr(&vi->memreg, pkt_buf_i * PKT_BUF_SIZE);
  pkt_buf->id = pkt_buf_i;
  pkt_buf->n_refs = 0;
  pkt_buf_free(pkt_buf);
}
Example #5
File: efpio.c  Project: davenso/openonload
static void do_init(int ifindex)
{
  enum ef_pd_flags pd_flags = 0;
  ef_filter_spec filter_spec;
  struct pkt_buf* pb;
  enum ef_vi_flags vi_flags = 0;
  int i;

  if( cfg_use_vf )
    pd_flags |= EF_PD_VF;
  if( cfg_phys_mode )
    pd_flags |= EF_PD_PHYS_MODE;
  if( cfg_disable_tx_push )
    vi_flags |= EF_VI_TX_PUSH_DISABLE;

  /* Allocate virtual interface. */
  TRY(ef_driver_open(&driver_handle));
  TRY(ef_pd_alloc(&pd, driver_handle, ifindex, pd_flags));
  TRY(ef_vi_alloc_from_pd(&vi, driver_handle, &pd, driver_handle,
                          -1, -1, -1, NULL, -1, vi_flags));
#ifdef __x86_64__
  TRY(ef_pio_alloc(&pio, driver_handle, &pd, -1, driver_handle));
  TRY(ef_pio_link_vi(&pio, driver_handle, &vi, driver_handle));
#else
  /* PIO is only available on x86_64 systems */
  TEST(0);
#endif

  /* Steer the configured local UDP flow to this VI. */
  ef_filter_spec_init(&filter_spec, EF_FILTER_FLAG_NONE);
  TRY(ef_filter_spec_set_ip4_local(&filter_spec, IPPROTO_UDP,
                                   sa_local.sin_addr.s_addr,
                                   sa_local.sin_port));
  TRY(ef_vi_filter_add(&vi, driver_handle, &filter_spec, NULL));

  /* Register the packet-buffer memory with the adapter and record each
   * buffer's DMA address. */
  {
    int bytes = (N_RX_BUFS + 1) * RX_BUF_SIZE;
    void* p;
    TEST(posix_memalign(&p, 4096, bytes) == 0);
    TRY(ef_memreg_alloc(&memreg, driver_handle, &pd, driver_handle, p, bytes));
    for( i = 0; i <= N_RX_BUFS; ++i ) {
      pkt_bufs[i] = (void*) ((char*) p + i * RX_BUF_SIZE);
      pkt_bufs[i]->dma_buf_addr = ef_memreg_dma_addr(&memreg, i * RX_BUF_SIZE);
    }
  }

  for( i = 0; i <= N_RX_BUFS; ++i ) {
    pb = pkt_bufs[i];
    pb->id = i;
    pb->dma_buf_addr += MEMBER_OFFSET(struct pkt_buf, dma_buf);
  }

  init_udp_pkt(pkt_bufs[N_RX_BUFS]->dma_buf, cfg_payload_len);
  tx_frame_len = cfg_payload_len + header_size();
}
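
Example #5 allocates a PIO region and links it to the VI but does not show the transmit path. A hedged sketch is below: the template frame built by init_udp_pkt() is copied into the adapter's PIO buffer with ef_pio_memcpy(), and transmits are then triggered from that buffer with ef_vi_transmit_pio(). The helper name, the PIO offset of 0 and the dma_id of 0 are illustrative assumptions.

/* Sketch only: send the prepared frame from the PIO buffer. */
static void send_via_pio(void)
{
  /* Copy the template frame into the PIO region (needed once, or whenever
   * the frame contents change). */
  TRY(ef_pio_memcpy(&vi, pkt_bufs[N_RX_BUFS]->dma_buf, 0, tx_frame_len));
  /* Transmit directly from the PIO region; the dma_id (0 here) is returned
   * in the TX completion event. */
  TRY(ef_vi_transmit_pio(&vi, 0, tx_frame_len, 0));
}
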
Example #6
/* Allocate and initialize a VI. */
static int init(const char* intf, int vi_i)
{
  struct vi* vi = &vis[vi_i];
  int i;
  TRY(ef_driver_open(&vi->dh));
  TRY(ef_pd_alloc_by_name(&vi->pd, vi->dh, intf, EF_PD_DEFAULT));
  TRY(ef_vi_alloc_from_pd(&vi->vi, vi->dh, &vi->pd, vi->dh, -1, -1, -1, NULL,
                          -1, EF_VI_FLAGS_DEFAULT));

  /* Memory for pkt buffers has already been allocated.  Map it into
   * the VI. */
  TRY(ef_memreg_alloc(&vi->memreg, vi->dh, &vi->pd, vi->dh,
                      pbs.mem, pbs.mem_size));
  for( i = 0; i < pbs.num; ++i ) {
    struct pkt_buf* pkt_buf = pkt_buf_from_id(i);
    pkt_buf->rx_ef_addr[vi_i] =
      ef_memreg_dma_addr(&vi->memreg, i * PKT_BUF_SIZE) + RX_DMA_OFF;
    pkt_buf->tx_ef_addr[vi_i] =
      ef_memreg_dma_addr(&vi->memreg, i * PKT_BUF_SIZE) + RX_DMA_OFF +
      ef_vi_receive_prefix_len(&vi->vi);
    pkt_buf->rx_ptr[vi_i] = (char*) pkt_buf + RX_DMA_OFF +
      ef_vi_receive_prefix_len(&vi->vi);
  }

  /* Our pkt buffer allocation function makes assumptions on queue sizes */
  assert(ef_vi_receive_capacity(&vi->vi) == 511);
  assert(ef_vi_transmit_capacity(&vi->vi) == 511);

  while( ef_vi_receive_space(&vi->vi) > REFILL_BATCH_SIZE )
    vi_refill_rx_ring(vi_i);

  /* Accept all unicast and multicast traffic on this VI. */
  ef_filter_spec fs;
  ef_filter_spec_init(&fs, EF_FILTER_FLAG_NONE);
  TRY(ef_filter_spec_set_unicast_all(&fs));
  TRY(ef_vi_filter_add(&vi->vi, vi->dh, &fs, NULL));
  ef_filter_spec_init(&fs, EF_FILTER_FLAG_NONE);
  TRY(ef_filter_spec_set_multicast_all(&fs));
  TRY(ef_vi_filter_add(&vi->vi, vi->dh, &fs, NULL));
  return 0;
}
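
Example #6 posts RX buffers and installs filters but leaves event handling to the caller. A minimal polling sketch is below; handle_rx() and handle_tx_complete() are hypothetical helpers, and error/discard events are omitted for brevity.

/* Sketch only: poll one VI's event queue and dispatch completions. */
static void poll_vi(int vi_i)
{
  struct vi* vi = &vis[vi_i];
  ef_event evs[32];
  ef_request_id ids[EF_VI_TRANSMIT_BATCH];
  int n_ev, n_tx, i, j;

  n_ev = ef_eventq_poll(&vi->vi, evs, sizeof(evs) / sizeof(evs[0]));
  for( i = 0; i < n_ev; ++i )
    switch( EF_EVENT_TYPE(evs[i]) ) {
    case EF_EVENT_TYPE_RX:
      /* The request id is the pkt_buf id passed to ef_vi_receive_init(). */
      handle_rx(vi_i, EF_EVENT_RX_RQ_ID(evs[i]), EF_EVENT_RX_BYTES(evs[i]));
      break;
    case EF_EVENT_TYPE_TX:
      /* A single TX event may complete several sends. */
      n_tx = ef_vi_transmit_unbundle(&vi->vi, &evs[i], ids);
      for( j = 0; j < n_tx; ++j )
        handle_tx_complete(vi_i, ids[j]);
      break;
    default:
      break;
    }
  vi_refill_rx_ring(vi_i);
}
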
Example #7
File: efvi_sfw.c  Project: ido/openonload
void net_if_map_vi_pool(struct net_if* net_if, struct vi* vi)
{
  struct pkt_buf* pkt_buf;
  ef_memreg memreg;
  int i;

  /* If this fails it means you've tried to map buffers into a protection
   * domain that has already mapped those buffers.
   */
  TEST(vi->net_if != net_if);

  TRY(ef_memreg_alloc(&memreg, net_if->dh, &net_if->pd, net_if->dh,
                      vi->pkt_bufs, vi->pkt_bufs_n * PKT_BUF_SIZE));
  for( i = 0; i < vi->pkt_bufs_n; ++i ) {
    pkt_buf = pkt_buf_from_id(vi, i);
    pkt_buf->addr[net_if->id] = ef_memreg_dma_addr(&memreg, i * PKT_BUF_SIZE);
  }
}
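
The per-interface addr[] array exists so that one packet buffer can be handed to VIs on different interfaces once its memory has been registered with each protection domain. A hedged forwarding sketch follows (field names mirror the efvi_sfw examples; frame_off and frame_len are illustrative).

/* Sketch only: transmit a received buffer out of another interface's VI.
 * Assumes the buffer's memory has already been mapped into tx_vi's
 * protection domain via net_if_map_vi_pool(). */
static void forward_packet(struct vi* tx_vi, struct pkt_buf* pkt_buf,
                           int frame_off, int frame_len)
{
  ef_addr dma_addr = pkt_buf->addr[tx_vi->net_if->id] + frame_off;
  TRY(ef_vi_transmit(&tx_vi->vi, dma_addr, frame_len, pkt_buf->id));
}
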