Example #1
/* Allocate and initialize a VI. */
static int init_vi(struct vi_state* vi_state)
{
  int i;
  TRY(ef_vi_alloc_from_set(&vi_state->vi, dh, &vi_set, dh, -1, -1, -1, 0, NULL,
                          -1, EF_VI_FLAGS_DEFAULT));

  /* The VI has just an RXQ with default capacity of 512 */
  vi_state->num = 512;
  vi_state->mem_size = vi_state->num * PKT_BUF_SIZE;
  vi_state->mem_size = ROUND_UP(vi_state->mem_size, huge_page_size);
  /* Allocate huge-page-aligned memory to give best chance of allocating
   * transparent huge-pages.
   */
  TEST(posix_memalign(&vi_state->mem, huge_page_size, vi_state->mem_size) == 0);
  TRY(ef_memreg_alloc(&vi_state->memreg, dh, &pd, dh, vi_state->mem,
                      vi_state->mem_size));

  for( i = 0; i < vi_state->num; ++i ) {
    struct pkt_buf* pkt_buf = pkt_buf_from_id(vi_state, i);
    pkt_buf->rx_ef_addr =
      ef_memreg_dma_addr(&vi_state->memreg, i * PKT_BUF_SIZE) + RX_DMA_OFF;
    pkt_buf->rx_ptr = (char*) pkt_buf + RX_DMA_OFF +
      ef_vi_receive_prefix_len(&vi_state->vi);
    pkt_buf_free(vi_state, pkt_buf);
  }

  /* Our pkt buffer allocation function makes assumptions on queue sizes */
  assert(ef_vi_receive_capacity(&vi_state->vi) == 511);

  while( ef_vi_receive_space(&vi_state->vi) > REFILL_BATCH_SIZE )
    vi_refill_rx_ring(vi_state);

  return 0;
}
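The helpers pkt_buf_from_id() and pkt_buf_free() used above are not part of this listing. A minimal sketch of what they could look like, assuming the buffers sit back to back in vi_state->mem at PKT_BUF_SIZE intervals and the free pool is a singly linked list threaded through pkt_buf->next (anything beyond the names visible above is an assumption):

/* Hypothetical sketch, not from the original program: map a buffer id to
 * its address within the contiguous region allocated in init_vi(). */
static struct pkt_buf* pkt_buf_from_id(struct vi_state* vi_state, int id)
{
  assert((unsigned) id < (unsigned) vi_state->num);
  return (struct pkt_buf*) ((char*) vi_state->mem + (size_t) id * PKT_BUF_SIZE);
}

/* Hypothetical sketch: push a buffer onto the head of the free pool. */
static void pkt_buf_free(struct vi_state* vi_state, struct pkt_buf* pkt_buf)
{
  pkt_buf->next = vi_state->free_pool;
  vi_state->free_pool = pkt_buf;
  ++vi_state->free_pool_n;
}

The id field consumed later by vi_refill_rx_ring() is assumed to be set to the buffer's index during setup.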
Example #2
/* Allocate and initialize a VI. */
static int init(const char* intf, int vi_i)
{
  struct vi* vi = &vis[vi_i];
  int i;
  unsigned vi_flags = EF_VI_FLAGS_DEFAULT;

  TRY(ef_driver_open(&vi->dh));
  /* check that RX merge is supported */
  if( cfg_rx_merge ) {
    unsigned long value;
    int ifindex = if_nametoindex(intf);
    TEST(ifindex > 0);
    int rc = ef_vi_capabilities_get(vi->dh, ifindex, EF_VI_CAP_RX_MERGE, &value);
    if( rc < 0 || ! value ) {
      fprintf(stderr, "WARNING: RX merge not supported on %s. Use '-c' "
              "option instead.\n", intf);
      exit(EXIT_FAILURE);
    }
    else {
      vi_flags |= EF_VI_RX_EVENT_MERGE;
    }
  }
  TRY(ef_pd_alloc_by_name(&vi->pd, vi->dh, intf, EF_PD_DEFAULT));
  TRY(ef_vi_alloc_from_pd(&vi->vi, vi->dh, &vi->pd, vi->dh, -1, RX_RING_SIZE,
                          TX_RING_SIZE, NULL, -1, vi_flags));


  /* Memory for pkt buffers has already been allocated.  Map it into
   * the VI. */
  TRY(ef_memreg_alloc(&vi->memreg, vi->dh, &vi->pd, vi->dh,
                      pbs.mem, pbs.mem_size));
  for( i = 0; i < pbs.num; ++i ) {
    struct pkt_buf* pkt_buf = pkt_buf_from_id(i);
    pkt_buf->rx_ef_addr[vi_i] =
      ef_memreg_dma_addr(&vi->memreg, i * PKT_BUF_SIZE) + RX_DMA_OFF
      + addr_offset_from_id(i);
    pkt_buf->tx_ef_addr[vi_i] =
      ef_memreg_dma_addr(&vi->memreg, i * PKT_BUF_SIZE) + RX_DMA_OFF +
      ef_vi_receive_prefix_len(&vi->vi) + addr_offset_from_id(i);
  }

  /* Our pkt buffer allocation function makes assumptions on queue sizes */
  assert(ef_vi_receive_capacity(&vi->vi) == RX_RING_SIZE - 1);
  assert(ef_vi_transmit_capacity(&vi->vi) == TX_RING_SIZE - 1);

  if( cfg_unidirectional && vi_i == 1 )
    return 0; /* only need filter and RX fill for ingress VI */

  while( ef_vi_receive_space(&vi->vi) > REFILL_BATCH_SIZE )
    vi_refill_rx_ring(vi_i);

  ef_filter_spec fs;
  ef_filter_spec_init(&fs, EF_FILTER_FLAG_NONE);
  TRY(ef_filter_spec_set_unicast_all(&fs));
  TRY(ef_vi_filter_add(&vi->vi, vi->dh, &fs, NULL));
  ef_filter_spec_init(&fs, EF_FILTER_FLAG_NONE);
  TRY(ef_filter_spec_set_multicast_all(&fs));
  TRY(ef_vi_filter_add(&vi->vi, vi->dh, &fs, NULL));
  return 0;
}
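The per-VI rx_ef_addr[]/tx_ef_addr[] arrays registered above suggest a forwarding application: a buffer received on one VI can be posted for transmit on the other without copying. A hedged sketch of that path; handle_rx(), the length handling and pkt_buf_free() are assumptions, not code from this listing:

/* Hypothetical sketch: forward a frame received on VI rx_vi_i out of the
 * other VI, reusing the same packet buffer.  'len' is the frame length
 * excluding the RX prefix (tx_ef_addr already points past the prefix). */
static void handle_rx(int rx_vi_i, int pkt_buf_i, int len)
{
  int tx_vi_i = rx_vi_i ^ 1;                 /* the other of the two VIs */
  struct vi* tx_vi = &vis[tx_vi_i];
  struct pkt_buf* pkt_buf = pkt_buf_from_id(pkt_buf_i);

  if( ef_vi_transmit(&tx_vi->vi, pkt_buf->tx_ef_addr[tx_vi_i], len,
                     pkt_buf_i) != 0 )
    /* TX ring full: drop the frame and recycle the buffer (assumed helper). */
    pkt_buf_free(pkt_buf);
}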
Example #3
/* Try to refill the RXQ on the given VI with at most
 * REFILL_BATCH_SIZE packets if it has enough space and we have
 * enough free buffers. */
static void vi_refill_rx_ring(struct vi_state* vi_state)
{
  ef_vi* vi = &vi_state->vi;
#define REFILL_BATCH_SIZE  16
  struct pkt_buf* pkt_buf;
  int i;

  if( ef_vi_receive_space(vi) < REFILL_BATCH_SIZE ||
      vi_state->free_pool_n < REFILL_BATCH_SIZE )
    return;

  for( i = 0; i < REFILL_BATCH_SIZE; ++i ) {
    pkt_buf = vi_state->free_pool;
    vi_state->free_pool = vi_state->free_pool->next;
    --vi_state->free_pool_n;
    ef_vi_receive_init(vi, pkt_buf->rx_ef_addr, pkt_buf->id);
  }
  ef_vi_receive_push(vi);
}
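vi_refill_rx_ring() is normally driven from the event loop, after RX events have returned buffers to the application. A minimal sketch of such a loop, assuming a hypothetical handle_rx_pkt() that consumes the packet and eventually frees its buffer back to the pool:

/* Hypothetical sketch of the event loop that keeps the RXQ topped up. */
static void event_loop(struct vi_state* vi_state)
{
  ef_event evs[16];
  int i, n_ev;

  while( 1 ) {
    n_ev = ef_eventq_poll(&vi_state->vi, evs, sizeof(evs) / sizeof(evs[0]));
    for( i = 0; i < n_ev; ++i )
      if( EF_EVENT_TYPE(evs[i]) == EF_EVENT_TYPE_RX )
        /* handle_rx_pkt() is assumed to call pkt_buf_free() when done. */
        handle_rx_pkt(vi_state, EF_EVENT_RX_RQ_ID(evs[i]),
                      EF_EVENT_RX_BYTES(evs[i]));
    vi_refill_rx_ring(vi_state);
  }
}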
Example #4
/* Try to refill the RXQ on the given VI with at most
 * REFILL_BATCH_SIZE packets if it has enough space and we have
 * enough free buffers. */
static void vi_refill_rx_ring(int vi_i)
{
  ef_vi* vi = &vis[vi_i].vi;
#define REFILL_BATCH_SIZE  64
  struct pkt_buf* pkt_buf;
  int i;

  if( ef_vi_receive_space(vi) < REFILL_BATCH_SIZE ||
      pbs.free_pool_n < REFILL_BATCH_SIZE )
    return;

  for( i = 0; i < REFILL_BATCH_SIZE; ++i ) {
    pkt_buf = pbs.free_pool;
    pbs.free_pool = pbs.free_pool->next;
    --pbs.free_pool_n;
    ef_vi_receive_init(vi, pkt_buf->rx_ef_addr[vi_i], pkt_buf->id);
  }
  ef_vi_receive_push(vi);
}
Example #5
void vi_refill_rx_ring(struct vi* vi)
{
#define REFILL_BATCH_SIZE  16
  struct pkt_buf* pkt_buf;
  int i;

  if( ef_vi_receive_space(&vi->vi) >= REFILL_BATCH_SIZE &&
      vi->free_pkt_bufs_n >= REFILL_BATCH_SIZE ) {
    for( i = 0; i < REFILL_BATCH_SIZE; ++i ) {
      pkt_buf = vi->free_pkt_bufs;
      vi->free_pkt_bufs = vi->free_pkt_bufs->next;
      --vi->free_pkt_bufs_n;
      assert(pkt_buf->n_refs == 0);
      pkt_buf->n_refs = 1;
      ef_vi_receive_init(&vi->vi, pkt_buf->addr[vi->net_if->id] + RX_DMA_OFF,
                         pkt_buf->id);
    }
    ef_vi_receive_push(&vi->vi);
  }
}
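This variant reference-counts buffers (n_refs), which is useful when one received frame may be handed to several consumers. A hedged sketch of the matching release helper; the name pkt_buf_release() and the choice of which VI's free list to use are assumptions:

/* Hypothetical sketch: drop one reference and recycle the buffer onto the
 * given VI's free list once the last reference is gone. */
static void pkt_buf_release(struct vi* vi, struct pkt_buf* pkt_buf)
{
  assert(pkt_buf->n_refs > 0);
  if( --pkt_buf->n_refs == 0 ) {
    pkt_buf->next = vi->free_pkt_bufs;
    vi->free_pkt_bufs = pkt_buf;
    ++vi->free_pkt_bufs_n;
  }
}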
Example #6
/* Allocate and initialize a VI. */
static int init(const char* intf, int vi_i)
{
  struct vi* vi = &vis[vi_i];
  int i;
  TRY(ef_driver_open(&vi->dh));
  TRY(ef_pd_alloc_by_name(&vi->pd, vi->dh, intf, EF_PD_DEFAULT));
  TRY(ef_vi_alloc_from_pd(&vi->vi, vi->dh, &vi->pd, vi->dh, -1, -1, -1, NULL,
                          -1, EF_VI_FLAGS_DEFAULT));

  /* Memory for pkt buffers has already been allocated.  Map it into
   * the VI. */
  TRY(ef_memreg_alloc(&vi->memreg, vi->dh, &vi->pd, vi->dh,
                      pbs.mem, pbs.mem_size));
  for( i = 0; i < pbs.num; ++i ) {
    struct pkt_buf* pkt_buf = pkt_buf_from_id(i);
    pkt_buf->rx_ef_addr[vi_i] =
      ef_memreg_dma_addr(&vi->memreg, i * PKT_BUF_SIZE) + RX_DMA_OFF;
    pkt_buf->tx_ef_addr[vi_i] =
      ef_memreg_dma_addr(&vi->memreg, i * PKT_BUF_SIZE) + RX_DMA_OFF +
      ef_vi_receive_prefix_len(&vi->vi);
    pkt_buf->rx_ptr[vi_i] = (char*) pkt_buf + RX_DMA_OFF +
      ef_vi_receive_prefix_len(&vi->vi);
  }

  /* Our pkt buffer allocation function makes assumptions on queue sizes */
  assert(ef_vi_receive_capacity(&vi->vi) == 511);
  assert(ef_vi_transmit_capacity(&vi->vi) == 511);

  while( ef_vi_receive_space(&vi->vi) > REFILL_BATCH_SIZE )
    vi_refill_rx_ring(vi_i);

  ef_filter_spec fs;
  ef_filter_spec_init(&fs, EF_FILTER_FLAG_NONE);
  TRY(ef_filter_spec_set_unicast_all(&fs));
  TRY(ef_vi_filter_add(&vi->vi, vi->dh, &fs, NULL));
  ef_filter_spec_init(&fs, EF_FILTER_FLAG_NONE);
  TRY(ef_filter_spec_set_multicast_all(&fs));
  TRY(ef_vi_filter_add(&vi->vi, vi->dh, &fs, NULL));
  return 0;
}
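Because this VI also has a TXQ, transmit completions must be drained from the event queue before buffers can be reused. A sketch under the assumption that the dma_id passed to ef_vi_transmit() was the buffer id and that pkt_buf_free() is the program's (not shown) free-pool helper:

/* Hypothetical sketch: a single TX event completes a batch of sends, so
 * the request ids must be unbundled before recycling the buffers. */
static void handle_tx_completion(struct vi* vi, const ef_event* ev)
{
  ef_request_id ids[EF_VI_TRANSMIT_BATCH];
  int i, n = ef_vi_transmit_unbundle(&vi->vi, ev, ids);
  for( i = 0; i < n; ++i )
    pkt_buf_free(pkt_buf_from_id(ids[i]));   /* assumed free helper */
}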