Example #1
/* Allocate and initialize a VI. */
static int init_vi(struct vi_state* vi_state)
{
  int i;
  TRY(ef_vi_alloc_from_set(&vi_state->vi, dh, &vi_set, dh, -1, -1, -1, 0, NULL,
                          -1, EF_VI_FLAGS_DEFAULT));

  /* The VI has only an RXQ, with the default capacity of 512. */
  vi_state->num = 512;
  vi_state->mem_size = vi_state->num * PKT_BUF_SIZE;
  vi_state->mem_size = ROUND_UP(vi_state->mem_size, huge_page_size);
  /* Allocate huge-page-aligned memory to give the best chance of
   * getting transparent huge pages.
   */
  TEST(posix_memalign(&vi_state->mem, huge_page_size, vi_state->mem_size) == 0);
  TRY(ef_memreg_alloc(&vi_state->memreg, dh, &pd, dh, vi_state->mem,
                      vi_state->mem_size));

  for( i = 0; i < vi_state->num; ++i ) {
    struct pkt_buf* pkt_buf = pkt_buf_from_id(vi_state, i);
    pkt_buf->rx_ef_addr =
      ef_memreg_dma_addr(&vi_state->memreg, i * PKT_BUF_SIZE) + RX_DMA_OFF;
    pkt_buf->rx_ptr = (char*) pkt_buf + RX_DMA_OFF +
      ef_vi_receive_prefix_len(&vi_state->vi);
    pkt_buf_free(vi_state, pkt_buf);
  }

  /* Our pkt buffer allocation function makes assumptions about queue sizes. */
  assert(ef_vi_receive_capacity(&vi_state->vi) == 511);

  while( ef_vi_receive_space(&vi_state->vi) > REFILL_BATCH_SIZE )
    vi_refill_rx_ring(vi_state);

  return 0;
}
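Example #1 leaves several helpers undefined: the TRY()/TEST() error-checking macros and the packet-buffer free list behind pkt_buf_from_id() and pkt_buf_free(). The sketch below shows one plausible shape for them; the free-list fields (next, id, free_pkt_bufs, free_pkt_bufs_n) are assumptions for illustration, not part of the ef_vi API. Examples #3 and #4 use a one-argument pkt_buf_from_id() over a global buffer region instead.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Abort with context if an ef_vi call returns a negative error code. */
#define TRY(x)                                                        \
  do {                                                                \
    int __rc = (x);                                                   \
    if( __rc < 0 ) {                                                  \
      fprintf(stderr, "ERROR: TRY(%s) failed at %s:%d: rc=%d (%s)\n", \
              #x, __FILE__, __LINE__, __rc, strerror(-__rc));         \
      exit(1);                                                        \
    }                                                                 \
  } while( 0 )

/* Abort if a boolean condition does not hold. */
#define TEST(x)                                                       \
  do {                                                                \
    if( ! (x) ) {                                                     \
      fprintf(stderr, "ERROR: TEST(%s) failed at %s:%d\n",            \
              #x, __FILE__, __LINE__);                                \
      exit(1);                                                        \
    }                                                                 \
  } while( 0 )

/* Buffers sit at fixed offsets in the registered region, so an id
 * maps to an address with simple arithmetic. */
static inline struct pkt_buf* pkt_buf_from_id(struct vi_state* vi_state,
                                              int pkt_buf_i)
{
  return (struct pkt_buf*) ((char*) vi_state->mem + pkt_buf_i * PKT_BUF_SIZE);
}

/* Push a buffer onto a singly-linked free list kept in vi_state. */
static inline void pkt_buf_free(struct vi_state* vi_state,
                                struct pkt_buf* pkt_buf)
{
  pkt_buf->next = vi_state->free_pkt_bufs;
  vi_state->free_pkt_bufs = pkt_buf;
  ++vi_state->free_pkt_bufs_n;
}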
Example #2
static void loop(struct vi_state* vi_state)
{
  ef_event evs[EF_VI_EVENT_POLL_MIN_EVS];
  ef_vi* vi = &vi_state->vi;
  int i;

  pthread_mutex_lock(&ready_mutex);
  ++ready_cnt;
  pthread_cond_signal(&ready_cond);
  pthread_mutex_unlock(&ready_mutex);

  while( 1 ) {
    int n_ev = ef_eventq_poll(vi, evs, sizeof(evs) / sizeof(evs[0]));
    for( i = 0; i < n_ev; ++i )
      switch( EF_EVENT_TYPE(evs[i]) ) {
      case EF_EVENT_TYPE_RX:
        /* This code does not handle jumbos. */
        assert(EF_EVENT_RX_SOP(evs[i]) != 0);
        assert(EF_EVENT_RX_CONT(evs[i]) == 0);
        handle_rx(vi_state, EF_EVENT_RX_RQ_ID(evs[i]),
                  EF_EVENT_RX_BYTES(evs[i]) - ef_vi_receive_prefix_len(vi));
        break;
      case EF_EVENT_TYPE_RX_DISCARD:
        handle_rx_discard(vi_state, EF_EVENT_RX_DISCARD_RQ_ID(evs[i]),
                          EF_EVENT_RX_DISCARD_TYPE(evs[i]));
        break;
      default:
        LOGE("ERROR: unexpected event %d\n", (int) EF_EVENT_TYPE(evs[i]));
        break;
      }
    vi_refill_rx_ring(vi_state);
  }
}
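Both the init code and the poll loop call vi_refill_rx_ring(), which is not shown. The usual ef_vi pattern is to post descriptors in batches of REFILL_BATCH_SIZE with ef_vi_receive_init() and commit them with a single ef_vi_receive_push(), so one doorbell write covers the whole batch. A minimal sketch, reusing the assumed free-list fields from above:

/* Refill the RX ring one batch at a time: initialise a descriptor per
 * free buffer, then ring the doorbell once for the whole batch. */
static void vi_refill_rx_ring(struct vi_state* vi_state)
{
  ef_vi* vi = &vi_state->vi;
  int i;

  if( ef_vi_receive_space(vi) < REFILL_BATCH_SIZE ||
      vi_state->free_pkt_bufs_n < REFILL_BATCH_SIZE )
    return;

  for( i = 0; i < REFILL_BATCH_SIZE; ++i ) {
    struct pkt_buf* pkt_buf = vi_state->free_pkt_bufs;
    vi_state->free_pkt_bufs = pkt_buf->next;
    --vi_state->free_pkt_bufs_n;
    ef_vi_receive_init(vi, pkt_buf->rx_ef_addr, pkt_buf->id);
  }
  ef_vi_receive_push(vi);
}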
Example #3
/* Allocate and initialize a VI. */
static int init(const char* intf, int vi_i)
{
  struct vi* vi = &vis[vi_i];
  int i;
  unsigned vi_flags = EF_VI_FLAGS_DEFAULT;

  TRY(ef_driver_open(&vi->dh));
  /* Check that RX merge is supported. */
  if( cfg_rx_merge ) {
    unsigned long value;
    int ifindex = if_nametoindex(intf);
    TEST(ifindex > 0);
    int rc = ef_vi_capabilities_get(vi->dh, ifindex, EF_VI_CAP_RX_MERGE, &value);
    if( rc < 0 || ! value ) {
      fprintf(stderr, "ERROR: RX merge not supported on %s. Use '-c' "
              "option instead.\n", intf);
      exit(EXIT_FAILURE);
    }
    vi_flags |= EF_VI_RX_EVENT_MERGE;
  }
  TRY(ef_pd_alloc_by_name(&vi->pd, vi->dh, intf, EF_PD_DEFAULT));
  TRY(ef_vi_alloc_from_pd(&vi->vi, vi->dh, &vi->pd, vi->dh, -1, RX_RING_SIZE,
                          TX_RING_SIZE, NULL, -1, vi_flags));

  /* Memory for pkt buffers has already been allocated.  Map it into
   * the VI. */
  TRY(ef_memreg_alloc(&vi->memreg, vi->dh, &vi->pd, vi->dh,
                      pbs.mem, pbs.mem_size));
  for( i = 0; i < pbs.num; ++i ) {
    struct pkt_buf* pkt_buf = pkt_buf_from_id(i);
    pkt_buf->rx_ef_addr[vi_i] =
      ef_memreg_dma_addr(&vi->memreg, i * PKT_BUF_SIZE) + RX_DMA_OFF
      + addr_offset_from_id(i);
    pkt_buf->tx_ef_addr[vi_i] =
      ef_memreg_dma_addr(&vi->memreg, i * PKT_BUF_SIZE) + RX_DMA_OFF +
      ef_vi_receive_prefix_len(&vi->vi) + addr_offset_from_id(i);
  }

  /* Our pkt buffer allocation function makes assumptions about queue sizes. */
  assert(ef_vi_receive_capacity(&vi->vi) == RX_RING_SIZE - 1);
  assert(ef_vi_transmit_capacity(&vi->vi) == TX_RING_SIZE - 1);

  if( cfg_unidirectional && vi_i == 1 )
    return 0; /* only need filter and RX fill for ingress VI */

  while( ef_vi_receive_space(&vi->vi) > REFILL_BATCH_SIZE )
    vi_refill_rx_ring(vi_i);

  ef_filter_spec fs;
  ef_filter_spec_init(&fs, EF_FILTER_FLAG_NONE);
  TRY(ef_filter_spec_set_unicast_all(&fs));
  TRY(ef_vi_filter_add(&vi->vi, vi->dh, &fs, NULL));
  ef_filter_spec_init(&fs, EF_FILTER_FLAG_NONE);
  TRY(ef_filter_spec_set_multicast_all(&fs));
  TRY(ef_vi_filter_add(&vi->vi, vi->dh, &fs, NULL));
  return 0;
}
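Note that setting EF_VI_RX_EVENT_MERGE changes how receives complete: instead of one EF_EVENT_TYPE_RX per packet, the adapter raises EF_EVENT_TYPE_RX_MULTI events that each cover a batch of descriptors, and the packet length is read from the packet prefix rather than from the event. A sketch of the extra switch case a poll loop like Example #5's would need, assuming a per-VI rx_ptr field as in Example #4:

        case EF_EVENT_TYPE_RX_MULTI: {
          /* One merged event completes several RX descriptors. */
          ef_request_id ids[EF_VI_RECEIVE_BATCH];
          int n_rx = ef_vi_receive_unbundle(vi, &evs[j], ids);
          for( k = 0; k < n_rx; ++k ) {
            uint16_t len;
            struct pkt_buf* pkt_buf = pkt_buf_from_id(ids[k]);
            /* The length is not in the merged event; read it from the
             * packet prefix. */
            TRY(ef_vi_receive_get_bytes(vi, pkt_buf->rx_ptr[i], &len));
            handle_rx(i, ids[k], len);
          }
          break;
        }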
Example #4
/* Allocate and initialize a VI. */
static int init(const char* intf, int vi_i)
{
  struct vi* vi = &vis[vi_i];
  int i;
  TRY(ef_driver_open(&vi->dh));
  TRY(ef_pd_alloc_by_name(&vi->pd, vi->dh, intf, EF_PD_DEFAULT));
  TRY(ef_vi_alloc_from_pd(&vi->vi, vi->dh, &vi->pd, vi->dh, -1, -1, -1, NULL,
                          -1, EF_VI_FLAGS_DEFAULT));

  /* Memory for pkt buffers has already been allocated.  Map it into
   * the VI. */
  TRY(ef_memreg_alloc(&vi->memreg, vi->dh, &vi->pd, vi->dh,
                      pbs.mem, pbs.mem_size));
  for( i = 0; i < pbs.num; ++i ) {
    struct pkt_buf* pkt_buf = pkt_buf_from_id(i);
    pkt_buf->rx_ef_addr[vi_i] =
      ef_memreg_dma_addr(&vi->memreg, i * PKT_BUF_SIZE) + RX_DMA_OFF;
    pkt_buf->tx_ef_addr[vi_i] =
      ef_memreg_dma_addr(&vi->memreg, i * PKT_BUF_SIZE) + RX_DMA_OFF +
      ef_vi_receive_prefix_len(&vi->vi);
    pkt_buf->rx_ptr[vi_i] = (char*) pkt_buf + RX_DMA_OFF +
      ef_vi_receive_prefix_len(&vi->vi);
  }

  /* Our pkt buffer allocation function makes assumptions about queue sizes. */
  assert(ef_vi_receive_capacity(&vi->vi) == 511);
  assert(ef_vi_transmit_capacity(&vi->vi) == 511);

  while( ef_vi_receive_space(&vi->vi) > REFILL_BATCH_SIZE )
    vi_refill_rx_ring(vi_i);

  ef_filter_spec fs;
  ef_filter_spec_init(&fs, EF_FILTER_FLAG_NONE);
  TRY(ef_filter_spec_set_unicast_all(&fs));
  TRY(ef_vi_filter_add(&vi->vi, vi->dh, &fs, NULL));
  ef_filter_spec_init(&fs, EF_FILTER_FLAG_NONE);
  TRY(ef_filter_spec_set_multicast_all(&fs));
  TRY(ef_vi_filter_add(&vi->vi, vi->dh, &fs, NULL));
  return 0;
}
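The discard path is also left to the reader. A discarded receive still consumes a packet buffer, so the handler should log the reason and recycle the buffer. A minimal sketch, with pkt_buf_release() standing in for whatever returns a buffer to the free pool:

/* Log why the packet was dropped and return its buffer to the pool. */
static void handle_rx_discard(int pkt_buf_i, int discard_type)
{
  LOGE("ERROR: discard type=%d\n", discard_type);
  pkt_buf_release(pkt_buf_from_id(pkt_buf_i));
}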
Example #5
/* The main loop.  Poll each VI, handle the events raised, and then
 * try to refill its RX ring. */
static void main_loop(void)
{
  int i, j, k;

  while( 1 ) {
    for( i = 0; i < 2; ++i ) {
      ef_vi* vi = &vis[i].vi;
      ef_event evs[EF_VI_EVENT_POLL_MIN_EVS];
      int n_ev = ef_eventq_poll(vi, evs, sizeof(evs) / sizeof(evs[0]));
      for( j = 0; j < n_ev; ++j ) {
        switch( EF_EVENT_TYPE(evs[j]) ) {
        case EF_EVENT_TYPE_RX:
          /* This code does not handle jumbos. */
          assert(EF_EVENT_RX_SOP(evs[j]) != 0);
          assert(EF_EVENT_RX_CONT(evs[j]) == 0);
          handle_rx(i, EF_EVENT_RX_RQ_ID(evs[j]),
                    EF_EVENT_RX_BYTES(evs[j]) -
                    ef_vi_receive_prefix_len(vi));
          break;
        case EF_EVENT_TYPE_TX: {
          ef_request_id ids[EF_VI_TRANSMIT_BATCH];
          int ntx = ef_vi_transmit_unbundle(vi, &evs[j], ids);
          for( k = 0; k < ntx; ++k )
            complete_tx(i, ids[k]);
          break;
        }
        case EF_EVENT_TYPE_RX_DISCARD:
          handle_rx_discard(EF_EVENT_RX_DISCARD_RQ_ID(evs[j]),
                            EF_EVENT_RX_DISCARD_TYPE(evs[j]));
          break;
        default:
          LOGE("ERROR: unexpected event %d\n", (int) EF_EVENT_TYPE(evs[j]));
          break;
        }
      }
      vi_refill_rx_ring(i);
    }
  }
}
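The TX completion path mirrors the discard path: once ef_vi_transmit_unbundle() reports a send as done, the adapter has finished with the buffer and it can rejoin the free pool, ready to refill an RX ring. A sketch using the same assumed pkt_buf_release() helper:

/* The adapter is done with this buffer; recycle it. */
static void complete_tx(int vi_i, int pkt_buf_i)
{
  (void) vi_i;  /* a single free pool is shared by both VIs */
  pkt_buf_release(pkt_buf_from_id(pkt_buf_i));
}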