Example #1
/* Allocate and initialize a VI. */
static int init(const char* intf, int vi_i)
{
  struct vi* vi = &vis[vi_i];
  int i;
  unsigned vi_flags = EF_VI_FLAGS_DEFAULT;

  TRY(ef_driver_open(&vi->dh));
  /* Check that RX merge is supported. */
  if( cfg_rx_merge ) {
    unsigned long value;
    int ifindex = if_nametoindex(intf);
    TEST(ifindex > 0);
    int rc = ef_vi_capabilities_get(vi->dh, ifindex, EF_VI_CAP_RX_MERGE, &value);
    if( rc < 0 || ! value ) {
      fprintf(stderr, "ERROR: RX merge not supported on %s. Use the '-c' "
              "option instead.\n", intf);
      exit(EXIT_FAILURE);
    }
    else {
      vi_flags |= EF_VI_RX_EVENT_MERGE;
    }
  }
  TRY(ef_pd_alloc_by_name(&vi->pd, vi->dh, intf, EF_PD_DEFAULT));
  TRY(ef_vi_alloc_from_pd(&vi->vi, vi->dh, &vi->pd, vi->dh, -1, RX_RING_SIZE,
                          TX_RING_SIZE, NULL, -1, vi_flags));

  /* Memory for pkt buffers has already been allocated.  Map it into
   * the VI. */
  TRY(ef_memreg_alloc(&vi->memreg, vi->dh, &vi->pd, vi->dh,
                      pbs.mem, pbs.mem_size));
  for( i = 0; i < pbs.num; ++i ) {
    struct pkt_buf* pkt_buf = pkt_buf_from_id(i);
    pkt_buf->rx_ef_addr[vi_i] =
      ef_memreg_dma_addr(&vi->memreg, i * PKT_BUF_SIZE) + RX_DMA_OFF
      + addr_offset_from_id(i);
    pkt_buf->tx_ef_addr[vi_i] =
      ef_memreg_dma_addr(&vi->memreg, i * PKT_BUF_SIZE) + RX_DMA_OFF +
      ef_vi_receive_prefix_len(&vi->vi) + addr_offset_from_id(i);
  }

  /* Our pkt buffer allocation function makes assumptions about queue sizes */
  assert(ef_vi_receive_capacity(&vi->vi) == RX_RING_SIZE - 1);
  assert(ef_vi_transmit_capacity(&vi->vi) == TX_RING_SIZE - 1);

  if( cfg_unidirectional && vi_i == 1 )
    return 0; /* filters and RX ring fill are only needed on the ingress VI */

  while( ef_vi_receive_space(&vi->vi) > REFILL_BATCH_SIZE )
    vi_refill_rx_ring(vi_i);

  ef_filter_spec fs;
  ef_filter_spec_init(&fs, EF_FILTER_FLAG_NONE);
  TRY(ef_filter_spec_set_unicast_all(&fs));
  TRY(ef_vi_filter_add(&vi->vi, vi->dh, &fs, NULL));
  ef_filter_spec_init(&fs, EF_FILTER_FLAG_NONE);
  TRY(ef_filter_spec_set_multicast_all(&fs));
  TRY(ef_vi_filter_add(&vi->vi, vi->dh, &fs, NULL));
  return 0;
}
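Examples #1, #2, and #9 call vi_refill_rx_ring() without showing it. A minimal sketch of such a refill loop, assuming a hypothetical pkt_buf_alloc() free-list helper; ef_vi_receive_init() and ef_vi_receive_push() are the real ef_vi calls for posting RX descriptors:

/* Sketch only: queue up a batch of free buffers on the RX ring, then push
 * them to the NIC.  pkt_buf_alloc() (pop a buffer off the free list) is
 * assumed, not shown in these examples. */
static void vi_refill_rx_ring(int vi_i)
{
  struct vi* vi = &vis[vi_i];
  int i;
  for( i = 0; i < REFILL_BATCH_SIZE; ++i ) {
    struct pkt_buf* pkt_buf = pkt_buf_alloc();  /* assumed helper */
    ef_vi_receive_init(&vi->vi, pkt_buf->rx_ef_addr[vi_i], pkt_buf->id);
  }
  ef_vi_receive_push(&vi->vi);  /* make new descriptors visible to the NIC */
}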
Example #2
/* Allocate and initialize a VI. */
static int init_vi(struct vi_state* vi_state)
{
  int i;
  TRY(ef_vi_alloc_from_set(&vi_state->vi, dh, &vi_set, dh, -1, -1, -1, 0, NULL,
                           -1, EF_VI_FLAGS_DEFAULT));

  /* The VI has just an RXQ (TXQ capacity is 0), with the default capacity
   * of 512. */
  vi_state->num = 512;
  vi_state->mem_size = vi_state->num * PKT_BUF_SIZE;
  vi_state->mem_size = ROUND_UP(vi_state->mem_size, huge_page_size);
  /* Allocate huge-page-aligned memory to give best chance of allocating
   * transparent huge-pages.
   */
  TEST(posix_memalign(&vi_state->mem, huge_page_size, vi_state->mem_size) == 0);
  TRY(ef_memreg_alloc(&vi_state->memreg, dh, &pd, dh, vi_state->mem,
                      vi_state->mem_size));

  for( i = 0; i < vi_state->num; ++i ) {
    struct pkt_buf* pkt_buf = pkt_buf_from_id(vi_state, i);
    pkt_buf->rx_ef_addr =
      ef_memreg_dma_addr(&vi_state->memreg, i * PKT_BUF_SIZE) + RX_DMA_OFF;
    pkt_buf->rx_ptr = (char*) pkt_buf + RX_DMA_OFF +
      ef_vi_receive_prefix_len(&vi_state->vi);
    pkt_buf_free(vi_state, pkt_buf);
  }

  /* Our pkt buffer allocation function makes assumptions about queue sizes */
  assert(ef_vi_receive_capacity(&vi_state->vi) == 511);

  while( ef_vi_receive_space(&vi_state->vi) > REFILL_BATCH_SIZE )
    vi_refill_rx_ring(vi_state);

  return 0;
}
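The pkt_buf_from_id() and pkt_buf_free() helpers used above are not shown. Since the buffers sit back-to-back in vi_state->mem at PKT_BUF_SIZE intervals, a plausible sketch looks like this (the free_pkt_bufs and next free-list fields are assumptions):

/* Sketch only: id-to-address arithmetic over the contiguous buffer region,
 * plus a singly-linked free list.  Field names are assumed. */
static inline struct pkt_buf* pkt_buf_from_id(struct vi_state* vi_state, int id)
{
  assert((unsigned) id < (unsigned) vi_state->num);
  return (void*) ((char*) vi_state->mem + id * PKT_BUF_SIZE);
}

static inline void pkt_buf_free(struct vi_state* vi_state,
                                struct pkt_buf* pkt_buf)
{
  pkt_buf->next = vi_state->free_pkt_bufs;  /* push onto free list */
  vi_state->free_pkt_bufs = pkt_buf;
}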
Example #3
/* Allocate and initialize the packet buffers. */
static int init_pkts_memory(void)
{
  int i;

  /* The number of buffers is the worst case needed to fill both the RX and
   * TX queues.  Bi-directional forwarding needs buffers for both VIs. */
  pbs.num = RX_RING_SIZE + TX_RING_SIZE;
  if( ! cfg_unidirectional )
    pbs.num = 2 * pbs.num;
  pbs.mem_size = pbs.num * PKT_BUF_SIZE;
  pbs.mem_size = ROUND_UP(pbs.mem_size, huge_page_size);

  /* Allocate memory for DMA transfers. Try mmap() with MAP_HUGETLB to get huge
   * pages. If that fails, fall back to posix_memalign() and hope that we do
   * get them. */
  pbs.mem = mmap(NULL, pbs.mem_size, PROT_READ | PROT_WRITE,
                 MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB, -1, 0);
  if( pbs.mem == MAP_FAILED ) {
    fprintf(stderr, "mmap() failed. Are huge pages configured?\n");

    /* Allocate huge-page-aligned memory to give best chance of allocating
     * transparent huge-pages.
     */
    TEST(posix_memalign(&pbs.mem, huge_page_size, pbs.mem_size) == 0);
  }

  for( i = 0; i < pbs.num; ++i ) {
    struct pkt_buf* pkt_buf = pkt_buf_from_id(i);
    pkt_buf->id = i;
    pkt_buf_free(pkt_buf);
  }
  return 0;
}
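ROUND_UP() and huge_page_size are used here but defined elsewhere. A plausible sketch; the 2 MiB value is the common x86-64 huge-page size and is an assumption here, a real program might query the system instead:

/* Sketch only: round p up to the next multiple of align. */
#define ROUND_UP(p, align)   (((p) + (align) - 1) / (align) * (align))

static const size_t huge_page_size = 2 * 1024 * 1024;  /* assumed: 2 MiB */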
Example #4
/* Handle an RX event on a VI. */
static void handle_rx(struct vi_state* vi_state, int pkt_buf_i, int len)
{
  struct pkt_buf* pkt_buf = pkt_buf_from_id(vi_state, pkt_buf_i);
  ++vi_state->n_pkts;
  if( cfg_hexdump )
    hexdump(pkt_buf->rx_ptr, len);
  pkt_buf_free(vi_state, pkt_buf);
}
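The hexdump() helper invoked when cfg_hexdump is set is not shown; any simple byte dump will do. A minimal sketch (the 16-bytes-per-row layout is an arbitrary choice, and <stdio.h> is assumed to be included):

/* Sketch only: dump len bytes as hex pairs, 16 per row. */
static void hexdump(const void* pv, int len)
{
  const unsigned char* p = pv;
  int i;
  for( i = 0; i < len; ++i )
    printf("%02x%c", p[i], ((i & 15) == 15 || i == len - 1) ? '\n' : ' ');
}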
Example #5
static void handle_batched_rx(int rx_vi_i, int pkt_buf_i)
{
  void* dma_ptr = (char*) pkt_buf_from_id(pkt_buf_i) + RX_DMA_OFF
    + addr_offset_from_id(pkt_buf_i);
  uint16_t len;
  TRY( ef_vi_receive_get_bytes(&vis[rx_vi_i].vi, dma_ptr, &len) );

  handle_rx(rx_vi_i, pkt_buf_i, len);
}
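For context: handle_batched_rx() is driven from an event loop dispatching EF_EVENT_TYPE_RX_MULTI events, which carry buffer ids but no packet length; that is why ef_vi_receive_get_bytes() reads the length from the packet prefix. A sketch of such a loop, assuming an EV_POLL_BATCH_SIZE constant:

/* Sketch only: poll the event queue and unbundle merged RX events into
 * individual buffer ids. */
ef_event evs[EV_POLL_BATCH_SIZE];       /* EV_POLL_BATCH_SIZE is assumed */
ef_request_id ids[EF_VI_RECEIVE_BATCH];
int i, j, n_ev, n_rx;

n_ev = ef_eventq_poll(&vis[rx_vi_i].vi, evs, EV_POLL_BATCH_SIZE);
for( i = 0; i < n_ev; ++i )
  if( EF_EVENT_TYPE(evs[i]) == EF_EVENT_TYPE_RX_MULTI ) {
    n_rx = ef_vi_receive_unbundle(&vis[rx_vi_i].vi, &evs[i], ids);
    for( j = 0; j < n_rx; ++j )
      handle_batched_rx(rx_vi_i, ids[j]);
  }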
Example #6
File: efvi_sfw.c  Project: ido/openonload
static void pkt_buf_init(struct vi* vi, int pkt_buf_i)
{
  struct pkt_buf* pkt_buf;
  pkt_buf = pkt_buf_from_id(vi, pkt_buf_i);
  pkt_buf->vi_owner = vi;
  pkt_buf->addr[vi->net_if->id] =
    ef_memreg_dma_addr(&vi->memreg, pkt_buf_i * PKT_BUF_SIZE);
  pkt_buf->id = pkt_buf_i;
  pkt_buf->n_refs = 0;
  pkt_buf_free(pkt_buf);
}
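In efvi_sfw.c the buffer pool belongs to a VI, so pkt_buf_from_id() indexes into the owning VI's region. A sketch consistent with the usage above (vi->pkt_bufs and vi->pkt_bufs_n appear in Example #7):

/* Sketch only: buffers are laid out back-to-back in vi->pkt_bufs. */
static inline struct pkt_buf* pkt_buf_from_id(struct vi* vi, int pkt_buf_i)
{
  assert((unsigned) pkt_buf_i < (unsigned) vi->pkt_bufs_n);
  return (void*) ((char*) vi->pkt_bufs + pkt_buf_i * PKT_BUF_SIZE);
}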
Example #7
File: efvi_sfw.c  Project: ido/openonload
void net_if_map_vi_pool(struct net_if* net_if, struct vi* vi)
{
  struct pkt_buf* pkt_buf;
  ef_memreg memreg;
  int i;

  /* If this fails it means you've tried to map buffers into a protection
   * domain that has already mapped those buffers.
   */
  TEST(vi->net_if != net_if);

  TRY(ef_memreg_alloc(&memreg, net_if->dh, &net_if->pd, net_if->dh,
                      vi->pkt_bufs, vi->pkt_bufs_n * PKT_BUF_SIZE));
  for( i = 0; i < vi->pkt_bufs_n; ++i ) {
    pkt_buf = pkt_buf_from_id(vi, i);
    pkt_buf->addr[net_if->id] = ef_memreg_dma_addr(&memreg, i * PKT_BUF_SIZE);
  }
}
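Typical use, sketched with illustrative variable names: register one VI's buffer pool with a second interface's protection domain, so packets received on the first interface can be transmitted on the second without copying.

/* Sketch only: vi_a's buffers become DMA-addressable on net_if_b too,
 * recorded per-interface in pkt_buf->addr[]. */
net_if_map_vi_pool(net_if_b, vi_a);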
Example #8
/* Handle an RX event on a VI.  We forward the packet on the other VI. */
static void handle_rx(int rx_vi_i, int pkt_buf_i, int len)
{
  int rc;
  int tx_vi_i = 1 - rx_vi_i;  /* forward on the other of the two VIs */
  struct vi* rx_vi = &vis[rx_vi_i];
  struct vi* tx_vi = &vis[tx_vi_i];
  struct pkt_buf* pkt_buf = pkt_buf_from_id(pkt_buf_i);

  ++rx_vi->n_pkts;
  rc = ef_vi_transmit(&tx_vi->vi, pkt_buf->tx_ef_addr[tx_vi_i], len,
                      pkt_buf->id);
  if( rc != 0 ) {
    assert(rc == -EAGAIN);
    /* TXQ is full.  A real app might consider implementing an overflow
     * queue in software.  We simply choose not to send.
     */
    pkt_buf_free(pkt_buf);
  }
}
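The forwarded buffer is released later, when the transmit completes; complete_tx() in Example #12 does the freeing. Since the DMA id passed to ef_vi_transmit() is pkt_buf->id, it maps straight back to a buffer. A sketch of the dispatch from the event loop (evs, i, vi_i are the event-loop locals):

/* Sketch only: reclaim buffer ids from a TX completion event. */
case EF_EVENT_TYPE_TX: {
  ef_request_id ids[EF_VI_TRANSMIT_BATCH];
  int j, n_tx = ef_vi_transmit_unbundle(&vis[vi_i].vi, &evs[i], ids);
  for( j = 0; j < n_tx; ++j )
    complete_tx(vi_i, ids[j]);
  break;
}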
Example #9
/* Allocate and initialize a VI. */
static int init(const char* intf, int vi_i)
{
  struct vi* vi = &vis[vi_i];
  int i;
  TRY(ef_driver_open(&vi->dh));
  TRY(ef_pd_alloc_by_name(&vi->pd, vi->dh, intf, EF_PD_DEFAULT));
  TRY(ef_vi_alloc_from_pd(&vi->vi, vi->dh, &vi->pd, vi->dh, -1, -1, -1, NULL,
                          -1, EF_VI_FLAGS_DEFAULT));

  /* Memory for pkt buffers has already been allocated.  Map it into
   * the VI. */
  TRY(ef_memreg_alloc(&vi->memreg, vi->dh, &vi->pd, vi->dh,
                      pbs.mem, pbs.mem_size));
  for( i = 0; i < pbs.num; ++i ) {
    struct pkt_buf* pkt_buf = pkt_buf_from_id(i);
    pkt_buf->rx_ef_addr[vi_i] =
      ef_memreg_dma_addr(&vi->memreg, i * PKT_BUF_SIZE) + RX_DMA_OFF;
    pkt_buf->tx_ef_addr[vi_i] =
      ef_memreg_dma_addr(&vi->memreg, i * PKT_BUF_SIZE) + RX_DMA_OFF +
      ef_vi_receive_prefix_len(&vi->vi);
    pkt_buf->rx_ptr[vi_i] = (char*) pkt_buf + RX_DMA_OFF +
      ef_vi_receive_prefix_len(&vi->vi);
  }

  /* Our pkt buffer allocation function makes assumptions about queue sizes */
  assert(ef_vi_receive_capacity(&vi->vi) == 511);
  assert(ef_vi_transmit_capacity(&vi->vi) == 511);

  while( ef_vi_receive_space(&vi->vi) > REFILL_BATCH_SIZE )
    vi_refill_rx_ring(vi_i);

  ef_filter_spec fs;
  ef_filter_spec_init(&fs, EF_FILTER_FLAG_NONE);
  TRY(ef_filter_spec_set_unicast_all(&fs));
  TRY(ef_vi_filter_add(&vi->vi, vi->dh, &fs, NULL));
  ef_filter_spec_init(&fs, EF_FILTER_FLAG_NONE);
  TRY(ef_filter_spec_set_multicast_all(&fs));
  TRY(ef_vi_filter_add(&vi->vi, vi->dh, &fs, NULL));
  return 0;
}
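A sketch of the event dispatch that feeds handle_rx() and handle_rx_discard() in this program (EF_EVENT_RX_BYTES includes the receive prefix, hence the subtraction; evs, i, vi_i are the event-loop locals):

/* Sketch only: dispatch plain RX and RX-discard events. */
case EF_EVENT_TYPE_RX:
  handle_rx(vi_i, EF_EVENT_RX_RQ_ID(evs[i]),
            EF_EVENT_RX_BYTES(evs[i]) - ef_vi_receive_prefix_len(&vis[vi_i].vi));
  break;
case EF_EVENT_TYPE_RX_DISCARD:
  handle_rx_discard(EF_EVENT_RX_DISCARD_RQ_ID(evs[i]),
                    EF_EVENT_RX_DISCARD_TYPE(evs[i]));
  break;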
Example #10
/* Allocate and initialize the packet buffers. */
static int init_pkts_memory(void)
{
  int i;

  /* Number of buffers is the worst case needed to fill all the queues,
   * assuming 2 VIs are allocated, each with an RXQ and a TXQ, all with the
   * default capacity of 512. */
  pbs.num = 4 * 512;
  pbs.mem_size = pbs.num * PKT_BUF_SIZE;
  pbs.mem_size = ROUND_UP(pbs.mem_size, huge_page_size);
  /* Allocate huge-page-aligned memory to give best chance of allocating
   * transparent huge-pages.
   */
  TEST(posix_memalign(&pbs.mem, huge_page_size, pbs.mem_size) == 0);

  for( i = 0; i < pbs.num; ++i ) {
    struct pkt_buf* pkt_buf = pkt_buf_from_id(i);
    pkt_buf->id = i;
    pkt_buf_free(pkt_buf);
  }
  return 0;
}
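The TRY() and TEST() macros used throughout these examples abort on failure; in the ef_vi sample code they come from a shared utility header. A minimal sketch of the idea:

/* Sketch only: TRY() treats a negative return code as fatal, TEST() treats
 * a false condition as fatal. */
#define TEST(x)                                                  \
  do {                                                           \
    if( ! (x) ) {                                                \
      fprintf(stderr, "ERROR: TEST(%s) failed at %s:%d\n",       \
              #x, __FILE__, __LINE__);                           \
      abort();                                                   \
    }                                                            \
  } while( 0 )

#define TRY(x)                                                   \
  do {                                                           \
    int __rc = (x);                                              \
    if( __rc < 0 ) {                                             \
      fprintf(stderr, "ERROR: TRY(%s) failed: rc=%d at %s:%d\n", \
              #x, __rc, __FILE__, __LINE__);                     \
      abort();                                                   \
    }                                                            \
  } while( 0 )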
Example #11
static void handle_rx_discard(struct vi_state* vi_state, int pkt_buf_i,
                              int discard_type)
{
  struct pkt_buf* pkt_buf = pkt_buf_from_id(vi_state, pkt_buf_i);
  pkt_buf_free(vi_state, pkt_buf);
}
Example #12
static void complete_tx(int vi_i, int pkt_buf_i)
{
  struct pkt_buf* pkt_buf = pkt_buf_from_id(pkt_buf_i);
  pkt_buf_free(pkt_buf);
}
Example #13
static void handle_rx_discard(int pkt_buf_i, int discard_type)
{
  struct pkt_buf* pkt_buf = pkt_buf_from_id(pkt_buf_i);
  pkt_buf_free(pkt_buf);
}
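Both handle_rx_discard() variants recycle the buffer without looking at discard_type. A variant that reports the reason first, using ef_vi's EF_EVENT_RX_DISCARD_* codes (a sketch, not part of the original examples):

/* Sketch only: log why the NIC discarded the packet before recycling the
 * buffer. */
static void handle_rx_discard_verbose(int pkt_buf_i, int discard_type)
{
  struct pkt_buf* pkt_buf = pkt_buf_from_id(pkt_buf_i);
  const char* why = "other";
  switch( discard_type ) {
  case EF_EVENT_RX_DISCARD_CSUM_BAD:       why = "bad checksum";   break;
  case EF_EVENT_RX_DISCARD_CRC_BAD:        why = "bad CRC";        break;
  case EF_EVENT_RX_DISCARD_TRUNC:          why = "truncated";      break;
  case EF_EVENT_RX_DISCARD_MCAST_MISMATCH: why = "mcast mismatch"; break;
  case EF_EVENT_RX_DISCARD_RIGHTS:         why = "rights";         break;
  }
  fprintf(stderr, "Discarded packet %d: %s\n", pkt_buf_i, why);
  pkt_buf_free(pkt_buf);
}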