Example 1: ef_vi_receive_unbundle()
int ef_vi_receive_unbundle(ef_vi* vi, const ef_event* ev,
                           ef_request_id* ids)
{
  ef_request_id* ids_in = ids;
  ef_vi_rxq* q = &vi->vi_rxq;
  ef_vi_rxq_state* qs = &vi->ep_state->rxq;
  unsigned i;

  EF_VI_BUG_ON( EF_EVENT_TYPE(*ev) != EF_EVENT_TYPE_RX_MULTI &&
                EF_EVENT_TYPE(*ev) != EF_EVENT_TYPE_RX_MULTI_DISCARD );
  EF_VI_BUG_ON( ev->rx_multi.n_descs > EF_VI_RECEIVE_BATCH );

  for( i = 0; i < ev->rx_multi.n_descs; ++i ) {
    unsigned di = qs->removed & q->mask;
    ++(qs->removed);
    if( q->ids[di] != EF_REQUEST_ID_MASK ) {
      *ids++ = q->ids[di];
      q->ids[di] = EF_REQUEST_ID_MASK;
    }
  }

  /* Check we didn't remove more than we've added. */
  EF_VI_ASSERT( qs->added - qs->removed <= q->mask );

  return (int) (ids - ids_in);
}
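
ef_vi_receive_unbundle() converts one merged RX event into the request ids of
the descriptors it completes.  A typical consumer polls the event queue and
unbundles each RX_MULTI event, as in this sketch; handle_rx_pkt() is a
hypothetical application callback, everything else is public ef_vi API.

#include <etherfabric/ef_vi.h>

extern void handle_rx_pkt(ef_request_id id);   /* hypothetical */

static void poll_rx(ef_vi* vi)
{
  ef_event evs[16];
  ef_request_id ids[EF_VI_RECEIVE_BATCH];
  int i, j, n_ev, n_ids;

  n_ev = ef_eventq_poll(vi, evs, 16);
  for( i = 0; i < n_ev; ++i ) {
    switch( EF_EVENT_TYPE(evs[i]) ) {
    case EF_EVENT_TYPE_RX_MULTI:
    case EF_EVENT_TYPE_RX_MULTI_DISCARD:
      /* One merged event completes several descriptors. */
      n_ids = ef_vi_receive_unbundle(vi, &evs[i], ids);
      for( j = 0; j < n_ids; ++j )
        handle_rx_pkt(ids[j]);
      break;
    default:
      break;
    }
  }
}
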
Example 2: ef10_tx_event_ts_enabled()
static void ef10_tx_event_ts_enabled(ef_vi* evq, const ef_vi_event* ev,
				     ef_event** evs, int* evs_len)
{
  EF_VI_ASSERT(evq->vi_flags & EF_VI_TX_TIMESTAMPS);
  /* When TX timestamping is enabled, we get three events for
   * every transmit.  A TX completion and two timestamp events.
   * We ignore the completion and store the first timestamp in
   * the per TXQ state.  On the second timestamp we retrieve the
   * first one and construct an EF_EVENT_TYPE_TX_WITH_TIMESTAMP
   * event to send to the user. */
  if(QWORD_GET_U(ESF_DZ_TX_SOFT1, *ev) == 
     TX_TIMESTAMP_EVENT_TX_EV_COMPLETION) {
    /* TX completion event.  Ignored */
  }
  else if(QWORD_GET_U(ESF_DZ_TX_SOFT1, *ev) ==
          TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO) {
    ef_vi_txq_state* qs = &evq->ep_state->txq;
    EF_VI_DEBUG(
                EF_VI_BUG_ON(qs->ts_nsec !=
                             EF_VI_TX_TIMESTAMP_TS_NSEC_INVALID)
                );
    qs->ts_nsec =
      (((((uint64_t)timestamp_extract(*ev)) *
         1000000000UL) >> 29) << 2) |
      evq->ep_state->evq.sync_flags;
  }
  else if(QWORD_GET_U(ESF_DZ_TX_SOFT1, *ev) ==
          TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI) {
    /* Second timestamp event: combined with the stored first part to
     * construct the EF_EVENT_TYPE_TX_WITH_TIMESTAMP event described
     * above (handling elided from this excerpt). */
  }
}
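
The TSTAMP_LO arithmetic implies the extracted field counts 2^-29 second
units: multiplying by 10^9 and shifting right by 29 yields nanoseconds, and
the final << 2 frees the two low bits for the sync flags.  A standalone check
of that arithmetic (the 2^-29 interpretation is inferred from the shifts, not
confirmed by this excerpt):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
  uint64_t frac = 1u << 28;          /* half a second in 2^-29 units */
  unsigned sync_flags = 0x3;         /* example flag bits */
  uint64_t packed = ((frac * 1000000000UL) >> 29) << 2 | sync_flags;
  /* Prints nsec=500000000 flags=3. */
  printf("nsec=%llu flags=%u\n",
         (unsigned long long)(packed >> 2), (unsigned)(packed & 3));
  return 0;
}
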
Example 3: ef_eventq_put()
int ef_eventq_put(unsigned evq_id, ef_driver_handle fd, unsigned ev)
{
  ci_resource_op_t  op;
  int64_t ev64;

  EF_VI_BUG_ON((ev & EFVI_FALCON_EVENT_SW_DATA_MASK) != ev);
  ev64 = ev;

  op.op = CI_RSOP_EVENTQ_PUT;
  op.id = efch_make_resource_id(evq_id);
  op.u.evq_put.ev = cpu_to_le64(ev64);
  return ci_resource_op(fd, &op);
}
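
The payload must satisfy the EFVI_FALCON_EVENT_SW_DATA_MASK assertion, so
only the low data bits are usable.  A sketch of posting an
application-defined token to a peer's event queue; the peer would see it as
an EF_EVENT_TYPE_SW event:

static int signal_peer(unsigned evq_id, ef_driver_handle fd,
                       unsigned token)
{
  /* token must fit within EFVI_FALCON_EVENT_SW_DATA_MASK. */
  return ef_eventq_put(evq_id, fd, token);
}
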
Example 4: __ef_vi_alloc()
int __ef_vi_alloc(ef_vi* vi, ef_driver_handle vi_dh,
		  efch_resource_id_t pd_or_vi_set_id,
		  ef_driver_handle pd_or_vi_set_dh,
		  int index_in_vi_set, int ifindex, int evq_capacity,
		  int rxq_capacity, int txq_capacity,
		  ef_vi* evq, ef_driver_handle evq_dh,
		  int vi_clustered, enum ef_vi_flags vi_flags)
{
  struct ef_vi_nic_type nic_type;
  ci_resource_alloc_t ra;
  char *mem_mmap_ptr_orig, *mem_mmap_ptr;
  char *io_mmap_ptr, *io_mmap_base;
  ef_vi_state* state;
  int rc;
  const char* s;
  uint32_t* ids;
  void* p;
  int q_label;

  EF_VI_BUG_ON((evq == NULL) != (evq_capacity != 0));
  EF_VI_BUG_ON(! evq_capacity && ! rxq_capacity && ! txq_capacity);

  /* Ensure ef_vi_free() only frees what we allocate. */
  io_mmap_ptr = NULL;
  io_mmap_base = NULL;
  mem_mmap_ptr = mem_mmap_ptr_orig = NULL;

  if( evq == NULL )
    q_label = 0;
  else if( (q_label = evq->vi_qs_n) == EF_VI_MAX_QS )
    return -EBUSY;

  if( ifindex < 0 && (s = getenv("EF_VI_IFINDEX")) )
    ifindex = atoi(s);
  if( evq_capacity == -1 )
    evq_capacity = (s = getenv("EF_VI_EVQ_SIZE")) ? atoi(s) : -1;
  if( txq_capacity == -1 )
    txq_capacity = (s = getenv("EF_VI_TXQ_SIZE")) ? atoi(s) : -1;
  if( rxq_capacity == -1 )
    rxq_capacity = (s = getenv("EF_VI_RXQ_SIZE")) ? atoi(s) : -1;
  if( evq_capacity == -1 && (vi_flags & EF_VI_RX_PACKED_STREAM) )
    /* At the time of writing we apply this default at user-level as
     * well as in the driver.  Ultimately we want it applied only in
     * the driver, so that we don't have to know this magic number
     * (which may change in future).  For now we also apply it here so
     * that the default takes effect when running against a 201405-u1
     * driver.  This can be removed once the driver ABI changes.
     */
    evq_capacity = 32768;

  /* Allocate resource and mmap. */
  memset(&ra, 0, sizeof(ra));
  ef_vi_set_intf_ver(ra.intf_ver, sizeof(ra.intf_ver));
  ra.ra_type = EFRM_RESOURCE_VI;
  ra.u.vi_in.ifindex = ifindex;
  ra.u.vi_in.pd_or_vi_set_fd = pd_or_vi_set_dh;
  ra.u.vi_in.pd_or_vi_set_rs_id = pd_or_vi_set_id;
  ra.u.vi_in.vi_set_instance = index_in_vi_set;
  ra.u.vi_in.ps_buf_size_kb = (vi_flags & EF_VI_RX_PS_BUF_SIZE_64K) ? 64 : 1024;
  if( evq != NULL ) {
    ra.u.vi_in.evq_fd = evq_dh;
    ra.u.vi_in.evq_rs_id = efch_make_resource_id(evq->vi_resource_id);
  }
  else {
    ra.u.vi_in.evq_fd = -1;
    evq = vi;
  }
  ra.u.vi_in.evq_capacity = evq_capacity;
  ra.u.vi_in.txq_capacity = txq_capacity;
  ra.u.vi_in.rxq_capacity = rxq_capacity;
  ra.u.vi_in.tx_q_tag = q_label;
  ra.u.vi_in.rx_q_tag = q_label;
  ra.u.vi_in.flags = vi_flags_to_efab_flags(vi_flags);
  rc = ci_resource_alloc(vi_dh, &ra);
  if( rc < 0 ) {
    LOGVV(ef_log("%s: ci_resource_alloc %d", __FUNCTION__, rc));
    goto fail1;
  }

  evq_capacity = ra.u.vi_out.evq_capacity;
  txq_capacity = ra.u.vi_out.txq_capacity;
  rxq_capacity = ra.u.vi_out.rxq_capacity;

  rc = -ENOMEM;
  state = malloc(ef_vi_calc_state_bytes(rxq_capacity, txq_capacity));
  if( state == NULL )
    goto fail1;

  if( ra.u.vi_out.io_mmap_bytes ) {
    rc = ci_resource_mmap(vi_dh, ra.out_id.index, EFCH_VI_MMAP_IO,
			  ra.u.vi_out.io_mmap_bytes, &p);
    if( rc < 0 ) {
      LOGVV(ef_log("%s: ci_resource_mmap (io) %d", __FUNCTION__, rc));
      goto fail2;
    }
    { /* On systems with large pages, multiple VI windows are mapped into
       * each system page.  Therefore the VI window may not appear at the
       * start of the I/O mapping.
       */
      int inst_in_iopage = 0;
      int vi_windows_per_page = CI_PAGE_SIZE / 8192;
      if( vi_windows_per_page > 1 )
        inst_in_iopage = ra.u.vi_out.instance & (vi_windows_per_page - 1);
      io_mmap_base = (char*) p;
      io_mmap_ptr = io_mmap_base + inst_in_iopage * 8192;
    }
  }

  if( ra.u.vi_out.mem_mmap_bytes ) {
    rc = ci_resource_mmap(vi_dh, ra.out_id.index, EFCH_VI_MMAP_MEM,
			  ra.u.vi_out.mem_mmap_bytes, &p);
    if( rc < 0 ) {
      LOGVV(ef_log("%s: ci_resource_mmap (mem) %d", __FUNCTION__, rc));
      goto fail3;
    }
    mem_mmap_ptr = mem_mmap_ptr_orig = (char*) p;
  }

  rc = ef_vi_arch_from_efhw_arch(ra.u.vi_out.nic_arch);
  EF_VI_BUG_ON(rc < 0);
  nic_type.arch = (unsigned char) rc;
  nic_type.variant = ra.u.vi_out.nic_variant;
  nic_type.revision = ra.u.vi_out.nic_revision;

  rc = check_nic_compatibility(vi_flags, nic_type.arch);
  if( rc != 0 )
    goto fail4;

  ids = (void*) (state + 1);

  ef_vi_init(vi, nic_type.arch, nic_type.variant, nic_type.revision,
	     vi_flags, state);
  ef_vi_init_out_flags(vi, (ra.u.vi_out.out_flags & EFHW_VI_CLOCK_SYNC_STATUS) ?
                       EF_VI_OUT_CLOCK_SYNC_STATUS : 0);
  ef_vi_init_io(vi, io_mmap_ptr);
  if( evq_capacity ) {
    ef_vi_init_evq(vi, evq_capacity, mem_mmap_ptr);
    mem_mmap_ptr += ((evq_capacity * sizeof(efhw_event_t) + CI_PAGE_SIZE - 1)
		     & CI_PAGE_MASK);
  }
  if( rxq_capacity ) {
    ef_vi_init_rxq(vi, rxq_capacity, mem_mmap_ptr, ids,
		   ra.u.vi_out.rx_prefix_len);
    mem_mmap_ptr += (ef_vi_rx_ring_bytes(vi) + CI_PAGE_SIZE-1) & CI_PAGE_MASK;
    ids += rxq_capacity;
    if( vi_flags & EF_VI_RX_TIMESTAMPS ) {
      int rx_ts_correction;
      rc = get_ts_correction(vi_dh, ra.out_id.index, &rx_ts_correction);
      if( rc < 0 )
        goto fail4;
      ef_vi_init_rx_timestamping(vi, rx_ts_correction);
    }
  }
  if( txq_capacity )
    ef_vi_init_txq(vi, txq_capacity, mem_mmap_ptr, ids);

  vi->vi_io_mmap_ptr = io_mmap_base;
  vi->vi_mem_mmap_ptr = mem_mmap_ptr_orig;
  vi->vi_io_mmap_bytes = ra.u.vi_out.io_mmap_bytes;
  vi->vi_mem_mmap_bytes = ra.u.vi_out.mem_mmap_bytes;
  vi->vi_resource_id = ra.out_id.index;
  if( ra.u.vi_out.out_flags & EFHW_VI_PS_BUF_SIZE_SET )
    vi->vi_ps_buf_size = ra.u.vi_out.ps_buf_size;
  else
    vi->vi_ps_buf_size = 1024 * 1024;
  BUG_ON(vi->vi_ps_buf_size != 64*1024 &&
         vi->vi_ps_buf_size != 1024*1024);
  vi->vi_clustered = vi_clustered;
  vi->vi_i = ra.u.vi_out.instance;
  ef_vi_init_state(vi);
  rc = ef_vi_add_queue(evq, vi);
  BUG_ON(rc != q_label);
  vi->vi_is_packed_stream = !! (vi_flags & EF_VI_RX_PACKED_STREAM);

  if( vi->vi_is_packed_stream )
    ef_vi_packed_stream_update_credit(vi);

  return q_label;

 fail4:
  if( mem_mmap_ptr != NULL )
    ci_resource_munmap(vi_dh, mem_mmap_ptr, ra.u.vi_out.mem_mmap_bytes);
 fail3:
  if( io_mmap_base != NULL )
    ci_resource_munmap(vi_dh, io_mmap_base, ra.u.vi_out.io_mmap_bytes);
 fail2:
  free(state);
 fail1:
  return rc;
}
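
Applications normally reach __ef_vi_alloc() through public wrappers such as
ef_vi_alloc_from_pd() rather than calling it directly.  A minimal bring-up
sketch under that assumption, with error handling trimmed; capacities of -1
request the defaults (or the EF_VI_*_SIZE environment overrides handled
above):

#include <etherfabric/base.h>
#include <etherfabric/pd.h>
#include <etherfabric/vi.h>

static int open_vi(ef_vi* vi, struct ef_pd* pd, ef_driver_handle* dh,
                   int ifindex)
{
  int rc;
  if( (rc = ef_driver_open(dh)) < 0 )
    return rc;
  if( (rc = ef_pd_alloc(pd, *dh, ifindex, EF_PD_DEFAULT)) < 0 )
    return rc;
  /* No shared event queue: pass NULL/-1 for evq_opt/evq_dh. */
  return ef_vi_alloc_from_pd(vi, *dh, pd, *dh, -1, -1, -1,
                             NULL, -1, EF_VI_FLAGS_DEFAULT);
}
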
Example 5: ef_eventq_has_many_events()
int ef_eventq_has_many_events(ef_vi* vi, int look_ahead)
{
  EF_VI_BUG_ON(look_ahead < 0);
  return EF_VI_IS_EVENT(EF_VI_EVENT_PTR(vi, look_ahead));
}
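
The look-ahead form is useful for deliberate batching: the function returns
true once the slot look_ahead events past the read pointer has been written,
i.e. more than look_ahead events are pending.  A small sketch with an
arbitrary threshold of 8:

#include <etherfabric/ef_vi.h>

/* Defer polling until more than 8 events have accumulated, trading a
 * little latency for larger batches. */
static inline int rx_batch_ready(ef_vi* vi)
{
  return ef_eventq_has_many_events(vi, 8);
}
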
Example 6: falcon_ef_eventq_poll()
int falcon_ef_eventq_poll(ef_vi* evq, ef_event* evs, int evs_len)
{
  int evs_len_orig = evs_len;
  ef_vi_event *pev, ev;

  EF_VI_BUG_ON(evs == NULL);
  EF_VI_BUG_ON(evs_len < EF_VI_EVENT_POLL_MIN_EVS);

#ifdef __powerpc__
  /* On PPC consumed events are cleared 16 slots behind the read
   * pointer (see below), so the overflow check must look one slot
   * further back than on other architectures. */
  if(unlikely( EF_VI_IS_EVENT(EF_VI_EVENT_PTR(evq, -17)) ))
    goto overflow;
#else
  if(unlikely( EF_VI_IS_EVENT(EF_VI_EVENT_PTR(evq, -1)) ))
    goto overflow;
#endif

 not_empty:
  /* Read the event out of the ring, then work on the copy.  The
   * ring entry is likely to be pushed out of the cache by another
   * event being delivered by hardware.
   */
  pev = EF_VI_EVENT_PTR(evq, 0);
  ev = *pev;
  if (!EF_VI_IS_EVENT(&ev))
    goto empty;

  do {
    /* Mark the consumed slot as no longer containing an event, so
     * the overflow check above can detect when the producer wraps;
     * on PPC the clear is deferred by 16 slots. */
#ifdef __powerpc__
    CI_SET_QWORD(*EF_VI_EVENT_PTR(evq, -16));
#else
    CI_SET_QWORD(*pev);
#endif
    evq->ep_state->evq.evq_ptr += sizeof(ef_vi_event);

    /* Ugly: Exploit the fact that event code lies in top bits
     * of event. */
    EF_VI_BUG_ON(EV_CODE_LBN < 32u);
    switch( CI_QWORD_FIELD(ev, EV_CODE) ) {
    case RX_IP_EV_DECODE:
      falcon_rx_event(evq, &ev, &evs, &evs_len);
      break;

    case TX_IP_EV_DECODE:
      falcon_tx_event(evs, &ev);
      --evs_len;
      ++evs;
      break;

    case DRV_GEN_EV_DECODE:
      falcon_drv_gen_event(evq, &ev, &evs, &evs_len);
      break;

    default:
      break;
    }

    if (evs_len == 0)
      break;

    pev = EF_VI_EVENT_PTR(evq, 0);
    ev = *pev;
  } while (EF_VI_IS_EVENT(&ev));

  return evs_len_orig - evs_len;


 empty:
  if (EF_VI_IS_EVENT(EF_VI_EVENT_PTR(evq, 1))) {
    smp_rmb();
    if (!EF_VI_IS_EVENT(EF_VI_EVENT_PTR(evq, 0))) {
      /* No event in current slot, but there is one in
       * the next slot.  Has NIC failed to write event
       * somehow?
       */
      evq->ep_state->evq.evq_ptr += sizeof(ef_vi_event);
      INC_ERROR_STAT(evq, evq_gap);
      goto not_empty;
    }
  }
  return 0;

 overflow:
  evs->generic.type = EF_EVENT_TYPE_OFLOW;
  return 1;
}
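
When the poll returns EF_EVENT_TYPE_OFLOW the queue has wrapped and events
have been lost, so the VI's state can no longer be trusted.  A sketch of
caller-side handling, treating overflow as fatal (typical; recovery would
mean tearing down and re-allocating the VI):

#include <stdio.h>
#include <stdlib.h>
#include <etherfabric/ef_vi.h>

static int poll_or_die(ef_vi* vi, ef_event* evs, int evs_len)
{
  int i, n = ef_eventq_poll(vi, evs, evs_len);
  for( i = 0; i < n; ++i )
    if( EF_EVENT_TYPE(evs[i]) == EF_EVENT_TYPE_OFLOW ) {
      fprintf(stderr, "ef_vi: event queue overflowed; events lost\n");
      abort();
    }
  return n;
}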