Example #1
void citp_waitable_print(citp_waitable* w)
{
  /* Output socket using netstat style output:
   *   TCP 2 0 0.0.0.0:12865 0.0.0.0:0 LISTEN
   *   UDP 0 0 172.16.129.131:57521 0.0.0.0:0 UDP
   */
  if( CI_TCP_STATE_IS_SOCKET(w->state) ) {
    ci_sock_cmn* s = CI_CONTAINER(ci_sock_cmn, b, w);
    citp_waitable_obj* wo = CI_CONTAINER(citp_waitable_obj, waitable, w);
    int tq = 0;
    int rq = 0;
    
    if( (w->state & CI_TCP_STATE_TCP) &&
       !(w->state & CI_TCP_STATE_NOT_CONNECTED) ) {
      tq = ci_tcp_sendq_n_pkts(&wo->tcp);
      rq = wo->tcp.recv1.num + wo->tcp.recv2.num;
    }
    else if( w->state == CI_TCP_STATE_UDP ) {
      tq = wo->udp.tx_count + oo_atomic_read(&wo->udp.tx_async_q_level);
      rq = ci_udp_recv_q_pkts(&wo->udp.recv_q);
    }
    log("%s %d %d "OOF_IP4PORT" "OOF_IP4PORT" %s",
        citp_waitable_type_str(w), rq, tq,
        OOFA_IP4PORT(sock_laddr_be32(s), sock_lport_be16(s)),
        OOFA_IP4PORT(sock_raddr_be32(s), sock_rport_be16(s)),
        ci_tcp_state_str(w->state));
  }
}
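The CI_CONTAINER() calls above recover the enclosing ci_sock_cmn and citp_waitable_obj from a pointer to the embedded citp_waitable. A minimal sketch of that idiom, assuming CI_CONTAINER follows the usual container_of pattern (the names below are hypothetical, not Onload's actual definitions):

/* Illustrative sketch only: a container_of-style macro that steps back from
 * an embedded member to the start of the enclosing struct via offsetof(). */
#include <stddef.h>

#define CONTAINER_OF(type, member, ptr) \
  ((type*) ((char*) (ptr) - offsetof(type, member)))

struct waitable { int state; };
struct sock_obj { int refcount; struct waitable w; };

static struct sock_obj* sock_obj_of(struct waitable* w)
{
  /* Argument order (type, member, ptr) mirrors the CI_CONTAINER() usage above. */
  return CONTAINER_OF(struct sock_obj, w, w);
}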
Example #2
void __citp_fdinfo_ref_count_zero(citp_fdinfo* fdi, int fdt_locked)
{
  Log_V(log("%s: fd=%d on_rcz=%d", __FUNCTION__, fdi->fd,
	    fdi->on_ref_count_zero));

  citp_fdinfo_assert_valid(fdi);
  ci_assert(oo_atomic_read(&fdi->ref_count) == 0);
  ci_assert_ge(fdi->fd, 0);
  ci_assert_lt(fdi->fd, citp_fdtable.inited_count);
  ci_assert_nequal(fdi_to_fdip(fdi), citp_fdtable.table[fdi->fd].fdip);

  switch( fdi->on_ref_count_zero ) {
  case FDI_ON_RCZ_CLOSE:
#if CI_CFG_FD_CACHING
    if( citp_fdinfo_get_ops(fdi)->cache(fdi) == 1 ) {
      if( ! fdt_locked && fdtable_strict() )  CITP_FDTABLE_LOCK();
      fdtable_swap(fdi->fd, fdip_closing, fdip_unknown,
                   fdt_locked | fdtable_strict());
      citp_fdinfo_get_ops(fdi)->dtor(fdi, fdt_locked | fdtable_strict());
      if( ! fdt_locked && fdtable_strict() )  CITP_FDTABLE_UNLOCK();
      citp_fdinfo_free(fdi);
      break;
    }
    else 
#endif
    {
      if( ! fdt_locked && fdtable_strict() )  CITP_FDTABLE_LOCK();
      ci_tcp_helper_close_no_trampoline(fdi->fd);
      /* The swap must occur after the close, otherwise another thread could
       * cause a probe of the old endpoint info, which is about to be freed.
       */
      fdtable_swap(fdi->fd, fdip_closing, fdip_unknown,
		   fdt_locked | fdtable_strict());
      citp_fdinfo_get_ops(fdi)->dtor(fdi, fdt_locked | fdtable_strict());
      if( ! fdt_locked && fdtable_strict() )  CITP_FDTABLE_UNLOCK();
      citp_fdinfo_free(fdi);
      break;
    }
  case FDI_ON_RCZ_DUP2:
    dup2_complete(fdi, fdi_to_fdip(fdi), fdt_locked);
    break;
  case FDI_ON_RCZ_HANDOVER:
    citp_fdinfo_do_handover(fdi, fdt_locked);
    break;
  case FDI_ON_RCZ_MOVED:
    citp_fdinfo_get_ops(fdi)->dtor(fdi, fdt_locked);
    citp_fdinfo_free(fdi);
    break;
  default:
    CI_DEBUG(ci_log("%s: fd=%d on_ref_count_zero=%d", __FUNCTION__,
		    fdi->fd, fdi->on_ref_count_zero));
    ci_assert(0);
  }
}
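Both the FDI_ON_RCZ_CLOSE and FDI_ON_RCZ_MOVED paths finish by calling the destructor through citp_fdinfo_get_ops(fdi)->dtor(...), i.e. a per-protocol operations table. A minimal sketch of that C "vtable" dispatch, with invented names (the real protocol/ops layout is not reproduced here):

/* Illustrative sketch of per-protocol ops-table dispatch; names are invented. */
#include <stdio.h>

struct fdinfo;

struct proto_ops {
  void (*dtor)(struct fdinfo* fdi, int fdt_locked);
};

struct fdinfo {
  int fd;
  const struct proto_ops* ops;
};

static void tcp_dtor(struct fdinfo* fdi, int fdt_locked)
{
  printf("destroying fd=%d (fdt_locked=%d)\n", fdi->fd, fdt_locked);
}

static const struct proto_ops tcp_ops = { .dtor = tcp_dtor };

static void fdinfo_ref_count_zero(struct fdinfo* fdi, int fdt_locked)
{
  /* Dispatch through the protocol's ops table, as the example above does. */
  fdi->ops->dtor(fdi, fdt_locked);
}

static void example_usage(void)
{
  struct fdinfo fdi = { .fd = 3, .ops = &tcp_ops };
  fdinfo_ref_count_zero(&fdi, 0);
}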
Example #3
void citp_fdtable_insert(citp_fdinfo* fdi, unsigned fd, int fdt_locked)
{
  ci_assert(fdi);
  ci_assert(fdi->protocol);
  ci_assert(citp_fdtable.inited_count > fd);
  ci_assert_ge(oo_atomic_read(&fdi->ref_count), 1);

  fdi->fd = fd;
  CI_DEBUG(fdi->on_ref_count_zero = FDI_ON_RCZ_NONE);
  fdi->is_special = 0;
  citp_fdtable_busy_clear(fd, fdi_to_fdip(fdi), fdt_locked);
}
Example #4
/* This function does some simple tests to ensure that the fdtable makes sense.
 * There are many more tests we could do; feel free to add them at your
 * leisure!
 */
void
citp_fdtable_assert_valid(void)
{
  int i;

  if( ! citp_fdtable.table )  return;

  CITP_FDTABLE_LOCK_RD();

  for( i = 0; i < citp_fdtable.inited_count; i++ ) {
    citp_fdinfo_p fdip = citp_fdtable.table[i].fdip;

    if( fdip_is_normal(fdip) ) {
      citp_fdinfo * fdi = fdip_to_fdi(fdip);

      ci_assert(fdi);
      ci_assert(fdi->protocol);
      if( ( fdi->protocol->type == CITP_TCP_SOCKET ||
            fdi->protocol->type == CITP_UDP_SOCKET )
          && fdi_to_socket(fdi)->s )
	ci_assert(! (fdi_to_socket(fdi)->s->b.sb_aflags & CI_SB_AFLAG_ORPHAN));

      if (!fdi->is_special) {
        /* Ensure the "back pointer" makes sense */
        ci_assert (fdi->fd == i);
        /* Ensure that the reference count is in a vaguely sensible range */
        ci_assert ((oo_atomic_read (&fdi->ref_count) > 0) &&
                   (oo_atomic_read (&fdi->ref_count) < 10000));

        /* 10,000 threads is a bit mad, warn if more than 20 */
        if (oo_atomic_read (&fdi->ref_count) > 20) {
          Log_U (log ("Warning: fd %d's ref-count suspiciously large (%d)\n",
                      i, oo_atomic_read (&fdi->ref_count)));
        }
      }
    }
  }

  CITP_FDTABLE_UNLOCK_RD();
}
Example #5
/* 2004/08/16 stg: added [fdt_locked] to allow the fd table to be
 * locked before calling this function.  [fdt_locked] = 0 for legacy
 * operation.
 */
void __citp_epinfo_ref_count_zero(citp_epinfo* epinfo,citp_fdinfo* last_fdinfo, 
				  int fdt_locked)
{
  Log_V(log(LPF "ref_count_zero(%p, %d)", epinfo, last_fdinfo->fd));

  ci_assert(epinfo);
  ci_assert(oo_atomic_read(&epinfo->ref_count) == 0);
  ci_assert(epinfo->protocol);
  ci_assert(last_fdinfo);
  ci_assert(last_fdinfo->ep == epinfo);

  epinfo->protocol->ops.dtor(epinfo, last_fdinfo, fdt_locked);
}
Example #6
static int efab_file_move_supported_tcp(ci_netif *ni, ci_tcp_state *ts)
{
#if CI_CFG_FD_CACHING
  /* Don't support moving cached sockets for now */
  if( ci_tcp_is_cached(ts) ||
      !ci_ni_dllist_is_self_linked(ni, &ts->epcache_link) )
    return false;
#endif

  /* TCP closed: supported */
  if( ts->s.b.state == CI_TCP_CLOSED )
    return true;

  /* Everything other than connected TCP is not supported. */
  if( !(ts->s.b.state & CI_TCP_STATE_TCP_CONN) )
    return false;
  if( ts->local_peer != OO_SP_NULL )
    return false;
  if( !(ts->tcpflags & CI_TCPT_FLAG_PASSIVE_OPENED) )
    return false;

  /* A non-empty send queue is not supported.
   * NB: retrans_ptr is uninitialised until retrans has been used,
   * so do not check for !OO_PP_IS_NULL(ts->retrans_ptr). */
  if( !ci_ip_queue_is_empty(&ts->send) ||
      ts->send_prequeue != OO_PP_ID_NULL ||
      oo_atomic_read(&ts->send_prequeue_in) != 0 ||
      !ci_ip_queue_is_empty(&ts->retrans) ||
      ci_ip_timer_pending(ni, &ts->rto_tid) ||
      ci_ip_timer_pending(ni, &ts->zwin_tid) ||
#if CI_CFG_TAIL_DROP_PROBE
      ci_ip_timer_pending(ni, &ts->taildrop_tid) ||
#endif
      ci_ip_timer_pending(ni, &ts->cork_tid) )
    return false;

  /* Sockets with allocated templates are not supported */
  if( OO_PP_NOT_NULL(ts->tmpl_head) )
    return false;

  return true;
}
Example #7
citp_fdinfo*
citp_fdtable_lookup_fast(citp_lib_context_t* ctx, unsigned fd)
{
  /* Note that if we haven't yet initialised this module, then
  ** [inited_count] will be zero, and the following test will fail.  So the
  ** test for initialisation is done further down...
  **
  ** This is highly performance critical.  DO NOT add any code between here
  ** and the first [return] statement.
  */
  citp_fdinfo* fdi;

  /* Try to avoid entering lib. */
  ctx->thread = NULL;

  if( fd < citp_fdtable.inited_count ) {
    volatile citp_fdinfo_p* p_fdip = &citp_fdtable.table[fd].fdip;
    citp_fdinfo_p fdip;

  again:
    fdip = *p_fdip;
    if( fdip_is_normal(fdip) ) {

      citp_enter_lib_if(ctx);
      if( citp_fdtable_is_mt_safe() ) {
	/* No need to use atomic ops or add a ref to the fdi when MT-safe.
         * The definition of "fds_mt_safe" is that the app does not change
         * the meaning of a file descriptor in one thread when it is being
         * used in another thread.
         */
        fdi = fdip_to_fdi(fdip);
        if( ! citp_fdinfo_is_consistent(fdi) )
          fdi = citp_reprobe_moved(fdi, CI_TRUE, CI_FALSE);

	return fdi;
      }
      else {
        /* Swap in the busy marker. */
	if( fdip_cas_succeed(p_fdip, fdip, fdip_busy) ) {
	  fdi = fdip_to_fdi(fdip);

	  ci_assert(fdi);
	  ci_assert_gt(oo_atomic_read(&fdi->ref_count), 0);
	  ci_assert(fdip_is_closing(fdip) || fdip_is_reserved(fdip) ||
		    fdi->fd == fd);
	  /* Bump the reference count. */
	  citp_fdinfo_ref(fdi);

          if( ! citp_fdinfo_is_consistent(fdi) )
            fdi = citp_reprobe_moved(fdi, CI_FALSE, CI_TRUE);
          else {
            /* Swap the busy marker out again. */
            citp_fdtable_busy_clear(fd, fdip, 0);
          }
	  return fdi;
	}
	goto again;
      }
    }

    /* Not normal! */
    if( fdip_is_passthru(fdip) )
      return NULL;

    citp_enter_lib_if(ctx);
    if( fdip_is_busy(fdip) ) {
      citp_fdtable_busy_wait(fd, 0);
      goto again;
    }

    ci_assert(fdip_is_unknown(fdip));
    goto probe;
  }

  if( citp.init_level < CITP_INIT_FDTABLE ) {
    if( _citp_do_init_inprogress == 0 )
      CI_TRY(citp_do_init(CITP_INIT_ALL));
    else
      CI_TRY(citp_do_init(CITP_INIT_FDTABLE)); /* get what we need */
  }

  if( fd >= citp_fdtable.size )
    return NULL;

 probe:
  citp_enter_lib_if(ctx);
  fdi = citp_fdtable_probe(fd);
  if( fdi && citp_fdtable_is_mt_safe() )
    citp_fdinfo_release_ref(fdi, 0);
  return fdi;
}
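The fast-path lookup above claims the table slot by CAS-ing in a busy marker, bumps the reference count, then publishes the entry again with citp_fdtable_busy_clear(). A self-contained sketch of that busy-marker pattern using C11 atomics (general technique only; the slot encoding and names below are invented, not Onload's fdip scheme):

/* Sketch of a CAS busy-marker slot using C11 atomics. */
#include <stdatomic.h>
#include <stdint.h>

#define SLOT_EMPTY ((uintptr_t) 0)
#define SLOT_BUSY  ((uintptr_t) 1)   /* tag value marking the slot as claimed */

typedef struct { _Atomic uintptr_t val; } slot_t;

/* Try to claim the slot: on success returns the previous (normal) value and
 * the caller owns the slot until slot_release(); returns 0 if the slot was
 * empty or already busy, in which case the caller waits or retries. */
static uintptr_t slot_claim(slot_t* s)
{
  uintptr_t cur = atomic_load(&s->val);
  if( cur == SLOT_EMPTY || cur == SLOT_BUSY )
    return 0;
  if( atomic_compare_exchange_strong(&s->val, &cur, SLOT_BUSY) )
    return cur;
  return 0;                      /* lost the race; retry as with "goto again" */
}

static void slot_release(slot_t* s, uintptr_t restored)
{
  /* Counterpart of citp_fdtable_busy_clear(): publish the entry again. */
  atomic_store(&s->val, restored);
}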
Example #8
citp_fdinfo *
citp_fdtable_lookup(unsigned fd)
{
  /* Note that if we haven't yet initialised this module, then
  ** [inited_count] will be zero, and the following test will fail.  So the
  ** test for initialisation is done further down...
  **
  ** This is highly performance critical.  DO NOT add any code between here
  ** and the first [return] statement.
  */
  citp_fdinfo* fdi;

  /* In some cases, we'll lock fdtable.  Assert that it is possible: */
  ci_assert(oo_per_thread_get()->sig.inside_lib);

  if( fd < citp_fdtable.inited_count ) {

    volatile citp_fdinfo_p* p_fdip = &citp_fdtable.table[fd].fdip;
    citp_fdinfo_p fdip;

  again:
    /* Swap in the busy marker. */
    fdip = *p_fdip;

    if( fdip_is_normal(fdip) ) {
      if( citp_fdtable_not_mt_safe() ) {
	if( fdip_cas_succeed(p_fdip, fdip, fdip_busy) ) {
	  fdi = fdip_to_fdi(fdip);
	  ci_assert(fdi);
	  ci_assert_gt(oo_atomic_read(&fdi->ref_count), 0);
	  ci_assert(fdip_is_closing(fdip) || fdip_is_reserved(fdip) ||
		    fdi->fd == fd);
	  /* Bump the reference count. */
	  citp_fdinfo_ref(fdi);

          if( ! citp_fdinfo_is_consistent(fdi) ) {
            /* Something is wrong.  Re-probe. */
            fdi = citp_reprobe_moved(fdi, CI_FALSE, CI_TRUE);
          }
          else {
            /* Swap the busy marker out again. */
            citp_fdtable_busy_clear(fd, fdip, 0);
          }
	  return fdi;
	}
	goto again;
      }
      else {
	/* No need to use atomic ops when single-threaded.  The definition
         * of "fds_mt_safe" is that the app does not change the meaning of
         * a file descriptor in one thread when it is being used in another
         * thread.  In that case I'm hoping this should be safe, but at
         * time of writing I'm really not confident.  (FIXME).
         */
	fdi = fdip_to_fdi(fdip);
        if( ci_is_multithreaded() )
	  citp_fdinfo_ref(fdi);
        else
          ++fdi->ref_count.n;

        if( ! citp_fdinfo_is_consistent(fdi) )
          fdi = citp_reprobe_moved(fdi, CI_FALSE, CI_FALSE);

	return fdi;
      }
    }

    /* Not normal! */
    if( fdip_is_passthru(fdip) )  return NULL;

    if( fdip_is_busy(fdip) ) {
      citp_fdtable_busy_wait(fd, 0);
      goto again;
    }

    ci_assert(fdip_is_unknown(fdip));
    goto probe;
  }

  if (citp.init_level < CITP_INIT_FDTABLE) {
    if (_citp_do_init_inprogress == 0)
      CI_TRY(citp_do_init(CITP_INIT_ALL));
    else
      CI_TRY(citp_do_init(CITP_INIT_FDTABLE)); /* get what we need */
  }

  if( fd >= citp_fdtable.size )  return NULL;

 probe:
  fdi = citp_fdtable_probe(fd);

  return fdi;
}
Example #9
int citp_ep_dup3(unsigned fromfd, unsigned tofd, int flags)
{
  volatile citp_fdinfo_p* p_tofdip;
  citp_fdinfo_p tofdip;
  unsigned max;

  Log_V(log("%s(%d, %d)", __FUNCTION__, fromfd, tofd));

  /* Must be checked by callers. */
  ci_assert(fromfd != tofd);

  /* Hack: if [tofd] is the fd we're using for logging, we'd better choose
  ** a different one!
  */
  if( tofd == citp.log_fd )  citp_log_change_fd();

  ci_assert(citp.init_level >= CITP_INIT_FDTABLE);

  max = CI_MAX(fromfd, tofd);
  if( max >= citp_fdtable.inited_count ) {
    ci_assert(max < citp_fdtable.size);
    CITP_FDTABLE_LOCK();
    __citp_fdtable_extend(max);
    CITP_FDTABLE_UNLOCK();
  }

  /* Bug1151: Concurrent threads doing dup2(x,y) and dup2(y,x) can deadlock
  ** against one another.  So we take out a fat lock to prevent concurrent
  ** dup2()s.
  */
  /* Lock tofd.  We need to interlock against select and poll etc, so we
  ** also grab the exclusive lock.  Also grab the bug1151 lock.
  */
  pthread_mutex_lock(&citp_dup_lock);
  CITP_FDTABLE_LOCK();
  p_tofdip = &citp_fdtable.table[tofd].fdip;
 lock_tofdip_again:
  tofdip = *p_tofdip;
  if( fdip_is_busy(tofdip) )
    tofdip = citp_fdtable_busy_wait(tofd, 1);
  if( fdip_is_closing(tofdip) )
    tofdip = citp_fdtable_closing_wait(tofd, 1);
  if( fdip_is_reserved(tofdip) ) {
    /* ?? FIXME: we can't cope with this at the moment */
    CITP_FDTABLE_UNLOCK();
    Log_U(log("%s(%d, %d): target is reserved", __FUNCTION__, fromfd, tofd));
    errno = EBUSY;
    tofd = -1;
    goto out;
  }
  if( fdip_cas_fail(p_tofdip, tofdip, fdip_busy) )
    goto lock_tofdip_again;
  CITP_FDTABLE_UNLOCK();
  ci_assert(fdip_is_normal(tofdip) | fdip_is_passthru(tofdip) |
 	    fdip_is_unknown(tofdip));

  if( fdip_is_normal(tofdip) ) {
    /* We're duping onto a user-level socket. */
    citp_fdinfo* tofdi = fdip_to_fdi(tofdip);
    if( tofdi->epoll_fd >= 0 ) {
      citp_fdinfo* epoll_fdi = citp_epoll_fdi_from_member(tofdi, 0);
      if( epoll_fdi ) {
        if( epoll_fdi->protocol->type == CITP_EPOLL_FD )
          citp_epoll_on_close(epoll_fdi, tofdi, 0);
        citp_fdinfo_release_ref(epoll_fdi, 0);
      }
    }
    ci_assert_equal(tofdi->on_ref_count_zero, FDI_ON_RCZ_NONE);
    tofdi->on_ref_count_zero = FDI_ON_RCZ_DUP2;
    tofdi->on_rcz.dup3_args.fd = fromfd;
    tofdi->on_rcz.dup3_args.flags = flags;
    citp_fdinfo_release_ref(tofdi, 0);
    {
      int i = 0;
      /* We need to free this fdi.  If someone is using it right now,
       * we are in trouble.  So, we spin for a while and interrupt the
       * user.  See bug 28123. */
      while( tofdi->on_ref_count_zero != FDI_ON_RCZ_DONE ) {
        if( ci_is_multithreaded() && i % 10000 == 9999 ) {
          pthread_t pth = tofdi->thread_id;
          if( pth !=  pthread_self() && pth != PTHREAD_NULL ) {
            pthread_kill(pth, SIGONLOAD);
            sleep(1);
          }
        }
        ci_spinloop_pause();
        i++;
      }
      ci_rmb();
    }
    if( tofdi->on_rcz.dup2_result < 0 ) {
      errno = -tofdi->on_rcz.dup2_result;
      /* Need to re-insert [tofdi] into the table. */
      ci_assert_equal(oo_atomic_read(&tofdi->ref_count), 0);
      oo_atomic_set(&tofdi->ref_count, 1);
      CI_DEBUG(tofdi->on_ref_count_zero = FDI_ON_RCZ_NONE);
      citp_fdtable_busy_clear(tofd, tofdip, 0);
      tofd = -1;
    }
    else {
      ci_assert(tofdi->on_rcz.dup2_result == tofd);
      citp_fdinfo_get_ops(tofdi)->dtor(tofdi, 0);
      citp_fdinfo_free(tofdi);
    }
    goto out;
  }

  ci_assert(fdip_is_passthru(tofdip) | fdip_is_unknown(tofdip));

  { /* We're duping onto an O/S descriptor, or it may be closed.  Create a
    ** dummy [citp_fdinfo], just so we can share code with the case above.
    */
    citp_fdinfo fdi;
    fdi.fd = tofd;
    fdi.on_rcz.dup3_args.fd = fromfd;
    fdi.on_rcz.dup3_args.flags = flags;
    dup2_complete(&fdi, tofdip, 0);
    if( fdi.on_rcz.dup2_result < 0 ) {
      errno = -fdi.on_rcz.dup2_result;
      citp_fdtable_busy_clear(tofd, tofdip, 0);
      tofd = -1;
    }
    else
      ci_assert(fdi.on_rcz.dup2_result == tofd);
  }

 out:
  pthread_mutex_unlock(&citp_dup_lock);
  return tofd;
}
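The "Bug1151" comment refers to the classic ABBA deadlock: a thread doing dup2(x, y) locks slot x then slot y while another doing dup2(y, x) locks them in the opposite order, and each waits on the other forever. A minimal sketch of the avoidance strategy used above, where one process-wide mutex serialises all dup2-style operations (names are illustrative, not Onload's):

/* Sketch: serialise dup-like operations with a single "fat" lock so that
 * two concurrent dup2() calls can never hold one slot each and deadlock. */
#include <pthread.h>
#include <unistd.h>

static pthread_mutex_t dup_lock = PTHREAD_MUTEX_INITIALIZER;

static int serialised_dup2(int fromfd, int tofd)
{
  int rc;
  pthread_mutex_lock(&dup_lock);   /* whole operations run strictly in order */
  rc = dup2(fromfd, tofd);         /* stand-in for the fdtable manipulation */
  pthread_mutex_unlock(&dup_lock);
  return rc;
}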
Example #10
void ci_udp_state_dump(ci_netif* ni, ci_udp_state* us, const char* pf,
                      oo_dump_log_fn_t logger, void* log_arg)
{
  ci_udp_socket_stats uss = us->stats;
  unsigned rx_added = us->recv_q.pkts_added;
  unsigned rx_os = uss.n_rx_os + uss.n_rx_os_slow;
  unsigned rx_total = rx_added + uss.n_rx_mem_drop + uss.n_rx_overflow + rx_os;
  unsigned n_tx_onload = uss.n_tx_onload_uc + uss.n_tx_onload_c;
  unsigned tx_total = n_tx_onload + uss.n_tx_os;
  ci_ip_cached_hdrs* ipcache;

  (void) rx_total;  /* unused on 32-bit builds in kernel */
  (void) tx_total;

#if CI_CFG_TIMESTAMPING
  if( us->s.timestamping_flags & ONLOAD_SOF_TIMESTAMPING_TX_HARDWARE )
    ci_udp_recvq_dump(ni, &us->timestamp_q, pf, "  TX timestamping queue:",
                      logger, log_arg);
#endif

  /* General. */
  logger(log_arg, "%s  udpflags: "CI_UDP_STATE_FLAGS_FMT, pf,
         CI_UDP_STATE_FLAGS_PRI_ARG(us));

  /* Receive path. */
  ci_udp_recvq_dump(ni, &us->recv_q, pf, "  rcv:", logger, log_arg);
  logger(log_arg,
         "%s  rcv: oflow_drop=%u(%u%%) mem_drop=%u eagain=%u pktinfo=%u "
         "q_max_pkts=%u", pf, uss.n_rx_overflow,
         percent(uss.n_rx_overflow, rx_total),
         uss.n_rx_mem_drop, uss.n_rx_eagain, uss.n_rx_pktinfo, 
         uss.max_recvq_pkts);
  logger(log_arg, "%s  rcv: os=%u(%u%%) os_slow=%u os_error=%u", pf,
         rx_os, percent(rx_os, rx_total), uss.n_rx_os_slow, uss.n_rx_os_error);

  /* Send path. */
  logger(log_arg, "%s  snd: q=%u+%u ul=%u os=%u(%u%%)", pf,
         us->tx_count, oo_atomic_read(&us->tx_async_q_level),
         n_tx_onload, uss.n_tx_os, percent(uss.n_tx_os, tx_total));
  logger(log_arg,
         "%s  snd: LOCK cp=%u(%u%%) pkt=%u(%u%%) snd=%u(%u%%) poll=%u(%u%%) "
         "defer=%u(%u%%)", pf,
         uss.n_tx_lock_cp,  percent(uss.n_tx_lock_cp,  n_tx_onload),
         uss.n_tx_lock_pkt,  percent(uss.n_tx_lock_pkt,  n_tx_onload),
         uss.n_tx_lock_snd,  percent(uss.n_tx_lock_snd,  n_tx_onload),
         uss.n_tx_lock_poll, percent(uss.n_tx_lock_poll, n_tx_onload),
         uss.n_tx_lock_defer, percent(uss.n_tx_lock_defer, n_tx_onload));

  logger(log_arg, "%s  snd: MCAST if=%d src="OOF_IP4" ttl=%d", pf,
         us->s.cp.ip_multicast_if,
         OOFA_IP4(us->s.cp.ip_multicast_if_laddr_be32),
         (int) us->s.cp.ip_mcast_ttl);

  /* State relating to unconnected sends. */
  ipcache = &us->ephemeral_pkt;
  logger(log_arg,
         "%s  snd: TO n=%u match=%u(%u%%) "
         "lookup=%u+%u(%u%%) "OOF_IPCACHE_STATE,
         pf, uss.n_tx_onload_uc,
         uss.n_tx_cp_match, percent(uss.n_tx_cp_match, uss.n_tx_onload_uc),
         uss.n_tx_cp_uc_lookup, uss.n_tx_cp_a_lookup,
         percent(uss.n_tx_cp_uc_lookup + uss.n_tx_cp_a_lookup,
                 uss.n_tx_onload_uc),
         OOFA_IPCACHE_STATE(ni, ipcache));
  logger(log_arg, "%s  snd: TO "OOF_IPCACHE_DETAIL, pf,
         OOFA_IPCACHE_DETAIL(ipcache));
  logger(log_arg, "%s  snd: TO "OOF_IP4PORT" => "OOF_IP4PORT, pf,
         OOFA_IP4PORT(ipcache->ip_saddr.ip4, udp_lport_be16(us)),
         OOFA_IP4PORT(ipcache->ip.ip_daddr_be32, ipcache->dport_be16));
   
  /* State relating to connected sends. */
  ipcache = &us->s.pkt;
  logger(log_arg, "%s  snd: CON n=%d lookup=%d "OOF_IPCACHE_STATE, pf,
         uss.n_tx_onload_c, uss.n_tx_cp_c_lookup,
         OOFA_IPCACHE_STATE(ni,ipcache));
  logger(log_arg, "%s  snd: CON "OOF_IPCACHE_DETAIL, pf,
         OOFA_IPCACHE_DETAIL(ipcache));

  logger(log_arg, "%s  snd: eagain=%d spin=%d block=%d", pf,
         uss.n_tx_eagain, uss.n_tx_spin, uss.n_tx_block);
  logger(log_arg, "%s  snd: poll_avoids_full=%d fragments=%d confirm=%d", pf,
         uss.n_tx_poll_avoids_full, uss.n_tx_fragments, uss.n_tx_msg_confirm);
  logger(log_arg,
         "%s  snd: os_slow=%d os_late=%d unconnect_late=%d nomac=%u(%u%%)", pf,
         uss.n_tx_os_slow, uss.n_tx_os_late, uss.n_tx_unconnect_late,
         uss.n_tx_cp_no_mac, percent(uss.n_tx_cp_no_mac, tx_total));
}
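The dump above leans on a percent(part, total) helper that is not shown in these examples. A plausible sketch of what it needs to do, assuming it only computes an integer percentage with a divide-by-zero guard (an assumption, not Onload's actual implementation):

/* Assumed helper: integer percentage with a divide-by-zero guard. */
static unsigned percent(unsigned numerator, unsigned denominator)
{
  return denominator ?
      (unsigned) ((unsigned long long) numerator * 100 / denominator) : 0;
}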
Example #11
static int ci_udp_ioctl_locked(ci_netif* ni, ci_udp_state* us,
                               ci_fd_t fd, int request, void* arg)
{
  int rc;

  switch( request ) {
  case FIONREAD: /* synonym of SIOCINQ */
    if( ! CI_IOCTL_ARG_OK(int, arg) )
      return -EFAULT;
    rc = 1;
    if( rc ) {
      /* Return the size of the datagram at the head of the receive queue.
       *
       * Careful: extract side of receive queue is owned by sock lock,
       * which we don't have.  However, freeing of bufs is owned by netif
       * lock, which we do have.  So we're safe so long as we only read
       * [extract] once.
       */
      oo_pkt_p extract = us->recv_q.extract;
      if( OO_PP_NOT_NULL(extract) ) {
        ci_ip_pkt_fmt* pkt = PKT_CHK(ni, extract);
        if( (pkt->rx_flags & CI_PKT_RX_FLAG_RECV_Q_CONSUMED) &&
            OO_PP_NOT_NULL(pkt->udp_rx_next) )
          pkt = PKT_CHK(ni, pkt->udp_rx_next);
        if( !(pkt->rx_flags & CI_PKT_RX_FLAG_RECV_Q_CONSUMED) ) {
          *(int*) arg = pkt->pf.udp.pay_len;
          return 0;
        }
      }
    }
    /* Nothing in the user-level receive queue, so take the value returned
     * by the O/S socket.
     */
    if( !(us->s.os_sock_status & OO_OS_STATUS_RX) ) {
      *(int*)arg = 0;
      return 0;
    }
    goto sys_ioctl;

  case TIOCOUTQ: /* synonym of SIOCOUTQ */
    if( ! CI_IOCTL_ARG_OK(int, arg) )
      return -EFAULT;

    *(int*)arg = us->tx_count + oo_atomic_read(&us->tx_async_q_level);
    return 0;

  case SIOCGSTAMP:
#if defined( __linux__) && defined(__KERNEL__)
/* The following code assumes the width of the timespec and timeval fields */
# error "Need to consider 32-on-64 bit setting of timeval arg" 
#endif
    if( ! (us->udpflags & CI_UDPF_LAST_RECV_ON) )
      return oo_os_sock_ioctl(ni, us->s.b.bufid, request, arg, NULL);
    return ci_udp_ioctl_siocgstamp(ni, us, arg, 1);
  case SIOCGSTAMPNS:
    if( ! (us->udpflags & CI_UDPF_LAST_RECV_ON) )
      return oo_os_sock_ioctl(ni, us->s.b.bufid, request, arg, NULL);
    return ci_udp_ioctl_siocgstamp(ni, us, arg, 0);
  }

  return ci_udp_ioctl_slow(ni, us, fd, request, arg);

 sys_ioctl:
  return oo_os_sock_ioctl(ni, us->s.b.bufid, request, arg, NULL);
}
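From an application's point of view, the FIONREAD and TIOCOUTQ branches above service the standard ioctl() queries on a UDP socket: bytes in the next pending datagram and bytes not yet pushed out of the send path. A short userspace usage sketch (standard Linux API, independent of Onload):

/* Userspace usage of the ioctls handled above. */
#include <stdio.h>
#include <sys/ioctl.h>

static void report_queues(int udp_fd)
{
  int inq = 0, outq = 0;

  if( ioctl(udp_fd, FIONREAD, &inq) == 0 )    /* bytes in next datagram */
    printf("next datagram: %d bytes\n", inq);
  if( ioctl(udp_fd, TIOCOUTQ, &outq) == 0 )   /* bytes not yet sent */
    printf("unsent: %d bytes\n", outq);
}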
Example #12
static int
efab_tcp_helper_get_info(ci_private_t *unused, void *arg)
{
  ci_netif_info_t *info = arg;
  int index, rc=0;
  tcp_helper_resource_t* thr = NULL;
  ci_netif* ni = NULL;
  int flags = EFAB_THR_TABLE_LOOKUP_CHECK_USER | EFAB_THR_TABLE_LOOKUP_NO_WARN; 

#if CI_CFG_EFAB_EPLOCK_RECORD_CONTENTIONS
  int j;
  eplock_resource_t* eplock_rs;
#endif

  info->ni_exists = 0;
  info->ni_no_perms_exists = 0;
  if( info->ni_orphan ) {
    flags |= EFAB_THR_TABLE_LOOKUP_NO_UL;
    info->ni_orphan = 0;
  }
  rc = efab_thr_table_lookup(NULL, info->ni_index, flags, &thr);
  if( rc == 0 ) {
    info->ni_exists = 1;
    info->ni_orphan = (thr->k_ref_count & TCP_HELPER_K_RC_NO_USERLAND);
    ni = &thr->netif;
    info->mmap_bytes = thr->mem_mmap_bytes;
    info->k_ref_count = thr->k_ref_count;
    info->rs_ref_count = oo_atomic_read(&thr->ref_count);
    memcpy(info->ni_name, ni->state->name, sizeof(ni->state->name));
  } else if( rc == -EACCES ) {
    info->ni_no_perms_id = info->ni_index;
    if( efab_thr_get_inaccessible_stack_info(info->ni_index, 
                                             &info->ni_no_perms_uid,
                                             &info->ni_no_perms_euid,
                                             &info->ni_no_perms_share_with,
                                             info->ni_no_perms_name) == 0 )
      info->ni_no_perms_exists = 1;
  }

  /* sub-ops that do not need the netif to exist */
  if( info->ni_subop == CI_DBG_NETIF_INFO_GET_NEXT_NETIF ) {
    tcp_helper_resource_t* next_thr;

    info->u.ni_next_ni.index = -1;
    for( index = info->ni_index + 1;
         index < 10000 /* FIXME: magic! */;
         ++index ) {
      rc = efab_thr_table_lookup(NULL, index, flags, &next_thr);
      if( rc == 0 ) {
        if( next_thr->k_ref_count & TCP_HELPER_K_RC_NO_USERLAND )
          efab_tcp_helper_k_ref_count_dec(next_thr, 1);
        else
          efab_thr_release(next_thr);
        info->u.ni_next_ni.index = index;
        break;
      }
      if( rc == -EACCES ) {
        info->u.ni_next_ni.index = index;
        break;
      }
    }
    rc = 0;
  }
  else if( info->ni_subop == CI_DBG_NETIF_INFO_NOOP ) {
    rc = 0;
  }

  if (!info->ni_exists)
    return 0;

  /* sub-ops that need the netif to exist */
  switch (info->ni_subop)
  {

    case CI_DBG_NETIF_INFO_GET_ENDPOINT_STATE:
      index = info->u.ni_endpoint.index;
      info->u.ni_endpoint.max = thr->netif.ep_tbl_n;
      if ((index < 0) || (index >= (int)thr->netif.ep_tbl_n)) {
        info->u.ni_endpoint.state = CI_TCP_STATE_FREE;
      }
      else {
        citp_waitable_obj* wo = ID_TO_WAITABLE_OBJ(ni, index);

        info->u.ni_endpoint.state = wo->waitable.state;

        if( wo->waitable.state == CI_TCP_STATE_UDP ) {
          ci_udp_state* us = &wo->udp;
          info->u.ni_endpoint.udpstate = us->udpflags;
          info->u.ni_endpoint.rx_pkt_ul = us->recv_q.pkts_delivered;
          info->u.ni_endpoint.rx_pkt_kn = us->stats.n_rx_os;
        }
        else if( wo->waitable.state & CI_TCP_STATE_TCP_CONN ) {
          ci_tcp_state* ts = &wo->tcp;
          info->u.ni_endpoint.tx_pkts_max = ts->so_sndbuf_pkts;
          info->u.ni_endpoint.tx_pkts_num = ts->send.num;
        }
        if( wo->waitable.state & CI_TCP_STATE_SOCKET ) {
          ci_sock_cmn* s = &wo->sock;
          info->u.ni_endpoint.protocol = (int) sock_protocol(s);
          info->u.ni_endpoint.laddr = sock_laddr_be32(s);
          info->u.ni_endpoint.lport = (int) sock_lport_be16(s);
          info->u.ni_endpoint.raddr = sock_raddr_be32(s);
          info->u.ni_endpoint.rport = (int) sock_rport_be16(s);
        }
      }
      break;

    case CI_DBG_NETIF_INFO_GET_NEXT_NETIF:
      /* If the current netif is found, we need to succeed */
      break;

    case CI_DBG_NETIF_INFO_NOOP:
      /* Always succeeds, rc already set */
      break;

    default:
      rc = -EINVAL;
      break;
  }
  if( thr ) {
    /* Lookup needs a matching efab_thr_release() in case of ordinary
     * stack but just a ref_count_dec in case of orphan
     */
    if( thr->k_ref_count & TCP_HELPER_K_RC_NO_USERLAND )
      efab_tcp_helper_k_ref_count_dec(thr, 1);
    else
      efab_thr_release(thr);
  }
  return rc;
}