Example #1
ci_inline int
ci_udp_recvmsg_socklocked_spin(ci_udp_iomsg_args* a,
                               ci_netif* ni, ci_udp_state* us,
                               struct recvmsg_spinstate* spin_state)
{
  ci_uint64 now_frc;
  int intf_i;

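  /* Read the CPU's 64-bit free-running cycle counter ("frc"). */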
  ci_frc64(&now_frc);
  if( now_frc - spin_state->start_frc < spin_state->max_spin ) {
#if CI_CFG_SPIN_STATS
    ni->state->stats.spin_udp_recv++;
#endif
    if( ci_netif_may_poll(ni) ) {
      OO_STACK_FOR_EACH_INTF_I(ni, intf_i)
        if( ci_netif_intf_has_event(ni, intf_i) && ci_netif_trylock(ni) ) {
          ci_netif_poll_intf_fast(ni, intf_i, now_frc);
          ci_netif_unlock(ni);
          if( ci_udp_recv_q_readable(ni, us) )
            return 0;
        }
      if( ni->state->poll_work_outstanding ||
          ci_netif_need_timer_prime(ni, now_frc) )
        if( ci_netif_trylock(ni) ) {
          ci_netif_poll(ni);
          ci_netif_unlock(ni);
        }
      if( ! ni->state->is_spinner )
        ni->state->is_spinner = 1;
    }
    return OO_SPINLOOP_PAUSE_CHECK_SIGNALS(ni, now_frc, 
                                           &spin_state->schedule_frc,
                                           us->s.so.rcvtimeo_msec,
                                           &us->s.b, spin_state->si);
  }

  /* The function continues beyond this excerpt; an assumed tail is
   * shown so the example reads as a complete function: once the spin
   * budget is exhausted, tell the caller to fall back to blocking. */
  return -EAGAIN;
}
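
The pattern above reads the free-running counter, polls for events while the spin budget lasts, and otherwise pauses and re-checks. The sketch below is a minimal standalone illustration of that pattern, assuming an x86 TSC read via __rdtsc(); poll_for_event() and the cycle budget are hypothetical stand-ins for Onload's netif polling and its configured spin limit.

#include <stdint.h>
#include <x86intrin.h>

/* Hypothetical readiness poll: returns nonzero once data has arrived. */
extern int poll_for_event(void);

/* Spin until poll_for_event() succeeds or max_spin_cycles TSC ticks
 * have elapsed.  Returns 0 on success, -1 when the budget is spent. */
static int spin_until_ready(uint64_t max_spin_cycles)
{
  uint64_t start = __rdtsc();

  do {
    if( poll_for_event() )
      return 0;
    _mm_pause();   /* like ci_spinloop_pause(): relax the core */
  } while( __rdtsc() - start < max_spin_cycles );

  return -1;       /* spin budget exhausted; caller should block */
}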
Example #2
/* The following routine was obtained from the sfnettest code.  As an
 * excerpt it omits its headers: gettimeofday() needs <sys/time.h> and
 * uint64_t needs <stdint.h>. */
static int ci_measure_cpu_khz(unsigned* cpu_khz)
{
    int interval_usec = 100000;
    struct timeval tv_s, tv_e;
    uint64_t tsc_s, tsc_e, tsc_e2;
    uint64_t tsc_gtod, min_tsc_gtod, usec = 0;
    int n, skew = 0;

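    /* First, find the minimum TSC cost of bracketing a gettimeofday()
     * call; later samples that cost much more than this minimum were
     * probably disturbed by an interrupt and are not trusted. */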
    ci_frc64(&tsc_s);
    gettimeofday(&tv_s, NULL);
    ci_frc64(&tsc_e2);
    min_tsc_gtod = tsc_e2 - tsc_s;
    n = 0;
    do {
        ci_frc64(&tsc_s);
        gettimeofday(&tv_s, NULL);
        ci_frc64(&tsc_e2);
        tsc_gtod = tsc_e2 - tsc_s;
        if( tsc_gtod < min_tsc_gtod )
            min_tsc_gtod = tsc_gtod;
    } while( ++n < 20 || (tsc_gtod > min_tsc_gtod * 2 && n < 100) );

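    /* Now wait for at least interval_usec of wall-clock time to pass,
     * accepting only a final sample whose gettimeofday() bracket was
     * cheap (i.e. uninterrupted). */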
    do {
        ci_frc64(&tsc_e);
        gettimeofday(&tv_e, NULL);
        ci_frc64(&tsc_e2);
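        /* If the second TSC read appears earlier than the first, the
         * counter is skewed (e.g. across cores) and cannot be trusted
         * for calibration. */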
        if( tsc_e2 < tsc_e ) {
            skew = 1;
            break;
        }
        tsc_gtod = tsc_e2 - tsc_e;
        usec = (tv_e.tv_sec - tv_s.tv_sec) * (uint64_t) 1000000;
        usec += tv_e.tv_usec - tv_s.tv_usec;
    } while( usec < interval_usec || tsc_gtod > min_tsc_gtod * 2 );

    if( skew )
        return 0;
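    /* Ticks per microsecond is MHz; multiply by 1000 to get kHz. */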
    *cpu_khz = (tsc_e - tsc_s) * 1000 / usec;
    return 1;
}
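
As a usage sketch: ci_measure_cpu_khz() returns 0 when TSC skew is detected and 1 on success, so a caller might retry the calibration a few times before giving up. The wrapper below is an assumption for illustration, not part of the original code.

/* Hypothetical wrapper: retry calibration when TSC skew is detected. */
static unsigned get_cpu_khz_or_zero(void)
{
    unsigned khz = 0;
    int attempt;

    for( attempt = 0; attempt < 5; ++attempt )
        if( ci_measure_cpu_khz(&khz) )
            return khz;   /* calibration succeeded */

    return 0;             /* persistent skew: caller must handle it */
}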
Example #3
static void oo_epoll2_wait(struct oo_epoll_private *priv,
                           struct oo_epoll2_action_arg *op)
{
  /* This function uses oo_timesync_cpu_khz, but we do not want to
   * block here waiting for it to stabilize.  That is why we already
   * blocked in oo_epoll_fop_open().
   */

  ci_uint64 start_frc = 0, now_frc = 0; /* =0 to make gcc happy */
  tcp_helper_resource_t* thr;
  ci_netif* ni;
  unsigned i;
  ci_int32 timeout = op->timeout;

  /* Record the start time. */
  if( timeout > 0 || ( timeout < 0 && op->spin_cycles ) )
    ci_frc64(&start_frc);

  /* Declare that we are spinning - even if we are just polling */
  OO_EPOLL_FOR_EACH_STACK(priv, i, thr, ni)
    ci_atomic32_inc(&ni->state->n_spinners);

  /* Poll each stack for events */
  op->rc = -ENOEXEC; /* impossible value */
  OO_EPOLL_FOR_EACH_STACK(priv, i, thr, ni) {
    if( ci_netif_may_poll(ni) && ci_netif_has_event(ni) &&
        ci_netif_trylock(ni) ) {
      int did_wake;
      ni->state->poll_did_wake = 0;
      ci_netif_poll(ni);
      did_wake = ni->state->poll_did_wake;
      ci_netif_unlock(ni);

      /* Possibly we've got the necessary event.  If so, exit. */
      if( did_wake ) {
        op->rc = efab_linux_sys_epoll_wait(op->kepfd,
                                           CI_USER_PTR_GET(op->events),
                                           op->maxevents, 0);
        if( op->rc != 0 )
          goto do_exit;
      }
    }
  }

  /* Do we have anything to do? */
  if( op->rc == -ENOEXEC ) {
    /* never called sys_epoll_wait() - do it! */

    op->rc = efab_linux_sys_epoll_wait(op->kepfd, CI_USER_PTR_GET(op->events),
                                       op->maxevents, 0);
  }
  if( op->rc != 0 || timeout == 0 )
    goto do_exit;

  /* Fixme: eventually, remove NO_USERLAND stacks from this list.
   * This is a good moment: we are about to spin or block, so there is
   * plenty of time.  But avoid locking! */

  /* Spin for a while. */
  if( op->spin_cycles ) {
    ci_uint64 schedule_frc;
    ci_uint64 max_spin = op->spin_cycles;
    int spin_limited_by_timeout = 0;
    ci_assert(start_frc);

    if( timeout > 0 ) {
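      /* op->timeout is in milliseconds and oo_timesync_cpu_khz is
       * cycles per millisecond, so this product is a cycle budget. */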
      ci_uint64 max_timeout_spin = (ci_uint64)timeout * oo_timesync_cpu_khz;
      if( max_timeout_spin <= max_spin ) {
        max_spin = max_timeout_spin;
        spin_limited_by_timeout = 1;
      }
    }

    /* spin */
    now_frc = schedule_frc = start_frc;
    do {
      if(unlikely( signal_pending(current) )) {
        op->rc = -EINTR; /* epoll_wait returns EINTR, not ERESTARTSYS! */
        goto do_exit;
      }

      OO_EPOLL_FOR_EACH_STACK(priv, i, thr, ni) {
#if CI_CFG_SPIN_STATS
        ni->state->stats.spin_epoll_kernel++;
#endif
        if( ci_netif_may_poll(ni) &&
            ci_netif_need_poll_spinning(ni, now_frc) &&
            ci_netif_trylock(ni) ) {
          ci_netif_poll(ni);
          ci_netif_unlock(ni);
        }
      }

      op->rc = efab_linux_sys_epoll_wait(op->kepfd, CI_USER_PTR_GET(op->events),
                                         op->maxevents, 0);
      if( op->rc != 0 )
        goto do_exit;

      ci_frc64(&now_frc);
      if(unlikely( now_frc - schedule_frc > oo_timesync_cpu_khz )) {
        schedule(); /* schedule() every 1ms */
        schedule_frc = now_frc;
      }
      else
        ci_spinloop_pause();
    } while( now_frc - start_frc < max_spin );
  }

  /* The blocking phase that follows the spin in the original function
   * is elided from this excerpt; the exit path below is assumed, to
   * balance the n_spinners increment at the top of the function. */
 do_exit:
  OO_EPOLL_FOR_EACH_STACK(priv, i, thr, ni)
    ci_atomic32_dec(&ni->state->n_spinners);
}
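
The loop above interleaves three concerns: polling each stack, checking for pending signals, and calling schedule() roughly once per millisecond (oo_timesync_cpu_khz cycles) so that a spinning thread does not starve other work. A userspace analogue of that last idea is sketched below, assuming clock_gettime(CLOCK_MONOTONIC) in place of the TSC and sched_yield() in place of the kernel's schedule(); check_ready() is a hypothetical readiness test.

#include <sched.h>
#include <stdint.h>
#include <time.h>

extern int check_ready(void);   /* hypothetical: nonzero when done */

static uint64_t now_ns(void)
{
  struct timespec ts;
  clock_gettime(CLOCK_MONOTONIC, &ts);
  return (uint64_t)ts.tv_sec * 1000000000u + (uint64_t)ts.tv_nsec;
}

/* Spin for at most max_spin_ns, yielding the CPU about once per
 * millisecond so that other runnable threads are not starved. */
static int spin_with_yield(uint64_t max_spin_ns)
{
  uint64_t start = now_ns();
  uint64_t last_yield = start;
  uint64_t now = start;

  do {
    if( check_ready() )
      return 0;
    now = now_ns();
    if( now - last_yield > 1000000 ) {   /* ~1 ms since last yield */
      sched_yield();
      last_yield = now;
    }
  } while( now - start < max_spin_ns );

  return -1;   /* spin budget exhausted */
}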