Code example #1
File: iptimer.c   Project: majek/openonload
/* unpick the ci_ip_timer structure to actually do the callback */ 
static void ci_ip_timer_docallback(ci_netif *netif, ci_ip_timer* ts)
{
  ci_assert( TIME_LE(ts->time, ci_ip_time_now(netif)) );
  ci_assert( ts->time == IPTIMER_STATE(netif)->sched_ticks );

  switch(ts->fn){
  case CI_IP_TIMER_TCP_RTO:
    CHECK_TS(netif, SP_TO_TCP(netif, ts->param1));
    ci_tcp_timeout_rto(netif, SP_TO_TCP(netif, ts->param1));
    break;
  case CI_IP_TIMER_TCP_DELACK:
    CHECK_TS(netif, SP_TO_TCP(netif, ts->param1));
    ci_tcp_timeout_delack(netif, SP_TO_TCP(netif, ts->param1));
    break;
  case CI_IP_TIMER_TCP_ZWIN:
    CHECK_TS(netif, SP_TO_TCP(netif, ts->param1));
    ci_tcp_timeout_zwin(netif, SP_TO_TCP(netif, ts->param1));
    break;
  case CI_IP_TIMER_TCP_KALIVE:
    CHECK_TS(netif, SP_TO_TCP(netif, ts->param1));
    ci_tcp_timeout_kalive(netif, SP_TO_TCP(netif, ts->param1));
    break;
  case CI_IP_TIMER_TCP_LISTEN:
    ci_tcp_timeout_listen(netif, SP_TO_TCP_LISTEN(netif, ts->param1));    
    break;
  case CI_IP_TIMER_TCP_CORK:
    ci_tcp_timeout_cork(netif, SP_TO_TCP(netif, ts->param1));
    break;
  case CI_IP_TIMER_NETIF_TIMEOUT:
    ci_netif_timeout_state(netif);
    break;
  case CI_IP_TIMER_PMTU_DISCOVER:
    ci_pmtu_timeout_pmtu(netif, SP_TO_TCP(netif, ts->param1));
    break;
#if CI_CFG_TCP_SOCK_STATS
  case CI_IP_TIMER_TCP_STATS:
    ci_tcp_stats_action(netif, SP_TO_TCP(netif, ts->param1),
                        CI_IP_STATS_FLUSH,
                        CI_IP_STATS_OUTPUT_NONE, NULL, NULL);
    break;
#endif
#if CI_CFG_SUPPORT_STATS_COLLECTION
  case CI_IP_TIMER_NETIF_STATS:
    ci_netif_stats_action(netif, CI_IP_STATS_FLUSH,
                          CI_IP_STATS_OUTPUT_NONE, NULL, NULL );
    break;
#endif
#if CI_CFG_IP_TIMER_DEBUG
  case CI_IP_TIMER_DEBUG_HOOK:
    ci_ip_timer_debug_fn(netif, ts->link.addr, ts->param1);
    break;
#endif
  default:
    LOG_U(log( LPF "unknown timer callback code:%x param1:%d",
	       ts->fn, OO_SP_FMT(ts->param1)));    
    CI_DEBUG(ci_fail_stop_fn());
  }  
}
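The callback here is selected by a small integer code in ts->fn, and the socket it applies to by an ID in ts->param1 (translated with SP_TO_TCP()), rather than by storing function and object pointers in the timer, presumably because the timer lives in the shared netif state where process-local pointers would not be valid. Below is a minimal, self-contained sketch of that code-plus-parameter dispatch pattern; all names in it are illustrative, not openonload's.

#include <stdio.h>

/* Illustrative timer: an integer callback code plus an opaque parameter. */
enum ex_timer_fn { EX_TIMER_RTO, EX_TIMER_DELACK };

struct ex_timer {
  enum ex_timer_fn fn;     /* which action to run on expiry */
  int              param1; /* e.g. an index identifying the socket */
};

/* Dispatch on the code, in the same style as ci_ip_timer_docallback() above. */
static void ex_timer_docallback(struct ex_timer* ts)
{
  switch( ts->fn ) {
  case EX_TIMER_RTO:    printf("RTO fired for sock %d\n", ts->param1);    break;
  case EX_TIMER_DELACK: printf("delack fired for sock %d\n", ts->param1); break;
  default:              printf("unknown timer code %d\n", (int)ts->fn);
  }
}

int main(void)
{
  struct ex_timer t = { EX_TIMER_RTO, 7 };
  ex_timer_docallback(&t);
  return 0;
}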
Code example #2
File: iptimer.c   Project: majek/openonload
/* initialise the iptimer scheduler */
void ci_ip_timer_state_init(ci_netif* netif, unsigned cpu_khz)
{
  ci_ip_timer_state* ipts = IPTIMER_STATE(netif);
  int i;
  int us2isn;

  /* initialise the cycle to tick constants */
  ipts->khz = cpu_khz;
  ipts->ci_ip_time_frc2tick = shift_for_gran(CI_IP_TIME_APP_GRANULARITY, ipts->khz);
  ipts->ci_ip_time_frc2us = shift_for_gran(1, ipts->khz);

  /* The Linux kernel ticks the initial sequence number that it would use for
   * a given tuple every 64 ns.  Onload does the same when using
   * EF_TCP_ISN_MODE=clocked.  In EF_TCP_ISN_MODE=clocked+cache, however, our
   * use of the clock-driven ISN is slightly different, as we remember old
   * sequence numbers in the case where the clock-driven ISN is not known to
   * be safe.  As such, we don't need it to tick so fast, and so we let it
   * tick at most every 256 ns.  This means that it takes more than eight
   * minutes to wrap by half, while four minutes is our assumed maximum
   * peer-MSL.  In practice this reduces the cases in which we have to
   * remember old sequence numbers. */
  us2isn = NI_OPTS(netif).tcp_isn_mode != 0 ? 2 : 4;
  ipts->ci_ip_time_frc2isn = ipts->ci_ip_time_frc2us > us2isn ?
                             ipts->ci_ip_time_frc2us - us2isn : 0;

  ci_ip_time_initial_sync(ipts);
  ipts->sched_ticks = ci_ip_time_now(netif);
  ipts->closest_timer = ipts->sched_ticks + IPTIME_INFINITY;

  /* To convert ms to ticks we use fixed point arithmetic.
   * Calculate the conversion factor, which is expected to be in the range
   * (0.5, 1]. */
  ipts->ci_ip_time_ms2tick_fxp =
    (((ci_uint64)ipts->khz) << 32) /
    (1u << ipts->ci_ip_time_frc2tick);
  ci_assert_gt(ipts->ci_ip_time_ms2tick_fxp, 1ull<<31);
  ci_assert_le(ipts->ci_ip_time_ms2tick_fxp, 1ull<<32);

  /* set module specific time constants dependent on frc2tick */
  ci_tcp_timer_init(netif);

  ci_ni_dllist_init(netif, &ipts->fire_list,
		    oo_ptr_to_statep(netif, &ipts->fire_list),
                    "fire");
  
  /* Initialise the wheel lists. */
  for( i=0; i < CI_IPTIME_WHEELSIZE; i++)
    ci_ni_dllist_init(netif, &ipts->warray[i],
		      oo_ptr_to_statep(netif, &ipts->warray[i]),
                      "timw");
}
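The two assertions above pin ci_ip_time_ms2tick_fxp to a Q32 fixed-point ticks-per-millisecond factor in (0.5, 1]. The following is a sketch, under the assumption that such a factor is applied with a multiply and a shift, of converting a millisecond interval to ticks; the helper name is hypothetical and not part of the project.

#include <stdint.h>

/* Hypothetical helper: ms2tick_fxp is ticks-per-millisecond scaled by 2^32,
 * so a millisecond interval converts to ticks with a 64-bit multiply and a
 * 32-bit shift.  Rounding up keeps a non-zero delay from collapsing to zero
 * ticks.  Assumes ms < 2^31 so the 64-bit product cannot overflow. */
static inline uint32_t example_ms2ticks(uint64_t ms2tick_fxp, uint32_t ms)
{
  return (uint32_t)(((uint64_t)ms * ms2tick_fxp + ((1ull << 32) - 1)) >> 32);
}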
Code example #3
File: iptimer.c   Project: majek/openonload
void ci_ip_timer_debug(ci_netif* netif, int timer_id, int param) {
  LOG_ITV(log( LPF "netif=%lx  timer_id=%u  param=%x  now=%u",
	       (unsigned long)netif, timer_id, param, 
	       ci_ip_time_now(netif)));  
}
Code example #4
File: iptimer.c   Project: majek/openonload
/* run any pending timers */
void ci_ip_timer_poll(ci_netif *netif) {
  ci_ip_timer_state* ipts = IPTIMER_STATE(netif); 
  ci_iptime_t* stime = &ipts->sched_ticks;
  ci_ip_timer* ts;
  ci_iptime_t rtime;
  ci_ni_dllist_link* link;
  int changed = 0;

  /* The caller is expected to ensure that the current time is sufficiently
  ** up-to-date.
  */
  rtime = ci_ip_time_now(netif);
  /* check for sanity i.e. time always goes forwards */
  ci_assert( TIME_GE(rtime, *stime) );

  /* Chasing Bug 2855: check that the temp list used is OK before we start */
  ci_assert( ci_ni_dllist_is_valid(netif, &ipts->fire_list.l) );
  ci_assert( ci_ni_dllist_is_empty(netif, &ipts->fire_list));

  while( TIME_LT(*stime, rtime) ) {

    DETAILED_CHECK_TIMERS(netif);

    /* advance the schedulers view of time */
    (*stime)++;

    /* cascade through wheels if reached end of current wheel */
    if(BUCKETNO(0, *stime) == 0) {
      if(BUCKETNO(1, *stime) == 0) {
	if(BUCKETNO(2, *stime) == 0) {
	  ci_ip_timer_cascadewheel(netif, 3, *stime);
	}
	ci_ip_timer_cascadewheel(netif, 2, *stime);
      }
      changed = ci_ip_timer_cascadewheel(netif, 1, *stime);
    }


    /* Bug 1828: We need to be careful here ... because:
        - ci_ip_timer_docallback can set/clear timers
        - the timers being set/cleared may not necessarily be the ones firing
        - however, they could be in this bucket
       In summary, we need to ensure the ni_dllist stays valid at all times
       so that it is safe to use.  This is slightly complicated by the fact
       that it is not possible to hold indirected linked lists on the stack. */
    ci_assert( ci_ni_dllist_is_valid(netif, &ipts->fire_list.l));
    ci_assert( ci_ni_dllist_is_empty(netif, &ipts->fire_list));

    /* run timers in the current bucket */
    ci_ni_dllist_rehome( netif,
                         &ipts->fire_list,
                         &ipts->warray[BUCKETNO(0, *stime)] );
    DETAILED_CHECK_TIMERS(netif);

    while( (link = ci_ni_dllist_try_pop(netif, &ipts->fire_list)) ) {

      ts = LINK2TIMER(link);

      ci_assert_equal(ts->time, *stime);

      /* ensure time marked as NOT pending */
      ci_ni_dllist_self_link(netif, &ts->link);

      /* callback safe to set/clear this or other timers */
      ci_ip_timer_docallback(netif, ts);
    }
    ci_assert( ci_ni_dllist_is_valid(netif, &ipts->fire_list.l) );
    ci_assert( ci_ni_dllist_is_empty(netif, &ipts->fire_list));

    DETAILED_CHECK_TIMERS(netif);
  }
  
  ci_assert( ci_ni_dllist_is_valid(netif, &ipts->fire_list.l) );
  ci_assert( ci_ni_dllist_is_empty(netif, &ipts->fire_list));

  /* What is our next timer?
   * Update it if our previous "closest" timer has already been handled, or
   * if the previous estimate was "infinity". */
  if( TIME_GE(ipts->sched_ticks, ipts->closest_timer) ||
      (changed &&
       ipts->closest_timer - ipts->sched_ticks > IPTIME_INFINITY_LOW) ) {
    /* we peek into the first wheel only */
    ci_iptime_t base = ipts->sched_ticks & WHEEL0_MASK;
    ci_iptime_t b = ipts->sched_ticks - base;
    for( b++ ; b < CI_IPTIME_BUCKETS; b++ ) {
      if( !ci_ni_dllist_is_empty(netif, &ipts->warray[b]) ) {
        ipts->closest_timer = base + b;
        return;
      }
    }

    /* We do not know the next timer.  Set it to a sort of infinity. */
    ipts->closest_timer = ipts->sched_ticks + IPTIME_INFINITY;
  }
}
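The cascade test at the top of the loop only works if BUCKETNO(w, t) extracts wheel w's digit of the absolute tick count, so that a higher wheel is cascaded exactly when every digit below it has just wrapped to zero. Here is a sketch of macros of that shape, assuming 8-bit wheels; the constants and names are illustrative, not the project's actual definitions.

/* Illustrative wheel-indexing macros: wheel w's bucket index is the w-th
 * 8-bit digit of the absolute tick count, so EX_BUCKETNO(0, t) == 0 exactly
 * when wheel 0 has wrapped and wheel 1 is due to be cascaded, and likewise
 * up the higher wheels. */
#define EX_BUCKETBITS  8u
#define EX_BUCKETS     (1u << EX_BUCKETBITS)
#define EX_BUCKETNO(wheel, t)  (((t) >> ((wheel) * EX_BUCKETBITS)) & (EX_BUCKETS - 1u))

Under a layout like this, the peek at the end of the function only has to scan the remaining wheel-0 buckets above the current tick, which is why closest_timer can be left at a large "infinity" value when nothing is found there.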
Code example #5
File: tcp_sockopts.c   Project: ido/openonload
static int
ci_tcp_info_get(ci_netif* netif, ci_sock_cmn* s, struct ci_tcp_info* info)
{
  ci_iptime_t now = ci_ip_time_now(netif);

  memset(info, 0, sizeof(*info));

  info->tcpi_state = ci_sock_states_linux_map[CI_TCP_STATE_NUM(s->b.state)];
  /* info->tcpi_backoff = 0; */

  info->tcpi_ato = 
    ci_ip_time_ticks2ms(netif, netif->state->conf.tconst_delack) * 1000;
  info->tcpi_rcv_mss    = 536; /* no way to get the actual value */
  /* info->tcpi_sacked     = 0; */ /* there is no way to get any of these */
  /* info->tcpi_lost       = 0; */
  /* info->tcpi_fackets    = 0; */
  /* info->tcpi_reordering = 0; */
  /* info->tcpi_last_ack_sent = 0; */
  /* info->tcpi_last_ack_recv = 0; */

  if( s->b.state != CI_TCP_LISTEN ) {
    ci_tcp_state* ts = SOCK_TO_TCP(s);

    info->tcpi_pmtu       = ts->pmtus.pmtu;
    info->tcpi_ca_state = sock_congstate_linux_map[ts->congstate];
    info->tcpi_retransmits = ts->retransmits;
    info->tcpi_probes = ts->ka_probes;

    /* info->tcpi_options = 0; */
    if( ts->tcpflags & CI_TCPT_FLAG_TSO )
      info->tcpi_options |= CI_TCPI_OPT_TIMESTAMPS;
    if( ts->tcpflags & CI_TCPT_FLAG_ECN )
      info->tcpi_options |= CI_TCPI_OPT_ECN;
    if( ts->tcpflags & CI_TCPT_FLAG_SACK )
      info->tcpi_options |= CI_TCPI_OPT_SACK;

    if( ts->tcpflags & CI_TCPT_FLAG_WSCL ) {
      info->tcpi_options |= CI_TCPI_OPT_WSCALE;
      info->tcpi_snd_wscale = ts->snd_wscl;
      info->tcpi_rcv_wscale = ts->rcv_wscl;
    }

    info->tcpi_rto = ci_ip_time_ticks2ms(netif, ts->rto) * 1000;
    info->tcpi_snd_mss    = ts->eff_mss;
    info->tcpi_unacked    = ts->acks_pending & CI_TCP_ACKS_PENDING_MASK;
#if CI_CFG_TCP_SOCK_STATS
    info->tcpi_retrans    = ts->stats_cumulative.count.tx_retrans_pkt;
#endif
#if CI_CFG_CONGESTION_WINDOW_VALIDATION
    info->tcpi_last_data_sent = ci_ip_time_ticks2ms(netif,
                                                    now - ts->t_last_sent);
#else
    info->tcpi_last_data_sent = 0;
#endif
    info->tcpi_last_data_recv = ci_ip_time_ticks2ms(netif,
                                                    now - ts->tspaws);
    
    info->tcpi_rtt = ci_ip_time_ticks2ms(netif, ts->sa) * 1000 / 8;
    info->tcpi_rttvar = ci_ip_time_ticks2ms(netif, ts->sv) * 1000 / 4;
    info->tcpi_rcv_ssthresh = ts->ssthresh;
    if( tcp_eff_mss(ts) != 0 ) {
      info->tcpi_snd_ssthresh = ts->ssthresh / tcp_eff_mss(ts);
      info->tcpi_snd_cwnd     = ts->cwnd / tcp_eff_mss(ts);
    }
    else { /* non-initialised connection */
      info->tcpi_snd_ssthresh = 0;
      info->tcpi_snd_cwnd     = 0;
    }
    info->tcpi_advmss     = ts->amss;
  }

  return 0;
}
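The divisions by 8 and 4 when filling tcpi_rtt and tcpi_rttvar look like the usual Van Jacobson scaling, where the smoothed RTT is kept as 8*SRTT and the variance as 4*RTTVAR so that the EWMA updates reduce to shifts; the division restores the unscaled value, reported here in microseconds. A worked sketch under that assumption follows; the helper name is hypothetical.

/* Hypothetical helper: assuming ts->sa holds 8*SRTT in ticks.  E.g. with
 * 1 tick == 1 ms and sa == 80 ticks this yields 80 * 1000 / 8 == 10000 us,
 * i.e. a smoothed RTT of 10 ms. */
static unsigned example_srtt_us(ci_netif* netif, unsigned sa_ticks)
{
  return ci_ip_time_ticks2ms(netif, sa_ticks) * 1000 / 8;
}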