static int ci_tcp_listen_init(ci_netif *ni, ci_tcp_socket_listen *tls)
{
  int i;
  oo_p sp;

  tls->acceptq_n_in = tls->acceptq_n_out = 0;
  tls->acceptq_put = CI_ILL_END;
  tls->acceptq_get = OO_SP_NULL;
  tls->n_listenq = 0;
  tls->n_listenq_new = 0;

  /* Allocate and initialise the listen bucket */
  if( OO_P_IS_NULL(ni->state->free_aux_mem) )
    return -ENOBUFS;
  tls->bucket = ni->state->free_aux_mem;
  ci_tcp_bucket_alloc(ni);
  tls->n_buckets = 1;

  /* Initialise the listenQ. */
  for( i = 0; i <= CI_CFG_TCP_SYNACK_RETRANS_MAX; ++i ) {
    sp = TS_OFF(ni, tls);
    OO_P_ADD(sp, CI_MEMBER_OFFSET(ci_tcp_socket_listen, listenq[i]));
    ci_ni_dllist_init(ni, &tls->listenq[i], sp, "lstq");
  }

  /* Initialize the cache and pending lists for the EP-cache.
   * See comment at definition for details */
  LOG_EP(log("Initialise cache and pending list for id %d", S_FMT(tls)));
#if CI_CFG_FD_CACHING
  sp = TS_OFF(ni, tls);
  OO_P_ADD(sp, CI_MEMBER_OFFSET(ci_tcp_socket_listen, epcache_cache));
  ci_ni_dllist_init(ni, &tls->epcache_cache, sp, "epch");

  sp = TS_OFF(ni, tls);
  OO_P_ADD(sp, CI_MEMBER_OFFSET(ci_tcp_socket_listen, epcache_pending));
  ci_ni_dllist_init(ni, &tls->epcache_pending, sp, "eppd");

  sp = TS_OFF(ni, tls);
  OO_P_ADD(sp, CI_MEMBER_OFFSET(ci_tcp_socket_listen, epcache_connected));
  ci_ni_dllist_init(ni, &tls->epcache_connected, sp, "epco");

  sp = TS_OFF(ni, tls);
  OO_P_ADD(sp, CI_MEMBER_OFFSET(ci_tcp_socket_listen, epcache_fd_states));
  ci_ni_dllist_init(ni, &tls->epcache_fd_states, sp, "ecfd");

  tls->cache_avail_sock = ni->state->opts.per_sock_cache_max;
#endif

  return 0;
}
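/*
 * Illustrative sketch (not Onload code): the initialisation above stores
 * every embedded list head as a state-relative offset (TS_OFF plus
 * CI_MEMBER_OFFSET) rather than a raw pointer, because the shared state
 * may be mapped at different virtual addresses in different processes.
 * The standalone program below mimics that pattern with plain offsetof();
 * all names in it (state_off_t, rel_list, listen_sock) are hypothetical.
 */
#if 0   /* example only, not compiled with the stack */
#include <stddef.h>
#include <stdio.h>

typedef size_t state_off_t;                  /* analogue of oo_p */

struct rel_list { state_off_t next, prev; }; /* offset-based links */

struct listen_sock {                         /* cut-down listen socket */
  struct rel_list listenq[4];                /* one list per SYN-ACK rtx */
};

/* Initialise a list to point at itself, storing offsets, not pointers. */
static void rel_list_init(void *state_base, struct rel_list *l)
{
  state_off_t self = (char *)l - (char *)state_base;
  l->next = l->prev = self;
}

int main(void)
{
  static char state[4096];                   /* stand-in shared-state area */
  struct listen_sock *tls = (struct listen_sock *)state;
  int i;

  /* Mirrors the listenQ loop: compute one offset per embedded list. */
  for( i = 0; i < 4; ++i ) {
    state_off_t sp = (char *)tls - state;    /* like TS_OFF(ni, tls) */
    sp += offsetof(struct listen_sock, listenq[i]);  /* like OO_P_ADD */
    rel_list_init(state, (struct rel_list *)(state + sp));
    printf("listenq[%d] at offset %zu\n", i, sp);
  }
  return 0;
}
#endif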
/* Initialise the iptimer scheduler. */
void ci_ip_timer_state_init(ci_netif* netif, unsigned cpu_khz)
{
  ci_ip_timer_state* ipts = IPTIMER_STATE(netif);
  int i;
  int us2isn;

  /* Initialise the cycle-to-tick constants. */
  ipts->khz = cpu_khz;
  ipts->ci_ip_time_frc2tick = shift_for_gran(CI_IP_TIME_APP_GRANULARITY,
                                             ipts->khz);
  ipts->ci_ip_time_frc2us = shift_for_gran(1, ipts->khz);

  /* The Linux kernel ticks the initial sequence number that it would use for
   * a given tuple every 64 ns.  Onload does the same when using
   * EF_TCP_ISN_MODE=clocked.  However, in EF_TCP_ISN_MODE=clocked+cache our
   * use of the clock-driven ISN is slightly different, as we remember old
   * sequence numbers in the case where the clock-driven ISN is not known to
   * be safe.  As such, we don't need it to tick so fast, and so we let it
   * tick at most every 256 ns.  This means that it takes more than eight
   * minutes for the ISN to advance by half the sequence space, while four
   * minutes is our assumed maximum peer-MSL.  In practice this reduces the
   * cases in which we have to remember old sequence numbers. */
  us2isn = NI_OPTS(netif).tcp_isn_mode != 0 ? 2 : 4;
  ipts->ci_ip_time_frc2isn = ipts->ci_ip_time_frc2us > us2isn ?
                             ipts->ci_ip_time_frc2us - us2isn : 0;

  ci_ip_time_initial_sync(ipts);
  ipts->sched_ticks = ci_ip_time_now(netif);
  ipts->closest_timer = ipts->sched_ticks + IPTIME_INFINITY;

  /* To convert ms to ticks we use fixed-point arithmetic.  Calculate the
   * conversion factor, which is expected to be in range (0.5, 1]. */
  ipts->ci_ip_time_ms2tick_fxp =
    (((ci_uint64)ipts->khz) << 32) /
    (1u << ipts->ci_ip_time_frc2tick);
  ci_assert_gt(ipts->ci_ip_time_ms2tick_fxp, 1ull << 31);
  ci_assert_le(ipts->ci_ip_time_ms2tick_fxp, 1ull << 32);

  /* Set module-specific time constants dependent on frc2tick. */
  ci_tcp_timer_init(netif);

  ci_ni_dllist_init(netif, &ipts->fire_list,
                    oo_ptr_to_statep(netif, &ipts->fire_list), "fire");

  /* Initialise the wheel lists. */
  for( i = 0; i < CI_IPTIME_WHEELSIZE; i++ )
    ci_ni_dllist_init(netif, &ipts->warray[i],
                      oo_ptr_to_statep(netif, &ipts->warray[i]), "timw");
}
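/*
 * Illustrative sketch (not Onload code): checks the arithmetic claimed in
 * the comment above.  At a 256 ns ISN tick it takes 2^31 * 256 ns, about
 * 9.2 minutes, for the ISN to advance by half the 32-bit space, which is
 * indeed more than twice a 4-minute peer-MSL.  The shift_for_gran() here
 * is a plausible reading (smallest shift giving a tick no shorter than the
 * granularity), not the stack's source; the 3 GHz clock and the 1 ms app
 * granularity are assumed example values.
 */
#if 0   /* example only, not compiled with the stack */
#include <stdint.h>
#include <stdio.h>

static unsigned shift_for_gran(unsigned gran_us, unsigned khz)
{
  /* Smallest s such that 2^s CPU cycles last at least gran_us us. */
  uint64_t cycles_per_gran = (uint64_t)gran_us * khz / 1000;
  unsigned s = 0;
  while( ((uint64_t)1 << s) < cycles_per_gran )
    ++s;
  return s;
}

int main(void)
{
  unsigned khz = 3000000;                        /* assumed 3 GHz CPU */
  unsigned frc2us = shift_for_gran(1, khz);      /* cycles -> ~1 us units */
  unsigned frc2tick = shift_for_gran(1000, khz); /* cycles -> ~1 ms ticks */

  /* clocked: 4 fewer bits kept -> ~64 ns ISN tick;
   * clocked+cache: 2 fewer bits kept -> ~256 ns ISN tick. */
  printf("frc2us=%u frc2isn=%u (clocked) / %u (clocked+cache)\n",
         frc2us, frc2us - 4, frc2us - 2);

  /* Half the 32-bit sequence space at a 256 ns tick. */
  printf("half-wrap: %.1f minutes\n", (double)(1u << 31) * 256e-9 / 60);

  /* The ms->tick factor from ci_ip_timer_state_init: khz cycles per ms
   * over 2^frc2tick cycles per tick, scaled by 2^32.  Because 2^frc2tick
   * is the smallest power of two >= khz, the result lies in (2^31, 2^32],
   * matching the assertions above. */
  uint64_t ms2tick_fxp = ((uint64_t)khz << 32) >> frc2tick;
  printf("frc2tick=%u ms2tick_fxp=%llu (2^31=%llu)\n", frc2tick,
         (unsigned long long)ms2tick_fxp, 1ull << 31);
  return 0;
}
#endif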
/* Take the timers in the bucket corresponding to time stime in the given
** wheel and reinsert them into the wheel below (i.e. into wheelno-1). */
static int ci_ip_timer_cascadewheel(ci_netif* netif, int wheelno,
                                    ci_iptime_t stime)
{
  ci_ip_timer* ts;
  ci_ni_dllist_t* bucket;
  oo_p curid, buckid;
  int changed = 0;

  ci_assert(wheelno > 0 && wheelno < CI_IPTIME_WHEELS);
  /* Check time is on the boundary expected by the wheel number passed in. */
  ci_assert( (stime & ((unsigned)(-1) << (CI_IPTIME_BUCKETBITS*wheelno)))
             == stime );

  /* Bucket to empty. */
  bucket = BUCKET(netif, wheelno, stime);
  buckid = ci_ni_dllist_link_addr(netif, &bucket->l);
  curid = bucket->l.next;

  LOG_ITV(log(LN_FMT "cascading wheel=%u sched_ticks=0x%x bucket=%i",
              LN_PRI_ARGS(netif), wheelno, stime,
              BUCKETNO(wheelno, stime)));

  /* Ditch the timers in this dllist; pointers held in curid and buckid. */
  ci_ni_dllist_init(netif, bucket,
                    ci_ni_dllist_link_addr(netif, &bucket->l), "timw");

  while( ! OO_P_EQ(curid, buckid) ) {
    ts = ADDR2TIMER(netif, curid);

    /* Get next in the linked list. */
    curid = ts->link.next;

#ifndef NDEBUG
    {
      /* If inserting into wheel 0, the top 3 wheels must have the same
       * time. */
      if( wheelno == 1 )
        ci_assert( (stime & WHEEL0_MASK) == (ts->time & WHEEL0_MASK) );
      /* Else, if inserting into wheel 1, the top 2 wheels must have the
       * same time. */
      else if( wheelno == 2 )
        ci_assert( (stime & WHEEL1_MASK) == (ts->time & WHEEL1_MASK) );
      /* Else, if inserting into wheel 2, the top wheel must have the same
       * time. */
      else {
        ci_assert(wheelno == 3);
        ci_assert( (stime & WHEEL2_MASK) == (ts->time & WHEEL2_MASK) );
      }
    }
#endif

    /* Insert ts into the wheel below. */
    bucket = BUCKET(netif, wheelno-1, ts->time);
    changed = 1;

    /* Append onto the correct bucket.
    **
    ** NB this might not be stable, because a later insert with a smaller
    ** relative time will be before an earlier insert with a larger
    ** relative time.  Oh well, doesn't really matter. */
    ci_ni_dllist_push_tail(netif, bucket, &ts->link);
    ci_assert(ci_ip_timer_is_link_valid(netif, ts));
  }

  return changed;
}
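/*
 * Illustrative sketch (not Onload code): a toy two-level timer wheel that
 * shows the cascade step in miniature.  When the low wheel wraps, the high
 * wheel's current bucket is detached and each timer is re-hashed into the
 * low wheel by the low-order bits of its expiry time, just as above (and,
 * as above, ordering within a bucket is not preserved).  All names here
 * are hypothetical, and singly-linked lists are used for brevity.
 */
#if 0   /* example only, not compiled with the stack */
#include <stdio.h>

#define BUCKETBITS 4
#define WHEELSIZE  (1 << BUCKETBITS)
#define MASK       (WHEELSIZE - 1)

struct timer {
  unsigned      time;                   /* absolute expiry tick */
  struct timer* next;
};

static struct timer* wheel[2][WHEELSIZE];

static void insert(struct timer* t, unsigned now)
{
  /* Timers due within one low-wheel revolution go in wheel 0; later ones
   * go in wheel 1, bucketed by the next BUCKETBITS bits of their time. */
  int w = ((t->time ^ now) >> BUCKETBITS) != 0;
  unsigned b = (t->time >> (w * BUCKETBITS)) & MASK;
  t->next = wheel[w][b];
  wheel[w][b] = t;
}

static void cascade(unsigned stime)
{
  /* Analogue of ci_ip_timer_cascadewheel(netif, 1, stime): detach the
   * high-wheel bucket for stime and reinsert each timer below. */
  unsigned b = (stime >> BUCKETBITS) & MASK;
  struct timer* t = wheel[1][b];
  wheel[1][b] = NULL;
  while( t != NULL ) {
    struct timer* next = t->next;
    unsigned lo = t->time & MASK;       /* low-order bits pick the bucket */
    t->next = wheel[0][lo];
    wheel[0][lo] = t;
    t = next;
  }
}

int main(void)
{
  struct timer a = { 0x17, NULL };
  struct timer c = { 0x1c, NULL };
  insert(&a, 0);                        /* lands in wheel 1, bucket 1 */
  insert(&c, 0);                        /* lands in wheel 1, bucket 1 */
  cascade(0x10);                        /* low wheel has wrapped to 0x10 */
  printf("wheel0[0x7]=0x%x wheel0[0xc]=0x%x\n",
         wheel[0][0x7]->time, wheel[0][0xc]->time);
  return 0;
}
#endif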