/* Return a cached endpoint's waitable to its cached-idle state.
 *
 * Unlike __citp_waitable_obj_free() this does NOT orphan the socket or
 * change its state: the endpoint stays attached, in CI_TCP_CLOSED, so it
 * can be handed out again straight from the cache.
 *
 * Preconditions (asserted below): socket is not orphaned, is marked
 * NOT_READY and IN_CACHE, is CI_TCP_CLOSED, and is fully unlinked from
 * the post-poll list and the wt_next chain.
 */
void citp_waitable_obj_free_to_cache(ci_netif* ni, citp_waitable* w)
{
#if defined (__KERNEL__) && !defined(NDEBUG)
  /* There should be no non-atomic work queued for endpoints going to cache -
   * they don't get their filters removed. */
  tcp_helper_endpoint_t* ep = ci_netif_get_valid_ep(ni, w->bufid);
  ci_assert(!(ep->ep_aflags & OO_THR_EP_AFLAG_NON_ATOMIC));
#endif
  ci_assert(!(w->sb_aflags & CI_SB_AFLAG_ORPHAN));
  ci_assert(w->sb_aflags & CI_SB_AFLAG_NOT_READY);
  ci_assert(w->sb_aflags & CI_SB_AFLAG_IN_CACHE);
  ci_assert(w->state == CI_TCP_CLOSED);
  ci_assert(ci_ni_dllist_is_self_linked(ni, &w->post_poll_link));
  ci_assert(OO_SP_IS_NULL(w->wt_next));

  /* This resets a subset of the state done by __citp_waitable_obj_free.
   * We do not set the orphan flag, as cached endpoints remain attached.
   * We do not alter the state, as that too remains accurate.
   *
   * We preserve cache related aflags.  If the endpoint is freed before being
   * accepted from the cache then these will be cleared when
   * __citp_waitable_obj_free is called, otherwise they'll be checked for
   * correctness, and updated if necessary when the socket is accepted. */
  w->wake_request = 0;
  w->sb_flags = 0;
  /* Atomic AND keeps only the cache-related flags; all other aflags are
   * cleared in one step. */
  ci_atomic32_and(&w->sb_aflags,
                  CI_SB_AFLAG_NOT_READY | CI_SB_AFLAG_CACHE_PRESERVE);
  w->lock.wl_val = 0;
  w->ready_list_id = 0;
  CI_USER_PTR_SET(w->eitem, NULL);
}
/* Free a UDP socket state back to the stack.
 *
 * Caller must hold the netif lock (asserted).  The timestamp queue is
 * drained (ci_udp_recv_q_drop — presumably releasing any packet buffers
 * it still holds; confirm against that helper) before the underlying
 * waitable object is freed, so the two calls must stay in this order.
 */
void ci_udp_state_free(ci_netif* ni, ci_udp_state* us)
{
  ci_assert(ci_netif_is_locked(ni));
  ci_assert(us->s.b.state == CI_TCP_STATE_UDP);
  ci_assert(ci_ni_dllist_is_self_linked(ni, &us->s.b.post_poll_link));
  ci_udp_recv_q_drop(ni, &us->timestamp_q);
  citp_waitable_obj_free(ni, &us->s.b);
}
/* Reset a waitable to the fully-free state (CI_TCP_STATE_FREE).
 *
 * Requires that the socket has already been orphaned (CI_SB_AFLAG_ORPHAN
 * set), is not already free, and is fully unlinked: self-linked on the
 * post-poll list and with a NULL wt_next.  Compare
 * citp_waitable_obj_free_to_cache(), which performs only a subset of
 * these resets for endpoints that remain in the cache.
 */
static void __citp_waitable_obj_free(ci_netif* ni, citp_waitable* w)
{
  ci_assert(w->sb_aflags & CI_SB_AFLAG_ORPHAN);
  ci_assert(w->state != CI_TCP_STATE_FREE);
  ci_assert(ci_ni_dllist_is_self_linked(ni, &w->post_poll_link));
  ci_assert(OO_SP_IS_NULL(w->wt_next));

  w->wake_request = 0;
  w->sb_flags = 0;
  /* NOTE(review): plain store here, vs ci_atomic32_and in the cache path —
   * presumably safe because the socket is orphaned and no longer shared;
   * confirm against the locking model. */
  w->sb_aflags = CI_SB_AFLAG_ORPHAN | CI_SB_AFLAG_NOT_READY;
  w->state = CI_TCP_STATE_FREE;
  w->lock.wl_val = 0;
  w->ready_list_id = 0;
  CI_USER_PTR_SET(w->eitem, NULL);
}
/* Predicate: can this TCP socket be moved to another stack?
 *
 * Returns true only when the socket's state is simple enough to
 * reconstruct elsewhere.  Written as a sequence of guard clauses; the
 * checks run in the same order (and with the same short-circuiting) as
 * the original combined conditions, so behaviour is identical.
 */
static int efab_file_move_supported_tcp(ci_netif *ni, ci_tcp_state *ts)
{
#if CI_CFG_FD_CACHING
  /* Don't support moving cached sockets for now */
  if( ci_tcp_is_cached(ts) )
    return false;
  if( ! ci_ni_dllist_is_self_linked(ni, &ts->epcache_link) )
    return false;
#endif

  /* TCP closed: supported */
  if( ts->s.b.state == CI_TCP_CLOSED )
    return true;

  /* everything except TCP connected is not supported */
  if( ! (ts->s.b.state & CI_TCP_STATE_TCP_CONN) )
    return false;

  if( ts->local_peer != OO_SP_NULL )
    return false;
  if( ! (ts->tcpflags & CI_TCPT_FLAG_PASSIVE_OPENED) )
    return false;

  /* send queue is not supported
   * NB: retrans_ptr is uninitialised when retrans was not used yet,
   * so do not check for !OO_PP_IS_NULL(ts->retrans_ptr) */
  if( ! ci_ip_queue_is_empty(&ts->send) )
    return false;
  if( ts->send_prequeue != OO_PP_ID_NULL )
    return false;
  if( oo_atomic_read(&ts->send_prequeue_in) != 0 )
    return false;
  if( ! ci_ip_queue_is_empty(&ts->retrans) )
    return false;

  /* No pending timers: any of these implies in-flight protocol work. */
  if( ci_ip_timer_pending(ni, &ts->rto_tid) )
    return false;
  if( ci_ip_timer_pending(ni, &ts->zwin_tid) )
    return false;
#if CI_CFG_TAIL_DROP_PROBE
  if( ci_ip_timer_pending(ni, &ts->taildrop_tid) )
    return false;
#endif
  if( ci_ip_timer_pending(ni, &ts->cork_tid) )
    return false;

  /* Sockets with allocated templates are not supported */
  if( OO_PP_NOT_NULL(ts->tmpl_head) )
    return false;

  return true;
}