static int citp_udp_bind(citp_fdinfo* fdinfo, const struct sockaddr* sa,
                         socklen_t sa_len)
{
  citp_sock_fdi* epi = fdi_to_sock_fdi(fdinfo);
  citp_socket* ep = &epi->sock;
  ci_sock_cmn* s = ep->s;
  int rc;

  Log_V(log(LPF "bind(%d, sa, %d)", fdinfo->fd, sa_len));

  ci_udp_handle_force_reuseport(fdinfo->fd, ep, sa, sa_len);

  if( (s->s_flags & CI_SOCK_FLAG_REUSEPORT) != 0 ) {
    if( (rc = ci_udp_reuseport_bind(ep, fdinfo->fd, sa, sa_len)) == 0 ) {
      /* The socket has moved, so we need to reprobe the fd.  This also
       * maps the new stack into the address space of the executing
       * process. */
      fdinfo = citp_fdtable_lookup(fdinfo->fd);
      fdinfo = citp_reprobe_moved(fdinfo, CI_FALSE, CI_FALSE);
      epi = fdi_to_sock_fdi(fdinfo);
      ep = &epi->sock;
      ci_netif_cluster_prefault(ep->netif);
    }
    else {
      goto done;
    }
  }

  ci_netif_lock_fdi(epi);
  rc = ci_udp_bind(ep, fdinfo->fd, sa, sa_len);
  ci_netif_unlock_fdi(epi);

 done:
  if( rc == CI_SOCKET_HANDOVER ) {
    ci_assert_equal(s->s_flags & CI_SOCK_FLAG_REUSEPORT_LEGACY, 0);
    CITP_STATS_NETIF(++epi->sock.netif->state->stats.udp_handover_bind);
    citp_fdinfo_handover(fdinfo, -1);
    return 0;
  }

  citp_fdinfo_release_ref(fdinfo, 0);
  return rc;
}
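/*
 * Illustrative sketch only (hypothetical wrapper name): the ownership
 * contract of citp_udp_bind().  The callee consumes the fdinfo reference it
 * is given -- either via citp_fdinfo_handover() on CI_SOCKET_HANDOVER or via
 * citp_fdinfo_release_ref() -- so the caller must not release it again.
 * ci_sys_bind() is assumed to be the saved kernel bind() entry point, per
 * the usual ci_sys_* convention, and the caller is assumed to have already
 * entered the lib (citp_fdtable_lookup() asserts this).
 */
static int example_bind_via_onload(int fd, const struct sockaddr* sa,
                                   socklen_t sa_len)
{
  citp_fdinfo* fdi = citp_fdtable_lookup(fd);  /* returns an owned ref */
  if( fdi == NULL )
    return ci_sys_bind(fd, sa, sa_len);        /* fd is not accelerated */
  /* citp_udp_bind() releases (or hands over) the reference itself. */
  return citp_udp_bind(fdi, sa, sa_len);
}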
citp_fdinfo* citp_fdtable_lookup_fast(citp_lib_context_t* ctx, unsigned fd)
{
  /* Note that if we haven't yet initialised this module, then
  ** [inited_count] will be zero, and the following test will fail.  So the
  ** test for initialisation is done further down...
  **
  ** This is highly performance critical.  DO NOT add any code between here
  ** and the first [return] statement.
  */
  citp_fdinfo* fdi;

  /* Try to avoid entering lib. */
  ctx->thread = NULL;

  if( fd < citp_fdtable.inited_count ) {
    volatile citp_fdinfo_p* p_fdip = &citp_fdtable.table[fd].fdip;
    citp_fdinfo_p fdip;

  again:
    fdip = *p_fdip;
    if( fdip_is_normal(fdip) ) {
      citp_enter_lib_if(ctx);
      if( citp_fdtable_is_mt_safe() ) {
        /* No need to use atomic ops or add a ref to the fdi when MT-safe.
         * The definition of "fds_mt_safe" is that the app does not change
         * the meaning of a file descriptor in one thread when it is being
         * used in another thread. */
        fdi = fdip_to_fdi(fdip);
        if( ! citp_fdinfo_is_consistent(fdi) )
          fdi = citp_reprobe_moved(fdi, CI_TRUE, CI_FALSE);
        return fdi;
      }
      else {
        /* Swap in the busy marker. */
        if( fdip_cas_succeed(p_fdip, fdip, fdip_busy) ) {
          fdi = fdip_to_fdi(fdip);
          ci_assert(fdi);
          ci_assert_gt(oo_atomic_read(&fdi->ref_count), 0);
          ci_assert(fdip_is_closing(fdip) || fdip_is_reserved(fdip) ||
                    fdi->fd == fd);
          /* Bump the reference count. */
          citp_fdinfo_ref(fdi);
          if( ! citp_fdinfo_is_consistent(fdi) )
            fdi = citp_reprobe_moved(fdi, CI_FALSE, CI_TRUE);
          else {
            /* Swap the busy marker out again. */
            citp_fdtable_busy_clear(fd, fdip, 0);
          }
          return fdi;
        }
        goto again;
      }
    }

    /* Not normal! */
    if( fdip_is_passthru(fdip) )
      return NULL;

    citp_enter_lib_if(ctx);
    if( fdip_is_busy(fdip) ) {
      citp_fdtable_busy_wait(fd, 0);
      goto again;
    }

    ci_assert(fdip_is_unknown(fdip));
    goto probe;
  }

  if( citp.init_level < CITP_INIT_FDTABLE ) {
    if( _citp_do_init_inprogress == 0 )
      CI_TRY(citp_do_init(CITP_INIT_ALL));
    else
      CI_TRY(citp_do_init(CITP_INIT_FDTABLE));  /* get what we need */
  }

  if( fd >= citp_fdtable.size )
    return NULL;

 probe:
  citp_enter_lib_if(ctx);
  fdi = citp_fdtable_probe(fd);

  if( fdi && citp_fdtable_is_mt_safe() )
    citp_fdinfo_release_ref(fdi, 0);

  return fdi;
}
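/*
 * Illustrative sketch only (hypothetical wrapper and op names): how an
 * intercepted call might use the fast lookup above.  The key asymmetry is
 * the reference count: in the fds_mt_safe configuration the returned fdi
 * carries no extra reference, so the caller releases nothing; otherwise the
 * lookup bumped the count and the caller owns one reference to drop.
 * citp_exit_lib()/citp_exit_lib_if() are assumed to undo the conditional
 * citp_enter_lib_if() performed inside the lookup.
 */
static ssize_t example_fast_path_op(int fd)
{
  citp_lib_context_t lib_context;
  citp_fdinfo* fdi;
  ssize_t rc;

  if( (fdi = citp_fdtable_lookup_fast(&lib_context, fd)) != NULL ) {
    rc = do_accelerated_op(fdi);         /* hypothetical accelerated path */
    if( ! citp_fdtable_is_mt_safe() )
      citp_fdinfo_release_ref(fdi, 0);   /* drop the reference we own */
    citp_exit_lib(&lib_context, rc >= 0);
  }
  else {
    citp_exit_lib_if(&lib_context, CI_TRUE);
    rc = do_kernel_op(fd);               /* hypothetical kernel fallback */
  }
  return rc;
}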
citp_fdinfo* citp_fdtable_lookup(unsigned fd)
{
  /* Note that if we haven't yet initialised this module, then
  ** [inited_count] will be zero, and the following test will fail.  So the
  ** test for initialisation is done further down...
  **
  ** This is highly performance critical.  DO NOT add any code between here
  ** and the first [return] statement.
  */
  citp_fdinfo* fdi;

  /* In some cases we will lock the fdtable, so assert that doing so is
   * possible: */
  ci_assert(oo_per_thread_get()->sig.inside_lib);

  if( fd < citp_fdtable.inited_count ) {
    volatile citp_fdinfo_p* p_fdip = &citp_fdtable.table[fd].fdip;
    citp_fdinfo_p fdip;

  again:
    /* Swap in the busy marker. */
    fdip = *p_fdip;
    if( fdip_is_normal(fdip) ) {
      if( citp_fdtable_not_mt_safe() ) {
        if( fdip_cas_succeed(p_fdip, fdip, fdip_busy) ) {
          fdi = fdip_to_fdi(fdip);
          ci_assert(fdi);
          ci_assert_gt(oo_atomic_read(&fdi->ref_count), 0);
          ci_assert(fdip_is_closing(fdip) || fdip_is_reserved(fdip) ||
                    fdi->fd == fd);
          /* Bump the reference count. */
          citp_fdinfo_ref(fdi);
          if( ! citp_fdinfo_is_consistent(fdi) ) {
            /* Something is wrong.  Re-probe. */
            fdi = citp_reprobe_moved(fdi, CI_FALSE, CI_TRUE);
          }
          else {
            /* Swap the busy marker out again. */
            citp_fdtable_busy_clear(fd, fdip, 0);
          }
          return fdi;
        }
        goto again;
      }
      else {
        /* No need to use atomic ops when single-threaded.  The definition
         * of "fds_mt_safe" is that the app does not change the meaning of
         * a file descriptor in one thread when it is being used in another
         * thread.  In that case I'm hoping this should be safe, but at
         * time of writing I'm really not confident.  (FIXME). */
        fdi = fdip_to_fdi(fdip);
        if( ci_is_multithreaded() )
          citp_fdinfo_ref(fdi);
        else
          ++fdi->ref_count.n;
        if( ! citp_fdinfo_is_consistent(fdi) )
          fdi = citp_reprobe_moved(fdi, CI_FALSE, CI_FALSE);
        return fdi;
      }
    }

    /* Not normal! */
    if( fdip_is_passthru(fdip) )
      return NULL;

    if( fdip_is_busy(fdip) ) {
      citp_fdtable_busy_wait(fd, 0);
      goto again;
    }

    ci_assert(fdip_is_unknown(fdip));
    goto probe;
  }

  if( citp.init_level < CITP_INIT_FDTABLE ) {
    if( _citp_do_init_inprogress == 0 )
      CI_TRY(citp_do_init(CITP_INIT_ALL));
    else
      CI_TRY(citp_do_init(CITP_INIT_FDTABLE));  /* get what we need */
  }

  if( fd >= citp_fdtable.size )
    return NULL;

 probe:
  fdi = citp_fdtable_probe(fd);

  return fdi;
}
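/*
 * Illustrative sketch only (hypothetical wrapper name): the contract of the
 * slow-path lookup above.  Unlike citp_fdtable_lookup_fast(), the caller
 * must already be "inside the lib" -- the assertion at the top of
 * citp_fdtable_lookup() enforces this -- and a non-NULL result always
 * carries a reference that the caller must release.
 */
static int example_fd_is_accelerated(int fd)
{
  citp_lib_context_t lib_context;
  citp_fdinfo* fdi;
  int accelerated = 0;

  citp_enter_lib(&lib_context);         /* satisfy the inside_lib assertion */
  if( (fdi = citp_fdtable_lookup(fd)) != NULL ) {
    accelerated = 1;
    citp_fdinfo_release_ref(fdi, 0);    /* drop the lookup's reference */
  }
  citp_exit_lib(&lib_context, CI_TRUE);
  return accelerated;
}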