/*! Run a signal handler immediately, in the current context.
**
** Any previously queued (pending) signals are drained first so that
** delivery order is preserved; the SA_RESTART decision recorded in
** [our_info] reflects only the signal handled last.
**
** \param signum    Signal number
** \param info      Saved info for sa_sigaction handler
** \param context   Saved context for sa_sigaction handler
** \param our_info  Our signal info (may be NULL)
*/
ci_inline void citp_signal_run_now(int signum, siginfo_t *info,
                                   void *context,
                                   citp_signal_info *our_info)
{
  int restart_needed;

  LOG_SIG(log("%s: SIGNAL %d - run immediately", __FUNCTION__, signum));

  /* Deliver older pending signals before this one, so the application
   * observes signals in arrival order.  need-restart state is then taken
   * from the final (current) signal only. */
  if (our_info && (our_info->aflags & OO_SIGNAL_FLAG_HAVE_PENDING))
    citp_signal_run_pending(our_info);

  restart_needed = citp_signal_run_app_handler(signum, info, context);

  /* Record whether the interrupted call should be restarted (sa_restart
   * semantics).  The last signal wins, so overwrite unconditionally. */
  if (our_info) {
    LOG_SIG(log("%s: SIGNAL %d - set need restart flag to %d",
                __FUNCTION__, signum, restart_needed));
    if( ! restart_needed )
      ci_atomic32_and(&our_info->aflags, ~OO_SIGNAL_FLAG_NEED_RESTART);
    else
      ci_atomic32_or(&our_info->aflags, OO_SIGNAL_FLAG_NEED_RESTART);
  }
}
/*! Mark a signal as pending.
 * Should be called from signal handler only.
 *
 * Claims a free slot in our_info->signals[] via compare-and-swap so it is
 * safe against other signal handlers racing for the same slots, saves the
 * handler's siginfo/context there, and raises OO_SIGNAL_FLAG_HAVE_PENDING
 * so the pending signal is replayed later (see citp_signal_run_pending).
 *
 ** \param signum    Signal number
 ** \param info      Saved info for sa_sigaction handler
 ** \param context   Saved context for sa_sigaction handler
 ** \param our_info  Our signal info
 */
ci_inline void citp_signal_set_pending(int signum, siginfo_t *info,
                                       void *context,
                                       citp_signal_info *our_info)
{
  int i;

  /* Deferring only makes sense while we are inside the library. */
  ci_assert(our_info->inside_lib);

  for( i = 0; i < OO_SIGNAL_MAX_PENDING; i++ ) {
    /* Cheap pre-check: skip slots that already look occupied. */
    if( our_info->signals[i].signum )
      continue;
    /* Atomically claim the slot; on a lost race just try the next one. */
    if( ci_cas32_fail(&our_info->signals[i].signum, 0, signum) )
      continue;

    LOG_SIG(log("%s: signal %d pending", __FUNCTION__, signum));
    ci_assert(info);
    ci_assert(context);
    memcpy(&our_info->signals[i].saved_info, info, sizeof(siginfo_t));
    our_info->signals[i].saved_context = context;

    /* Hack: in case of SA_ONESHOT, make sure that we intercept
     * the signal.  At the end of citp_signal_run_app_handler,
     * we will reset the signal handler properly. */
    if( citp_signal_data[signum-1].flags & SA_ONESHOT )
      sigaction(signum, NULL, NULL);

    /* Publish: there is now at least one signal queued for replay. */
    ci_atomic32_or(&our_info->aflags, OO_SIGNAL_FLAG_HAVE_PENDING);
    return;
  }

  /* All OO_SIGNAL_MAX_PENDING slots in use: the signal is dropped
   * (logged but not delivered). */
  log("%s: no empty slot to set pending signal %d", __FUNCTION__, signum);
}
/* Shut down one or both directions of a TCP socket.
 *
 * \param ep   Endpoint to shut down
 * \param how  SHUT_RD, SHUT_WR or SHUT_RDWR
 * \param fd   File descriptor backing the endpoint
 * \return 0 on success; -1 with errno set (EBUSY, ENOTCONN, ...) on error.
 *
 * If the stack lock cannot be taken immediately, the shutdown is deferred
 * to the current lock holder via the socket's atomic flags.
 */
int ci_tcp_shutdown(citp_socket* ep, int how, ci_fd_t fd)
{
  ci_sock_cmn* s = ep->s;
  int rc;

  /* Listening sockets take a completely different path. */
  if( s->b.state == CI_TCP_LISTEN )
    return ci_tcp_shutdown_listen(ep, how, fd);

  if( SOCK_TO_TCP(s)->snd_delegated ) {
    /* We do not know which seq number to use. Call
     * onload_delegated_send_cancel(). */
    CI_SET_ERROR(rc, EBUSY);
    return rc;
  }

  if( ! ci_netif_trylock(ep->netif) ) {
    /* Can't get lock, so try to defer shutdown to the lock holder. */
    unsigned defer_flags = 0;

    if( s->b.state == CI_TCP_CLOSED || s->b.state == CI_TCP_TIME_WAIT ) {
      CI_SET_ERROR(rc, ENOTCONN);
      return rc;
    }

    if( how == SHUT_RDWR || how == SHUT_RD )
      defer_flags |= CI_SOCK_AFLAG_NEED_SHUT_RD;
    if( how == SHUT_RDWR || how == SHUT_WR )
      defer_flags |= CI_SOCK_AFLAG_NEED_SHUT_WR;
    ci_atomic32_or(&s->s_aflags, defer_flags);

    /* If the work was handed off to the lock holder we are done;
     * otherwise we now own the lock and must do it ourselves. */
    if( ! ci_netif_lock_or_defer_work(ep->netif, &s->b) )
      return 0;
    ci_atomic32_and(&s->s_aflags, ~defer_flags);
  }

  if( 0 ) {
    /* Poll to get up-to-date.  This is slightly spurious but done to ensure
     * ordered response to all packets preceding this FIN (e.g. ANVL tcp_core
     * 9.18)
     *
     * DJR: I've disabled this because it can hurt performance for
     * high-connection-rate apps.  May consider adding back (as option?) if
     * needed. */
    ci_netif_poll(ep->netif);
  }

  rc = __ci_tcp_shutdown(ep->netif, SOCK_TO_TCP(s), how);
  if( rc < 0 )
    CI_SET_ERROR(rc, -rc);
  ci_netif_unlock(ep->netif);
  return rc;
}
/*! Run any pending signal handlers
**
** Drains the per-thread pending-signal slots filled by
** citp_signal_set_pending(), invoking the application handler for each,
** and updates OO_SIGNAL_FLAG_NEED_RESTART from the last handler run.
**
** \param our_info Thread-specific context for current thread
*/
void citp_signal_run_pending(citp_signal_info *our_info)
{
  /* preserve errno across calls to this function, as it's often
     called at error time as a result of EXIT_LIB */
  int old_errno = errno;
  int i;

  LOG_SIG(log("%s: start", __FUNCTION__));

  /* Make our prior stores visible before inspecting/clearing the
   * pending state. */
  ci_wmb();

  /* We must be outside the library here, else a handler could recurse. */
  ci_assert_equal(our_info->inside_lib, 0);
  ci_assert(our_info->aflags & OO_SIGNAL_FLAG_HAVE_PENDING);
  /* Clear the flag before draining; set_pending will re-raise it if a new
   * signal lands while we are in this loop. */
  ci_atomic32_and(&our_info->aflags, ~OO_SIGNAL_FLAG_HAVE_PENDING);

  for( i = 0; i < OO_SIGNAL_MAX_PENDING; i++ ) {
    siginfo_t saved_info;
    void *saved_context;
    int signum;

    /* Slots are filled in order, so the first empty slot ends the scan. */
    if (our_info->signals[i].signum == 0)
      break;

    /* Copy the saved data out before releasing the slot: once signum is
     * CAS'd back to 0 a concurrent handler may reuse the slot.
     * saved_context == NULL means no siginfo was recorded for this slot. */
    saved_context = our_info->signals[i].saved_context;
    if( our_info->signals[i].saved_context )
      memcpy(&saved_info, &our_info->signals[i].saved_info,
             sizeof(saved_info));
    signum = our_info->signals[i].signum;
    /* NOTE(review): a CAS failure (racing consumer) aborts the whole drain
     * rather than skipping one slot — presumably the racing party finishes
     * the job; confirm against set_pending/run_now interplay. */
    if( ci_cas32_fail(&our_info->signals[i].signum, signum, 0) )
      break;

    /* Run the handler; record SA_RESTART intent — last signal wins. */
    if( citp_signal_run_app_handler(
                signum,
                saved_context == NULL ? NULL : &saved_info,
                saved_context) )
      ci_atomic32_or(&our_info->aflags, OO_SIGNAL_FLAG_NEED_RESTART);
    else
      ci_atomic32_and(&our_info->aflags, ~OO_SIGNAL_FLAG_NEED_RESTART);
  }

  LOG_SIG(log("%s: end", __FUNCTION__));
  errno = old_errno;
}
/*! Copy socket options and related fields that should be inherited.
 * Inherits into [ts] from [s] & [c].  Options are inherited during EP
 * promotion for unix, during accept handler in Windows & as a result of
 * setsockopt:SOL_SOCKET:SO_UPDATE_ACCEPT_CONTEXT.  MUST have a lock on
 * [ts].  [or_nonblock] controls whether the non-blocking state from [s]
 * overwrites that in [ts] or is OR'd into it.
 */
static void ci_tcp_inherit_options(ci_netif* ni, ci_sock_cmn* s,
                                   ci_tcp_socket_cmn* c,
                                   ci_tcp_state* ts, const char* ctxt)
{
  unsigned sflags_mask;
  unsigned sbflags_mask;

  ci_assert(ni);
  ci_assert(s);
  ci_assert(c);
  ci_assert(ts);

  /* Plain SOL_SOCKET options and bind-to-device state copy straight over. */
  ts->s.so = s->so;
  ts->s.cp.so_bindtodevice = s->cp.so_bindtodevice;
  ts->s.cp.ip_ttl = s->cp.ip_ttl;
  ts->s.rx_bind2dev_ifindex = s->rx_bind2dev_ifindex;
  ts->s.rx_bind2dev_base_ifindex = s->rx_bind2dev_base_ifindex;
  ts->s.rx_bind2dev_vlan = s->rx_bind2dev_vlan;

  /* Order matters: sndbuf needs a valid eff_mss, rcvbuf sets amss and
   * rcv_wscl. */
  ci_tcp_set_sndbuf(ni, ts);
  ci_tcp_set_rcvbuf(ni, ts);

  /* NB. We have exclusive access to [ts], so it is safe to manipulate
   * s_aflags without using bit-ops. */
  sflags_mask = CI_SOCK_AFLAG_TCP_INHERITED;
  sbflags_mask = 0;
  if( NI_OPTS(ni).accept_inherit_nonblock )
    sbflags_mask |= CI_SB_AFLAG_O_NONBLOCK | CI_SB_AFLAG_O_NDELAY;

  ci_assert((ts->s.s_aflags & sflags_mask) == 0);
  ci_atomic32_or(&ts->s.s_aflags, s->s_aflags & sflags_mask);

  /* Option to force TCP_NODELAY on (1) or off (2) regardless of listener. */
  switch( NI_OPTS(ni).tcp_force_nodelay ) {
  case 1:
    ci_bit_set(&ts->s.s_aflags, CI_SOCK_AFLAG_NODELAY_BIT);
    break;
  case 2:
    ci_bit_clear(&ts->s.s_aflags, CI_SOCK_AFLAG_NODELAY_BIT);
    break;
  }

  ci_assert((ts->s.b.sb_aflags & sbflags_mask) == 0);
  ci_atomic32_or(&ts->s.b.sb_aflags, s->b.sb_aflags & sbflags_mask);

  /* Replace the default PMTU-discovery flag with whatever the listener
   * carries in the inheritable set. */
  ci_assert_equal((ts->s.s_flags & CI_SOCK_FLAG_TCP_INHERITED),
                  CI_SOCK_FLAG_PMTU_DO);
  ts->s.s_flags &= ~CI_SOCK_FLAG_PMTU_DO;
  ts->s.s_flags |= s->s_flags & CI_SOCK_FLAG_TCP_INHERITED;

  /* Bug1861: while not defined as such, various SOL_TCP/SOL_IP sockopts
   * are inherited in Linux. */
  /* TCP_KEEPIDLE, TCP_KEEPINTVL, TCP_KEEPCNT */
  ts->c.t_ka_time = c->t_ka_time;
  ts->c.t_ka_time_in_secs = c->t_ka_time_in_secs;
  ts->c.t_ka_intvl = c->t_ka_intvl;
  ts->c.t_ka_intvl_in_secs = c->t_ka_intvl_in_secs;
  ts->c.ka_probe_th = c->ka_probe_th;

  ci_ip_hdr_init_fixed(&ts->s.pkt.ip, IPPROTO_TCP,
                       s->pkt.ip.ip_ttl,
                       s->pkt.ip.ip_tos);

  ts->s.cmsg_flags = s->cmsg_flags;
  ts->s.timestamping_flags = s->timestamping_flags;

  /* Must have set up so.sndbuf */
  ci_tcp_init_rcv_wnd(ts, ctxt);
}