/*
 * Read one packet from the TUN/TAP device into c->c2.buf.
 *
 * Supports two driver models: TUN_PASS_BUFFER platforms, where the
 * driver delivers data through read_tun_buffered(), and the plain
 * read() path.  If the driver reports a stop or abort condition, the
 * corresponding signal is registered on the context and the function
 * returns early.
 */
void
read_incoming_tun (struct context *c)
{
  /*
   * Setup for read() call on TUN/TAP device.
   */
  /*ASSERT (!c->c2.to_link.len);*/

  perf_push (PERF_READ_IN_TUN);

  c->c2.buf = c->c2.buffers->read_tun_buf;

#ifdef TUN_PASS_BUFFER
  read_tun_buffered (c->c1.tuntap, &c->c2.buf, MAX_RW_SIZE_TUN (&c->c2.frame));
#else
  /* reserve frame headroom, then read() directly into the buffer */
  ASSERT (buf_init (&c->c2.buf, FRAME_HEADROOM (&c->c2.frame)));
  ASSERT (buf_safe (&c->c2.buf, MAX_RW_SIZE_TUN (&c->c2.frame)));
  c->c2.buf.len = read_tun (c->c1.tuntap, BPTR (&c->c2.buf), MAX_RW_SIZE_TUN (&c->c2.frame));
#endif

#ifdef PACKET_TRUNCATION_CHECK
  /* diagnostic: cross-check IPv4 header length against bytes read */
  ipv4_packet_size_verify (BPTR (&c->c2.buf), BLEN (&c->c2.buf),
                           TUNNEL_TYPE (c->c1.tuntap),
                           "READ_TUN",
                           &c->c2.n_trunc_tun_read);
#endif

  /* Was TUN/TAP interface stopped? */
  if (tuntap_stop (c->c2.buf.len))
    {
      register_signal (c, SIGTERM, "tun-stop");
      msg (M_INFO, "TUN/TAP interface has been stopped, exiting");
      perf_pop ();
      return;
    }

  /* Was TUN/TAP I/O operation aborted? */
  if (tuntap_abort(c->c2.buf.len))
    {
      register_signal(c, SIGHUP, "tun-abort");
      c->persist.restart_sleep_seconds = 10;
      msg(M_INFO, "TUN/TAP I/O operation aborted, restarting");
      perf_pop();
      return;
    }

  /* Check the status return from read() */
  check_status (c->c2.buf.len, "read from TUN/TAP", NULL, c->c1.tuntap);

  perf_pop ();
}
/**
 * Main event loop for OpenVPN in client mode, where only one VPN tunnel
 * is active.
 * @ingroup eventloop
 *
 * @param c - The context structure of the single active VPN tunnel.
 */
static void
tunnel_point_to_point(struct context *c)
{
    context_clear_2(c);

    /* set point-to-point mode */
    c->mode = CM_P2P;

    /* initialize tunnel instance; CC_HARD_USR1_TO_HUP converts a hard
     * USR1 into HUP during init */
    init_instance_handle_signals(c, c->es, CC_HARD_USR1_TO_HUP);
    if (IS_SIG(c))
    {
        return;
    }

    /* main event loop */
    while (true)
    {
        perf_push(PERF_EVENT_LOOP);

        /* process timers, TLS, etc. */
        pre_select(c);
        /* NOTE(review): P2P_CHECK_SIG is defined elsewhere; presumably it
         * leaves this loop (or returns) when a signal is pending -- that
         * is the only path to the teardown code below.  Confirm against
         * the macro definition. */
        P2P_CHECK_SIG();

        /* set up and do the I/O wait */
        io_wait(c, p2p_iow_flags(c));
        P2P_CHECK_SIG();

        /* timeout? */
        if (c->c2.event_set_status == ES_TIMEOUT)
        {
            perf_pop();
            continue;
        }

        /* process the I/O which triggered select */
        process_io(c);
        P2P_CHECK_SIG();

        perf_pop();
    }

    uninit_management_callback();

    /* tear down tunnel instance (unless --persist-tun) */
    close_instance(c);
}
/*
 * Feed caller-owned, read-only plaintext into the SSL BIO of the given
 * key state.  Returns the status reported by bio_write().
 */
int
key_state_write_plaintext_const (struct key_state_ssl *ks_ssl, const uint8_t *data, int len)
{
  int status;

  perf_push (PERF_BIO_WRITE_PLAINTEXT);
  ASSERT (NULL != ks_ssl);

  status = bio_write (ks_ssl->ssl_bio, data, len, "tls_write_plaintext_const");

  perf_pop ();
  return status;
}
/*
 * Top-level handler for an incoming link packet.  Captures the buffer's
 * data pointer before part 1 runs, then delegates to the two-part
 * implementation, handing the saved pointer to part 2.
 */
void
process_incoming_link (struct context *c)
{
  struct link_socket_info *info;
  const uint8_t *buf_before;

  perf_push (PERF_PROC_IN_LINK);

  info = get_link_socket_info (c);
  buf_before = c->c2.buf.data;

  process_incoming_link_part1 (c, info, false);
  process_incoming_link_part2 (c, info, buf_before);

  perf_pop ();
}
/*
 * Pull up to maxlen bytes of decrypted plaintext out of the SSL BIO
 * into *buf.  Returns the status reported by bio_read().
 */
int
key_state_read_plaintext (struct key_state_ssl *ks_ssl, struct buffer *buf, int maxlen)
{
  int status;

  perf_push (PERF_BIO_READ_PLAINTEXT);
  ASSERT (NULL != ks_ssl);

  status = bio_read (ks_ssl->ssl_bio, buf, maxlen, "tls_read_plaintext");

  perf_pop ();
  return status;
}
/*
 * Push received ciphertext from *buf into the "ciphertext in" BIO.
 * bio_write_post() adjusts the buffer according to the write status;
 * that status is returned to the caller.
 */
int
key_state_write_ciphertext (struct key_state_ssl *ks_ssl, struct buffer *buf)
{
  int status;

  perf_push (PERF_BIO_WRITE_CIPHERTEXT);
  ASSERT (NULL != ks_ssl);

  status = bio_write (ks_ssl->ct_in, BPTR (buf), BLEN (buf), "tls_write_ciphertext");
  bio_write_post (status, buf);

  perf_pop ();
  return status;
}
/*
 * Pull up to maxlen bytes of outgoing ciphertext from the
 * "ciphertext out" BIO into *buf.  Returns the status reported by
 * bio_read().
 */
int
key_state_read_ciphertext (struct key_state_ssl *ks_ssl, struct buffer *buf, int maxlen)
{
  int status;

  perf_push (PERF_BIO_READ_CIPHERTEXT);
  ASSERT (NULL != ks_ssl);

  status = bio_read (ks_ssl->ct_out, buf, maxlen, "tls_read_ciphertext");

  perf_pop ();
  return status;
}
int key_state_write_plaintext (struct key_state_ssl *ks_ssl, struct buffer *buf) { int ret = 0; perf_push (PERF_BIO_WRITE_PLAINTEXT); #ifdef ENABLE_CRYPTO_OPENSSL ASSERT (NULL != ks_ssl); ret = bio_write (ks_ssl->ssl_bio, BPTR(buf), BLEN(buf), "tls_write_plaintext"); bio_write_post (ret, buf); #endif /* ENABLE_CRYPTO_OPENSSL */ perf_pop (); return ret; }
/*
 * Process a packet just read from the TUN/TAP device: optionally drop
 * recursively-routed packets, run the IP-header pass (--passtos,
 * --mssfix, client NAT), then encrypt and sign it for the link.  A
 * zero/negative-length read resets the outgoing link buffer instead.
 */
void
process_incoming_tun (struct context *c)
{
  struct gc_arena gc = gc_new ();

  perf_push (PERF_PROC_IN_TUN);

  if (c->c2.buf.len > 0)
    c->c2.tun_read_bytes += c->c2.buf.len;

#ifdef LOG_RW
  if (c->c2.log_rw && c->c2.buf.len > 0)
    fprintf (stderr, "r");
#endif

  /* Show packet content */
  dmsg (D_TUN_RW, "TUN READ [%d]", BLEN (&c->c2.buf));

  if (c->c2.buf.len > 0)
    {
      /* in point-to-point mode, drop packets that would loop back
       * through the tunnel, unless explicitly allowed */
      if ((c->options.mode == MODE_POINT_TO_POINT) && (!c->options.allow_recursive_routing))
        drop_if_recursive_routing (c, &c->c2.buf);
      /*
       * The --passtos and --mssfix options require
       * us to examine the IP header (IPv4 or IPv6).
       */
      process_ip_header (c, PIPV4_PASSTOS|PIP_MSSFIX|PIPV4_CLIENT_NAT, &c->c2.buf);

#ifdef PACKET_TRUNCATION_CHECK
      /* if (c->c2.buf.len > 1) --c->c2.buf.len; */
      ipv4_packet_size_verify (BPTR (&c->c2.buf), BLEN (&c->c2.buf),
                               TUNNEL_TYPE (c->c1.tuntap),
                               "PRE_ENCRYPT",
                               &c->c2.n_trunc_pre_encrypt);
#endif

      encrypt_sign (c, true);
    }
  else
    {
      /* nothing read -- make sure no stale data is sent to the link */
      buf_reset (&c->c2.to_link);
    }
  perf_pop ();
  gc_free (&gc);
}
/**
 * Main event loop for OpenVPN in UDP server mode.
 * @ingroup eventloop
 *
 * This function implements OpenVPN's main event loop for UDP server mode.
 * At this time, OpenVPN does not yet support multithreading.  This
 * function's name is therefore slightly misleading.
 *
 * @param top - Top-level context structure.
 */
static void
tunnel_server_udp_single_threaded (struct context *top)
{
  struct multi_context multi;

  top->mode = CM_TOP;
  context_clear_2 (top);

  /* initialize top-tunnel instance */
  init_instance_handle_signals (top, top->es, CC_HARD_USR1_TO_HUP);
  if (IS_SIG (top))
    return;

  /* initialize global multi_context object */
  multi_init (&multi, top, false, MC_SINGLE_THREADED);

  /* initialize our cloned top object */
  multi_top_init (&multi, top, true);

  /* initialize management interface */
  init_management_callback_multi (&multi);

  /* finished with initialization */
  initialization_sequence_completed (top, ISC_SERVER); /* --mode server --proto udp */

  /* per-packet event loop */
  while (true)
    {
      perf_push (PERF_EVENT_LOOP);

      /* set up and do the io_wait() */
      multi_get_timeout (&multi, &multi.top.c2.timeval);
      io_wait (&multi.top, p2mp_iow_flags (&multi));
      /* NOTE(review): MULTI_CHECK_SIG is defined elsewhere; presumably it
       * exits this loop on a pending signal, which is how the teardown
       * code below is reached.  Confirm against the macro definition. */
      MULTI_CHECK_SIG (&multi);

      /* check on status of coarse timers */
      multi_process_per_second_timers (&multi);

      /* timeout? */
      if (multi.top.c2.event_set_status == ES_TIMEOUT)
        {
          multi_process_timeout (&multi, MPP_PRE_SELECT|MPP_CLOSE_ON_SIGNAL);
        }
      else
        {
          /* process I/O */
          multi_process_io_udp (&multi);
          MULTI_CHECK_SIG (&multi);
        }

      perf_pop ();
    }

  /* shut down management interface */
  uninit_management_callback_multi (&multi);

  /* save ifconfig-pool */
  multi_ifconfig_pool_persist (&multi, true);

  /* tear down tunnel instance (unless --persist-tun) */
  multi_uninit (&multi);
  multi_top_free (&multi);
  close_instance (top);
}
/*
 * Multi-stage processing of a packet just read from the link socket:
 * statistics/management accounting, optional gremlin corruption, source
 * address verification, TLS control-channel handling, authenticate +
 * decrypt, optional fragment reassembly and decompression, ping/OCC
 * message handling, and finally buffer turnover into c->c2.to_tun.
 * Any stage that rejects the packet sets buf.len to 0 or -1 so the
 * remaining stages ignore it.
 */
void
process_incoming_link (struct context *c)
{
  struct gc_arena gc = gc_new ();
  bool decrypt_status;
  struct link_socket_info *lsi = get_link_socket_info (c);
  const uint8_t *orig_buf = c->c2.buf.data; /* saved for buffer_turnover below */

  perf_push (PERF_PROC_IN_LINK);

  if (c->c2.buf.len > 0)
    {
      /* raw (pre-authentication) receive byte counters */
      c->c2.link_read_bytes += c->c2.buf.len;
      link_read_bytes_global += c->c2.buf.len;
#ifdef ENABLE_MEMSTATS
      if (mmap_stats)
        mmap_stats->link_read_bytes = link_read_bytes_global;
#endif
      c->c2.original_recv_size = c->c2.buf.len;
#ifdef ENABLE_MANAGEMENT
      if (management)
        {
          management_bytes_in (management, c->c2.buf.len);
#ifdef MANAGEMENT_DEF_AUTH
          management_bytes_server (management, &c->c2.link_read_bytes, &c->c2.link_write_bytes, &c->c2.mda_context);
#endif
        }
#endif
    }
  else
    c->c2.original_recv_size = 0;

#ifdef ENABLE_DEBUG
  /* take action to corrupt packet if we are in gremlin test mode */
  if (c->options.gremlin)
    {
      if (!ask_gremlin (c->options.gremlin))
        c->c2.buf.len = 0;
      corrupt_gremlin (&c->c2.buf, c->options.gremlin);
    }
#endif

  /* log incoming packet */
#ifdef LOG_RW
  if (c->c2.log_rw && c->c2.buf.len > 0)
    fprintf (stderr, "R");
#endif
  msg (D_LINK_RW, "%s READ [%d] from %s: %s",
       proto2ascii (lsi->proto, true),
       BLEN (&c->c2.buf),
       print_link_socket_actual (&c->c2.from, &gc),
       PROTO_DUMP (&c->c2.buf, &gc));

  /*
   * Good, non-zero length packet received.
   * Commence multi-stage processing of packet,
   * such as authenticate, decrypt, decompress.
   * If any stage fails, it sets buf.len to 0 or -1,
   * telling downstream stages to ignore the packet.
   */
  if (c->c2.buf.len > 0)
    {
      if (!link_socket_verify_incoming_addr (&c->c2.buf, lsi, &c->c2.from))
        link_socket_bad_incoming_addr (&c->c2.buf, lsi, &c->c2.from);

#ifdef ENABLE_CRYPTO
#ifdef ENABLE_SSL
      if (c->c2.tls_multi)
        {
          /*
           * If tls_pre_decrypt returns true, it means the incoming
           * packet was a good TLS control channel packet.  If so, TLS code
           * will deal with the packet and set buf.len to 0 so downstream
           * stages ignore it.
           *
           * If the packet is a data channel packet, tls_pre_decrypt
           * will load crypto_options with the correct encryption key
           * and return false.
           */
          if (tls_pre_decrypt (c->c2.tls_multi, &c->c2.from, &c->c2.buf, &c->c2.crypto_options))
            {
              interval_action (&c->c2.tmp_int);

              /* reset packet received timer if TLS packet */
              if (c->options.ping_rec_timeout)
                event_timeout_reset (&c->c2.ping_rec_interval);
            }
        }
#if P2MP_SERVER
      /*
       * Drop non-TLS packet if client-connect script/plugin has not
       * yet succeeded.
       */
      if (c->c2.context_auth != CAS_SUCCEEDED)
        c->c2.buf.len = 0;
#endif
#endif /* ENABLE_SSL */

      /* authenticate and decrypt the incoming packet */
      decrypt_status = openvpn_decrypt (&c->c2.buf, c->c2.buffers->decrypt_buf, &c->c2.crypto_options, &c->c2.frame);

      if (!decrypt_status && link_socket_connection_oriented (c->c2.link_socket))
        {
          /* decryption errors are fatal in TCP mode */
          register_signal (c, SIGUSR1, "decryption-error"); /* SOFT-SIGUSR1 -- decryption error in TCP mode */
          msg (D_STREAM_ERRORS, "Fatal decryption error (process_incoming_link), restarting");
          goto done;
        }
#endif /* ENABLE_CRYPTO */

#ifdef ENABLE_FRAGMENT
      if (c->c2.fragment)
        fragment_incoming (c->c2.fragment, &c->c2.buf, &c->c2.frame_fragment);
#endif

#ifdef ENABLE_LZO
      /* decompress the incoming packet */
      if (lzo_defined (&c->c2.lzo_compwork))
        lzo_decompress (&c->c2.buf, c->c2.buffers->lzo_decompress_buf, &c->c2.lzo_compwork, &c->c2.frame);
#endif

#ifdef PACKET_TRUNCATION_CHECK
      /* if (c->c2.buf.len > 1) --c->c2.buf.len; */
      ipv4_packet_size_verify (BPTR (&c->c2.buf), BLEN (&c->c2.buf),
                               TUNNEL_TYPE (c->c1.tuntap),
                               "POST_DECRYPT",
                               &c->c2.n_trunc_post_decrypt);
#endif

      /*
       * Set our "official" outgoing address, since
       * if buf.len is non-zero, we know the packet
       * authenticated.  In TLS mode we do nothing
       * because TLS mode takes care of source address
       * authentication.
       *
       * Also, update the persisted version of our packet-id.
       */
      if (!TLS_MODE (c))
        link_socket_set_outgoing_addr (&c->c2.buf, lsi, &c->c2.from, NULL, c->c2.es);

      /* reset packet received timer */
      if (c->options.ping_rec_timeout && c->c2.buf.len > 0)
        event_timeout_reset (&c->c2.ping_rec_interval);

      /* increment authenticated receive byte count */
      if (c->c2.buf.len > 0)
        {
          c->c2.link_read_bytes_auth += c->c2.buf.len;
          c->c2.max_recv_size_local = max_int (c->c2.original_recv_size, c->c2.max_recv_size_local);
        }

      /* Did we just receive an openvpn ping packet? */
      if (is_ping_msg (&c->c2.buf))
        {
          dmsg (D_PING, "RECEIVED PING PACKET");
          c->c2.buf.len = 0; /* drop packet */
        }

#ifdef ENABLE_OCC
      /* Did we just receive an OCC packet? */
      if (is_occ_msg (&c->c2.buf))
        process_received_occ_msg (c);
#endif

      buffer_turnover (orig_buf, &c->c2.to_tun, &c->c2.buf, &c->c2.buffers->read_link_buf);

      /* to_tun defined + unopened tuntap can cause deadlock */
      if (!tuntap_defined (c->c1.tuntap))
        c->c2.to_tun.len = 0;
    }
  else
    {
      buf_reset (&c->c2.to_tun);
    }

 done:
  perf_pop ();
  gc_free (&gc);
}
/*
 * Read one datagram/stream segment from the TCP/UDP link socket into
 * c->c2.buf, recording the sender in c->c2.from.  Connection resets on
 * stream protocols are translated into the appropriate restart or exit
 * signal (or, with port-share, redirected to the foreign server).
 */
void
read_incoming_link (struct context *c)
{
  /*
   * Set up for recvfrom call to read datagram
   * sent to our TCP/UDP port.
   */
  int status;

  /*ASSERT (!c->c2.to_tun.len);*/

  perf_push (PERF_READ_IN_LINK);

  c->c2.buf = c->c2.buffers->read_link_buf;
  ASSERT (buf_init (&c->c2.buf, FRAME_HEADROOM_ADJ (&c->c2.frame, FRAME_HEADROOM_MARKER_READ_LINK)));

  status = link_socket_read (c->c2.link_socket,
                             &c->c2.buf,
                             MAX_RW_SIZE_LINK (&c->c2.frame),
                             &c->c2.from);

  if (socket_connection_reset (c->c2.link_socket, status))
    {
#if PORT_SHARE
      if (port_share && socket_foreign_protocol_detected (c->c2.link_socket))
        {
          /* hand the already-read head bytes and the descriptor over to
           * the port-share server */
          const struct buffer *fbuf = socket_foreign_protocol_head (c->c2.link_socket);
          const int sd = socket_foreign_protocol_sd (c->c2.link_socket);
          port_share_redirect (port_share, fbuf, sd);
          register_signal (c, SIGTERM, "port-share-redirect");
        }
      else
#endif
        {
          /* received a disconnect from a connection-oriented protocol */
          if (c->options.inetd)
            {
              register_signal (c, SIGTERM, "connection-reset-inetd");
              msg (D_STREAM_ERRORS, "Connection reset, inetd/xinetd exit [%d]", status);
            }
          else
            {
#ifdef ENABLE_OCC
              /* already in the explicit-exit notification window:
               * ignore the reset instead of restarting */
              if (event_timeout_defined(&c->c2.explicit_exit_notification_interval))
                {
                  msg (D_STREAM_ERRORS, "Connection reset during exit notification period, ignoring [%d]", status);
                  openvpn_sleep(1);
                }
              else
#endif
                {
                  register_signal (c, SIGUSR1, "connection-reset"); /* SOFT-SIGUSR1 -- TCP connection reset */
                  msg (D_STREAM_ERRORS, "Connection reset, restarting [%d]", status);
                }
            }
        }
      perf_pop ();
      return;
    }

  /* check recvfrom status */
  check_status (status, "read", c->c2.link_socket, NULL);

#ifdef ENABLE_SOCKS
  /* Remove socks header if applicable */
  socks_postprocess_incoming_link (c);
#endif

  perf_pop ();
}
/*
 * Write the packet in c->c2.to_tun to the TUN/TAP device, after the
 * outgoing IPv4 header pass (--mssfix, DHCP router extraction, client
 * NAT).  Oversized packets are logged and dropped; to_tun is always
 * reset before returning (except on the empty-buffer early return).
 */
void
process_outgoing_tun (struct context *c)
{
  struct gc_arena gc = gc_new ();

  /*
   * Set up for write() call to TUN/TAP
   * device.
   */
  /* NOTE(review): this early return skips gc_free() -- assumes gc_new()
   * performs no up-front allocation; confirm against gc_new's definition */
  if (c->c2.to_tun.len <= 0)
    return;

  perf_push (PERF_PROC_OUT_TUN);

  /*
   * The --mssfix option requires
   * us to examine the IPv4 header.
   */
  process_ipv4_header (c, PIPV4_MSSFIX|PIPV4_EXTRACT_DHCP_ROUTER|PIPV4_CLIENT_NAT|PIPV4_OUTGOING,
                       &c->c2.to_tun);

  if (c->c2.to_tun.len <= MAX_RW_SIZE_TUN (&c->c2.frame))
    {
      /*
       * Write to TUN/TAP device.
       */
      int size;

#ifdef LOG_RW
      if (c->c2.log_rw)
        fprintf (stderr, "w");
#endif
      dmsg (D_TUN_RW, "TUN WRITE [%d]", BLEN (&c->c2.to_tun));

#ifdef PACKET_TRUNCATION_CHECK
      ipv4_packet_size_verify (BPTR (&c->c2.to_tun),
                               BLEN (&c->c2.to_tun),
                               TUNNEL_TYPE (c->c1.tuntap),
                               "WRITE_TUN",
                               &c->c2.n_trunc_tun_write);
#endif

#ifdef TUN_PASS_BUFFER
      size = write_tun_buffered (c->c1.tuntap, &c->c2.to_tun);
#else
      size = write_tun (c->c1.tuntap, BPTR (&c->c2.to_tun), BLEN (&c->c2.to_tun));
#endif

      if (size > 0)
        c->c2.tun_write_bytes += size;
      check_status (size, "write to TUN/TAP", NULL, c->c1.tuntap);

      /* check written packet size */
      if (size > 0)
        {
          /* Did we write a different size packet than we intended? */
          if (size != BLEN (&c->c2.to_tun))
            msg (D_LINK_ERRORS,
                 "TUN/TAP packet was destructively fragmented on write to %s (tried=%d,actual=%d)",
                 c->c1.tuntap->actual_name,
                 BLEN (&c->c2.to_tun),
                 size);

          /* indicate activity regarding --inactive parameter */
          register_activity (c, size);
        }
    }
  else
    {
      /*
       * This should never happen, probably indicates some kind
       * of MTU mismatch.
       */
      msg (D_LINK_ERRORS, "tun packet too large on write (tried=%d,max=%d)",
           c->c2.to_tun.len,
           MAX_RW_SIZE_TUN (&c->c2.frame));
    }

  buf_reset (&c->c2.to_tun);

  perf_pop ();
  gc_free (&gc);
}
/*
 * Send the packet in c->c2.to_link over the TCP/UDP link socket,
 * updating the traffic shaper, ping-send timer, TOS, byte counters and
 * management interface.  Oversized packets are logged and dropped;
 * to_link is always reset before returning.
 */
void
process_outgoing_link (struct context *c)
{
  struct gc_arena gc = gc_new ();

  perf_push (PERF_PROC_OUT_LINK);

  if (c->c2.to_link.len > 0 && c->c2.to_link.len <= EXPANDED_SIZE (&c->c2.frame))
    {
      /*
       * Setup for call to send/sendto which will send
       * packet to remote over the TCP/UDP port.
       */
      int size = 0;
      ASSERT (link_socket_actual_defined (c->c2.to_link_addr));

#ifdef ENABLE_DEBUG
      /* In gremlin-test mode, we may choose to drop this packet */
      if (!c->options.gremlin || ask_gremlin (c->options.gremlin))
#endif
        {
          /*
           * Let the traffic shaper know how many bytes
           * we wrote.
           */
#ifdef ENABLE_FEATURE_SHAPER
          if (c->options.shaper)
            shaper_wrote_bytes (&c->c2.shaper, BLEN (&c->c2.to_link)
                                + datagram_overhead (c->options.ce.proto));
#endif
          /*
           * Let the pinger know that we sent a packet.
           */
          if (c->options.ping_send_timeout)
            event_timeout_reset (&c->c2.ping_send_interval);

#if PASSTOS_CAPABILITY
          /* Set TOS */
          link_socket_set_tos (c->c2.link_socket);
#endif

          /* Log packet send */
#ifdef LOG_RW
          if (c->c2.log_rw)
            fprintf (stderr, "W");
#endif
          msg (D_LINK_RW, "%s WRITE [%d] to %s: %s",
               proto2ascii (c->c2.link_socket->info.proto, true),
               BLEN (&c->c2.to_link),
               print_link_socket_actual (c->c2.to_link_addr, &gc),
               PROTO_DUMP (&c->c2.to_link, &gc));

          /* Packet send complexified by possible Socks5 usage */
          {
            struct link_socket_actual *to_addr = c->c2.to_link_addr;
#ifdef ENABLE_SOCKS
            int size_delta = 0;
#endif

#ifdef ENABLE_SOCKS
            /* If Socks5 over UDP, prepend header */
            socks_preprocess_outgoing_link (c, &to_addr, &size_delta);
#endif
            /* Send packet */
            size = link_socket_write (c->c2.link_socket, &c->c2.to_link, to_addr);

#ifdef ENABLE_SOCKS
            /* Undo effect of prepend */
            link_socket_write_post_size_adjust (&size, size_delta, &c->c2.to_link);
#endif
          }

          if (size > 0)
            {
              /* update local and global transmit byte counters */
              c->c2.max_send_size_local = max_int (size, c->c2.max_send_size_local);
              c->c2.link_write_bytes += size;
              link_write_bytes_global += size;
#ifdef ENABLE_MEMSTATS
              if (mmap_stats)
                mmap_stats->link_write_bytes = link_write_bytes_global;
#endif
#ifdef ENABLE_MANAGEMENT
              if (management)
                {
                  management_bytes_out (management, size);
#ifdef MANAGEMENT_DEF_AUTH
                  management_bytes_server (management, &c->c2.link_read_bytes, &c->c2.link_write_bytes, &c->c2.mda_context);
#endif
                }
#endif
            }
        }

      /* Check return status */
      check_status (size, "write", c->c2.link_socket, NULL);

      if (size > 0)
        {
          /* Did we write a different size packet than we intended? */
          if (size != BLEN (&c->c2.to_link))
            msg (D_LINK_ERRORS,
                 "TCP/UDP packet was truncated/expanded on write to %s (tried=%d,actual=%d)",
                 print_link_socket_actual (c->c2.to_link_addr, &gc),
                 BLEN (&c->c2.to_link),
                 size);
        }

      /* if not a ping/control message, indicate activity regarding --inactive parameter */
      if (c->c2.buf.len > 0 )
        register_activity (c, size);
    }
  else
    {
      if (c->c2.to_link.len > 0)
        msg (D_LINK_ERRORS, "TCP/UDP packet too large on write to %s (tried=%d,max=%d)",
             print_link_socket_actual (c->c2.to_link_addr, &gc),
             c->c2.to_link.len,
             EXPANDED_SIZE (&c->c2.frame));
    }

  buf_reset (&c->c2.to_link);

  perf_pop ();
  gc_free (&gc);
}
/* * Top level event loop for single-threaded operation. * TCP mode. */ void tunnel_server_tcp(struct context *top) { struct multi_context multi; int status; top->mode = CM_TOP; context_clear_2(top); /* initialize top-tunnel instance */ init_instance_handle_signals(top, top->es, CC_HARD_USR1_TO_HUP); if (IS_SIG(top)) { return; } /* initialize global multi_context object */ multi_init(&multi, top, true, MC_SINGLE_THREADED); /* initialize our cloned top object */ multi_top_init(&multi, top); /* initialize management interface */ init_management_callback_multi(&multi); /* finished with initialization */ initialization_sequence_completed(top, ISC_SERVER); /* --mode server --proto tcp-server */ #ifdef ENABLE_ASYNC_PUSH multi.top.c2.inotify_fd = inotify_init(); if (multi.top.c2.inotify_fd < 0) { msg(D_MULTI_ERRORS | M_ERRNO, "MULTI: inotify_init error"); } #endif /* per-packet event loop */ while (true) { perf_push(PERF_EVENT_LOOP); /* wait on tun/socket list */ multi_get_timeout(&multi, &multi.top.c2.timeval); status = multi_tcp_wait(&multi.top, multi.mtcp); MULTI_CHECK_SIG(&multi); /* check on status of coarse timers */ multi_process_per_second_timers(&multi); /* timeout? */ if (status > 0) { /* process the I/O which triggered select */ multi_tcp_process_io(&multi); MULTI_CHECK_SIG(&multi); } else if (status == 0) { multi_tcp_action(&multi, NULL, TA_TIMEOUT, false); } perf_pop(); } #ifdef ENABLE_ASYNC_PUSH close(top->c2.inotify_fd); #endif /* shut down management interface */ uninit_management_callback_multi(&multi); /* save ifconfig-pool */ multi_ifconfig_pool_persist(&multi, true); /* tear down tunnel instance (unless --persist-tun) */ multi_uninit(&multi); multi_top_free(&multi); close_instance(top); }
/*
 * Wait (with zero timeout, except for tun writes) for the single event
 * required by the given action on a TCP instance.  Returns the action
 * itself if the awaited event is ready, TA_SOCKET_WRITE_DEFERRED /
 * TA_TUN_WRITE_TIMEOUT for writes that could not complete, or TA_UNDEF.
 *
 * tun_input_pending (out): written only for TA_SOCKET_WRITE, whose wait
 * also force-monitors tun reads (IOW_READ_TUN_FORCE); the other actions
 * deliberately overwrite the local parameter with NULL so nothing is
 * reported through it.
 */
static int
multi_tcp_wait_lite(struct multi_context *m, struct multi_instance *mi, const int action,
                    bool *tun_input_pending)
{
    struct context *c = multi_tcp_context(m, mi);
    unsigned int looking_for = 0;

    dmsg(D_MULTI_DEBUG, "MULTI TCP: multi_tcp_wait_lite a=%s mi=" ptr_format,
         pract(action),
         (ptr_type)mi);

    tv_clear(&c->c2.timeval); /* ZERO-TIMEOUT */

    switch (action)
    {
        case TA_TUN_READ:
            looking_for = TUN_READ;
            tun_input_pending = NULL; /* not reported for this action */
            io_wait(c, IOW_READ_TUN);
            break;

        case TA_SOCKET_READ:
            looking_for = SOCKET_READ;
            tun_input_pending = NULL;
            io_wait(c, IOW_READ_LINK);
            break;

        case TA_TUN_WRITE:
            looking_for = TUN_WRITE;
            tun_input_pending = NULL;
            c->c2.timeval.tv_sec = 1; /* For some reason, the Linux 2.2 TUN/TAP driver hits this timeout */
            perf_push(PERF_PROC_OUT_TUN_MTCP);
            io_wait(c, IOW_TO_TUN);
            perf_pop();
            break;

        case TA_SOCKET_WRITE:
            /* tun_input_pending intentionally kept: this wait also
             * force-monitors tun input */
            looking_for = SOCKET_WRITE;
            io_wait(c, IOW_TO_LINK|IOW_READ_TUN_FORCE);
            break;

        default:
            msg(M_FATAL, "MULTI TCP: multi_tcp_wait_lite, unhandled action=%d", action);
    }

    /* report pending tun input (possible only for TA_SOCKET_WRITE) */
    if (tun_input_pending && (c->c2.event_set_status & TUN_READ))
    {
        *tun_input_pending = true;
    }

    if (c->c2.event_set_status & looking_for)
    {
        return action;
    }
    else
    {
        switch (action)
        {
            /* TCP socket output buffer is full */
            case TA_SOCKET_WRITE:
                return TA_SOCKET_WRITE_DEFERRED;

            /* TUN device timed out on accepting write */
            case TA_TUN_WRITE:
                return TA_TUN_WRITE_TIMEOUT;
        }

        return TA_UNDEF;
    }
}