/* * Process an I/O event. */ static void multi_process_io_udp (struct multi_context *m) { const unsigned int status = m->top.c2.event_set_status; const unsigned int mpp_flags = m->top.c2.fast_io ? (MPP_CONDITIONAL_PRE_SELECT | MPP_CLOSE_ON_SIGNAL) : (MPP_PRE_SELECT | MPP_CLOSE_ON_SIGNAL); #ifdef MULTI_DEBUG_EVENT_LOOP char buf[16]; buf[0] = 0; if (status & SOCKET_READ) strcat (buf, "SR/"); else if (status & SOCKET_WRITE) strcat (buf, "SW/"); else if (status & TUN_READ) strcat (buf, "TR/"); else if (status & TUN_WRITE) strcat (buf, "TW/"); printf ("IO %s\n", buf); #endif #ifdef ENABLE_MANAGEMENT if (status & (MANAGEMENT_READ|MANAGEMENT_WRITE)) { ASSERT (management); management_io (management); } #endif /* UDP port ready to accept write */ if (status & SOCKET_WRITE) { multi_process_outgoing_link (m, mpp_flags); } /* TUN device ready to accept write */ else if (status & TUN_WRITE) { multi_process_outgoing_tun (m, mpp_flags); } /* Incoming data on UDP port */ else if (status & SOCKET_READ) { read_incoming_link (&m->top); multi_release_io_lock (m); if (!IS_SIG (&m->top)) multi_process_incoming_link (m, NULL, mpp_flags); } /* Incoming data on TUN device */ else if (status & TUN_READ) { read_incoming_tun (&m->top); multi_release_io_lock (m); if (!IS_SIG (&m->top)) multi_process_incoming_tun (m, mpp_flags); } }
void process_io (struct context *c) { const unsigned int status = c->c2.event_set_status; #ifdef ENABLE_MANAGEMENT if (status & (MANAGEMENT_READ|MANAGEMENT_WRITE)) { ASSERT (management); management_io (management); } #endif /* TCP/UDP port ready to accept write */ if (status & SOCKET_WRITE) { process_outgoing_link (c); } /* TUN device ready to accept write */ else if (status & TUN_WRITE) { process_outgoing_tun (c); } /* Incoming data on TCP/UDP port */ else if (status & SOCKET_READ) { read_incoming_link (c); if (!IS_SIG (c)) process_incoming_link (c); } /* Incoming data on TUN device */ else if (status & TUN_READ) { read_incoming_tun (c); if (!IS_SIG (c)) process_incoming_tun (c); } }
/*
 * Drain the pending event-set results for the TCP multi-client server
 * and dispatch each one to the matching handler.
 *
 * NOTE(review): each event's `arg` field is overloaded — values below
 * MTCP_N are special tags (management fd, TUN fd, listening socket,
 * signal fd, ...), while larger values are taken to be a pointer to the
 * client's multi_instance.  This relies on how the args were registered
 * with the event set elsewhere — confirm against the registration site.
 *
 * The loop stops early as soon as a signal is pending; afterwards any
 * mbuf packets queued for TCP sockets are flushed.
 */
static void
multi_tcp_process_io(struct multi_context *m)
{
    struct multi_tcp *mtcp = m->mtcp;
    int i;

    for (i = 0; i < mtcp->n_esr; ++i)
    {
        struct event_set_return *e = &mtcp->esr[i];

        /* incoming data for instance? */
        if (e->arg >= MTCP_N)
        {
            /* arg is a per-client instance pointer, not a special tag */
            struct multi_instance *mi = (struct multi_instance *) e->arg;
            if (mi)
            {
                /* write-readiness takes priority over read for a client socket */
                if (e->rwflags & EVENT_WRITE)
                {
                    multi_tcp_action(m, mi, TA_SOCKET_WRITE_READY, false);
                }
                else if (e->rwflags & EVENT_READ)
                {
                    multi_tcp_action(m, mi, TA_SOCKET_READ, false);
                }
            }
        }
        else
        {
#ifdef ENABLE_MANAGEMENT
            /* management interface fd ready */
            if (e->arg == MTCP_MANAGEMENT)
            {
                ASSERT(management);
                management_io(management);
            }
            else
#endif
            /* incoming data on TUN? */
            if (e->arg == MTCP_TUN)
            {
                if (e->rwflags & EVENT_WRITE)
                {
                    multi_tcp_action(m, NULL, TA_TUN_WRITE, false);
                }
                else if (e->rwflags & EVENT_READ)
                {
                    multi_tcp_action(m, NULL, TA_TUN_READ, false);
                }
            }
            /* new incoming TCP client attempting to connect? */
            else if (e->arg == MTCP_SOCKET)
            {
                struct multi_instance *mi;
                ASSERT(m->top.c2.link_socket);
                socket_reset_listen_persistent(m->top.c2.link_socket);
                /* multi_create_instance_tcp may return NULL (e.g. accept failure) */
                mi = multi_create_instance_tcp(m);
                if (mi)
                {
                    multi_tcp_action(m, mi, TA_INITIAL, false);
                }
            }
            /* signal received? */
            else if (e->arg == MTCP_SIG)
            {
                get_signal(&m->top.sig->signal_received);
            }
#ifdef ENABLE_ASYNC_PUSH
            /* inotify-style file-close notification (async push) */
            else if (e->arg == MTCP_FILE_CLOSE_WRITE)
            {
                multi_process_file_closed(m, MPP_PRE_SELECT | MPP_RECORD_TOUCH);
            }
#endif
        }

        /* abandon remaining events once a signal is pending */
        if (IS_SIG(&m->top))
        {
            break;
        }
    }
    /* all buffered results consumed (or abandoned on signal) */
    mtcp->n_esr = 0;

    /*
     * Process queued mbuf packets destined for TCP socket
     */
    {
        struct multi_instance *mi;
        while (!IS_SIG(&m->top) && (mi = mbuf_peek(m->mbuf)) != NULL)
        {
            multi_tcp_action(m, mi, TA_SOCKET_WRITE, true);
        }
    }
}