void zmq::stream_engine_t::in_event ()
{
    bool disconnection = false;

    //  If there's no data to process in the buffer...
    if (!insize) {

        //  Retrieve the buffer and read as much data as possible.
        //  Note that buffer can be arbitrarily large. However, we assume
        //  the underlying TCP layer has fixed buffer size and thus the
        //  number of bytes read will always be limited.
        decoder.get_buffer (&inpos, &insize);
        insize = read (inpos, insize);

        //  Check whether the peer has closed the connection.
        if (insize == (size_t) -1) {
            insize = 0;
            disconnection = true;
        }
    }

    //  Push the data to the decoder.
    size_t processed = decoder.process_buffer (inpos, insize);

    if (unlikely (processed == (size_t) -1)) {
        disconnection = true;
    }
    else {

        //  Stop polling for input if we got stuck.
        if (processed < insize)
            reset_pollin (handle);

        //  Adjust the buffer.
        inpos += processed;
        insize -= processed;
    }

    //  Flush all messages the decoder may have produced.
    session->flush ();

    //  Input error has occurred. If the last decoded message has already
    //  been accepted, we terminate the engine immediately. Otherwise, we
    //  stop waiting for input events and postpone the termination until
    //  after the session has accepted the message.
    if (disconnection) {
        input_error = true;
        if (decoder.stalled ())
            reset_pollin (handle);
        else
            error ();
    }
}
void zmq::zmq_engine_t::in_event ()
{
    //  If there's no data to process in the buffer...
    if (!insize) {

        //  Retrieve the buffer and read as much data as possible.
        decoder.get_buffer (&inpos, &insize);
        insize = tcp_socket.read (inpos, insize);

        //  Check whether the peer has closed the connection.
        if (insize == (size_t) -1) {
            insize = 0;
            error ();
            return;
        }
    }

    //  Push the data to the decoder.
    size_t processed = decoder.process_buffer (inpos, insize);

    //  Adjust the buffer.
    inpos += processed;
    insize -= processed;

    //  Stop polling for input if we got stuck.
    if (processed < insize)
        reset_pollin (handle);

    //  Flush all messages the decoder may have produced.
    inout->flush ();
}
void zmq::zmq_engine_t::in_event ()
{
    bool disconnection = false;

    //  If there's no data to process in the buffer...
    if (!insize) {

        //  Retrieve the buffer and read as much data as possible.
        //  Note that buffer can be arbitrarily large. However, we assume
        //  the underlying TCP layer has fixed buffer size and thus the
        //  number of bytes read will always be limited.
        decoder.get_buffer (&inpos, &insize);
        insize = tcp_socket.read (inpos, insize);

        //  Check whether the peer has closed the connection.
        if (insize == (size_t) -1) {
            insize = 0;
            disconnection = true;
        }
    }

    //  Push the data to the decoder.
    size_t processed = decoder.process_buffer (inpos, insize);

    if (unlikely (processed == (size_t) -1)) {
        disconnection = true;
    }
    else {

        //  Stop polling for input if we got stuck.
        if (processed < insize) {

            //  This may happen if queue limits are in effect or when the
            //  init object has read all the information it needs from the
            //  socket and refuses to read more data.
            if (plugged)
                reset_pollin (handle);
        }

        //  Adjust the buffer.
        inpos += processed;
        insize -= processed;
    }

    //  Flush all messages the decoder may have produced.
    //  If IO handler has unplugged engine, flush transient IO handler.
    if (unlikely (!plugged)) {
        zmq_assert (ephemeral_inout);
        ephemeral_inout->flush ();
    }
    else {
        inout->flush ();
    }

    if (inout && disconnection)
        error ();
}
void xs::stream_engine_t::in_event (fd_t fd_)
{
    bool disconnection = false;

    //  If there's no data to process in the buffer...
    if (!insize) {

        //  Retrieve the buffer and read as much data as possible.
        //  Note that buffer can be arbitrarily large. However, we assume
        //  the underlying TCP layer has fixed buffer size and thus the
        //  number of bytes read will always be limited.
        decoder.get_buffer (&inpos, &insize);
        insize = read (inpos, insize);

        //  Check whether the peer has closed the connection.
        if (insize == (size_t) -1) {
            insize = 0;
            disconnection = true;
        }
    }

    //  Push the data to the decoder.
    size_t processed = decoder.process_buffer (inpos, insize);

    if (unlikely (processed == (size_t) -1)) {
        disconnection = true;
    }
    else {

        //  Stop polling for input if we got stuck.
        if (processed < insize) {

            //  This may happen if queue limits are in effect.
            if (plugged)
                reset_pollin (handle);
        }

        //  Adjust the buffer.
        inpos += processed;
        insize -= processed;
    }

    //  Flush all messages the decoder may have produced.
    //  If IO handler has unplugged engine, flush transient IO handler.
    if (unlikely (!plugged)) {
        xs_assert (leftover_session);
        leftover_session->flush ();
    }
    else {
        session->flush ();
    }

    if (session && disconnection)
        error ();
}
void zmq::zmq_engine_t::in_event ()
{
    bool disconnection = false;

    //  If there's no data to process in the buffer...
    if (!insize) {

        //  Retrieve the buffer and read as much data as possible.
        decoder.get_buffer (&inpos, &insize);
        insize = tcp_socket.read (inpos, insize);

        //  Check whether the peer has closed the connection.
        if (insize == (size_t) -1) {
            insize = 0;
            disconnection = true;
        }
    }

    //  Push the data to the decoder.
    size_t processed = decoder.process_buffer (inpos, insize);

    //  Stop polling for input if we got stuck.
    if (processed < insize) {

        //  This may happen if queue limits are in effect or when the
        //  init object has read all the information it needs from the
        //  socket and refuses to read more data.
        reset_pollin (handle);
    }

    //  Adjust the buffer.
    inpos += processed;
    insize -= processed;

    //  Flush all messages the decoder may have produced.
    inout->flush ();

    if (disconnection)
        error ();
}
void zmq::stream_engine_t::in_event ()
{
    zmq_assert (!io_error);

    //  If still handshaking, receive and process the greeting message.
    if (unlikely (handshaking))
        if (!handshake ())
            return;

    zmq_assert (decoder);

    //  If there has been an I/O error, stop polling.
    if (input_stopped) {
        rm_fd (handle);
        io_error = true;
        return;
    }

    //  If there's no data to process in the buffer...
    if (!insize) {

        //  Retrieve the buffer and read as much data as possible.
        //  Note that buffer can be arbitrarily large. However, we assume
        //  the underlying TCP layer has fixed buffer size and thus the
        //  number of bytes read will always be limited.
        size_t bufsize = 0;
        decoder->get_buffer (&inpos, &bufsize);

        const int rc = tcp_read (s, inpos, bufsize);
        if (rc == 0) {
            error (connection_error);
            return;
        }
        if (rc == -1) {
            if (errno != EAGAIN)
                error (connection_error);
            return;
        }

        //  Adjust input size.
        insize = static_cast <size_t> (rc);
    }

    int rc = 0;
    size_t processed = 0;

    while (insize > 0) {
        rc = decoder->decode (inpos, insize, processed);
        zmq_assert (processed <= insize);
        inpos += processed;
        insize -= processed;
        if (rc == 0 || rc == -1)
            break;
        rc = (this->*process_msg) (decoder->msg ());
        if (rc == -1)
            break;
    }

    //  Tear down the connection if we have failed to decode input data
    //  or the session has rejected the message.
    if (rc == -1) {
        if (errno != EAGAIN) {
            error (protocol_error);
            return;
        }
        input_stopped = true;
        reset_pollin (handle);
    }

    session->flush ();
}
void zmq::pgm_receiver_t::in_event ()
{
    //  Read data from the underlying pgm_socket.
    const pgm_tsi_t *tsi = NULL;

    if (has_rx_timer) {
        cancel_timer (rx_timer_id);
        has_rx_timer = false;
    }

    //  TODO: This loop can effectively block other engines in the same I/O
    //  thread in the case of high load.
    while (true) {

        //  Get new batch of data.
        //  Note the workaround made not to break strict-aliasing rules.
        void *tmp = NULL;
        ssize_t received = pgm_socket.receive (&tmp, &tsi);
        inpos = (unsigned char*) tmp;

        //  No data to process. This may happen if the packet received is
        //  neither ODATA nor RDATA.
        if (received == 0) {
            if (errno == ENOMEM || errno == EBUSY) {
                const long timeout = pgm_socket.get_rx_timeout ();
                add_timer (timeout, rx_timer_id);
                has_rx_timer = true;
            }
            break;
        }

        //  Find the peer based on its TSI.
        peers_t::iterator it = peers.find (*tsi);

        //  Data loss. Delete decoder and mark the peer as disjoint.
        if (received == -1) {
            if (it != peers.end ()) {
                it->second.joined = false;
                if (it->second.decoder != NULL) {
                    delete it->second.decoder;
                    it->second.decoder = NULL;
                }
            }
            break;
        }

        //  New peer. Add it to the list of known but unjoined peers.
        if (it == peers.end ()) {
            peer_info_t peer_info = {false, NULL};
            it = peers.insert (peers_t::value_type (*tsi, peer_info)).first;
        }

        insize = static_cast <size_t> (received);

        //  Read the offset of the first message in the current packet.
        zmq_assert (insize >= sizeof (uint16_t));
        uint16_t offset = get_uint16 (inpos);
        inpos += sizeof (uint16_t);
        insize -= sizeof (uint16_t);

        //  Join the stream if needed.
        if (!it->second.joined) {

            //  There is no beginning of the message in current packet.
            //  Ignore the data.
            if (offset == 0xffff)
                continue;

            zmq_assert (offset <= insize);
            zmq_assert (it->second.decoder == NULL);

            //  We have to move data to the beginning of the first message.
            inpos += offset;
            insize -= offset;

            //  Mark the stream as joined.
            it->second.joined = true;

            //  Create and connect decoder for the peer.
            it->second.decoder = new (std::nothrow)
                v1_decoder_t (0, options.maxmsgsize);
            alloc_assert (it->second.decoder);
        }

        int rc = process_input (it->second.decoder);
        if (rc == -1) {
            if (errno == EAGAIN) {
                active_tsi = tsi;

                //  Stop polling.
                reset_pollin (pipe_handle);
                reset_pollin (socket_handle);

                break;
            }

            it->second.joined = false;
            delete it->second.decoder;
            it->second.decoder = NULL;
            insize = 0;
        }
    }

    //  Flush any messages decoder may have produced.
    session->flush ();
}
void zmq::socks_connecter_t::in_event ()
{
    zmq_assert (status != unplugged
             && status != waiting_for_reconnect_time);

    if (status == waiting_for_choice) {
        int rc = choice_decoder.input (s);
        if (rc == 0 || rc == -1)
            error ();
        else
        if (choice_decoder.message_ready ()) {
            const socks_choice_t choice = choice_decoder.decode ();
            rc = process_server_response (choice);
            if (rc == -1)
                error ();
            else {
                std::string hostname = "";
                uint16_t port = 0;
                if (parse_address (addr->address, hostname, port) == -1)
                    error ();
                else {
                    request_encoder.encode (
                        socks_request_t (1, hostname, port));
                    reset_pollin (handle);
                    set_pollout (handle);
                    status = sending_request;
                }
            }
        }
    }
    else
    if (status == waiting_for_response) {
        int rc = response_decoder.input (s);
        if (rc == 0 || rc == -1)
            error ();
        else
        if (response_decoder.message_ready ()) {
            const socks_response_t response = response_decoder.decode ();
            rc = process_server_response (response);
            if (rc == -1)
                error ();
            else {
                //  Create the engine object for this connection.
                stream_engine_t *engine = new (std::nothrow)
                    stream_engine_t (s, options, endpoint);
                alloc_assert (engine);

                //  Attach the engine to the corresponding session object.
                send_attach (session, engine);

                socket->event_connected (endpoint, (int) s);

                rm_fd (handle);
                s = -1;
                status = unplugged;

                //  Shut the connecter down.
                terminate ();
            }
        }
    }
    else
        error ();
}
//  Called when POLLIN is fired on the socket.
void zmq::udp_receiver_t::in_event (fd_t fd_)
{
    //  Receive a packet.
    ssize_t recv_bytes = recv (socket, data, sizeof data, 0);

    //  At the moment, go back to polling on EAGAIN and assert on any
    //  other error.
    if ((recv_bytes < 0) && errno == EAGAIN)
        return;
    assert (recv_bytes > 0);

    //  Parse UDP packet header.
    unsigned char *data_p = data;
    uint32_t seq_no = get_uint32 (data_p);
    uint16_t offset = get_uint16 (data_p + 4);
    data_p += udp_header_size;
    recv_bytes -= udp_header_size;

    //  If this is our first packet, join the message stream.
    if (last_seq_no == 0) {
        if (offset == 0xffff)
            return;
        else {
            data_p += offset;
            recv_bytes -= offset;
        }
    }

    //  Otherwise, decide based on the sequence number.
    else {

        //  If this packet is in sequence, process the whole packet.
        if ((last_seq_no + 1) == seq_no)
            ;

        //  Otherwise, if it is an old packet, drop it.
        else
        if (seq_no <= last_seq_no)
            return;

        //  Otherwise we have packet loss, rejoin the message stream.
        else {
            if (offset == 0xffff)
                return;
            else {
                data_p += offset;
                recv_bytes -= offset;

                //  Re-create decoder to clear state.
                delete decoder;
                decoder = NULL;
                decoder = new (std::nothrow)
                    v1_decoder_t (in_batch_size, options.maxmsgsize);
                alloc_assert (decoder);
                //decoder->set_session (session);
            }
        }
    }

    //  If we get here, we will process this packet and it becomes our
    //  last seen sequence number.
    last_seq_no = seq_no;

    //  Decode data and push it to our pipe.
    ssize_t processed_bytes = decoder->process_buffer (data_p, recv_bytes);
    if (processed_bytes < recv_bytes) {

        //  Some data could not be written to the pipe. Save it for later.
        pending_bytes = recv_bytes - processed_bytes;
        pending_p = data_p + processed_bytes;

        //  Stop polling. We will be restarted by a call to activate_in ().
        reset_pollin (socket_handle);
    }

    //  Flush any messages produced by the decoder to the pipe.
    session->flush ();
}
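The receiver above assumes each datagram starts with a 6-byte header: a 4-byte sequence number followed by a 2-byte offset to the first message boundary, where 0xffff means no message begins in this packet. The sketch below illustrates that layout from the sender's side, assuming network byte order because get_uint32 ()/get_uint16 () read big-endian in this codebase; write_udp_header is a hypothetical helper name, not a function of the engine.

//  Illustrative sketch only: serializing the [seq_no:4][offset:2] header
//  that udp_receiver_t::in_event () parses. Big-endian order is assumed;
//  0xffff in the offset field means "no message starts in this packet".
#include <cstddef>
#include <stdint.h>

static const uint16_t no_message_offset = 0xffff;

//  Writes the 6-byte header into buf and returns its size.
static size_t write_udp_header (unsigned char *buf, uint32_t seq_no,
                                uint16_t first_msg_offset)
{
    buf[0] = (unsigned char) (seq_no >> 24);
    buf[1] = (unsigned char) (seq_no >> 16);
    buf[2] = (unsigned char) (seq_no >> 8);
    buf[3] = (unsigned char) (seq_no);
    buf[4] = (unsigned char) (first_msg_offset >> 8);
    buf[5] = (unsigned char) (first_msg_offset);
    return 6;
}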
void zmq::pgm_receiver_t::in_event ()
{
    //  Read data from the underlying pgm_socket.
    unsigned char *data = NULL;
    const pgm_tsi_t *tsi = NULL;

    zmq_assert (pending_bytes == 0);

    //  TODO: This loop can effectively block other engines in the same I/O
    //  thread in the case of high load.
    while (true) {

        //  Get new batch of data.
        ssize_t received = pgm_socket.receive ((void**) &data, &tsi);

        //  No data to process. This may happen if the packet received is
        //  neither ODATA nor RDATA.
        if (received == 0)
            break;

        //  Find the peer based on its TSI.
        peers_t::iterator it = peers.find (*tsi);

        //  Data loss. Delete decoder and mark the peer as disjoint.
        if (received == -1) {
            if (it != peers.end ()) {
                it->second.joined = false;
                if (it->second.decoder == mru_decoder)
                    mru_decoder = NULL;
                if (it->second.decoder != NULL) {
                    delete it->second.decoder;
                    it->second.decoder = NULL;
                }
            }
            break;
        }

        //  New peer. Add it to the list of known but unjoined peers.
        if (it == peers.end ()) {
            peer_info_t peer_info = {false, NULL};
            it = peers.insert (std::make_pair (*tsi, peer_info)).first;
        }

        //  Read the offset of the first message in the current packet.
        zmq_assert ((size_t) received >= sizeof (uint16_t));
        uint16_t offset = get_uint16 (data);
        data += sizeof (uint16_t);
        received -= sizeof (uint16_t);

        //  Join the stream if needed.
        if (!it->second.joined) {

            //  There is no beginning of the message in current packet.
            //  Ignore the data.
            if (offset == 0xffff)
                continue;

            zmq_assert (offset <= received);
            zmq_assert (it->second.decoder == NULL);

            //  We have to move data to the beginning of the first message.
            data += offset;
            received -= offset;

            //  Mark the stream as joined.
            it->second.joined = true;

            //  Create and connect decoder for the peer.
            it->second.decoder = new (std::nothrow) zmq_decoder_t (0);
            it->second.decoder->set_inout (inout);
        }

        mru_decoder = it->second.decoder;

        //  Push all the data to the decoder.
        ssize_t processed = it->second.decoder->process_buffer (data, received);
        if (processed < received) {

            //  Save some state so we can resume the decoding process later.
            pending_bytes = received - processed;
            pending_ptr = data + processed;

            //  Stop polling.
            reset_pollin (pipe_handle);
            reset_pollin (socket_handle);

            break;
        }
    }

    //  Flush any messages decoder may have produced.
    inout->flush ();
}
void xs::pgm_receiver_t::in_event (fd_t fd_)
{
    //  Read data from the underlying pgm_socket.
    unsigned char *data = NULL;
    const pgm_tsi_t *tsi = NULL;

    if (pending_bytes > 0)
        return;

    if (rx_timer) {
        rm_timer (rx_timer);
        rx_timer = NULL;
    }

    //  TODO: This loop can effectively block other engines in the same I/O
    //  thread in the case of high load.
    while (true) {

        //  Get new batch of data.
        //  Note the workaround made not to break strict-aliasing rules.
        void *tmp = NULL;
        ssize_t received = pgm_socket.receive (&tmp, &tsi);
        data = (unsigned char*) tmp;

        //  No data to process. This may happen if the packet received is
        //  neither ODATA nor RDATA.
        if (received == 0) {
            if (errno == ENOMEM || errno == EBUSY) {
                const long timeout = pgm_socket.get_rx_timeout ();
                xs_assert (!rx_timer);
                rx_timer = add_timer (timeout);
            }
            break;
        }

        //  Find the peer based on its TSI.
        peers_t::iterator it = peers.find (*tsi);

        //  Data loss. Delete decoder and mark the peer as disjoint.
        if (received == -1) {
            if (it != peers.end ()) {
                it->second.joined = false;
                if (it->second.decoder == mru_decoder)
                    mru_decoder = NULL;
                if (it->second.decoder != NULL) {
                    delete it->second.decoder;
                    it->second.decoder = NULL;
                }
            }
            break;
        }

        //  New peer. Add it to the list of known but unjoined peers.
        if (it == peers.end ()) {
            peer_info_t peer_info = {false, NULL};
            it = peers.insert (peers_t::value_type (*tsi, peer_info)).first;
        }

        //  Read the offset of the first message in the current packet.
        xs_assert ((size_t) received >= sizeof (uint16_t));
        uint16_t offset = get_uint16 (data);
        data += sizeof (uint16_t);
        received -= sizeof (uint16_t);

        //  Join the stream if needed.
        if (!it->second.joined) {

            //  There is no beginning of the message in current packet.
            //  Ignore the data.
            if (offset == 0xffff)
                continue;

            xs_assert (offset <= received);
            xs_assert (it->second.decoder == NULL);

            //  We have to move data to the beginning of the first message.
            data += offset;
            received -= offset;

            //  Mark the stream as joined.
            it->second.joined = true;

            //  Create and connect decoder for the peer.
            it->second.decoder = new (std::nothrow)
                decoder_t (0, options.maxmsgsize);
            alloc_assert (it->second.decoder);
            it->second.decoder->set_session (session);
        }

        mru_decoder = it->second.decoder;

        //  Push all the data to the decoder.
        ssize_t processed = it->second.decoder->process_buffer (data, received);
        if (processed < received) {

            //  Save some state so we can resume the decoding process later.
            pending_bytes = received - processed;
            pending_ptr = data + processed;

            //  Stop polling.
            reset_pollin (pipe_handle);
            reset_pollin (socket_handle);

            //  Reset outstanding timer.
            if (rx_timer) {
                rm_timer (rx_timer);
                rx_timer = NULL;
            }

            break;
        }
    }

    //  Flush any messages decoder may have produced.
    session->flush ();
}
void xs::stream_engine_t::in_event (fd_t fd_)
{
    bool disconnection = false;

    //  If we have not yet received the full protocol header...
    if (unlikely (!options.legacy_protocol && !header_received)) {

        //  Read remaining header bytes.
        int hbytes = read (header_pos, header_remaining);

        //  Check whether the peer has closed the connection.
        if (hbytes == -1) {
            error ();
            return;
        }

        header_remaining -= hbytes;
        header_pos += hbytes;

        //  If we did not read the whole header, poll for more.
        if (header_remaining)
            return;

        //  If the protocol headers do not match, close the connection.
        if (memcmp (in_header, desired_header, sizeof in_header) != 0) {
            error ();
            return;
        }

        //  Done with protocol header; proceed to read data.
        header_received = true;
    }

    //  If there's no data to process in the buffer...
    if (!insize) {

        //  Retrieve the buffer and read as much data as possible.
        //  Note that buffer can be arbitrarily large. However, we assume
        //  the underlying TCP layer has fixed buffer size and thus the
        //  number of bytes read will always be limited.
        decoder.get_buffer (&inpos, &insize);
        insize = read (inpos, insize);

        //  Check whether the peer has closed the connection.
        if (insize == (size_t) -1) {
            insize = 0;
            disconnection = true;
        }
    }

    //  Push the data to the decoder.
    size_t processed = decoder.process_buffer (inpos, insize);

    if (unlikely (processed == (size_t) -1)) {
        disconnection = true;
    }
    else {

        //  Stop polling for input if we got stuck.
        if (processed < insize) {

            //  This may happen if queue limits are in effect.
            if (plugged)
                reset_pollin (handle);
        }

        //  Adjust the buffer.
        inpos += processed;
        insize -= processed;
    }

    //  Flush all messages the decoder may have produced.
    //  If IO handler has unplugged engine, flush transient IO handler.
    if (unlikely (!plugged)) {
        xs_assert (leftover_session);
        leftover_session->flush ();
    }
    else {
        session->flush ();
    }

    if (session && disconnection)
        error ();
}
void zmq::udp_engine_t::in_event ()
{
    struct sockaddr_in in_address;
    socklen_t in_addrlen = sizeof (sockaddr_in);
#ifdef ZMQ_HAVE_WINDOWS
    int nbytes = recvfrom (fd, (char *) in_buffer, MAX_UDP_MSG, 0,
                           (sockaddr *) &in_address, &in_addrlen);
    const int last_error = WSAGetLastError ();
    if (nbytes == SOCKET_ERROR) {
        wsa_assert (last_error == WSAENETDOWN || last_error == WSAENETRESET
                    || last_error == WSAEWOULDBLOCK);
        return;
    }
#elif defined ZMQ_HAVE_VXWORKS
    int nbytes = recvfrom (fd, (char *) in_buffer, MAX_UDP_MSG, 0,
                           (sockaddr *) &in_address, (int *) &in_addrlen);
    if (nbytes == -1) {
        errno_assert (errno != EBADF && errno != EFAULT && errno != ENOMEM
                      && errno != ENOTSOCK);
        return;
    }
#else
    int nbytes = recvfrom (fd, in_buffer, MAX_UDP_MSG, 0,
                           (sockaddr *) &in_address, &in_addrlen);
    if (nbytes == -1) {
        errno_assert (errno != EBADF && errno != EFAULT && errno != ENOMEM
                      && errno != ENOTSOCK);
        return;
    }
#endif

    int rc;
    int body_size;
    int body_offset;
    msg_t msg;

    if (options.raw_socket) {
        sockaddr_to_msg (&msg, &in_address);

        body_size = nbytes;
        body_offset = 0;
    }
    else {
        char *group_buffer = (char *) in_buffer + 1;
        int group_size = in_buffer[0];

        //  The group name doesn't fit into the datagram, just ignore it.
        //  Checked before initialising the message to avoid leaking it.
        if (nbytes - 1 < group_size)
            return;

        rc = msg.init_size (group_size);
        errno_assert (rc == 0);
        msg.set_flags (msg_t::more);
        memcpy (msg.data (), group_buffer, group_size);

        body_size = nbytes - 1 - group_size;
        body_offset = 1 + group_size;
    }

    //  Push group description to session.
    rc = session->push_msg (&msg);
    errno_assert (rc == 0 || (rc == -1 && errno == EAGAIN));

    //  Group description message doesn't fit in the pipe, drop.
    if (rc != 0) {
        rc = msg.close ();
        errno_assert (rc == 0);
        reset_pollin (handle);
        return;
    }

    rc = msg.close ();
    errno_assert (rc == 0);
    rc = msg.init_size (body_size);
    errno_assert (rc == 0);
    memcpy (msg.data (), in_buffer + body_offset, body_size);

    //  Push message body to session.
    rc = session->push_msg (&msg);

    //  Message body doesn't fit in the pipe, drop and reset session state.
    if (rc != 0) {
        rc = msg.close ();
        errno_assert (rc == 0);
        session->reset ();
        reset_pollin (handle);
        return;
    }

    rc = msg.close ();
    errno_assert (rc == 0);
    session->flush ();
}
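The non-raw branch above assumes each datagram carries a one-byte group-name length, the group name itself, and then the message body. The sketch below builds a datagram in that layout; make_dish_datagram is a hypothetical helper for illustration and is not part of libzmq.

//  Illustrative sketch only: constructing the [group length:1][group name][body]
//  datagram layout that udp_engine_t::in_event () parses.
#include <cstddef>
#include <string>
#include <vector>

static std::vector<unsigned char> make_dish_datagram (
    const std::string &group, const void *body, size_t body_size)
{
    std::vector<unsigned char> datagram;

    //  Group names longer than 255 bytes cannot be encoded in one byte.
    if (group.size () > 255)
        return datagram;

    datagram.reserve (1 + group.size () + body_size);
    datagram.push_back ((unsigned char) group.size ());
    datagram.insert (datagram.end (), group.begin (), group.end ());

    const unsigned char *p = (const unsigned char *) body;
    datagram.insert (datagram.end (), p, p + body_size);
    return datagram;
}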