static void test_atreadeof(abts_case *tc, void *data)
{
    apr_status_t rv;
    apr_socket_t *sock;
    apr_socket_t *sock2;
    apr_proc_t proc;
    apr_size_t length = STRLEN;
    char datastr[STRLEN];
    int atreadeof = -1;

    sock = setup_socket(tc);
    if (!sock) return;

    launch_child(tc, &proc, "write", p);

    rv = apr_socket_accept(&sock2, sock, p);
    APR_ASSERT_SUCCESS(tc, "Problem with receiving connection", rv);

    /* Check that the remote socket is still open */
    rv = apr_socket_atreadeof(sock2, &atreadeof);
    APR_ASSERT_SUCCESS(tc, "Determine whether at EOF, #1", rv);
    ABTS_INT_EQUAL(tc, 0, atreadeof);

    memset(datastr, 0, STRLEN);
    apr_socket_recv(sock2, datastr, &length);

    /* Make sure that the server received the data we sent */
    ABTS_STR_EQUAL(tc, DATASTR, datastr);
    ABTS_SIZE_EQUAL(tc, strlen(datastr), wait_child(tc, &proc));

    /* The child is dead, so should be the remote socket */
    rv = apr_socket_atreadeof(sock2, &atreadeof);
    APR_ASSERT_SUCCESS(tc, "Determine whether at EOF, #2", rv);
    ABTS_INT_EQUAL(tc, 1, atreadeof);

    rv = apr_socket_close(sock2);
    APR_ASSERT_SUCCESS(tc, "Problem closing connected socket", rv);

    launch_child(tc, &proc, "close", p);

    rv = apr_socket_accept(&sock2, sock, p);
    APR_ASSERT_SUCCESS(tc, "Problem with receiving connection", rv);

    /* The child closed the socket as soon as it could... */
    rv = apr_socket_atreadeof(sock2, &atreadeof);
    APR_ASSERT_SUCCESS(tc, "Determine whether at EOF, #3", rv);
    if (!atreadeof) {
        /* ... but perhaps not yet; wait a moment */
        apr_sleep(apr_time_from_msec(5));
        rv = apr_socket_atreadeof(sock2, &atreadeof);
        APR_ASSERT_SUCCESS(tc, "Determine whether at EOF, #4", rv);
    }
    ABTS_INT_EQUAL(tc, 1, atreadeof);

    wait_child(tc, &proc);

    rv = apr_socket_close(sock2);
    APR_ASSERT_SUCCESS(tc, "Problem closing connected socket", rv);
    rv = apr_socket_close(sock);
    APR_ASSERT_SUCCESS(tc, "Problem closing socket", rv);
}
void zmq::ipc_listener_t::in_event ()
{
    fd_t fd = accept ();

    //  If connection was reset by the peer in the meantime, just ignore it.
    //  TODO: Handle specific errors like ENFILE/EMFILE etc.
    if (fd == retired_fd) {
        socket->monitor_event (ZMQ_EVENT_ACCEPT_FAILED, endpoint.c_str (), zmq_errno ());
        return;
    }

    //  Create the engine object for this connection.
    stream_engine_t *engine = new (std::nothrow) stream_engine_t (fd, options);
    alloc_assert (engine);

    //  Choose I/O thread to run connecter in. Given that we are already
    //  running in an I/O thread, there must be at least one available.
    io_thread_t *io_thread = choose_io_thread (options.affinity);
    zmq_assert (io_thread);

    //  Create and launch a session object.
    session_base_t *session = session_base_t::create (io_thread, false, socket,
        options, NULL);
    errno_assert (session);
    session->inc_seqnum ();
    launch_child (session);
    send_attach (session, engine, false);
    socket->monitor_event (ZMQ_EVENT_ACCEPTED, endpoint.c_str (), fd);
}
static void test_recv(abts_case *tc, void *data)
{
    apr_status_t rv;
    apr_socket_t *sock;
    apr_socket_t *sock2;
    apr_proc_t proc;
    int protocol;
    apr_size_t length = STRLEN;
    char datastr[STRLEN];

    sock = setup_socket(tc);
    if (!sock) return;

    launch_child(tc, &proc, "write", p);

    rv = apr_socket_accept(&sock2, sock, p);
    APR_ASSERT_SUCCESS(tc, "Problem with receiving connection", rv);

    apr_socket_protocol_get(sock2, &protocol);
    ABTS_INT_EQUAL(tc, APR_PROTO_TCP, protocol);

    memset(datastr, 0, STRLEN);
    apr_socket_recv(sock2, datastr, &length);

    /* Make sure that the server received the data we sent */
    ABTS_STR_EQUAL(tc, DATASTR, datastr);
    ABTS_SIZE_EQUAL(tc, strlen(datastr), wait_child(tc, &proc));

    rv = apr_socket_close(sock2);
    APR_ASSERT_SUCCESS(tc, "Problem closing connected socket", rv);
    rv = apr_socket_close(sock);
    APR_ASSERT_SUCCESS(tc, "Problem closing socket", rv);
}
static void test_timeout(abts_case *tc, void *data)
{
    apr_status_t rv;
    apr_socket_t *sock;
    apr_socket_t *sock2;
    apr_proc_t proc;
    int protocol;
    int exit;

    sock = setup_socket(tc);
    if (!sock) return;

    launch_child(tc, &proc, "read", p);

    rv = apr_socket_accept(&sock2, sock, p);
    APR_ASSERT_SUCCESS(tc, "Problem with receiving connection", rv);

    apr_socket_protocol_get(sock2, &protocol);
    ABTS_INT_EQUAL(tc, APR_PROTO_TCP, protocol);

    /* We didn't write any data, so make sure the child program returns
     * an error.
     */
    exit = wait_child(tc, &proc);
    ABTS_INT_EQUAL(tc, SOCKET_TIMEOUT, exit);

    rv = apr_socket_close(sock2);
    APR_ASSERT_SUCCESS(tc, "Problem closing connected socket", rv);
    rv = apr_socket_close(sock);
    APR_ASSERT_SUCCESS(tc, "Problem closing socket", rv);
}
static void test_send(abts_case *tc, void *data)
{
    apr_status_t rv;
    apr_socket_t *sock;
    apr_socket_t *sock2;
    apr_proc_t proc;
    int protocol;
    apr_size_t length;

    sock = setup_socket(tc);
    if (!sock) return;

    launch_child(tc, &proc, "read", p);

    rv = apr_socket_accept(&sock2, sock, p);
    APR_ASSERT_SUCCESS(tc, "Problem with receiving connection", rv);

    apr_socket_protocol_get(sock2, &protocol);
    ABTS_INT_EQUAL(tc, APR_PROTO_TCP, protocol);

    length = strlen(DATASTR);
    apr_socket_send(sock2, DATASTR, &length);

    /* Make sure that the client received the data we sent */
    ABTS_SIZE_EQUAL(tc, strlen(DATASTR), wait_child(tc, &proc));

    rv = apr_socket_close(sock2);
    APR_ASSERT_SUCCESS(tc, "Problem closing connected socket", rv);
    rv = apr_socket_close(sock);
    APR_ASSERT_SUCCESS(tc, "Problem closing socket", rv);
}
int zmq::socket_base_t::bind (const char *addr_)
{
    if (unlikely (ctx_terminated)) {
        errno = ETERM;
        return -1;
    }

    //  Parse addr_ string.
    std::string protocol;
    std::string address;
    int rc = parse_uri (addr_, protocol, address);
    if (rc != 0)
        return -1;

    rc = check_protocol (protocol);
    if (rc != 0)
        return -1;

    if (protocol == "inproc" || protocol == "sys") {
        endpoint_t endpoint = {this, options};
        return register_endpoint (addr_, endpoint);
    }

    if (protocol == "tcp" || protocol == "ipc") {

        //  Choose I/O thread to run the listener in.
        io_thread_t *io_thread = choose_io_thread (options.affinity);
        if (!io_thread) {
            errno = EMTHREAD;
            return -1;
        }

        //  Create and run the listener.
        zmq_listener_t *listener = new (std::nothrow) zmq_listener_t (
            io_thread, this, options);
        alloc_assert (listener);
        int rc = listener->set_address (protocol.c_str (), address.c_str ());
        if (rc != 0) {
            delete listener;
            return -1;
        }
        launch_child (listener);

        return 0;
    }

    if (protocol == "pgm" || protocol == "epgm") {

        //  For convenience's sake, bind can be used interchangeably with
        //  connect for PGM and EPGM transports.
        return connect (addr_);
    }

    zmq_assert (false);
    return -1;
}
void ensure_handlers_started()
{
    struct handler *h = handlers;
    while(h) {
        struct handler *next = h->next;
        if(h->h_type == PERSISTENT) {
            if(!launch_child(h)) {
                remove_handler(h);
            }
        }
        h = next;
    }
}
void broadcast(struct message *m, fd_set *writefds)
{
    struct handler *h = handlers;
    while(h) {
        if(h->h_type == PERSISTENT) {
            /* Persistent handlers stay running; just write the message to
             * their stdin in the format they expect. */
            char *buff;
            switch(h->i_type) {
                case JSON:
                    buff = message_to_json(m);
                    if(! (ensure_write(h->fdout, buff, strlen(buff)) &&
                          ensure_write(h->fdout, "\n", 1))) {
                        fprintf(stderr, "Failed to write to handler %s\n", h->command);
                    }
                    break;
                case RAW:
                    buff = message_to_IRC(m);
                    if( ! ensure_write(h->fdout, buff, strlen(buff))) {
                        fprintf(stderr, "Failed to write to handler %s\n", h->command);
                    }
                    break;
            }
        } else if(h->h_type == TRANSIENT) {
            /* Transient handlers are restarted for every message and get
             * exactly one message before their stdin is closed. */
            kill_child(h);
            launch_child(h);
            char *buff;
            switch(h->i_type) {
                case JSON:
                    buff = message_to_json(m);
                    if(! (ensure_write(h->fdout, buff, strlen(buff)) &&
                          ensure_write(h->fdout, "\n", 1))) {
                        fprintf(stderr, "Failed to write to handler %s\n", h->command);
                    }
                    close(h->fdout);
                    break;
                case RAW:
                    buff = message_to_IRC(m);
                    if( ! ensure_write(h->fdout, buff, strlen(buff))) {
                        fprintf(stderr, "Failed to write to handler %s\n", h->command);
                    }
                    close(h->fdout);
                    break;
            }
        }
        h = h->next;
    }
}
void zmq::zmq_listener_t::in_event ()
{
    fd_t fd = tcp_listener.accept ();

    //  If connection was reset by the peer in the meantime, just ignore it.
    //  TODO: Handle specific errors like ENFILE/EMFILE etc.
    if (fd == retired_fd)
        return;

    //  Choose I/O thread to run connecter in. Given that we are already
    //  running in an I/O thread, there must be at least one available.
    io_thread_t *io_thread = choose_io_thread (options.affinity);
    zmq_assert (io_thread);

    //  Create and launch an init object.
    zmq_init_t *init = new (std::nothrow) zmq_init_t (io_thread, socket,
        NULL, fd, options);
    alloc_assert (init);
    launch_child (init);
}
void zmq::vmci_listener_t::in_event ()
{
    fd_t fd = accept ();

    //  If connection was reset by the peer in the meantime, just ignore it.
    if (fd == retired_fd) {
        socket->event_accept_failed (endpoint, zmq_errno ());
        return;
    }

    tune_vmci_buffer_size (this->get_ctx (), fd, options.vmci_buffer_size,
        options.vmci_buffer_min_size, options.vmci_buffer_max_size);

    if (options.vmci_connect_timeout > 0) {
#if defined ZMQ_HAVE_WINDOWS
        tune_vmci_connect_timeout (this->get_ctx (), fd,
            options.vmci_connect_timeout);
#else
        struct timeval timeout = {0, options.vmci_connect_timeout * 1000};
        tune_vmci_connect_timeout (this->get_ctx (), fd, timeout);
#endif
    }

    //  Create the engine object for this connection.
    stream_engine_t *engine = new (std::nothrow) stream_engine_t (fd, options, endpoint);
    alloc_assert (engine);

    //  Choose I/O thread to run connecter in. Given that we are already
    //  running in an I/O thread, there must be at least one available.
    io_thread_t *io_thread = choose_io_thread (options.affinity);
    zmq_assert (io_thread);

    //  Create and launch a session object.
    session_base_t *session = session_base_t::create (io_thread, false, socket,
        options, NULL);
    errno_assert (session);
    session->inc_seqnum ();
    launch_child (session);
    send_attach (session, engine, false);
    socket->event_accepted (endpoint, fd);
}
void zmq::tcp_listener_t::in_event ()
{
    fd_t fd = accept ();

    //  If connection was reset by the peer in the meantime, just ignore it.
    //  TODO: Handle specific errors like ENFILE/EMFILE etc.
    if (fd == retired_fd) {
        socket->event_accept_failed (endpoint, zmq_errno ());
        return;
    }

    int rc = tune_tcp_socket (fd);
    rc = rc | tune_tcp_keepalives (fd, options.tcp_keepalive,
        options.tcp_keepalive_cnt, options.tcp_keepalive_idle,
        options.tcp_keepalive_intvl);
    rc = rc | tune_tcp_maxrt (fd, options.tcp_maxrt);
    if (rc != 0) {
        socket->event_accept_failed (endpoint, zmq_errno ());
        return;
    }

    //  Create the engine object for this connection.
    stream_engine_t *engine = new (std::nothrow) stream_engine_t (fd, options, endpoint);
    alloc_assert (engine);

    //  Choose I/O thread to run connecter in. Given that we are already
    //  running in an I/O thread, there must be at least one available.
    io_thread_t *io_thread = choose_io_thread (options.affinity);
    zmq_assert (io_thread);

    //  Create and launch a session object.
    session_base_t *session = session_base_t::create (io_thread, false, socket,
        options, NULL);
    errno_assert (session);
    session->inc_seqnum ();
    launch_child (session);
    send_attach (session, engine, false);
    socket->event_accepted (endpoint, (int) fd);
}
void zmq::socket_base_t::add_endpoint (const char *addr_, own_t *endpoint_)
{
    //  Activate the session. Make it a child of this socket.
    launch_child (endpoint_);
    endpoints.insert (std::make_pair (std::string (addr_), endpoint_));
}
void zmq::socket_base_t::add_endpoint (const char *addr_, own_t *endpoint_, pipe_t *pipe)
{
    //  Activate the session. Make it a child of this socket.
    launch_child (endpoint_);
    endpoints.insert (endpoints_t::value_type (std::string (addr_),
        endpoint_pipe_t (endpoint_, pipe)));
}
int zmq::socket_base_t::connect (const char *addr_)
{
    if (unlikely (ctx_terminated)) {
        errno = ETERM;
        return -1;
    }

    //  Parse addr_ string.
    std::string protocol;
    std::string address;
    int rc = parse_uri (addr_, protocol, address);
    if (rc != 0)
        return -1;

    rc = check_protocol (protocol);
    if (rc != 0)
        return -1;

    if (protocol == "inproc" || protocol == "sys") {

        //  TODO: inproc connect is specific with respect to creating pipes
        //  as there's no 'reconnect' functionality implemented. Once that
        //  is in place we should follow generic pipe creation algorithm.

        //  Find the peer endpoint.
        endpoint_t peer = find_endpoint (addr_);
        if (!peer.socket)
            return -1;

        reader_t *inpipe_reader = NULL;
        writer_t *inpipe_writer = NULL;
        reader_t *outpipe_reader = NULL;
        writer_t *outpipe_writer = NULL;

        //  The total HWM for an inproc connection should be the sum of
        //  the binder's HWM and the connector's HWM. (Similarly for the
        //  SWAP.)
        int64_t hwm;
        if (options.hwm == 0 || peer.options.hwm == 0)
            hwm = 0;
        else
            hwm = options.hwm + peer.options.hwm;
        int64_t swap;
        if (options.swap == 0 && peer.options.swap == 0)
            swap = 0;
        else
            swap = options.swap + peer.options.swap;

        //  Create inbound pipe, if required.
        if (options.requires_in)
            create_pipe (this, peer.socket, hwm, swap,
                &inpipe_reader, &inpipe_writer);

        //  Create outbound pipe, if required.
        if (options.requires_out)
            create_pipe (peer.socket, this, hwm, swap,
                &outpipe_reader, &outpipe_writer);

        //  Attach the pipes to this socket object.
        attach_pipes (inpipe_reader, outpipe_writer, peer.options.identity);

        //  Attach the pipes to the peer socket. Note that peer's seqnum
        //  was incremented in find_endpoint function. We don't need it
        //  increased here.
        send_bind (peer.socket, outpipe_reader, inpipe_writer,
            options.identity, false);

        return 0;
    }

    //  Choose the I/O thread to run the session in.
    io_thread_t *io_thread = choose_io_thread (options.affinity);
    if (!io_thread) {
        errno = EMTHREAD;
        return -1;
    }

    //  Create session.
    connect_session_t *session = new (std::nothrow) connect_session_t (
        io_thread, this, options, protocol.c_str (), address.c_str ());
    alloc_assert (session);

    //  If 'immediate connect' feature is required, we'll create the pipes
    //  to the session straight away. Otherwise, they'll be created by the
    //  session once the connection is established.
    if (options.immediate_connect) {

        reader_t *inpipe_reader = NULL;
        writer_t *inpipe_writer = NULL;
        reader_t *outpipe_reader = NULL;
        writer_t *outpipe_writer = NULL;

        //  Create inbound pipe, if required.
        if (options.requires_in)
            create_pipe (this, session, options.hwm, options.swap,
                &inpipe_reader, &inpipe_writer);

        //  Create outbound pipe, if required.
        if (options.requires_out)
            create_pipe (session, this, options.hwm, options.swap,
                &outpipe_reader, &outpipe_writer);

        //  Attach the pipes to the socket object.
        attach_pipes (inpipe_reader, outpipe_writer, blob_t ());

        //  Attach the pipes to the session object.
        session->attach_pipes (outpipe_reader, inpipe_writer, blob_t ());
    }

    //  Activate the session. Make it a child of this socket.
    launch_child (session);

    return 0;
}
int zmq::socket_base_t::bind (const char *addr_)
{
    if (unlikely (ctx_terminated)) {
        errno = ETERM;
        return -1;
    }

    //  Parse addr_ string.
    std::string protocol;
    std::string address;
    int rc = parse_uri (addr_, protocol, address);
    if (rc != 0)
        return -1;

    rc = check_protocol (protocol);
    if (rc != 0)
        return -1;

    if (protocol == "inproc" || protocol == "sys") {
        endpoint_t endpoint = {this, options};
        return register_endpoint (addr_, endpoint);
    }

    if (protocol == "pgm" || protocol == "epgm") {

        //  For convenience's sake, bind can be used interchangeably with
        //  connect for PGM and EPGM transports.
        return connect (addr_);
    }

    //  Remaining transports need to be run in an I/O thread, so at this
    //  point we'll choose one.
    io_thread_t *io_thread = choose_io_thread (options.affinity);
    if (!io_thread) {
        errno = EMTHREAD;
        return -1;
    }

    if (protocol == "tcp") {
        tcp_listener_t *listener = new (std::nothrow) tcp_listener_t (
            io_thread, this, options);
        alloc_assert (listener);
        int rc = listener->set_address (address.c_str ());
        if (rc != 0) {
            delete listener;
            return -1;
        }
        launch_child (listener);
        return 0;
    }

#if !defined ZMQ_HAVE_WINDOWS && !defined ZMQ_HAVE_OPENVMS
    if (protocol == "ipc") {
        ipc_listener_t *listener = new (std::nothrow) ipc_listener_t (
            io_thread, this, options);
        alloc_assert (listener);
        int rc = listener->set_address (address.c_str ());
        if (rc != 0) {
            delete listener;
            return -1;
        }
        launch_child (listener);
        return 0;
    }
#endif

    zmq_assert (false);
    return -1;
}
void zmq::session_base_t::start_connecting (bool wait_)
{
    zmq_assert (active);

    //  Choose I/O thread to run connecter in. Given that we are already
    //  running in an I/O thread, there must be at least one available.
    io_thread_t *io_thread = choose_io_thread (options.affinity);
    zmq_assert (io_thread);

    //  Create the connecter object.

    if (addr->protocol == "tcp") {
        if (!options.socks_proxy_address.empty ()) {
            address_t *proxy_address = new (std::nothrow)
                address_t ("tcp", options.socks_proxy_address);
            alloc_assert (proxy_address);
            socks_connecter_t *connecter = new (std::nothrow) socks_connecter_t (
                io_thread, this, options, addr, proxy_address, wait_);
            alloc_assert (connecter);
            launch_child (connecter);
        }
        else {
            tcp_connecter_t *connecter = new (std::nothrow)
                tcp_connecter_t (io_thread, this, options, addr, wait_);
            alloc_assert (connecter);
            launch_child (connecter);
        }
        return;
    }

#if !defined ZMQ_HAVE_WINDOWS && !defined ZMQ_HAVE_OPENVMS
    if (addr->protocol == "ipc") {
        ipc_connecter_t *connecter = new (std::nothrow) ipc_connecter_t (
            io_thread, this, options, addr, wait_);
        alloc_assert (connecter);
        launch_child (connecter);
        return;
    }
#endif

#if defined ZMQ_HAVE_TIPC
    if (addr->protocol == "tipc") {
        tipc_connecter_t *connecter = new (std::nothrow) tipc_connecter_t (
            io_thread, this, options, addr, wait_);
        alloc_assert (connecter);
        launch_child (connecter);
        return;
    }
#endif

#ifdef ZMQ_HAVE_OPENPGM

    //  Both PGM and EPGM transports are using the same infrastructure.
    if (addr->protocol == "pgm" || addr->protocol == "epgm") {

        zmq_assert (options.type == ZMQ_PUB || options.type == ZMQ_XPUB
            || options.type == ZMQ_SUB || options.type == ZMQ_XSUB);

        //  For the EPGM transport, UDP encapsulation of PGM is used.
        bool const udp_encapsulation = addr->protocol == "epgm";

        //  At this point we'll create message pipes to the session straight
        //  away. There's no point in delaying it as no concept of 'connect'
        //  exists with PGM anyway.
        if (options.type == ZMQ_PUB || options.type == ZMQ_XPUB) {

            //  PGM sender.
            pgm_sender_t *pgm_sender = new (std::nothrow) pgm_sender_t (
                io_thread, options);
            alloc_assert (pgm_sender);

            int rc = pgm_sender->init (udp_encapsulation, addr->address.c_str ());
            errno_assert (rc == 0);

            send_attach (this, pgm_sender);
        }
        else {

            //  PGM receiver.
            pgm_receiver_t *pgm_receiver = new (std::nothrow) pgm_receiver_t (
                io_thread, options);
            alloc_assert (pgm_receiver);

            int rc = pgm_receiver->init (udp_encapsulation, addr->address.c_str ());
            errno_assert (rc == 0);

            send_attach (this, pgm_receiver);
        }

        return;
    }
#endif

#ifdef ZMQ_HAVE_NORM
    if (addr->protocol == "norm") {

        //  At this point we'll create message pipes to the session straight
        //  away. There's no point in delaying it as no concept of 'connect'
        //  exists with NORM anyway.
        if (options.type == ZMQ_PUB || options.type == ZMQ_XPUB) {

            //  NORM sender.
            norm_engine_t *norm_sender = new (std::nothrow)
                norm_engine_t (io_thread, options);
            alloc_assert (norm_sender);

            int rc = norm_sender->init (addr->address.c_str (), true, false);
            errno_assert (rc == 0);

            send_attach (this, norm_sender);
        }
        else {  //  ZMQ_SUB or ZMQ_XSUB

            //  NORM receiver.
            norm_engine_t *norm_receiver = new (std::nothrow)
                norm_engine_t (io_thread, options);
            alloc_assert (norm_receiver);

            int rc = norm_receiver->init (addr->address.c_str (), false, true);
            errno_assert (rc == 0);

            send_attach (this, norm_receiver);
        }
        return;
    }
#endif  //  ZMQ_HAVE_NORM

    zmq_assert (false);
}
int zmq::socket_base_t::connect (const char *addr_)
{
    if (unlikely (ctx_terminated)) {
        errno = ETERM;
        return -1;
    }

    //  Parse addr_ string.
    std::string protocol;
    std::string address;
    int rc = parse_uri (addr_, protocol, address);
    if (rc != 0)
        return -1;

    rc = check_protocol (protocol);
    if (rc != 0)
        return -1;

    if (protocol == "inproc" || protocol == "sys") {

        //  TODO: inproc connect is specific with respect to creating pipes
        //  as there's no 'reconnect' functionality implemented. Once that
        //  is in place we should follow generic pipe creation algorithm.

        //  Find the peer endpoint.
        endpoint_t peer = find_endpoint (addr_);
        if (!peer.socket)
            return -1;

        //  The total HWM for an inproc connection should be the sum of
        //  the binder's HWM and the connector's HWM.
        int sndhwm;
        int rcvhwm;
        if (options.sndhwm == 0 || peer.options.rcvhwm == 0)
            sndhwm = 0;
        else
            sndhwm = options.sndhwm + peer.options.rcvhwm;
        if (options.rcvhwm == 0 || peer.options.sndhwm == 0)
            rcvhwm = 0;
        else
            rcvhwm = options.rcvhwm + peer.options.sndhwm;

        //  Create a bi-directional pipe to connect the peers.
        object_t *parents [2] = {this, peer.socket};
        pipe_t *pipes [2] = {NULL, NULL};
        int hwms [2] = {sndhwm, rcvhwm};
        bool delays [2] = {options.delay_on_disconnect, options.delay_on_close};
        int rc = pipepair (parents, pipes, hwms, delays);
        errno_assert (rc == 0);

        //  Attach local end of the pipe to this socket object.
        attach_pipe (pipes [0]);

        //  If required, send the identity of the local socket to the peer.
        if (options.send_identity) {
            msg_t id;
            rc = id.init_size (options.identity_size);
            zmq_assert (rc == 0);
            memcpy (id.data (), options.identity, options.identity_size);
            id.set_flags (msg_t::identity);
            bool written = pipes [0]->write (&id);
            zmq_assert (written);
        }

        //  Attach remote end of the pipe to the peer socket. Note that peer's
        //  seqnum was incremented in find_endpoint function. We don't need it
        //  increased here.
        send_bind (peer.socket, pipes [1], false);

        return 0;
    }

    //  Choose the I/O thread to run the session in.
    io_thread_t *io_thread = choose_io_thread (options.affinity);
    if (!io_thread) {
        errno = EMTHREAD;
        return -1;
    }

    //  Create session.
    session_base_t *session = session_base_t::create (io_thread, true, this,
        options, protocol.c_str (), address.c_str ());
    errno_assert (session);

    //  Create a bi-directional pipe.
    object_t *parents [2] = {this, session};
    pipe_t *pipes [2] = {NULL, NULL};
    int hwms [2] = {options.sndhwm, options.rcvhwm};
    bool delays [2] = {options.delay_on_disconnect, options.delay_on_close};
    rc = pipepair (parents, pipes, hwms, delays);
    errno_assert (rc == 0);

    //  PGM does not support subscription forwarding; ask for all data to be
    //  sent to this pipe.
    bool icanhasall = false;
    if (protocol == "pgm" || protocol == "epgm")
        icanhasall = true;

    //  Attach local end of the pipe to the socket object.
    attach_pipe (pipes [0], icanhasall);

    //  Attach remote end of the pipe to the session object later on.
    session->attach_pipe (pipes [1]);

    //  Activate the session. Make it a child of this socket.
    launch_child (session);

    return 0;
}
int zmq::socket_base_t::connect (const char *addr_)
{
    if (unlikely (ctx_terminated)) {
        errno = ETERM;
        return -1;
    }

    //  Parse addr_ string.
    std::string protocol;
    std::string address;
    int rc = parse_uri (addr_, protocol, address);
    if (rc != 0)
        return -1;

    //  Checks that protocol is valid and supported on this system.
    rc = check_protocol (protocol);
    if (rc != 0)
        return -1;

    //  Parsed address for validation.
    sockaddr_storage addr;
    socklen_t addr_len;

    if (protocol == "tcp")
        rc = resolve_ip_hostname (&addr, &addr_len, address.c_str ());
    else if (protocol == "ipc")
        rc = resolve_local_path (&addr, &addr_len, address.c_str ());
    if (rc != 0)
        return -1;

    if (protocol == "inproc" || protocol == "sys") {

        //  TODO: inproc connect is specific with respect to creating pipes
        //  as there's no 'reconnect' functionality implemented. Once that
        //  is in place we should follow generic pipe creation algorithm.

        //  Find the peer endpoint.
        endpoint_t peer = find_endpoint (addr_);
        if (!peer.socket)
            return -1;

        //  The total HWM for an inproc connection should be the sum of
        //  the binder's HWM and the connector's HWM.
        int sndhwm;
        int rcvhwm;
        if (options.sndhwm == 0 || peer.options.rcvhwm == 0)
            sndhwm = 0;
        else
            sndhwm = options.sndhwm + peer.options.rcvhwm;
        if (options.rcvhwm == 0 || peer.options.sndhwm == 0)
            rcvhwm = 0;
        else
            rcvhwm = options.rcvhwm + peer.options.sndhwm;

        //  Create a bi-directional pipe to connect the peers.
        object_t *parents [2] = {this, peer.socket};
        pipe_t *pipes [2] = {NULL, NULL};
        int hwms [2] = {sndhwm, rcvhwm};
        bool delays [2] = {options.delay_on_disconnect, options.delay_on_close};
        int rc = pipepair (parents, pipes, hwms, delays);
        errno_assert (rc == 0);

        //  Attach local end of the pipe to this socket object.
        attach_pipe (pipes [0], peer.options.identity);

        //  Attach remote end of the pipe to the peer socket. Note that peer's
        //  seqnum was incremented in find_endpoint function. We don't need it
        //  increased here.
        send_bind (peer.socket, pipes [1], options.identity, false);

        return 0;
    }

    //  Choose the I/O thread to run the session in.
    io_thread_t *io_thread = choose_io_thread (options.affinity);
    if (!io_thread) {
        errno = EMTHREAD;
        return -1;
    }

    //  Create session.
    connect_session_t *session = new (std::nothrow) connect_session_t (
        io_thread, this, options, protocol.c_str (), address.c_str ());
    alloc_assert (session);

    //  If 'immediate connect' feature is required, we'll create the pipes
    //  to the session straight away. Otherwise, they'll be created by the
    //  session once the connection is established.
    if (options.immediate_connect) {

        //  Create a bi-directional pipe.
        object_t *parents [2] = {this, session};
        pipe_t *pipes [2] = {NULL, NULL};
        int hwms [2] = {options.sndhwm, options.rcvhwm};
        bool delays [2] = {options.delay_on_disconnect, options.delay_on_close};
        int rc = pipepair (parents, pipes, hwms, delays);
        errno_assert (rc == 0);

        //  Attach local end of the pipe to the socket object.
        attach_pipe (pipes [0], blob_t ());

        //  Attach remote end of the pipe to the session object later on.
        session->attach_pipe (pipes [1]);
    }

    //  Activate the session. Make it a child of this socket.
    launch_child (session);

    return 0;
}
void zmq::connect_session_t::start_connecting (bool wait_)
{
    //  Choose I/O thread to run connecter in. Given that we are already
    //  running in an I/O thread, there must be at least one available.
    io_thread_t *io_thread = choose_io_thread (options.affinity);
    zmq_assert (io_thread);

    //  Create the connecter object.

    //  Both TCP and IPC transports are using the same infrastructure.
    if (protocol == "tcp" || protocol == "ipc") {

        zmq_connecter_t *connecter = new (std::nothrow) zmq_connecter_t (
            io_thread, this, options, protocol.c_str (), address.c_str (),
            wait_);
        alloc_assert (connecter);
        launch_child (connecter);
        return;
    }

#if defined ZMQ_HAVE_OPENPGM

    //  Both PGM and EPGM transports are using the same infrastructure.
    if (protocol == "pgm" || protocol == "epgm") {

        //  For the EPGM transport, UDP encapsulation of PGM is used.
        bool udp_encapsulation = (protocol == "epgm");

        //  At this point we'll create message pipes to the session straight
        //  away. There's no point in delaying it as no concept of 'connect'
        //  exists with PGM anyway.
        if (options.type == ZMQ_PUB || options.type == ZMQ_XPUB) {

            //  PGM sender.
            pgm_sender_t *pgm_sender = new (std::nothrow) pgm_sender_t (
                io_thread, options);
            alloc_assert (pgm_sender);

            int rc = pgm_sender->init (udp_encapsulation, address.c_str ());
            zmq_assert (rc == 0);

            send_attach (this, pgm_sender, blob_t ());
        }
        else if (options.type == ZMQ_SUB || options.type == ZMQ_XSUB) {

            //  PGM receiver.
            pgm_receiver_t *pgm_receiver = new (std::nothrow) pgm_receiver_t (
                io_thread, options);
            alloc_assert (pgm_receiver);

            int rc = pgm_receiver->init (udp_encapsulation, address.c_str ());
            zmq_assert (rc == 0);

            send_attach (this, pgm_receiver, blob_t ());
        }
        else
            zmq_assert (false);

        return;
    }
#endif

    zmq_assert (false);
}
int string_launch_child(struct state *s, char *string)
{
    char **vector, **tmp;
    char *t;
    unsigned int i, v, j, d, ws;
    int run;

    /* Split the command string on whitespace into a NULL-terminated
     * argument vector, then hand it to launch_child(). */
    vector = NULL;
    j = 0;     /* start index of the current token */
    i = 0;     /* current index into string */
    ws = 1;    /* nonzero while scanning whitespace */
    v = 0;     /* number of tokens collected so far */

    for(run = 1; run > 0;){
        switch(string[i]){
            case '\0' :
                run = 0;
                /* fall */
            case ' '  :
            case '\t' :
                if(ws == 0){
                    /* end of a token: copy string[j..i) into the vector */
                    ws = 1;
                    tmp = realloc(vector, sizeof(char *) * (v + 2));
                    if(tmp == NULL){
                        run = (-1);
                    } else {
                        vector = tmp;
                        d = i - j;
                        t = malloc(sizeof(char) * (d + 1));
                        if(t == NULL){
                            run = (-1);
                        } else {
                            strncpy(t, string + j, d);
                            t[d] = '\0';
                            vector[v] = t;
                            v++;
                            vector[v] = NULL;
                        }
                    }
                }
                i++;
                break;
            default :
                if(ws){
                    /* start of a new token */
                    j = i;
                    ws = 0;
                }
                i++;
                break;
        }
    }

    if(vector == NULL){
        sync_message_katcl(s->s_up, KATCP_LEVEL_ERROR, KCPCON_NAME,
            "no command found in string <%s>", string);
        return -1;
    }

    if(run < 0){
        sync_message_katcl(s->s_up, KATCP_LEVEL_ERROR, KCPCON_NAME,
            "allocation error while decomposing %s", string);
    } else {
#ifdef DEBUG
        fprintf(stderr, "have vector <%s ...> of %u entries\n", vector[0], v);
#endif
        run = launch_child(s, vector);
    }

    if(vector){
        for(i = 0; i < v; i++){
            if(vector[i]){
                free(vector[i]);
                vector[i] = NULL;
            }
        }
        free(vector);
    }

    return run;
}