// Packet-handling callback, invoked for every sniffed packet
void packet_handler(u_char *param, const struct pcap_pkthdr *header, const u_char *pkt_data)
{
    ip_header *ih;
    u_int ip_len;

    // Locate the IP header
    ih = (ip_header *) (pkt_data + 14);    // the Ethernet header is 14 bytes long

    // IP header length
    ip_len = (ih->ver_ihl & 0xf) * 4;

    printf("%s ", check_protocol(ih->proto));

    // Print source and destination IP addresses
    printf("%d.%d.%d.%d -> %d.%d.%d.%d ",
        ih->saddr.byte1, ih->saddr.byte2, ih->saddr.byte3, ih->saddr.byte4,
        ih->daddr.byte1, ih->daddr.byte2, ih->daddr.byte3, ih->daddr.byte4);

    decode_ip((char *)(pkt_data + 14 + ip_len), ih->proto);
    printf("\n");
}
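/*
 * A minimal sketch of the header layout the callback above assumes. These
 * declarations are not part of the snippet; they follow the usual
 * WinPcap-tutorial style structs, and the typedefs normally come from the
 * pcap/system headers. Field names are illustrative.
 */
typedef unsigned char  u_char;
typedef unsigned short u_short;
typedef unsigned int   u_int;

typedef struct ip_address {
    u_char byte1;
    u_char byte2;
    u_char byte3;
    u_char byte4;
} ip_address;

typedef struct ip_header {
    u_char     ver_ihl;        // version (4 bits) + header length in 32-bit words (4 bits)
    u_char     tos;            // type of service
    u_short    tlen;           // total length
    u_short    identification; // identification
    u_short    flags_fo;       // flags (3 bits) + fragment offset (13 bits)
    u_char     ttl;            // time to live
    u_char     proto;          // protocol (e.g. 6 = TCP, 17 = UDP)
    u_short    crc;            // header checksum
    ip_address saddr;          // source address
    ip_address daddr;          // destination address
    u_int      op_pad;         // option + padding
} ip_header;

/* With this layout, (ih->ver_ihl & 0xf) * 4 converts the IHL field from
 * 32-bit words to bytes, giving the offset of the transport-layer header
 * that is then passed to decode_ip(). */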
int zmq::socket_base_t::term_endpoint (const char *addr_)
{
    //  Check whether the library hasn't been shut down yet.
    if (unlikely (ctx_terminated)) {
        errno = ETERM;
        return -1;
    }

    //  Check whether endpoint address passed to the function is valid.
    if (unlikely (!addr_)) {
        errno = EINVAL;
        return -1;
    }

    //  Process pending commands, if any, since there could be pending unprocessed
    //  process_own()'s (from launch_child() for example) we're asked to terminate now.
    int rc = process_commands (0, false);
    if (unlikely (rc != 0))
        return -1;

    //  Parse addr_ string.
    std::string protocol;
    std::string address;
    rc = parse_uri (addr_, protocol, address);
    if (rc != 0)
        return -1;

    rc = check_protocol (protocol);
    if (rc != 0)
        return -1;

    //  Disconnect an inproc socket
    if (protocol == "inproc") {
        std::pair <inprocs_t::iterator, inprocs_t::iterator> range =
            inprocs.equal_range (std::string (addr_));
        if (range.first == range.second) {
            errno = ENOENT;
            return -1;
        }

        for (inprocs_t::iterator it = range.first; it != range.second; ++it)
            it->second->terminate (true);
        inprocs.erase (range.first, range.second);
        return 0;
    }

    //  Find the endpoints range (if any) corresponding to the addr_ string.
    std::pair <endpoints_t::iterator, endpoints_t::iterator> range =
        endpoints.equal_range (std::string (addr_));
    if (range.first == range.second) {
        errno = ENOENT;
        return -1;
    }

    for (endpoints_t::iterator it = range.first; it != range.second; ++it)
        term_child (it->second);
    endpoints.erase (range.first, range.second);
    return 0;
}
int zmq::socket_base_t::bind (const char *addr_)
{
    if (unlikely (ctx_terminated)) {
        errno = ETERM;
        return -1;
    }

    //  Parse addr_ string.
    std::string protocol;
    std::string address;
    int rc = parse_uri (addr_, protocol, address);
    if (rc != 0)
        return -1;

    rc = check_protocol (protocol);
    if (rc != 0)
        return -1;

    if (protocol == "inproc" || protocol == "sys") {
        endpoint_t endpoint = {this, options};
        return register_endpoint (addr_, endpoint);
    }

    if (protocol == "tcp" || protocol == "ipc") {

        //  Choose I/O thread to run the listener in.
        io_thread_t *io_thread = choose_io_thread (options.affinity);
        if (!io_thread) {
            errno = EMTHREAD;
            return -1;
        }

        //  Create and run the listener.
        zmq_listener_t *listener = new (std::nothrow) zmq_listener_t (
            io_thread, this, options);
        alloc_assert (listener);
        int rc = listener->set_address (protocol.c_str (), address.c_str ());
        if (rc != 0) {
            delete listener;
            return -1;
        }
        launch_child (listener);

        return 0;
    }

    if (protocol == "pgm" || protocol == "epgm") {

        //  For convenience's sake, bind can be used interchangeably with
        //  connect for PGM and EPGM transports.
        return connect (addr_);
    }

    zmq_assert (false);
    return -1;
}
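/*
 * The bind/connect/term_endpoint variants in this collection all gate on
 * check_protocol (protocol). The following is only a sketch of that contract,
 * assuming no more than what the callers show: 0 on success, -1 with errno
 * set to EPROTONOSUPPORT otherwise. The accepted transport set varies by
 * libzmq version and build options, and the real member function also checks
 * compatibility between transport and socket type, which is omitted here.
 */
#include <cerrno>
#include <string>

static int check_protocol_sketch (const std::string &protocol_)
{
    //  Only transports known to this (hypothetical) build are accepted.
    if (protocol_ != "inproc" && protocol_ != "tcp" && protocol_ != "ipc"
        && protocol_ != "pgm" && protocol_ != "epgm") {
        errno = EPROTONOSUPPORT;
        return -1;
    }
    return 0;
}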
void *thread_connection(void *args)
{
    int connection_socket = ((struct thread_arguments *) args)->connection_socket;
    struct sockaddr_in client_address = ((struct thread_arguments *) args)->client_address;
    struct client_data data = get_client_address(client_address);

    char buffer[BUFSIZE];
    bzero(buffer, BUFSIZE);
    int status = recv(connection_socket, buffer, BUFSIZE, MSG_DONTWAIT);

    if (WHITELIST != NULL && check_whitelist(data.ip_address) == NULL) {
        display_info(data, NULL, "Rejected connection from unknown user.");
        save_log(NULL, data.ip_address, data.hostname);
        if (write(connection_socket, "You are not whitelisted!\n", 26) < 0)
            printf("Error writing on stream socket\n");
        close(connection_socket);
        pthread_exit(NULL);
    }

    if (BANLIST != NULL && check_banlist(data.ip_address) != NULL) {
        display_info(data, NULL, "Rejected connection from banned user.");
        save_log(NULL, data.ip_address, data.hostname);
        if (write(connection_socket, "You are banned!\n", 17) < 0)
            printf("Error writing on stream socket\n");
        close(connection_socket);
        pthread_exit(NULL);
    }

    if (check_protocol(buffer) == 1)
        status = -1;

    if (status != -1) {
        char slug[SLUG_SIZE + 8];
        generate_url(buffer, slug, SLUG_SIZE + 8, data);
        save_log(slug, data.ip_address, data.hostname);

        char response[strlen(slug) + strlen(DOMAIN) + 2];
        snprintf(response, sizeof response, "%s%s\n", DOMAIN, slug);

        if (write(connection_socket, response, strlen(response)) < 0)
            printf("Error writing on stream socket\n");
    }
    else {
        display_info(data, NULL, "Invalid connection.");
        save_log(NULL, data.ip_address, data.hostname);
        if (write(connection_socket, "Use netcat.\n", 12) < 0)
            printf("Error writing on stream socket\n");
    }

    close(connection_socket);
    pthread_exit(NULL);
}
/*
 * This could be further simplified by constructing an expected
 * HANDSHAKE_RESULT, and implementing comparison methods for
 * its fields.
 */
static int check_test(HANDSHAKE_RESULT result, SSL_TEST_CTX *test_ctx)
{
    int ret = 1;
    ret &= check_result(result, test_ctx);
    ret &= check_alerts(result, test_ctx);
    if (result.result == SSL_TEST_SUCCESS) {
        ret &= check_protocol(result, test_ctx);
        ret &= check_servername(result, test_ctx);
        ret &= check_session_ticket(result, test_ctx);
        ret &= (result.session_ticket_do_not_call == 0);
    }
    return ret;
}
int zmq::socket_base_t::monitor (const char *addr_, int events_)
{
    int rc;
    if (unlikely (ctx_terminated)) {
        errno = ETERM;
        return -1;
    }

    //  Support deregistering monitoring endpoints as well
    if (addr_ == NULL) {
        stop_monitor ();
        return 0;
    }

    //  Parse addr_ string.
    std::string protocol;
    std::string address;
    rc = parse_uri (addr_, protocol, address);
    if (rc != 0)
        return -1;

    rc = check_protocol (protocol);
    if (rc != 0)
        return -1;

    //  Event notification only supported over inproc://
    if (protocol != "inproc") {
        errno = EPROTONOSUPPORT;
        return -1;
    }

    //  Register events to monitor
    monitor_events = events_;
    monitor_socket = zmq_socket (get_ctx (), ZMQ_PAIR);
    if (monitor_socket == NULL)
        return -1;

    //  Never block context termination on pending event messages
    int linger = 0;
    rc = zmq_setsockopt (monitor_socket, ZMQ_LINGER, &linger, sizeof (linger));
    if (rc == -1)
        stop_monitor ();

    //  Spawn the monitor socket endpoint
    rc = zmq_bind (monitor_socket, addr_);
    if (rc == -1)
        stop_monitor ();
    return rc;
}
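/*
 * A sketch of how the monitor () method above is typically driven from the
 * public API: zmq_socket_monitor () registers an inproc:// event endpoint on
 * a socket, and the application reads the event stream with a PAIR socket
 * connected to that endpoint. The endpoint name and the TCP address are
 * illustrative, and the event frame layout is version-dependent, so it is
 * not decoded here.
 */
#include <zmq.h>
#include <assert.h>
#include <stdio.h>

int monitor_example (void)
{
    void *ctx = zmq_ctx_new ();
    void *req = zmq_socket (ctx, ZMQ_REQ);

    //  Ask the library to publish all events for 'req' on an inproc endpoint.
    int rc = zmq_socket_monitor (req, "inproc://monitor.req", ZMQ_EVENT_ALL);
    assert (rc == 0);

    //  A PAIR socket connected to the same endpoint receives the events.
    void *mon = zmq_socket (ctx, ZMQ_PAIR);
    rc = zmq_connect (mon, "inproc://monitor.req");
    assert (rc == 0);

    //  Trigger some socket activity so that events are generated.
    rc = zmq_connect (req, "tcp://127.0.0.1:5555");
    assert (rc == 0);

    //  Read one raw event message; its framing (a single zmq_event_t struct
    //  vs. two frames) depends on the libzmq version in use.
    zmq_msg_t msg;
    zmq_msg_init (&msg);
    if (zmq_msg_recv (&msg, mon, 0) >= 0)
        printf ("received %zu-byte monitor event frame\n", zmq_msg_size (&msg));
    zmq_msg_close (&msg);

    zmq_close (mon);
    zmq_close (req);
    zmq_ctx_term (ctx);
    return 0;
}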
static bool check_options(
        bool is_icmp,
        bool is_tcp,
        bool is_udp,
        bool is_ipv4,
        bool is_ipv6,
        int dst_port_enabled,
        int src_port_enabled,
        const char *protocol_name,
        const char *algorithm_name
)
{
    return check_ip_version(is_ipv4, is_ipv6) &&
           check_protocol(is_icmp, is_tcp, is_udp, protocol_name) &&
           check_ports(is_icmp, dst_port_enabled, src_port_enabled) &&
           check_algorithm(algorithm_name);
}
/*
 * This could be further simplified by constructing an expected
 * HANDSHAKE_RESULT, and implementing comparison methods for
 * its fields.
 */
static int check_test(HANDSHAKE_RESULT *result, SSL_TEST_CTX *test_ctx)
{
    int ret = 1;
    ret &= check_result(result, test_ctx);
    ret &= check_alerts(result, test_ctx);
    if (result->result == SSL_TEST_SUCCESS) {
        ret &= check_protocol(result, test_ctx);
        ret &= check_servername(result, test_ctx);
        ret &= check_session_ticket(result, test_ctx);
        ret &= (result->session_ticket_do_not_call == 0);
#ifndef OPENSSL_NO_NEXTPROTONEG
        ret &= check_npn(result, test_ctx);
#endif
        ret &= check_alpn(result, test_ctx);
        ret &= check_resumption(result, test_ctx);
        ret &= check_tmp_key(result, test_ctx);
    }
    return ret;
}
int zmq::socket_base_t::connect (const char *addr_)
{
    if (unlikely (ctx_terminated)) {
        errno = ETERM;
        return -1;
    }

    //  Process pending commands, if any.
    int rc = process_commands (0, false);
    if (unlikely (rc != 0))
        return -1;

    //  Parse addr_ string.
    std::string protocol;
    std::string address;
    rc = parse_uri (addr_, protocol, address);
    if (rc != 0)
        return -1;

    rc = check_protocol (protocol);
    if (rc != 0)
        return -1;

    if (protocol == "inproc") {

        //  TODO: inproc connect is specific with respect to creating pipes
        //  as there's no 'reconnect' functionality implemented. Once that
        //  is in place we should follow generic pipe creation algorithm.

        //  Find the peer endpoint.
        endpoint_t peer = find_endpoint (addr_);

        //  The total HWM for an inproc connection should be the sum of
        //  the binder's HWM and the connector's HWM.
        int sndhwm = 0;
        if (peer.socket == NULL)
            sndhwm = options.sndhwm;
        else if (options.sndhwm != 0 && peer.options.rcvhwm != 0)
            sndhwm = options.sndhwm + peer.options.rcvhwm;
        int rcvhwm = 0;
        if (peer.socket == NULL)
            rcvhwm = options.rcvhwm;
        else if (options.rcvhwm != 0 && peer.options.sndhwm != 0)
            rcvhwm = options.rcvhwm + peer.options.sndhwm;

        //  Create a bi-directional pipe to connect the peers.
        object_t *parents [2] = {this, peer.socket == NULL ? this : peer.socket};
        pipe_t *new_pipes [2] = {NULL, NULL};

        bool conflate = options.conflate &&
            (options.type == ZMQ_DEALER ||
             options.type == ZMQ_PULL ||
             options.type == ZMQ_PUSH ||
             options.type == ZMQ_PUB ||
             options.type == ZMQ_SUB);

        int hwms [2] = {conflate? -1 : sndhwm, conflate? -1 : rcvhwm};
        bool conflates [2] = {conflate, conflate};
        int rc = pipepair (parents, new_pipes, hwms, conflates);
        errno_assert (rc == 0);

        //  Attach local end of the pipe to this socket object.
        attach_pipe (new_pipes [0]);

        if (!peer.socket) {
            //  The peer doesn't exist yet so we don't know whether
            //  to send the identity message or not. To resolve this,
            //  we always send our identity and drop it later if
            //  the peer doesn't expect it.
            msg_t id;
            rc = id.init_size (options.identity_size);
            errno_assert (rc == 0);
            memcpy (id.data (), options.identity, options.identity_size);
            id.set_flags (msg_t::identity);
            bool written = new_pipes [0]->write (&id);
            zmq_assert (written);
            new_pipes [0]->flush ();

            endpoint_t endpoint = {this, options};
            pending_connection_t pending_connection = {endpoint, new_pipes [0], new_pipes [1]};
            pend_connection (addr_, pending_connection);
        }
        else {
            //  If required, send the identity of the local socket to the peer.
            if (peer.options.recv_identity) {
                msg_t id;
                rc = id.init_size (options.identity_size);
                errno_assert (rc == 0);
                memcpy (id.data (), options.identity, options.identity_size);
                id.set_flags (msg_t::identity);
                bool written = new_pipes [0]->write (&id);
                zmq_assert (written);
                new_pipes [0]->flush ();
            }

            //  If required, send the identity of the peer to the local socket.
            if (options.recv_identity) {
                msg_t id;
                rc = id.init_size (peer.options.identity_size);
                errno_assert (rc == 0);
                memcpy (id.data (), peer.options.identity, peer.options.identity_size);
                id.set_flags (msg_t::identity);
                bool written = new_pipes [1]->write (&id);
                zmq_assert (written);
                new_pipes [1]->flush ();
            }

            //  Attach remote end of the pipe to the peer socket. Note that peer's
            //  seqnum was incremented in find_endpoint function. We don't need it
            //  increased here.
            send_bind (peer.socket, new_pipes [1], false);
        }

        //  Save last endpoint URI
        last_endpoint.assign (addr_);

        //  remember inproc connections for disconnect
        inprocs.insert (inprocs_t::value_type (std::string (addr_), new_pipes [0]));

        return 0;
    }

    bool is_single_connect = (options.type == ZMQ_DEALER ||
                              options.type == ZMQ_SUB ||
                              options.type == ZMQ_REQ);
    if (unlikely (is_single_connect)) {
        endpoints_t::iterator it = endpoints.find (addr_);
        if (it != endpoints.end ()) {
            //  There is no valid use for multiple connects for SUB-PUB nor
            //  DEALER-ROUTER nor REQ-REP. Multiple connects produce
            //  nonsensical results.
            return 0;
        }
    }

    //  Choose the I/O thread to run the session in.
    io_thread_t *io_thread = choose_io_thread (options.affinity);
    if (!io_thread) {
        errno = EMTHREAD;
        return -1;
    }

    address_t *paddr = new (std::nothrow) address_t (protocol, address);
    alloc_assert (paddr);

    //  Resolve address (if needed by the protocol)
    if (protocol == "tcp") {
        //  Defer resolution until a socket is opened
        paddr->resolved.tcp_addr = NULL;
    }
#if !defined ZMQ_HAVE_WINDOWS && !defined ZMQ_HAVE_OPENVMS
    else if (protocol == "ipc") {
        paddr->resolved.ipc_addr = new (std::nothrow) ipc_address_t ();
        alloc_assert (paddr->resolved.ipc_addr);
        int rc = paddr->resolved.ipc_addr->resolve (address.c_str ());
        if (rc != 0) {
            delete paddr;
            return -1;
        }
    }
#endif

// TBD - Should we check address for ZMQ_HAVE_NORM???

#ifdef ZMQ_HAVE_OPENPGM
    if (protocol == "pgm" || protocol == "epgm") {
        struct pgm_addrinfo_t *res = NULL;
        uint16_t port_number = 0;
        int rc = pgm_socket_t::init_address (address.c_str (), &res, &port_number);
        if (res != NULL)
            pgm_freeaddrinfo (res);
        if (rc != 0 || port_number == 0)
            return -1;
    }
#endif
#if defined ZMQ_HAVE_TIPC
    else if (protocol == "tipc") {
        paddr->resolved.tipc_addr = new (std::nothrow) tipc_address_t ();
        alloc_assert (paddr->resolved.tipc_addr);
        int rc = paddr->resolved.tipc_addr->resolve (address.c_str ());
        if (rc != 0) {
            delete paddr;
            return -1;
        }
    }
#endif

    //  Create session.
    session_base_t *session = session_base_t::create (io_thread, true, this,
        options, paddr);
    errno_assert (session);

    //  PGM does not support subscription forwarding; ask for all data to be
    //  sent to this pipe. (same for NORM, currently?)
    bool subscribe_to_all = protocol == "pgm" || protocol == "epgm" || protocol == "norm";
    pipe_t *newpipe = NULL;

    if (options.immediate != 1 || subscribe_to_all) {
        //  Create a bi-directional pipe.
        object_t *parents [2] = {this, session};
        pipe_t *new_pipes [2] = {NULL, NULL};

        bool conflate = options.conflate &&
            (options.type == ZMQ_DEALER ||
             options.type == ZMQ_PULL ||
             options.type == ZMQ_PUSH ||
             options.type == ZMQ_PUB ||
             options.type == ZMQ_SUB);

        int hwms [2] = {conflate? -1 : options.sndhwm,
                        conflate? -1 : options.rcvhwm};
        bool conflates [2] = {conflate, conflate};
        rc = pipepair (parents, new_pipes, hwms, conflates);
        errno_assert (rc == 0);

        //  Attach local end of the pipe to the socket object.
        attach_pipe (new_pipes [0], subscribe_to_all);
        newpipe = new_pipes [0];

        //  Attach remote end of the pipe to the session object later on.
        session->attach_pipe (new_pipes [1]);
    }

    //  Save last endpoint URI
    paddr->to_string (last_endpoint);

    add_endpoint (addr_, (own_t *) session, newpipe);
    return 0;
}
int zmq::socket_base_t::bind (const char *addr_)
{
    if (unlikely (ctx_terminated)) {
        errno = ETERM;
        return -1;
    }

    //  Process pending commands, if any.
    int rc = process_commands (0, false);
    if (unlikely (rc != 0))
        return -1;

    //  Parse addr_ string.
    std::string protocol;
    std::string address;
    rc = parse_uri (addr_, protocol, address);
    if (rc != 0)
        return -1;

    rc = check_protocol (protocol);
    if (rc != 0)
        return -1;

    if (protocol == "inproc") {
        endpoint_t endpoint = {this, options};
        int rc = register_endpoint (addr_, endpoint);
        if (rc == 0) {
            connect_pending (addr_, this);
            last_endpoint.assign (addr_);
        }
        return rc;
    }

    if (protocol == "pgm" || protocol == "epgm" || protocol == "norm") {
        //  For convenience's sake, bind can be used interchangeably with
        //  connect for PGM, EPGM and NORM transports.
        return connect (addr_);
    }

    //  Remaining transports require an I/O thread to run in, so at this
    //  point we'll choose one.
    io_thread_t *io_thread = choose_io_thread (options.affinity);
    if (!io_thread) {
        errno = EMTHREAD;
        return -1;
    }

    if (protocol == "tcp") {
        tcp_listener_t *listener = new (std::nothrow) tcp_listener_t (
            io_thread, this, options);
        alloc_assert (listener);
        int rc = listener->set_address (address.c_str ());
        if (rc != 0) {
            delete listener;
            event_bind_failed (address, zmq_errno ());
            return -1;
        }

        //  Save last endpoint URI
        listener->get_address (last_endpoint);

        add_endpoint (addr_, (own_t *) listener, NULL);
        return 0;
    }

#if !defined ZMQ_HAVE_WINDOWS && !defined ZMQ_HAVE_OPENVMS
    if (protocol == "ipc") {
        ipc_listener_t *listener = new (std::nothrow) ipc_listener_t (
            io_thread, this, options);
        alloc_assert (listener);
        int rc = listener->set_address (address.c_str ());
        if (rc != 0) {
            delete listener;
            event_bind_failed (address, zmq_errno ());
            return -1;
        }

        //  Save last endpoint URI
        listener->get_address (last_endpoint);

        add_endpoint (addr_, (own_t *) listener, NULL);
        return 0;
    }
#endif
#if defined ZMQ_HAVE_TIPC
    if (protocol == "tipc") {
        tipc_listener_t *listener = new (std::nothrow) tipc_listener_t (
            io_thread, this, options);
        alloc_assert (listener);
        int rc = listener->set_address (address.c_str ());
        if (rc != 0) {
            delete listener;
            event_bind_failed (address, zmq_errno ());
            return -1;
        }

        //  Save last endpoint URI
        listener->get_address (last_endpoint);

        add_endpoint (addr_, (own_t *) listener, NULL);
        return 0;
    }
#endif

    zmq_assert (false);
    return -1;
}
int zmq::socket_base_t::connect (const char *addr_)
{
    if (unlikely (ctx_terminated)) {
        errno = ETERM;
        return -1;
    }

    //  Process pending commands, if any.
    int rc = process_commands (0, false);
    if (unlikely (rc != 0))
        return -1;

    //  Parse addr_ string.
    std::string protocol;
    std::string address;
    rc = parse_uri (addr_, protocol, address);
    if (rc != 0)
        return -1;

    rc = check_protocol (protocol);
    if (rc != 0)
        return -1;

    if (protocol == "inproc") {

        //  TODO: inproc connect is specific with respect to creating pipes
        //  as there's no 'reconnect' functionality implemented. Once that
        //  is in place we should follow generic pipe creation algorithm.

        //  Find the peer endpoint.
        endpoint_t peer = find_endpoint (addr_);
        if (!peer.socket)
            return -1;

        //  The total HWM for an inproc connection should be the sum of
        //  the binder's HWM and the connector's HWM.
        int sndhwm;
        int rcvhwm;
        if (options.sndhwm == 0 || peer.options.rcvhwm == 0)
            sndhwm = 0;
        else
            sndhwm = options.sndhwm + peer.options.rcvhwm;
        if (options.rcvhwm == 0 || peer.options.sndhwm == 0)
            rcvhwm = 0;
        else
            rcvhwm = options.rcvhwm + peer.options.sndhwm;

        //  Create a bi-directional pipe to connect the peers.
        object_t *parents [2] = {this, peer.socket};
        pipe_t *pipes [2] = {NULL, NULL};
        int hwms [2] = {sndhwm, rcvhwm};
        bool delays [2] = {options.delay_on_disconnect, options.delay_on_close};
        int rc = pipepair (parents, pipes, hwms, delays);
        errno_assert (rc == 0);

        //  Attach local end of the pipe to this socket object.
        attach_pipe (pipes [0]);

        //  If required, send the identity of the local socket to the peer.
        if (options.send_identity) {
            msg_t id;
            rc = id.init_size (options.identity_size);
            zmq_assert (rc == 0);
            memcpy (id.data (), options.identity, options.identity_size);
            id.set_flags (msg_t::identity);
            bool written = pipes [0]->write (&id);
            zmq_assert (written);
            pipes [0]->flush ();
        }

        //  If required, send the identity of the peer to the local socket.
        if (peer.options.send_identity) {
            msg_t id;
            rc = id.init_size (peer.options.identity_size);
            zmq_assert (rc == 0);
            memcpy (id.data (), peer.options.identity, peer.options.identity_size);
            id.set_flags (msg_t::identity);
            bool written = pipes [1]->write (&id);
            zmq_assert (written);
            pipes [1]->flush ();
        }

        //  Attach remote end of the pipe to the peer socket. Note that peer's
        //  seqnum was incremented in find_endpoint function. We don't need it
        //  increased here.
        send_bind (peer.socket, pipes [1], false);

        //  Save last endpoint URI
        options.last_endpoint.assign (addr_);

        return 0;
    }

    //  Choose the I/O thread to run the session in.
    io_thread_t *io_thread = choose_io_thread (options.affinity);
    if (!io_thread) {
        errno = EMTHREAD;
        return -1;
    }

    address_t *paddr = new (std::nothrow) address_t (protocol, address);
    zmq_assert (paddr);

    //  Resolve address (if needed by the protocol)
    if (protocol == "tcp") {
        paddr->resolved.tcp_addr = new (std::nothrow) tcp_address_t ();
        zmq_assert (paddr->resolved.tcp_addr);
        int rc = paddr->resolved.tcp_addr->resolve (
            address.c_str (), false, options.ipv4only ? true : false);
        if (rc != 0) {
            delete paddr;
            return -1;
        }
    }
#if !defined ZMQ_HAVE_WINDOWS && !defined ZMQ_HAVE_OPENVMS
    else if (protocol == "ipc") {
        paddr->resolved.ipc_addr = new (std::nothrow) ipc_address_t ();
        zmq_assert (paddr->resolved.ipc_addr);
        int rc = paddr->resolved.ipc_addr->resolve (address.c_str ());
        if (rc != 0) {
            delete paddr;
            return -1;
        }
    }
#endif

    //  Create session.
    session_base_t *session = session_base_t::create (io_thread, true, this,
        options, paddr);
    errno_assert (session);

    //  Create a bi-directional pipe.
    object_t *parents [2] = {this, session};
    pipe_t *pipes [2] = {NULL, NULL};
    int hwms [2] = {options.sndhwm, options.rcvhwm};
    bool delays [2] = {options.delay_on_disconnect, options.delay_on_close};
    rc = pipepair (parents, pipes, hwms, delays);
    errno_assert (rc == 0);

    //  PGM does not support subscription forwarding; ask for all data to be
    //  sent to this pipe.
    bool icanhasall = false;
    if (protocol == "pgm" || protocol == "epgm")
        icanhasall = true;

    //  Attach local end of the pipe to the socket object.
    attach_pipe (pipes [0], icanhasall);

    //  Attach remote end of the pipe to the session object later on.
    session->attach_pipe (pipes [1]);

    //  Save last endpoint URI
    paddr->to_string (options.last_endpoint);

    add_endpoint (addr_, (own_t *) session);
    return 0;
}
int zmq::socket_base_t::connect (const char *addr_)
{
    if (unlikely (ctx_terminated)) {
        errno = ETERM;
        return -1;
    }

    //  Parse addr_ string.
    std::string protocol;
    std::string address;
    int rc = parse_uri (addr_, protocol, address);
    if (rc != 0)
        return -1;

    rc = check_protocol (protocol);
    if (rc != 0)
        return -1;

    if (protocol == "inproc" || protocol == "sys") {

        //  TODO: inproc connect is specific with respect to creating pipes
        //  as there's no 'reconnect' functionality implemented. Once that
        //  is in place we should follow generic pipe creation algorithm.

        //  Find the peer endpoint.
        endpoint_t peer = find_endpoint (addr_);
        if (!peer.socket)
            return -1;

        reader_t *inpipe_reader = NULL;
        writer_t *inpipe_writer = NULL;
        reader_t *outpipe_reader = NULL;
        writer_t *outpipe_writer = NULL;

        //  The total HWM for an inproc connection should be the sum of
        //  the binder's HWM and the connector's HWM. (Similarly for the
        //  SWAP.)
        int64_t hwm;
        if (options.hwm == 0 || peer.options.hwm == 0)
            hwm = 0;
        else
            hwm = options.hwm + peer.options.hwm;
        int64_t swap;
        if (options.swap == 0 && peer.options.swap == 0)
            swap = 0;
        else
            swap = options.swap + peer.options.swap;

        //  Create inbound pipe, if required.
        if (options.requires_in)
            create_pipe (this, peer.socket, hwm, swap,
                &inpipe_reader, &inpipe_writer);

        //  Create outbound pipe, if required.
        if (options.requires_out)
            create_pipe (peer.socket, this, hwm, swap,
                &outpipe_reader, &outpipe_writer);

        //  Attach the pipes to this socket object.
        attach_pipes (inpipe_reader, outpipe_writer, peer.options.identity);

        //  Attach the pipes to the peer socket. Note that peer's seqnum
        //  was incremented in find_endpoint function. We don't need it
        //  increased here.
        send_bind (peer.socket, outpipe_reader, inpipe_writer,
            options.identity, false);

        return 0;
    }

    //  Choose the I/O thread to run the session in.
    io_thread_t *io_thread = choose_io_thread (options.affinity);
    if (!io_thread) {
        errno = EMTHREAD;
        return -1;
    }

    //  Create session.
    connect_session_t *session = new (std::nothrow) connect_session_t (
        io_thread, this, options, protocol.c_str (), address.c_str ());
    alloc_assert (session);

    //  If 'immediate connect' feature is required, we'll create the pipes
    //  to the session straight away. Otherwise, they'll be created by the
    //  session once the connection is established.
    if (options.immediate_connect) {

        reader_t *inpipe_reader = NULL;
        writer_t *inpipe_writer = NULL;
        reader_t *outpipe_reader = NULL;
        writer_t *outpipe_writer = NULL;

        //  Create inbound pipe, if required.
        if (options.requires_in)
            create_pipe (this, session, options.hwm, options.swap,
                &inpipe_reader, &inpipe_writer);

        //  Create outbound pipe, if required.
        if (options.requires_out)
            create_pipe (session, this, options.hwm, options.swap,
                &outpipe_reader, &outpipe_writer);

        //  Attach the pipes to the socket object.
        attach_pipes (inpipe_reader, outpipe_writer, blob_t ());

        //  Attach the pipes to the session object.
        session->attach_pipes (outpipe_reader, inpipe_writer, blob_t ());
    }

    //  Activate the session. Make it a child of this socket.
    launch_child (session);

    return 0;
}
int zmq::socket_base_t::bind (const char *addr_)
{
    if (unlikely (ctx_terminated)) {
        errno = ETERM;
        return -1;
    }

    //  Parse addr_ string.
    std::string protocol;
    std::string address;
    int rc = parse_uri (addr_, protocol, address);
    if (rc != 0)
        return -1;

    rc = check_protocol (protocol);
    if (rc != 0)
        return -1;

    if (protocol == "inproc" || protocol == "sys") {
        endpoint_t endpoint = {this, options};
        return register_endpoint (addr_, endpoint);
    }

    if (protocol == "pgm" || protocol == "epgm") {

        //  For convenience's sake, bind can be used interchangeably with
        //  connect for PGM and EPGM transports.
        return connect (addr_);
    }

    //  Remaining transports require an I/O thread to run in, so at this
    //  point we'll choose one.
    io_thread_t *io_thread = choose_io_thread (options.affinity);
    if (!io_thread) {
        errno = EMTHREAD;
        return -1;
    }

    if (protocol == "tcp") {
        tcp_listener_t *listener = new (std::nothrow) tcp_listener_t (
            io_thread, this, options);
        alloc_assert (listener);
        int rc = listener->set_address (address.c_str ());
        if (rc != 0) {
            delete listener;
            return -1;
        }
        launch_child (listener);
        return 0;
    }

#if !defined ZMQ_HAVE_WINDOWS && !defined ZMQ_HAVE_OPENVMS
    if (protocol == "ipc") {
        ipc_listener_t *listener = new (std::nothrow) ipc_listener_t (
            io_thread, this, options);
        alloc_assert (listener);
        int rc = listener->set_address (address.c_str ());
        if (rc != 0) {
            delete listener;
            return -1;
        }
        launch_child (listener);
        return 0;
    }
#endif

    zmq_assert (false);
    return -1;
}
int zmq::socket_base_t::connect (const char *addr_)
{
    ENTER_MUTEX();

    if (unlikely (ctx_terminated)) {
        errno = ETERM;
        EXIT_MUTEX();
        return -1;
    }

    //  Process pending commands, if any.
    int rc = process_commands (0, false);
    if (unlikely (rc != 0)) {
        EXIT_MUTEX();
        return -1;
    }

    //  Parse addr_ string.
    std::string protocol;
    std::string address;
    if (parse_uri (addr_, protocol, address) || check_protocol (protocol)) {
        EXIT_MUTEX();
        return -1;
    }

    if (protocol == "inproc") {

        //  TODO: inproc connect is specific with respect to creating pipes
        //  as there's no 'reconnect' functionality implemented. Once that
        //  is in place we should follow generic pipe creation algorithm.

        //  Find the peer endpoint.
        endpoint_t peer = find_endpoint (addr_);

        //  The total HWM for an inproc connection should be the sum of
        //  the binder's HWM and the connector's HWM.
        int sndhwm = 0;
        if (peer.socket == NULL)
            sndhwm = options.sndhwm;
        else if (options.sndhwm != 0 && peer.options.rcvhwm != 0)
            sndhwm = options.sndhwm + peer.options.rcvhwm;
        int rcvhwm = 0;
        if (peer.socket == NULL)
            rcvhwm = options.rcvhwm;
        else if (options.rcvhwm != 0 && peer.options.sndhwm != 0)
            rcvhwm = options.rcvhwm + peer.options.sndhwm;

        //  Create a bi-directional pipe to connect the peers.
        object_t *parents [2] = {this, peer.socket == NULL ? this : peer.socket};
        pipe_t *new_pipes [2] = {NULL, NULL};

        bool conflate = options.conflate &&
            (options.type == ZMQ_DEALER ||
             options.type == ZMQ_PULL ||
             options.type == ZMQ_PUSH ||
             options.type == ZMQ_PUB ||
             options.type == ZMQ_SUB);

        int hwms [2] = {conflate? -1 : sndhwm, conflate? -1 : rcvhwm};
        bool conflates [2] = {conflate, conflate};
        int rc = pipepair (parents, new_pipes, hwms, conflates);
        if (!conflate) {
            new_pipes [0]->set_hwms_boost (peer.options.sndhwm, peer.options.rcvhwm);
            new_pipes [1]->set_hwms_boost (options.sndhwm, options.rcvhwm);
        }

        errno_assert (rc == 0);

        if (!peer.socket) {
            //  The peer doesn't exist yet so we don't know whether
            //  to send the identity message or not. To resolve this,
            //  we always send our identity and drop it later if
            //  the peer doesn't expect it.
            msg_t id;
            rc = id.init_size (options.identity_size);
            errno_assert (rc == 0);
            memcpy (id.data (), options.identity, options.identity_size);
            id.set_flags (msg_t::identity);
            bool written = new_pipes [0]->write (&id);
            zmq_assert (written);
            new_pipes [0]->flush ();

            const endpoint_t endpoint = {this, options};
            pend_connection (std::string (addr_), endpoint, new_pipes);
        }
        else {
            //  If required, send the identity of the local socket to the peer.
            if (peer.options.recv_identity) {
                msg_t id;
                rc = id.init_size (options.identity_size);
                errno_assert (rc == 0);
                memcpy (id.data (), options.identity, options.identity_size);
                id.set_flags (msg_t::identity);
                bool written = new_pipes [0]->write (&id);
                zmq_assert (written);
                new_pipes [0]->flush ();
            }

            //  If required, send the identity of the peer to the local socket.
            if (options.recv_identity) {
                msg_t id;
                rc = id.init_size (peer.options.identity_size);
                errno_assert (rc == 0);
                memcpy (id.data (), peer.options.identity, peer.options.identity_size);
                id.set_flags (msg_t::identity);
                bool written = new_pipes [1]->write (&id);
                zmq_assert (written);
                new_pipes [1]->flush ();
            }

            //  Attach remote end of the pipe to the peer socket. Note that peer's
            //  seqnum was incremented in find_endpoint function. We don't need it
            //  increased here.
            send_bind (peer.socket, new_pipes [1], false);
        }

        //  Attach local end of the pipe to this socket object.
        attach_pipe (new_pipes [0]);

        //  Save last endpoint URI
        last_endpoint.assign (addr_);

        //  remember inproc connections for disconnect
        inprocs.insert (inprocs_t::value_type (std::string (addr_), new_pipes [0]));

        options.connected = true;
        EXIT_MUTEX();
        return 0;
    }

    bool is_single_connect = (options.type == ZMQ_DEALER ||
                              options.type == ZMQ_SUB ||
                              options.type == ZMQ_REQ);
    if (unlikely (is_single_connect)) {
        const endpoints_t::iterator it = endpoints.find (addr_);
        if (it != endpoints.end ()) {
            //  There is no valid use for multiple connects for SUB-PUB nor
            //  DEALER-ROUTER nor REQ-REP. Multiple connects produce
            //  nonsensical results.
            EXIT_MUTEX();
            return 0;
        }
    }

    //  Choose the I/O thread to run the session in.
    io_thread_t *io_thread = choose_io_thread (options.affinity);
    if (!io_thread) {
        errno = EMTHREAD;
        EXIT_MUTEX();
        return -1;
    }

    address_t *paddr = new (std::nothrow) address_t (protocol, address);
    alloc_assert (paddr);

    //  Resolve address (if needed by the protocol)
    if (protocol == "tcp") {
        //  Do some basic sanity checks on tcp:// address syntax
        //  - hostname starts with digit or letter, with embedded '-' or '.'
        //  - IPv6 address may contain hex chars and colons.
        //  - IPv4 address may contain decimal digits and dots.
        //  - Address must end in ":port" where port is *, or numeric
        //  - Address may contain two parts separated by ':'
        //  Following code is quick and dirty check to catch obvious errors,
        //  without trying to be fully accurate.
        const char *check = address.c_str ();
        if (isalnum (*check) || isxdigit (*check) || *check == '[') {
            check++;
            while (isalnum (*check) || isxdigit (*check) ||
                   *check == '.' || *check == '-' || *check == ':' ||
                   *check == ';' || *check == ']')
                check++;
        }
        //  Assume the worst, now look for success
        rc = -1;
        //  Did we reach the end of the address safely?
        if (*check == 0) {
            //  Do we have a valid port string? (cannot be '*' in connect)
            check = strrchr (address.c_str (), ':');
            if (check) {
                check++;
                if (*check && (isdigit (*check)))
                    rc = 0;     //  Valid
            }
        }
        if (rc == -1) {
            errno = EINVAL;
            LIBZMQ_DELETE(paddr);
            EXIT_MUTEX();
            return -1;
        }
        //  Defer resolution until a socket is opened
        paddr->resolved.tcp_addr = NULL;
    }
#if !defined ZMQ_HAVE_WINDOWS && !defined ZMQ_HAVE_OPENVMS
    else if (protocol == "ipc") {
        paddr->resolved.ipc_addr = new (std::nothrow) ipc_address_t ();
        alloc_assert (paddr->resolved.ipc_addr);
        int rc = paddr->resolved.ipc_addr->resolve (address.c_str ());
        if (rc != 0) {
            LIBZMQ_DELETE(paddr);
            EXIT_MUTEX();
            return -1;
        }
    }
#endif

// TBD - Should we check address for ZMQ_HAVE_NORM???

#ifdef ZMQ_HAVE_OPENPGM
    if (protocol == "pgm" || protocol == "epgm") {
        struct pgm_addrinfo_t *res = NULL;
        uint16_t port_number = 0;
        int rc = pgm_socket_t::init_address (address.c_str (), &res, &port_number);
        if (res != NULL)
            pgm_freeaddrinfo (res);
        if (rc != 0 || port_number == 0) {
            EXIT_MUTEX();
            return -1;
        }
    }
#endif
#if defined ZMQ_HAVE_TIPC
    else if (protocol == "tipc") {
        paddr->resolved.tipc_addr = new (std::nothrow) tipc_address_t ();
        alloc_assert (paddr->resolved.tipc_addr);
        int rc = paddr->resolved.tipc_addr->resolve (address.c_str ());
        if (rc != 0) {
            LIBZMQ_DELETE(paddr);
            EXIT_MUTEX();
            return -1;
        }
    }
#endif

    //  Create session.
    session_base_t *session = session_base_t::create (io_thread, true, this,
        options, paddr);
    errno_assert (session);

    //  PGM does not support subscription forwarding; ask for all data to be
    //  sent to this pipe. (same for NORM, currently?)
    bool subscribe_to_all = protocol == "pgm" || protocol == "epgm" || protocol == "norm";
    pipe_t *newpipe = NULL;

    if (options.immediate != 1 || subscribe_to_all) {
        //  Create a bi-directional pipe.
        object_t *parents [2] = {this, session};
        pipe_t *new_pipes [2] = {NULL, NULL};

        bool conflate = options.conflate &&
            (options.type == ZMQ_DEALER ||
             options.type == ZMQ_PULL ||
             options.type == ZMQ_PUSH ||
             options.type == ZMQ_PUB ||
             options.type == ZMQ_SUB);

        int hwms [2] = {conflate? -1 : options.sndhwm,
                        conflate? -1 : options.rcvhwm};
        bool conflates [2] = {conflate, conflate};
        rc = pipepair (parents, new_pipes, hwms, conflates);
        errno_assert (rc == 0);

        //  Attach local end of the pipe to the socket object.
        attach_pipe (new_pipes [0], subscribe_to_all);
        newpipe = new_pipes [0];

        //  Attach remote end of the pipe to the session object later on.
        session->attach_pipe (new_pipes [1]);
    }

    //  Save last endpoint URI
    paddr->to_string (last_endpoint);

    add_endpoint (addr_, (own_t *) session, newpipe);
    EXIT_MUTEX();
    return 0;
}
int parse_url(unsigned char *url, int *prlen, unsigned char **user, int *uslen,
              unsigned char **pass, int *palen, unsigned char **host, int *holen,
              unsigned char **port, int *polen, unsigned char **data, int *dalen,
              unsigned char **post)
{
    unsigned char *p, *q;
    unsigned char p_c[2];
    int a;

    if (prlen) *prlen = 0;
    if (user) *user = NULL;
    if (uslen) *uslen = 0;
    if (pass) *pass = NULL;
    if (palen) *palen = 0;
    if (host) *host = NULL;
    if (holen) *holen = 0;
    if (port) *port = NULL;
    if (polen) *polen = 0;
    if (data) *data = NULL;
    if (dalen) *dalen = 0;
    if (post) *post = NULL;

    if (!url || !(p = cast_uchar strchr(cast_const_char url, ':')))
        return -1;
    if (prlen) *prlen = (int)(p - url);
    if ((a = check_protocol(url, (int)(p - url))) == -1)
        return -1;
    if (p[1] != '/' || p[2] != '/') {
        if (protocols[a].need_slashes)
            return -1;
        p -= 2;
    }
    if (protocols[a].free_syntax) {
        if (data) *data = p + 3;
        if (dalen) *dalen = (int)strlen(cast_const_char(p + 3));
        return 0;
    }

    p += 3;
    q = p + strcspn(cast_const_char p, "@/?");
    if (!*q && protocols[a].need_slash_after_host)
        return -1;
    if (*q == '@') {
        unsigned char *pp;
        while (strcspn(cast_const_char(q + 1), "@") <
               strcspn(cast_const_char(q + 1), "/?"))
            q = q + 1 + strcspn(cast_const_char(q + 1), "@");
        pp = cast_uchar strchr(cast_const_char p, ':');
        if (!pp || pp > q) {
            if (user) *user = p;
            if (uslen) *uslen = (int)(q - p);
        } else {
            if (user) *user = p;
            if (uslen) *uslen = (int)(pp - p);
            if (pass) *pass = pp + 1;
            if (palen) *palen = (int)(q - pp - 1);
        }
        p = q + 1;
    }

    if (p[0] == '[') {
        q = cast_uchar strchr(cast_const_char p, ']');
        if (q) {
            q++;
            goto have_host;
        }
    }
    q = p + strcspn(cast_const_char p, ":/?");

have_host:
    if (!*q && protocols[a].need_slash_after_host)
        return -1;
    if (host) *host = p;
    if (holen) *holen = (int)(q - p);
    if (*q == ':') {
        unsigned char *pp = q + strcspn(cast_const_char q, "/");
        int cc;
        if (*pp != '/' && protocols[a].need_slash_after_host)
            return -1;
        if (port) *port = q + 1;
        if (polen) *polen = (int)(pp - q - 1);
        for (cc = 0; cc < pp - q - 1; cc++)
            if (q[cc + 1] < '0' || q[cc + 1] > '9')
                return -1;
        q = pp;
    }
    if (*q && *q != '?')
        q++;
    p = q;
    p_c[0] = POST_CHAR;
    p_c[1] = 0;
    q = p + strcspn(cast_const_char p, cast_const_char p_c);
    if (data) *data = p;
    if (dalen) *dalen = (int)(q - p);
    if (post) *post = *q ? q + 1 : NULL;
    return 0;
}
int zmq::socket_base_t::connect (const char *addr_)
{
    if (unlikely (ctx_terminated)) {
        errno = ETERM;
        return -1;
    }

    //  Parse addr_ string.
    std::string protocol;
    std::string address;
    int rc = parse_uri (addr_, protocol, address);
    if (rc != 0)
        return -1;

    //  Checks that protocol is valid and supported on this system
    rc = check_protocol (protocol);
    if (rc != 0)
        return -1;

    //  Parsed address for validation
    sockaddr_storage addr;
    socklen_t addr_len;

    if (protocol == "tcp")
        rc = resolve_ip_hostname (&addr, &addr_len, address.c_str ());
    else if (protocol == "ipc")
        rc = resolve_local_path (&addr, &addr_len, address.c_str ());

    if (rc != 0)
        return -1;

    if (protocol == "inproc" || protocol == "sys") {

        //  TODO: inproc connect is specific with respect to creating pipes
        //  as there's no 'reconnect' functionality implemented. Once that
        //  is in place we should follow generic pipe creation algorithm.

        //  Find the peer endpoint.
        endpoint_t peer = find_endpoint (addr_);
        if (!peer.socket)
            return -1;

        //  The total HWM for an inproc connection should be the sum of
        //  the binder's HWM and the connector's HWM.
        int sndhwm;
        int rcvhwm;
        if (options.sndhwm == 0 || peer.options.rcvhwm == 0)
            sndhwm = 0;
        else
            sndhwm = options.sndhwm + peer.options.rcvhwm;
        if (options.rcvhwm == 0 || peer.options.sndhwm == 0)
            rcvhwm = 0;
        else
            rcvhwm = options.rcvhwm + peer.options.sndhwm;

        //  Create a bi-directional pipe to connect the peers.
        object_t *parents [2] = {this, peer.socket};
        pipe_t *pipes [2] = {NULL, NULL};
        int hwms [2] = {sndhwm, rcvhwm};
        bool delays [2] = {options.delay_on_disconnect, options.delay_on_close};
        int rc = pipepair (parents, pipes, hwms, delays);
        errno_assert (rc == 0);

        //  Attach local end of the pipe to this socket object.
        attach_pipe (pipes [0], peer.options.identity);

        //  Attach remote end of the pipe to the peer socket. Note that peer's
        //  seqnum was incremented in find_endpoint function. We don't need it
        //  increased here.
        send_bind (peer.socket, pipes [1], options.identity, false);

        return 0;
    }

    //  Choose the I/O thread to run the session in.
    io_thread_t *io_thread = choose_io_thread (options.affinity);
    if (!io_thread) {
        errno = EMTHREAD;
        return -1;
    }

    //  Create session.
    connect_session_t *session = new (std::nothrow) connect_session_t (
        io_thread, this, options, protocol.c_str (), address.c_str ());
    alloc_assert (session);

    //  If 'immediate connect' feature is required, we'll create the pipes
    //  to the session straight away. Otherwise, they'll be created by the
    //  session once the connection is established.
    if (options.immediate_connect) {

        //  Create a bi-directional pipe.
        object_t *parents [2] = {this, session};
        pipe_t *pipes [2] = {NULL, NULL};
        int hwms [2] = {options.sndhwm, options.rcvhwm};
        bool delays [2] = {options.delay_on_disconnect, options.delay_on_close};
        int rc = pipepair (parents, pipes, hwms, delays);
        errno_assert (rc == 0);

        //  Attach local end of the pipe to the socket object.
        attach_pipe (pipes [0], blob_t ());

        //  Attach remote end of the pipe to the session object later on.
        session->attach_pipe (pipes [1]);
    }

    //  Activate the session. Make it a child of this socket.
    launch_child (session);

    return 0;
}
int zmq::socket_base_t::connect (const char *addr_)
{
    if (unlikely (ctx_terminated)) {
        errno = ETERM;
        return -1;
    }

    //  Parse addr_ string.
    std::string protocol;
    std::string address;
    int rc = parse_uri (addr_, protocol, address);
    if (rc != 0)
        return -1;

    rc = check_protocol (protocol);
    if (rc != 0)
        return -1;

    if (protocol == "inproc" || protocol == "sys") {

        //  TODO: inproc connect is specific with respect to creating pipes
        //  as there's no 'reconnect' functionality implemented. Once that
        //  is in place we should follow generic pipe creation algorithm.

        //  Find the peer endpoint.
        endpoint_t peer = find_endpoint (addr_);
        if (!peer.socket)
            return -1;

        //  The total HWM for an inproc connection should be the sum of
        //  the binder's HWM and the connector's HWM.
        int sndhwm;
        int rcvhwm;
        if (options.sndhwm == 0 || peer.options.rcvhwm == 0)
            sndhwm = 0;
        else
            sndhwm = options.sndhwm + peer.options.rcvhwm;
        if (options.rcvhwm == 0 || peer.options.sndhwm == 0)
            rcvhwm = 0;
        else
            rcvhwm = options.rcvhwm + peer.options.sndhwm;

        //  Create a bi-directional pipe to connect the peers.
        object_t *parents [2] = {this, peer.socket};
        pipe_t *pipes [2] = {NULL, NULL};
        int hwms [2] = {sndhwm, rcvhwm};
        bool delays [2] = {options.delay_on_disconnect, options.delay_on_close};
        int rc = pipepair (parents, pipes, hwms, delays);
        errno_assert (rc == 0);

        //  Attach local end of the pipe to this socket object.
        attach_pipe (pipes [0]);

        //  If required, send the identity of the local socket to the peer.
        if (options.send_identity) {
            msg_t id;
            rc = id.init_size (options.identity_size);
            zmq_assert (rc == 0);
            memcpy (id.data (), options.identity, options.identity_size);
            id.set_flags (msg_t::identity);
            bool written = pipes [0]->write (&id);
            zmq_assert (written);
        }

        //  Attach remote end of the pipe to the peer socket. Note that peer's
        //  seqnum was incremented in find_endpoint function. We don't need it
        //  increased here.
        send_bind (peer.socket, pipes [1], false);

        return 0;
    }

    //  Choose the I/O thread to run the session in.
    io_thread_t *io_thread = choose_io_thread (options.affinity);
    if (!io_thread) {
        errno = EMTHREAD;
        return -1;
    }

    //  Create session.
    session_base_t *session = session_base_t::create (io_thread, true, this,
        options, protocol.c_str (), address.c_str ());
    errno_assert (session);

    //  Create a bi-directional pipe.
    object_t *parents [2] = {this, session};
    pipe_t *pipes [2] = {NULL, NULL};
    int hwms [2] = {options.sndhwm, options.rcvhwm};
    bool delays [2] = {options.delay_on_disconnect, options.delay_on_close};
    rc = pipepair (parents, pipes, hwms, delays);
    errno_assert (rc == 0);

    //  PGM does not support subscription forwarding; ask for all data to be
    //  sent to this pipe.
    bool icanhasall = false;
    if (protocol == "pgm" || protocol == "epgm")
        icanhasall = true;

    //  Attach local end of the pipe to the socket object.
    attach_pipe (pipes [0], icanhasall);

    //  Attach remote end of the pipe to the session object later on.
    session->attach_pipe (pipes [1]);

    //  Activate the session. Make it a child of this socket.
    launch_child (session);

    return 0;
}
int zmq::socket_base_t::term_endpoint (const char *addr_)
{
    scoped_optional_lock_t sync_lock (thread_safe ? &sync : NULL);

    //  Check whether the library hasn't been shut down yet.
    if (unlikely (ctx_terminated)) {
        errno = ETERM;
        return -1;
    }

    //  Check whether endpoint address passed to the function is valid.
    if (unlikely (!addr_)) {
        errno = EINVAL;
        return -1;
    }

    //  Process pending commands, if any, since there could be pending unprocessed
    //  process_own()'s (from launch_child() for example) we're asked to terminate now.
    int rc = process_commands (0, false);
    if (unlikely (rc != 0)) {
        return -1;
    }

    //  Parse addr_ string.
    std::string protocol;
    std::string address;
    if (parse_uri (addr_, protocol, address) || check_protocol (protocol)) {
        return -1;
    }

    //  Disconnect an inproc socket
    if (protocol == "inproc") {
        if (unregister_endpoint (std::string (addr_), this) == 0) {
            return 0;
        }
        std::pair <inprocs_t::iterator, inprocs_t::iterator> range =
            inprocs.equal_range (std::string (addr_));
        if (range.first == range.second) {
            errno = ENOENT;
            return -1;
        }

        for (inprocs_t::iterator it = range.first; it != range.second; ++it)
            it->second->terminate (true);
        inprocs.erase (range.first, range.second);
        return 0;
    }

    std::string resolved_addr = std::string (addr_);
    std::pair <endpoints_t::iterator, endpoints_t::iterator> range;

    //  The resolved last_endpoint is used as a key in the endpoints map.
    //  The address passed by the user might not match in the TCP case due to
    //  IPv4-in-IPv6 mapping (e.g. tcp://[::ffff:127.0.0.1]:9999), so try to
    //  resolve before giving up. Since at this stage we don't know whether the
    //  socket is connected or bound, try both.
    if (protocol == "tcp") {
        range = endpoints.equal_range (resolved_addr);
        if (range.first == range.second) {
            tcp_address_t *tcp_addr = new (std::nothrow) tcp_address_t ();
            alloc_assert (tcp_addr);
            rc = tcp_addr->resolve (address.c_str (), false, options.ipv6);

            if (rc == 0) {
                tcp_addr->to_string (resolved_addr);
                range = endpoints.equal_range (resolved_addr);

                if (range.first == range.second) {
                    rc = tcp_addr->resolve (address.c_str (), true, options.ipv6);
                    if (rc == 0) {
                        tcp_addr->to_string (resolved_addr);
                    }
                }
            }
            LIBZMQ_DELETE(tcp_addr);
        }
    }

    //  Find the endpoints range (if any) corresponding to the addr_ string.
    range = endpoints.equal_range (resolved_addr);
    if (range.first == range.second) {
        errno = ENOENT;
        return -1;
    }

    for (endpoints_t::iterator it = range.first; it != range.second; ++it) {
        //  If we have an associated pipe, terminate it.
        if (it->second.second != NULL)
            it->second.second->terminate (false);
        term_child (it->second.first);
    }
    endpoints.erase (range.first, range.second);
    return 0;
}
int zmq::socket_base_t::bind (const char *addr_)
{
    scoped_optional_lock_t sync_lock (thread_safe ? &sync : NULL);

    if (unlikely (ctx_terminated)) {
        errno = ETERM;
        return -1;
    }

    //  Process pending commands, if any.
    int rc = process_commands (0, false);
    if (unlikely (rc != 0)) {
        return -1;
    }

    //  Parse addr_ string.
    std::string protocol;
    std::string address;
    if (parse_uri (addr_, protocol, address) || check_protocol (protocol)) {
        return -1;
    }

    if (protocol == "inproc") {
        const endpoint_t endpoint = { this, options };
        rc = register_endpoint (addr_, endpoint);
        if (rc == 0) {
            connect_pending (addr_, this);
            last_endpoint.assign (addr_);
            options.connected = true;
        }
        return rc;
    }

    if (protocol == "pgm" || protocol == "epgm" || protocol == "norm") {
        //  For convenience's sake, bind can be used interchangeably with
        //  connect for PGM, EPGM and NORM transports.
        rc = connect (addr_);
        if (rc != -1)
            options.connected = true;
        return rc;
    }

    if (protocol == "udp") {
        if (!(options.type == ZMQ_DGRAM || options.type == ZMQ_DISH)) {
            errno = ENOCOMPATPROTO;
            return -1;
        }

        //  Choose the I/O thread to run the session in.
        io_thread_t *io_thread = choose_io_thread (options.affinity);
        if (!io_thread) {
            errno = EMTHREAD;
            return -1;
        }

        address_t *paddr = new (std::nothrow) address_t (protocol, address,
            this->get_ctx ());
        alloc_assert (paddr);

        paddr->resolved.udp_addr = new (std::nothrow) udp_address_t ();
        alloc_assert (paddr->resolved.udp_addr);
        rc = paddr->resolved.udp_addr->resolve (address.c_str (), true);
        if (rc != 0) {
            LIBZMQ_DELETE(paddr);
            return -1;
        }

        session_base_t *session = session_base_t::create (io_thread, true, this,
            options, paddr);
        errno_assert (session);

        pipe_t *newpipe = NULL;

        //  Create a bi-directional pipe.
        object_t *parents [2] = {this, session};
        pipe_t *new_pipes [2] = {NULL, NULL};

        int hwms [2] = {options.sndhwm, options.rcvhwm};
        bool conflates [2] = {false, false};
        rc = pipepair (parents, new_pipes, hwms, conflates);
        errno_assert (rc == 0);

        //  Attach local end of the pipe to the socket object.
        attach_pipe (new_pipes [0], true);
        newpipe = new_pipes [0];

        //  Attach remote end of the pipe to the session object later on.
        session->attach_pipe (new_pipes [1]);

        //  Save last endpoint URI
        paddr->to_string (last_endpoint);

        add_endpoint (addr_, (own_t *) session, newpipe);

        return 0;
    }

    //  Remaining transports require an I/O thread to run in, so at this
    //  point we'll choose one.
    io_thread_t *io_thread = choose_io_thread (options.affinity);
    if (!io_thread) {
        errno = EMTHREAD;
        return -1;
    }

    if (protocol == "tcp") {
        tcp_listener_t *listener = new (std::nothrow) tcp_listener_t (
            io_thread, this, options);
        alloc_assert (listener);
        rc = listener->set_address (address.c_str ());
        if (rc != 0) {
            LIBZMQ_DELETE(listener);
            event_bind_failed (address, zmq_errno ());
            return -1;
        }

        //  Save last endpoint URI
        listener->get_address (last_endpoint);

        add_endpoint (last_endpoint.c_str (), (own_t *) listener, NULL);
        options.connected = true;
        return 0;
    }

#if !defined ZMQ_HAVE_WINDOWS && !defined ZMQ_HAVE_OPENVMS
    if (protocol == "ipc") {
        ipc_listener_t *listener = new (std::nothrow) ipc_listener_t (
            io_thread, this, options);
        alloc_assert (listener);
        int rc = listener->set_address (address.c_str ());
        if (rc != 0) {
            LIBZMQ_DELETE(listener);
            event_bind_failed (address, zmq_errno ());
            return -1;
        }

        //  Save last endpoint URI
        listener->get_address (last_endpoint);

        add_endpoint (last_endpoint.c_str (), (own_t *) listener, NULL);
        options.connected = true;
        return 0;
    }
#endif
#if defined ZMQ_HAVE_TIPC
    if (protocol == "tipc") {
        tipc_listener_t *listener = new (std::nothrow) tipc_listener_t (
            io_thread, this, options);
        alloc_assert (listener);
        int rc = listener->set_address (address.c_str ());
        if (rc != 0) {
            LIBZMQ_DELETE(listener);
            event_bind_failed (address, zmq_errno ());
            return -1;
        }

        //  Save last endpoint URI
        listener->get_address (last_endpoint);

        add_endpoint (addr_, (own_t *) listener, NULL);
        options.connected = true;
        return 0;
    }
#endif
#if defined ZMQ_HAVE_VMCI
    if (protocol == "vmci") {
        vmci_listener_t *listener = new (std::nothrow) vmci_listener_t (
            io_thread, this, options);
        alloc_assert (listener);
        int rc = listener->set_address (address.c_str ());
        if (rc != 0) {
            LIBZMQ_DELETE(listener);
            event_bind_failed (address, zmq_errno ());
            return -1;
        }

        listener->get_address (last_endpoint);

        add_endpoint (last_endpoint.c_str (), (own_t *) listener, NULL);
        options.connected = true;
        return 0;
    }
#endif

    zmq_assert (false);
    return -1;
}
int main(void)
{
    int init_res;

    puts("\nRIOT netdev test");
    dev = NETDEV_DEFAULT;

    if (dev == NULL) {
        puts("Default device was NULL");
        return 1;
    }

    printf("Initialized dev ");
    switch (dev->type) {
        case NETDEV_TYPE_UNKNOWN:
            printf("of unknown type\n");
            break;
        case NETDEV_TYPE_BASE:
            printf("as basic device\n");
            break;
        default:
            printf("of undefined type\n");
            break;
    }

    if (dev->driver == NULL) {
        puts("Default driver is defined as NULL!");
        return 1;
    }

    dev->driver->init(dev);

    if (!(init_res = init_channel())) {
        return 1;
    }
    if (check_channel() == 0) {
        printf("Channel is not as expected. ");
        if (init_res == 2) {
            printf("But initialization is not supported. Continuing.\n");
        }
        else {
            printf("Aborting\n");
            return 1;
        }
    }

    if (!(init_res = init_address())) {
        return 1;
    }
    if (check_address() == 0) {
        printf("Address is not as expected. ");
        if (init_res == 2) {
            printf("But initialization is not supported. Continuing.\n");
        }
        else {
            printf("Aborting\n");
            return 1;
        }
    }

    if (!(init_res = init_long_address())) {
        return 1;
    }
    if (check_long_address() == 0) {
        printf("Long address is not as expected. ");
        if (init_res == 2) {
            printf("But initialization is not supported. Continuing.\n");
        }
        else {
            printf("Aborting\n");
            return 1;
        }
    }

    if (!(init_res = init_nid())) {
        return 1;
    }
    if (check_nid() == 0) {
        printf("Network ID is not as expected. ");
        if (init_res == 2) {
            printf("But initialization is not supported. Continuing.\n");
        }
        else {
            printf("Aborting\n");
            return 1;
        }
    }

    if (check_max_packet_size() == 0) {
        return 1;
    }

    if (check_protocol() == 0) {
        return 1;
    }

    if (!init_state()) {
        return 1;
    }

    if (!check_state()) {
        return 1;
    }

#ifdef SENDER
    if (!send_packet()) {
        return 1;
    }
#elif RECEIVER
    if (!init_receiver_callback()) {
        return 1;
    }
#endif

    return 0;
}