// Accept a new connection into <new_stream>, optionally capturing the
// peer's address in <remote_addr>.  Honors <timeout> (0 => block
// indefinitely), restarts the accept() on EINTR when <restart> is
// non-zero, and can reset the new handle's WinSock2 event association
// via <reset_new_handle>.  Returns 0 on success, -1 on failure.
int ACE_SOCK_Acceptor::accept (ACE_SOCK_Stream &new_stream,
                               ACE_Addr *remote_addr,
                               ACE_Time_Value *timeout,
                               int restart,
                               int reset_new_handle) const
{
  ACE_TRACE ("ACE_SOCK_Acceptor::accept");

  int in_blocking_mode = 0;
  // Handle the timeout and (possibly) flip the listener to
  // non-blocking; <in_blocking_mode> records the original state so it
  // can be restored below.
  if (this->shared_accept_start (timeout,
                                 restart,
                                 in_blocking_mode) == -1)
    return -1;
  else
    {
      // On Win32 the third parameter to <accept> must be a NULL
      // pointer if we want to ignore the client's address.
      int *len_ptr = 0;
      sockaddr *addr = 0;
      int len = 0;
      if (remote_addr != 0)
        {
          len = remote_addr->get_size ();
          len_ptr = &len;
          addr = (sockaddr *) remote_addr->get_addr ();
        }

      // Retry on EINTR only when restarts were requested and we are in
      // blocking mode (timeout == 0).
      do
        new_stream.set_handle (ACE_OS::accept (this->get_handle (),
                                               addr,
                                               len_ptr));
      while (new_stream.get_handle () == ACE_INVALID_HANDLE
             && restart != 0
             && errno == EINTR
             && timeout == 0);

      // Reset the size of the addr, so the proper UNIX/IPv4/IPv6 family
      // is known.
      if (new_stream.get_handle () != ACE_INVALID_HANDLE
          && remote_addr != 0)
        {
          remote_addr->set_size (len);
          if (addr)
            remote_addr->set_type (addr->sa_family);
        }
    }

  // Restore the original blocking mode and map the handle to 0/-1.
  return this->shared_accept_finish (new_stream,
                                     in_blocking_mode,
                                     reset_new_handle);
}
// QoS-enabled accept(): identical to the plain overload above, but
// forwards <qos_params> to ACE_OS::accept().  Returns 0 on success,
// -1 on failure.
int ACE_SOCK_Acceptor::accept (ACE_SOCK_Stream &new_stream,
                               ACE_Accept_QoS_Params qos_params,
                               ACE_Addr *remote_addr,
                               ACE_Time_Value *timeout,
                               int restart,
                               int reset_new_handle) const
{
  ACE_TRACE ("ACE_SOCK_Acceptor::accept");

  int in_blocking_mode = 0;
  // Handle timeout / temporary non-blocking mode for the listener.
  if (this->shared_accept_start (timeout,
                                 restart,
                                 in_blocking_mode) == -1)
    return -1;
  else
    {
      // On Win32 the third parameter to <accept> must be a NULL
      // pointer if we want to ignore the client's address.
      int *len_ptr = 0;
      int len = 0;
      sockaddr *addr = 0;
      if (remote_addr != 0)
        {
          len = remote_addr->get_size ();
          len_ptr = &len;
          addr = (sockaddr *) remote_addr->get_addr ();
        }

      // Retry on EINTR only when restartable and blocking (no timeout).
      do
        new_stream.set_handle (ACE_OS::accept (this->get_handle (),
                                               addr,
                                               len_ptr,
                                               qos_params));
      while (new_stream.get_handle () == ACE_INVALID_HANDLE
             && restart != 0
             && errno == EINTR
             && timeout == 0);

      // Reset the size of the addr, which is only necessary for UNIX
      // domain sockets.
      if (new_stream.get_handle () != ACE_INVALID_HANDLE
          && remote_addr != 0)
        remote_addr->set_size (len);
    }

  // Restore blocking mode and map the new handle to 0/-1.
  return this->shared_accept_finish (new_stream,
                                     in_blocking_mode,
                                     reset_new_handle);
}
// Complete a non-blocking connect within <tv> (0 => wait forever).
// On success, optionally fetches the peer address into <remote_sap>
// and switches <new_stream> back to blocking mode; on failure the
// stream is closed with errno preserved.  Returns 0 or -1.
int ACE_SOCK_Connector::complete (ACE_SOCK_Stream &new_stream,
                                  ACE_Addr *remote_sap,
                                  const ACE_Time_Value *tv)
{
  ACE_TRACE ("ACE_SOCK_Connector::complete");

  ACE_HANDLE h = ACE::handle_timed_complete (new_stream.get_handle (),
                                             tv);
  // We failed to get connected.
  if (h == ACE_INVALID_HANDLE)
    {
#if defined (ACE_WIN32)
      // Win32 has a timing problem - if you check to see if the
      // connection has completed too fast, it will fail - so wait
      // <ACE_NON_BLOCKING_BUG_DELAY> microseconds to let it catch up
      // then retry to see if it's a real failure.
      ACE_Time_Value time (0, ACE_NON_BLOCKING_BUG_DELAY);
      ACE_OS::sleep (time);
      h = ACE::handle_timed_complete (new_stream.get_handle (),
                                      tv);
      if (h == ACE_INVALID_HANDLE)
        {
#endif /* ACE_WIN32 */
          // Save/restore errno.
          ACE_Errno_Guard error (errno);
          new_stream.close ();
          return -1;
#if defined (ACE_WIN32)
        }
#endif /* ACE_WIN32 */
    }

  if (remote_sap != 0)
    {
      int len = remote_sap->get_size ();
      sockaddr *addr =
        reinterpret_cast<sockaddr *> (remote_sap->get_addr ());
      if (ACE_OS::getpeername (h,
                               addr,
                               &len) == -1)
        {
          // Save/restore errno.
          ACE_Errno_Guard error (errno);
          new_stream.close ();
          return -1;
        }
    }

  // Start out with non-blocking disabled on the <new_stream>.
  new_stream.disable (ACE_NONBLOCK);
  return 0;
}
/* fence:
   1) send the heartbeat message to repo at regular basis. 1/1 minute,
      get back the messages, calculate the digest, and send to
      localhost:9907
   2) listens on localhost:9901, for the incoming raw inputs, calculate
      the digest, and send it to hub:10007
*/
int ACE_TMAIN(int argc, ACE_TCHAR* argv[]){
  ACE_SOCK_Acceptor _9901acceptor;
  ACE_INET_Addr _9901addr(9901);
  //create the acceptor
  if(_9901acceptor.open(_9901addr,1)==-1){
    ACE_ERROR_RETURN((LM_ERROR,
                      "%p\n","open"),1);
  }else if(_9901acceptor.get_local_addr(_9901addr)== -1){
    ACE_ERROR_RETURN((LM_ERROR,
                      "%p\n","get_local_addr"),1);
  }
  ACE_DEBUG((LM_INFO,
             "(%P|%t) starting server at port %d\n",
             _9901addr.get_port_number()));

  // ACE_INET_Addr repo_addr(repo_port,repo_host.c_str());
  // NOTE(review): <con> is declared but unused -- only the commented-out
  // repo-connector code below referenced it; candidate for removal.
  ACE_SOCK_Connector con;
  // ACE_SOCK_Stream cli_stream ;
  ACE_Thread_Manager* mgr = ACE_Thread_Manager::instance();
  // if(con.connect(cli_stream,repo_addr)==-1){
  //   ACE_ERROR_RETURN((LM_ERROR,
  //                     "(%P|%t:%l) %p\n","connection failed"),0);
  // }else{
  //   ACE_DEBUG((LM_DEBUG,
  //              "(%P|%t) connected to %s at port %d\n",repo_addr.get_host_name(),repo_addr.get_port_number()));
  // }

  /* connector side; do in a separate thread; */
  // fetch_step2 is defined elsewhere in this program.
  if(mgr->spawn(fetch_step2, 0, THR_DETACHED) == -1){
    ACE_ERROR ((LM_ERROR,
                "(%P|%t) %p\n",
                "spawn"));
  }
  /* run the accept loop ; */
  do{
    ACE_SOCK_Stream stream;
    // NOTE(review): a failed accept terminates the whole server, and
    // with exit status 0 -- confirm whether logging-and-continuing (and
    // a non-zero status) was intended.
    if(_9901acceptor.accept(stream)== -1){
      ACE_ERROR_RETURN((LM_ERROR,
                        "(%P|%t:%l) %p\n","accept failed"),0);
    }else{
      ACE_DEBUG((LM_DEBUG,
                 "(%P|%t:%l) connected to %s at port %d\n",_9901addr.get_host_name(),_9901addr.get_port_number()));
    }
    // Hand the raw handle to the detached worker; accept_step1 is
    // defined elsewhere and presumably owns/closes the handle -- confirm.
    if(mgr->spawn(accept_step1,
                  reinterpret_cast<void *> (stream.get_handle()),
                  THR_DETACHED) == -1){
      ACE_ERROR ((LM_ERROR,
                  "(%P|%t) %p\n",
                  "spawn"));
    }
  }while(true);
  return 0;
}
// Post-accept epilogue shared by the accept() overloads: restore the
// original blocking mode on both the listener and the freshly accepted
// handle, optionally strip the inherited WinSock2 event association,
// and translate the handle into the usual 0/-1 return code.
int ACE_SOCK_Acceptor::shared_accept_finish (ACE_SOCK_Stream new_stream,
                                             int in_blocking_mode,
                                             int reset_new_handle) const
{
  ACE_TRACE ("ACE_SOCK_Acceptor::shared_accept_finish ()");

  ACE_HANDLE accepted = new_stream.get_handle ();

  // shared_accept_start() may have flipped us to non-blocking; undo
  // that here if we started out blocking.
  if (in_blocking_mode)
    {
      // Preserve accept()'s errno across the flag manipulation.
      ACE_Errno_Guard error (errno);

      ACE::clr_flags (this->get_handle (), ACE_NONBLOCK);
      ACE::clr_flags (accepted, ACE_NONBLOCK);
    }

#if defined (ACE_HAS_WINSOCK2) && (ACE_HAS_WINSOCK2 != 0)
  if (reset_new_handle)
    // Drop the event association the new socket inherited from the
    // listener.
    ::WSAEventSelect ((SOCKET) accepted, 0, 0);
#else
  ACE_UNUSED_ARG (reset_new_handle);
#endif /* ACE_WIN32 */

  return accepted == ACE_INVALID_HANDLE ? -1 : 0;
}
int ACE_SOCK_Connector::connect (ACE_SOCK_Stream &new_stream, const ACE_Addr &remote_sap, const ACE_Time_Value *timeout, const ACE_Addr &local_sap, int reuse_addr, int /* flags */, int /* perms */, int protocol) { ACE_TRACE ("ACE_SOCK_Connector::connect"); if (this->shared_open (new_stream, remote_sap.get_type (), protocol, reuse_addr) == -1) return -1; else if (this->shared_connect_start (new_stream, timeout, local_sap) == -1) return -1; int result = ACE_OS::connect (new_stream.get_handle (), reinterpret_cast<sockaddr *> (remote_sap.get_addr ()), remote_sap.get_size ()); return this->shared_connect_finish (new_stream, timeout, result); }
// Simple iterative server: listen on port 10009 and hand every
// accepted connection's handle to handle_input() (defined elsewhere).
int ACE_TMAIN(int argc, ACE_TCHAR* argv[]){
  ACE_SOCK_Acceptor acceptor;
  ACE_INET_Addr addr(10009);

  // open() returns 0 on success; any non-zero (-1) result is an error.
  if(acceptor.open(addr,1)){
    ACE_ERROR_RETURN((LM_ERROR,
                      "%p\n",
                      "open"),1);
  }
  else if(acceptor.get_local_addr(addr) == -1){
    ACE_ERROR_RETURN((LM_ERROR,
                      "%p\n",
                      "get_local_addr"),1);
  }
  ACE_DEBUG((LM_INFO,
             "(%P|%t) starting server at port %d\n",addr.get_port_number()));

  ACE_Thread_Manager* mgr = ACE_Thread_Manager::instance();

  while(true){
    ACE_SOCK_Stream stream;
    // Keep serving even if a single accept fails.
    if(acceptor.accept(stream) == -1){
      ACE_ERROR((LM_ERROR,
                 "%p\n","accept"));
      continue;
    }else{
      ACE_DEBUG((LM_DEBUG,
                 "(%P|%t) spawning one thread\n"));
      // <commu> is a name from elsewhere in this file -- presumably a
      // shared communication object; handle_input presumably takes
      // ownership of the raw handle -- confirm.
      handle_input(mgr, commu, stream.get_handle());
    }
  }
  return 0;
}
// Pre-connect setup: bind the caller-requested local endpoint (unless
// the wildcard was passed) and, when a timeout is in play, switch the
// stream to non-blocking mode.  Returns 0 on success, -1 on failure
// (closing the stream after a failed bind, with errno preserved).
int ACE_SOCK_Connector::shared_connect_start (ACE_SOCK_Stream &new_stream,
                                              const ACE_Time_Value *timeout,
                                              const ACE_Addr &local_sap)
{
  ACE_TRACE ("ACE_SOCK_Connector::shared_connect_start");

  // Only bind explicitly when the caller asked for a specific local
  // address; sap_any means "let the OS pick".
  if (local_sap != ACE_Addr::sap_any)
    {
      sockaddr *local_addr =
        reinterpret_cast<sockaddr *> (local_sap.get_addr ());

      if (ACE_OS::bind (new_stream.get_handle (),
                        local_addr,
                        local_sap.get_size ()) == -1)
        {
          // Keep bind()'s errno intact across the close().
          ACE_Errno_Guard error (errno);
          new_stream.close ();
          return -1;
        }
    }

  // A non-null timeout means the connect must be non-blocking.
  if (timeout != 0 && new_stream.enable (ACE_NONBLOCK) == -1)
    return -1;

  return 0;
}
// Thread-pool worker loop: each thread blocks in accept() on the
// shared acceptor and then services the accepted connection with a
// synchronous HTTP handler.  Only returns (-1) on accept failure.
int Synch_Thread_Pool_Task::svc (void)
{
  // Creates a factory of HTTP_Handlers binding to synchronous I/O strategy
  Synch_HTTP_Handler_Factory factory;

  for (;;)
    {
      ACE_SOCK_Stream stream;

      // Lock in this accept. When it returns, we have a connection.
      if (this->acceptor_.accept (stream) == -1)
        ACE_ERROR_RETURN ((LM_ERROR, "%p\n", "HTTP_Acceptor::accept"), -1);

      ACE_Message_Block *mb;
      // Request buffer (+1 beyond MAX_REQUEST_SIZE).
      ACE_NEW_RETURN (mb,
                      ACE_Message_Block (HTTP_Handler::MAX_REQUEST_SIZE + 1),
                      -1);

      // Create an HTTP Handler to handle this request
      HTTP_Handler *handler = factory.create_http_handler ();

      handler->open (stream.get_handle (), *mb);

      // Handler is destroyed when the I/O puts the Handler into the
      // done state.
      // Drop our reference to the message block; the handler
      // presumably duplicated it in open() -- confirm.
      mb->release ();

      ACE_DEBUG ((LM_DEBUG,
                  " (%t) in Synch_Thread_Pool_Task::svc, recycling\n"));
    }

  ACE_NOTREACHED(return 0);
}
int ACE_SOCK_Connector::connect (ACE_SOCK_Stream &new_stream, const ACE_Addr &remote_sap, ACE_QoS_Params qos_params, const ACE_Time_Value *timeout, const ACE_Addr &local_sap, ACE_Protocol_Info * protocolinfo, ACE_SOCK_GROUP g, u_long flags, int reuse_addr, int /* perms */) { ACE_TRACE ("ACE_SOCK_Connector::connect"); if (this->shared_open (new_stream, remote_sap.get_type (), 0, protocolinfo, g, flags, reuse_addr) == -1) return -1; else if (this->shared_connect_start (new_stream, timeout, local_sap) == -1) return -1; int result = ACE_OS::connect (new_stream.get_handle (), reinterpret_cast<sockaddr *> (remote_sap.get_addr ()), remote_sap.get_size (), qos_params); return this->shared_connect_finish (new_stream, timeout, result); }
// Emulate a pipe with a loopback TCP connection: listen on an
// ephemeral port, connect to it from within the same process, and
// store the two resulting handles in this->handles_ (0 = read side,
// 1 = write side).  Returns 0 on success, -1 on failure.
int Pipe::open (void)
{
  ACE_INET_Addr my_addr;
  ACE_SOCK_Acceptor acceptor;
  ACE_SOCK_Connector connector;
  ACE_SOCK_Stream reader;
  ACE_SOCK_Stream writer;
  int result = 0;

  // Bind listener to any port and then find out what the port was.
  if (acceptor.open (ACE_Addr::sap_any) == -1
      || acceptor.get_local_addr (my_addr) == -1)
    result = -1;
  else
    {
      int af = my_addr.get_type ();
      const ACE_TCHAR *local = ACE_LOCALHOST;
#if defined (ACE_HAS_IPV6)
      // Use the IPv6 loopback name when the listener came back as an
      // IPv6 endpoint.
      if (af == AF_INET6)
        local = ACE_IPV6_LOCALHOST;
#endif /* ACE_HAS_IPV6 */
      ACE_INET_Addr sv_addr (my_addr.get_port_number (), local, af);

      // Establish a connection within the same process.
      if (connector.connect (writer, sv_addr) == -1)
        result = -1;
      else if (acceptor.accept (reader) == -1)
        {
          writer.close ();
          result = -1;
        }
    }

  // Close down the acceptor endpoint since we don't need it anymore.
  acceptor.close ();
  if (result == -1)
    return -1;

  this->handles_[0] = reader.get_handle ();
  this->handles_[1] = writer.get_handle ();
  return 0;
}
// Thread-per-request dispatch loop: accept a connection, wrap its
// handle in a Thread_Per_Request_Task, and (when throttling is
// enabled) cap the number of concurrently running tasks.  Only
// returns (-1) on an error.
int HTTP_Server::thread_per_request (HTTP_Handler_Factory &factory)
{
  int grp_id = -1;

  // thread per request
  // Main thread opens the acceptor
  if (this->acceptor_.open (ACE_INET_Addr (this->port_), 1,
                            PF_INET, this->backlog_) == -1)
    ACE_ERROR_RETURN ((LM_ERROR, ACE_TEXT ("%p\n"),
                       ACE_TEXT ("HTTP_Acceptor::open")), -1);

  ACE_SOCK_Stream stream;

  // When we are throttling, this is the amount of time to wait before
  // checking for runnability again.
  const ACE_Time_Value wait_time (0, 10);

  for (;;)
    {
      if (this->acceptor_.accept (stream) == -1)
        ACE_ERROR_RETURN ((LM_ERROR, ACE_TEXT ("%p\n"),
                           ACE_TEXT ("HTTP_Acceptor::accept")), -1);

      Thread_Per_Request_Task *t;
      // Pass grp_id as a constructor param instead of into open.
      ACE_NEW_RETURN (t,
                      Thread_Per_Request_Task (stream.get_handle (),
                                               this->tm_,
                                               grp_id,
                                               factory),
                      -1);

      // NOTE(review): <t> is not deleted when open() fails -- confirm
      // the task deletes itself on a failed open(); otherwise this
      // leaks one task per failure.
      if (t->open () != 0)
        ACE_ERROR_RETURN ((LM_ERROR, ACE_TEXT ("%p\n"),
                           ACE_TEXT ("Thread_Per_Request_Task::open")), -1);

      // Throttling is not allowing too many threads to run away.
      // Should really use some sort of condition variable here.
      if (!this->throttle_)
        continue;

      // This works because each task has only one thread.
      while (this->tm_.num_tasks_in_group (grp_id) > this->threads_)
        this->tm_.wait (&wait_time);
    }

  ACE_NOTREACHED(return 0);
}
// Driver thread: spawns <cli_thrno> client workers, waits for them to
// finish, then connects once more and sends the in-band "\011shutdown"
// message so the server stops.
static ACE_THR_FUNC_RETURN worker (void *)
{
  ACE_OS::sleep (3);

  const ACE_TCHAR *msg = ACE_TEXT ("Message from Connection worker");
  ACE_TCHAR buf [BUFSIZ];
  // First element of the buffer carries the payload length (in
  // ACE_TCHARs, including the terminator) for the client workers.
  buf[0] = static_cast<ACE_TCHAR> ((ACE_OS::strlen (msg) + 1));
  ACE_OS::strcpy (&buf[1], msg);

  ACE_INET_Addr addr (rendezvous);

  ACE_DEBUG((LM_DEBUG,
             "(%t) Spawning %d client threads...\n",
             cli_thrno));
  int grp = ACE_Thread_Manager::instance ()->spawn_n (cli_thrno,
                                                      &cli_worker,
                                                      buf);
  ACE_TEST_ASSERT (grp != -1);

  // Wait for every client to finish before shutting the server down.
  ACE_Thread_Manager::instance ()->wait_grp (grp);

  ACE_DEBUG ((LM_DEBUG,
              "(%t) Client threads done; shutting down...\n"));

  ACE_SOCK_Stream stream;
  ACE_SOCK_Connector connect;

  // NOTE(review): on connect failure we still fall through to send_n()
  // on an unconnected stream -- confirm this is acceptable for the test.
  if (connect.connect (stream, addr) == -1)
    ACE_ERROR ((LM_ERROR,
                ACE_TEXT ("(%t) %p Error while connecting\n"),
                ACE_TEXT ("connect")));

  const ACE_TCHAR *sbuf = ACE_TEXT ("\011shutdown");

  ACE_DEBUG ((LM_DEBUG,
              "shutdown stream handle = %x\n",
              stream.get_handle ()));

  if (stream.send_n (sbuf,
                     (ACE_OS::strlen (sbuf) + 1) * sizeof (ACE_TCHAR)) == -1)
    ACE_ERROR ((LM_ERROR,
                ACE_TEXT ("(%t) %p\n"),
                ACE_TEXT ("send_n")));

  ACE_DEBUG ((LM_DEBUG,
              "Sent message of length = %d\n",
              ACE_OS::strlen (sbuf)));

  stream.close ();
  return 0;
}
// Open a TCP connection to a SOYAL access-control device and hand the
// raw socket handle back through <handler>.  Marks the node dsOnline
// on success, dsError for unreachable hosts.  Returns 0 on success,
// -1 on failure.
int SOYALDevice::make_handler(KSGDeviceNode* node,ACE_HANDLE* handler)
{
  if(!node)
    return -1;

  std::string ip = node->GetDevAddr().GetConnect();
  int port = node->GetDevAddr().GetPort();
  ACE_INET_Addr addr(port,ip.c_str());
  ACE_SOCK_Connector conn;
  ACE_SOCK_Stream stream;
  // Per-task timeout bounds the connect attempt.
  ACE_Time_Value tv = KSGGetTaskTimeoutIntval();
  int err_code;

  ACE_DEBUG((LM_TRACE,"开始连接soyal控制器,[%s][%s]",node->get_name().c_str(),ip.c_str()));
  // connect() returns non-zero (-1) on failure.
  if(conn.connect(stream,addr,&tv))
    {
      err_code = ACE_OS::last_error();
      // TODO: propagate the connect error code back to the caller
      if(EWOULDBLOCK == err_code)
        {
          ACE_DEBUG((LM_ERROR,"连接控制器失败"));
        }
      else if(EHOSTUNREACH == err_code || ENETUNREACH == err_code)
        {
          ACE_DEBUG((LM_ERROR,"无法连接设备主机"));
          node->SetState(KSGDeviceNode::dsError);
        }
      else
        {
          ACE_DEBUG((LM_ERROR,"连接主机未知错误![%d][%s]ip[%s]"
                     ,err_code,ACE_OS::strerror(err_code),ip.c_str()));
        }
      // add by cash: release the SOCKET
      // 2007-01-29
      stream.close();
      return -1;
    }

  // Leave the handle in BLOCKing mode.
  // stream.disable(ACE_NONBLOCK);

  // Configure the SO_LINGER option.
  struct linger lg;
  ACE_OS::memset(&lg,0,sizeof lg);
  lg.l_onoff = 1;
  // linger for 3s
  lg.l_linger = 3;
  // NOTE(review): set_option()'s return value is ignored -- confirm a
  // SO_LINGER failure is acceptable here.
  stream.set_option(SOL_SOCKET,SO_LINGER,&lg,sizeof lg);

  node->SetState(KSGDeviceNode::dsOnline);
  *handler = stream.get_handle();
  return 0;
}
// Ensure <new_stream> has an open socket: reuse an already-valid
// handle, otherwise create a fresh SOCK_STREAM socket for the given
// protocol family.  Returns 0 on success, -1 if the open fails.
int ACE_SOCK_Connector::shared_open (ACE_SOCK_Stream &new_stream,
                                     int protocol_family,
                                     int protocol,
                                     int reuse_addr)
{
  ACE_TRACE ("ACE_SOCK_Connector::shared_open");

  // Nothing to do when the caller already supplied an open handle.
  if (new_stream.get_handle () != ACE_INVALID_HANDLE)
    return 0;

  return new_stream.open (SOCK_STREAM,
                          protocol_family,
                          protocol,
                          reuse_addr) == -1 ? -1 : 0;
}
// Client thread body: open <cli_conn_no> successive connections to the
// rendezvous address and push the length-prefixed message in <arg>
// to the server <cli_req_no> times per connection, pausing <req_delay>
// microseconds between requests.
static ACE_THR_FUNC_RETURN cli_worker (void *arg)
{
  ACE_INET_Addr addr (rendezvous);
  ACE_SOCK_Stream stream;
  ACE_SOCK_Connector connect;
  ACE_Time_Value delay (0, req_delay);

  // The first ACE_TCHAR of the buffer carries the payload length.
  size_t msg_len = * reinterpret_cast<ACE_TCHAR *> (arg);

  for (size_t conn = 0; conn < cli_conn_no; ++conn)
    {
      if (connect.connect (stream, addr) < 0)
        {
          ACE_ERROR ((LM_ERROR,
                      "(%t) %p\n",
                      "connect"));
          continue;
        }

      for (size_t req = 0; req < cli_req_no; ++req)
        {
          ACE_DEBUG ((LM_DEBUG,
                      "(%t) conn_worker handle 0x%x, req %d\n",
                      stream.get_handle (),
                      req + 1));

          if (stream.send_n (arg,
                             (msg_len + 1) * sizeof (ACE_TCHAR)) == -1)
            {
              ACE_ERROR ((LM_ERROR,
                          "(%t) %p\n",
                          "send_n"));
              continue;
            }

          ACE_OS::sleep (delay);
        }

      stream.close ();
    }

  return 0;
}
// Push the length-prefixed message in <arg> to the server at addr_:
// <cli_conn_no> connections, <cli_req_no> requests each, with a
// <req_delay>-microsecond pause between requests.
void send_work_to_server(ACE_TCHAR* arg)
{
  ACE_SOCK_Stream stream;
  ACE_SOCK_Connector connect;
  ACE_Time_Value delay (0, req_delay);

  // First ACE_TCHAR of the buffer holds the payload length.
  size_t msg_len = * reinterpret_cast<ACE_TCHAR *> (arg);

  for (size_t conn = 0; conn < cli_conn_no; ++conn)
    {
      if (connect.connect (stream, addr_) < 0)
        {
          ACE_ERROR ((LM_ERROR,
                      ACE_TEXT ("(%t) %p\n"),
                      ACE_TEXT ("connect")));
          continue;
        }

      for (size_t req = 0; req < cli_req_no; ++req)
        {
          ACE_DEBUG ((LM_DEBUG,
                      ACE_TEXT ("Sending work to server on handle 0x%x, req %d\n"),
                      stream.get_handle (),
                      req + 1));

          if (stream.send_n (arg,
                             (msg_len + 1) * sizeof (ACE_TCHAR)) == -1)
            {
              ACE_ERROR ((LM_ERROR,
                          ACE_TEXT ("(%t) %p\n"),
                          ACE_TEXT ("send_n")));
              continue;
            }

          ACE_OS::sleep (delay);
        }

      stream.close ();
    }
}
// Legacy-cast variant of shared_connect_start: binds the stream to
// <local_sap> unless the wildcard address was passed.
// NOTE(review): this block appears truncated -- the timeout /
// non-blocking handling and the final return present in the other
// shared_connect_start overload are missing here; confirm against the
// original source.
int ACE_SOCK_Connector::shared_connect_start (ACE_SOCK_Stream &new_stream,
                                              const ACE_Time_Value *timeout,
                                              const ACE_Addr &local_sap)
{
  ACE_TRACE ("ACE_SOCK_Connector::shared_connect_start");

  if (local_sap != ACE_Addr::sap_any)
    {
      // ACE_reinterpret_cast is the old-style ACE casting macro.
      sockaddr *laddr = ACE_reinterpret_cast (sockaddr *,
                                              local_sap.get_addr ());
      int size = local_sap.get_size ();

      if (ACE_OS::bind (new_stream.get_handle (),
                        laddr,
                        size) == -1)
        {
          // Save/restore errno.
          ACE_Errno_Guard error (errno);
          new_stream.close ();
          return -1;
        }
    }
void shut_down() { ACE_SOCK_Stream stream; ACE_SOCK_Connector connect; if (connect.connect (stream, addr_) == -1) ACE_ERROR ((LM_ERROR, ACE_TEXT ("(%t) %p Error while connecting\n"), ACE_TEXT ("connect"))); const ACE_TCHAR *sbuf = ACE_TEXT ("\011shutdown"); ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("shutdown stream handle = %x\n"), stream.get_handle ())); if (stream.send_n (sbuf, (ACE_OS::strlen (sbuf) + 1) * sizeof (ACE_TCHAR)) == -1) ACE_ERROR ((LM_ERROR, ACE_TEXT ("(%t) %p\n"), ACE_TEXT ("send_n"))); stream.close (); }
int ACEEngine::AddConnector(int connectorIndex, char* szIP, unsigned short port) { ACE_SOCK_Stream* stream = new ACE_SOCK_Stream(); ACE_INET_Addr connectAddr(port, szIP); ACE_SOCK_Connector connector; int result = connector.connect(*stream, connectAddr); if (-1 == result) return -1; _SessionDesc sessionDesc; sessionDesc.identifier = connectorIndex; sessionDesc.sessionType = SESSION_TYPE::SESSION_CONNECTOR; ProactorService* pService = new ProactorService(); pService->SetOwner(this); pService->SetSessionDesc(sessionDesc); ACE_Message_Block mb; pService->open(stream->get_handle(), mb); delete stream; stream = NULL; return pService->GetSerial(); }
// Create a bidirectional "pipe" in this->handles_[0] (read side) and
// this->handles_[1] (write side).  Three compile-time strategies:
// a loopback TCP connection (platforms lacking socketpair()), a
// STREAM pipe, or a UNIX-domain socketpair.  <buffer_size> sizes the
// socket buffers where the platform allows it.  Returns 0 on success,
// -1 on failure.
int ACE_Pipe::open (int buffer_size)
{
  ACE_TRACE ("ACE_Pipe::open");

#if defined (ACE_LACKS_SOCKETPAIR)
  ACE_INET_Addr my_addr;
  ACE_SOCK_Acceptor acceptor;
  ACE_SOCK_Connector connector;
  ACE_SOCK_Stream reader;
  ACE_SOCK_Stream writer;
  int result = 0;
# if defined (ACE_WIN32)
  ACE_INET_Addr local_any (static_cast<u_short> (0), ACE_LOCALHOST);
# else
  ACE_Addr local_any = ACE_Addr::sap_any;
# endif /* ACE_WIN32 */

  // Bind listener to any port and then find out what the port was.
  if (acceptor.open (local_any) == -1
      || acceptor.get_local_addr (my_addr) == -1)
    result = -1;
  else
    {
      ACE_INET_Addr sv_addr (my_addr.get_port_number (),
                             ACE_LOCALHOST);

      // Establish a connection within the same process.
      if (connector.connect (writer, sv_addr) == -1)
        result = -1;
      else if (acceptor.accept (reader) == -1)
        {
          writer.close ();
          result = -1;
        }
    }

  // Close down the acceptor endpoint since we don't need it anymore.
  acceptor.close ();
  if (result == -1)
    return -1;

  this->handles_[0] = reader.get_handle ();
  this->handles_[1] = writer.get_handle ();

# if !defined (ACE_LACKS_TCP_NODELAY)
  int one = 1;

  // Make sure that the TCP stack doesn't try to buffer small writes.
  // Since this communication is purely local to the host it doesn't
  // affect network performance.
  if (writer.set_option (ACE_IPPROTO_TCP,
                         TCP_NODELAY,
                         &one,
                         sizeof one) == -1)
    {
      this->close ();
      return -1;
    }
# endif /* ! ACE_LACKS_TCP_NODELAY */

# if defined (ACE_LACKS_SO_RCVBUF) && defined (ACE_LACKS_SO_SNDBUF)
  ACE_UNUSED_ARG (buffer_size);
# endif

# if !defined (ACE_LACKS_SO_RCVBUF)
  // ENOTSUP is tolerated: some stacks refuse buffer resizing.
  if (reader.set_option (SOL_SOCKET,
                         SO_RCVBUF,
                         reinterpret_cast <void *> (&buffer_size),
                         sizeof (buffer_size)) == -1
      && errno != ENOTSUP)
    {
      this->close ();
      return -1;
    }
# endif /* !ACE_LACKS_SO_RCVBUF */
# if !defined (ACE_LACKS_SO_SNDBUF)
  if (writer.set_option (SOL_SOCKET,
                         SO_SNDBUF,
                         reinterpret_cast <void *> (&buffer_size),
                         sizeof (buffer_size)) == -1
      && errno != ENOTSUP)
    {
      this->close ();
      return -1;
    }
# endif /* !ACE_LACKS_SO_SNDBUF */

#elif defined (ACE_HAS_STREAM_PIPES) || defined (__QNX__)
  ACE_UNUSED_ARG (buffer_size);
  if (ACE_OS::pipe (this->handles_) == -1)
    ACE_ERROR_RETURN ((LM_ERROR,
                       ACE_TEXT ("%p\n"),
                       ACE_TEXT ("pipe")),
                      -1);

#if !defined(__QNX__)
  int arg = RMSGN;

  // Enable "msg no discard" mode, which ensures that record
  // boundaries are maintained when messages are sent and received.
  if (ACE_OS::ioctl (this->handles_[0],
                     I_SRDOPT,
                     (void *) arg) == -1
      || ACE_OS::ioctl (this->handles_[1],
                        I_SRDOPT,
                        (void *) arg) == -1)
    {
      this->close ();
      ACE_ERROR_RETURN ((LM_ERROR,
                         ACE_TEXT ("%p\n"),
                         ACE_TEXT ("ioctl")),
                        -1);
    }
#endif /* __QNX__ */

#else /* ! ACE_LACKS_SOCKETPAIR && ! ACE_HAS_STREAM_PIPES */
  if (ACE_OS::socketpair (AF_UNIX,
                          SOCK_STREAM,
                          0,
                          this->handles_) == -1)
    ACE_ERROR_RETURN ((LM_ERROR,
                       ACE_TEXT ("%p\n"),
                       ACE_TEXT ("socketpair")),
                      -1);
# if defined (ACE_LACKS_SO_SNDBUF) && defined (ACE_LACKS_SO_RCVBUF)
  ACE_UNUSED_ARG (buffer_size);
# endif
# if !defined (ACE_LACKS_SO_RCVBUF)
  // ENOTSUP is tolerated, as above.
  if (ACE_OS::setsockopt (this->handles_[0],
                          SOL_SOCKET,
                          SO_RCVBUF,
                          reinterpret_cast <const char *> (&buffer_size),
                          sizeof (buffer_size)) == -1
      && errno != ENOTSUP)
    {
      this->close ();
      return -1;
    }
# endif
# if !defined (ACE_LACKS_SO_SNDBUF)
  if (ACE_OS::setsockopt (this->handles_[1],
                          SOL_SOCKET,
                          SO_SNDBUF,
                          reinterpret_cast <const char *> (&buffer_size),
                          sizeof (buffer_size)) == -1
      && errno != ENOTSUP)
    {
      this->close ();
      return -1;
    }
# endif /* ! ACE_LACKS_SO_SNDBUF */
# if defined (ACE_OPENVMS) && !defined (ACE_LACKS_TCP_NODELAY)
  int one = 1;

  // OpenVMS implements socketpair(AF_UNIX...) by returning AF_INET sockets.
  // Since these are plagued by Nagle as any other INET socket we need to set
  // TCP_NODELAY on the write handle.
  if (ACE_OS::setsockopt (this->handles_[1],
                          ACE_IPPROTO_TCP,
                          TCP_NODELAY,
                          reinterpret_cast <const char *> (&one),
                          sizeof (one)) == -1)
    {
      this->close ();
      return -1;
    }
# endif /* ACE_OPENVMS && !ACE_LACKS_TCP_NODELAY */
#endif /* ! ACE_LACKS_SOCKETPAIR && ! ACE_HAS_STREAM_PIPES */

  // Point both the read and write HANDLES to the appropriate socket
  // HANDLEs.

  return 0;
}
// Test a non-blocking connect to a host:port that is expected to
// accept (echo, or a web server on Win32 localhost).  Returns 0 when
// the connect (or the subsequent complete()) behaves as expected,
// -1 otherwise.
static int succeed_nonblocking (void)
{
  ACE_TCHAR test_host[MAXHOSTNAMELEN], test_addr[MAXHOSTNAMELEN + 8];
  int status;
  ACE_INET_Addr echo_server;
  ACE_SOCK_Connector con;
  ACE_SOCK_Stream sock;
  // Zero timeout => pure non-blocking connect.
  ACE_Time_Value nonblock (0, 0);
  u_short test_port = 7; // Echo

  find_another_host (test_host);
  if (ACE_OS::strcmp (ACE_TEXT ("localhost"), test_host) == 0)
    {
#if defined (ACE_WIN32)
      test_port = 80; // Echo not available on Win32; try web server
#endif /* ACE_WIN32 */
    }
  if (echo_server.set (test_port, test_host) == -1)
    {
      ACE_ERROR ((LM_ERROR,
                  ACE_TEXT ("Host lookup for %s %p\n"),
                  test_host,
                  ACE_TEXT ("failed")));
      return -1;
    }
  echo_server.addr_to_string (test_addr, MAXHOSTNAMELEN + 8);
  ACE_DEBUG ((LM_DEBUG,
              ACE_TEXT ("Testing to host \"%s\", port %d (%s)\n"),
              test_host, test_port, test_addr));
  status = con.connect (sock, echo_server, &nonblock);

  // Need to test the call to 'complete' really.
  if (status == 0 || (status == -1 && errno != EWOULDBLOCK))
    {
      ACE_DEBUG((LM_WARNING,
                 ACE_TEXT ("Immediate success/fail; test not completed\n")));
      status = 0;
    }
  else
    {
      if (sock.get_handle () != ACE_INVALID_HANDLE)
        {
          status = con.complete (sock);
        }
      if (status == -1)
        {
          // Reset the status _before_ doing the printout, in case the
          // printout overwrites errno.
          if (errno == ECONNREFUSED)
            {
              status = 0;
              ACE_DEBUG ((LM_DEBUG,
                          ACE_TEXT ("Should succeed, but refused: ok\n")));
            }
          else
            {
              ACE_ERROR ((LM_ERROR,
                          ACE_TEXT("Errno <%d>: %p\n"),
                          ACE_ERRNO_GET,
                          ACE_TEXT("connect should succeed, but")));
            }
        }
      else
        ACE_DEBUG((LM_DEBUG,
                   ACE_TEXT("Connect which should succeed, did\n")));
    }

  // Just in case.
  sock.close ();
  return status;
}
// Test a non-blocking connect to a port where nothing is listening;
// the test passes (returns 0) when connect()/complete() fail the way
// they should, -1 otherwise.
static int fail_no_listener_nonblocking (void)
{
  ACE_TCHAR test_host[MAXHOSTNAMELEN], test_addr[MAXHOSTNAMELEN + 8];
  int status;
  ACE_INET_Addr nobody_home;
  ACE_SOCK_Connector con;
  ACE_SOCK_Stream sock;
  // Zero timeout => pure non-blocking connect.
  ACE_Time_Value nonblock (0, 0);

  find_another_host (test_host);
  // Port 42000 is assumed to have no listener.
  if (nobody_home.set ((u_short) 42000, test_host) == -1)
    {
      ACE_ERROR ((LM_ERROR,
                  ACE_TEXT ("Host lookup for %s %p\n"),
                  test_host,
                  ACE_TEXT ("failed")));
      return -1;
    }
  nobody_home.addr_to_string (test_addr, MAXHOSTNAMELEN + 8);
  ACE_DEBUG ((LM_DEBUG,
              ACE_TEXT ("Testing to host \"%s\" (%s)\n"),
              test_host, test_addr));
  status = con.connect (sock, nobody_home, &nonblock);

  // Need a port that will fail.
  if (status == 0)
    {
      ACE_ERROR ((LM_ERROR,
                  ACE_TEXT ("Connect which should fail didn't\n")));
      status = -1;
    }
  // On some systems, a failed connect to localhost will return
  // ECONNREFUSED or ENETUNREACH directly, instead of
  // EWOULDBLOCK. That is also fine.
  else if (errno == EWOULDBLOCK
           || errno == ECONNREFUSED
           || errno == ENETUNREACH)
    {
      if (sock.get_handle () != ACE_INVALID_HANDLE)
        status = con.complete (sock);
      if (status != -1)
        {
          ACE_ERROR ((LM_ERROR,
                      ACE_TEXT ("Connect which should fail didn't\n")));
          status = -1;
        }
      else
        {
          ACE_DEBUG ((LM_DEBUG,
                      ACE_TEXT ("%p\n"),
                      ACE_TEXT ("Proper fail")));
          status = 0;
        }
    }
  else
    {
      ACE_DEBUG ((LM_WARNING,
                  ACE_TEXT ("Test not executed fully; ")
                  ACE_TEXT ("expected EWOULDBLOCK, %p (%d)\n"),
                  ACE_TEXT ("not"),
                  ACE_ERRNO_GET));
      status = -1;
    }

  // Just in case.
  sock.close ();
  return status;
}
// Reactor callback.  When <h> is the listening handle, accept a new
// connection into one of the two channel slots; otherwise locate the
// matching channel, let it consume its preamble via pre_recv(), and
// once that succeeds promote it to a Stream/Session with a handler.
int Accept_Handler::handle_input (ACE_HANDLE h)
{
  ACE::HTBP::Channel **ch = 0;
  if (h == acceptor_.get_handle())
    {
      // NOTE(review): <sock> is heap-allocated and never deleted here;
      // presumably ownership passes to the Channel -- confirm.
      ACE_SOCK_Stream *sock = new ACE_SOCK_Stream;
      acceptor_.accept(*sock);
      // Use the first free slot; at most two channels are tracked.
      ch = channels_[0] == 0 ? &channels_[0] :& channels_[1];
      *ch = new ACE::HTBP::Channel(*sock);
      this->reactor()->register_handler (sock->get_handle(),
                                         this,
                                         ACE_Event_Handler::READ_MASK);
      return 0;
    }

  // Find the channel that owns this handle.
  for (int i = 0; i < 2; i++)
    if (channels_[i] && channels_[i]->get_handle() == h)
      {
        ch = &channels_[i];
        break;
      }
  if (ch == 0)
    ACE_ERROR_RETURN ((LM_ERROR,
                       ACE_TEXT ("(%P|%t) Server Accept_Handler::handle_input, ")
                       ACE_TEXT ("unknown handle %d\n")
                       ,h),
                      -1);

  int result = (*ch)->pre_recv();
  // pre_recv() == 0 appears to signal that the channel's preamble is
  // fully consumed -- confirm against ACE::HTBP::Channel docs.
  if (result == 0)
    {
      // Stop reacting to raw reads on this handle; the channel will
      // notify us through the reactor from now on.
      this->reactor()->remove_handler (h,
                                       ACE_Event_Handler::READ_MASK |
                                       ACE_Event_Handler::DONT_CALL);
      (*ch)->register_notifier(this->reactor());
      ACE::HTBP::Session *session = (*ch)->session();
      // NOTE(review): <stream> is heap-allocated and handed to the
      // Stream_Handler; its lifetime is managed elsewhere -- confirm.
      ACE::HTBP::Stream *stream = new ACE::HTBP::Stream(session);
      ACE_Event_Handler *handler = session->handler();
      if (handler == 0)
        {
          ACE_DEBUG ((LM_DEBUG,
                      ACE_TEXT ("(%P|%t) Server Accept_Handler::handle_input ")
                      ACE_TEXT ("Creating new stream handler for %d\n"),
                      stream->get_handle()));
          Stream_Handler *sh = new Stream_Handler(*stream);
          session->handler (sh);
        }
      else
        ACE_DEBUG ((LM_DEBUG,
                    ACE_TEXT ("(%P|%t) Server Accept_Handler::handle_input ")
                    ACE_TEXT ("There is already a handler for %d\n"),
                    stream->get_handle()));

      if ((*ch)->state() == ACE::HTBP::Channel::Data_Queued)
        {
          ACE_DEBUG ((LM_DEBUG,
                      ACE_TEXT ("(%P|%t) Server Accept_Handler::handle_input\n"),
                      ACE_TEXT ("Issuing notification on handler\n")));
          this->reactor()->notify (session->handler(),
                                   ACE_Event_Handler::READ_MASK);
        }

      // Free this slot for the next accepted connection.
      // NOTE(review): the Channel object itself is not deleted here --
      // confirm it is owned/released elsewhere.
      *ch = 0;
    }
  return 0;
}
// Finish a (possibly non-blocking) connect: <result> is the raw
// ACE_OS::connect() return.  When the connect is still in progress and
// a timeout was supplied, either poll (zero timeout) or wait via
// complete().  On success the stream is returned to blocking mode; on
// hard failure it is closed.  Returns 0 or -1 with errno set.
int ACE_SOCK_Connector::shared_connect_finish (ACE_SOCK_Stream &new_stream,
                                               const ACE_Time_Value *timeout,
                                               int result)
{
  ACE_TRACE ("ACE_SOCK_Connector::shared_connect_finish");
  // Save/restore errno.
  ACE_Errno_Guard error (errno);

  if (result == -1 && timeout != 0)
    {
      // Check whether the connection is in progress.
      if (error == EINPROGRESS || error == EWOULDBLOCK)
        {
          // This expression checks if we were polling.
          if (timeout->sec () == 0
              && timeout->usec () == 0)
            {
#if defined(ACE_WIN32)
              // In order to detect when the socket that has been
              // bound to is in TIME_WAIT we need to do the connect
              // (which will always return EWOULDBLOCK) and then do an
              // ACE::handle_timed_complete() (with timeout==0,
              // i.e. poll). This will do a select() on the handle
              // which will immediately return with the handle in an
              // error state. The error code is then retrieved with
              // getsockopt(). Good sockets however will return from
              // the select() with ETIME - in this case return
              // EWOULDBLOCK so the wait strategy can complete the
              // connection.
              if(ACE::handle_timed_complete (new_stream.get_handle (),
                                             timeout) == ACE_INVALID_HANDLE)
                {
                  int const tmp = errno;
                  if (tmp != ETIME)
                    {
                      error = tmp;
                    }
                  else
                    error = EWOULDBLOCK;
                }
              else
                result = 0;
#else /* ACE_WIN32 */
              error = EWOULDBLOCK;
#endif /* ACE_WIN32 */
            }
          // Wait synchronously using timeout.
          else if (this->complete (new_stream,
                                   0,
                                   timeout) == -1)
            error = errno;
          else
            return 0;
        }
    }

  // EISCONN is treated specially since this routine may be used to
  // check if we are already connected.
  if (result != -1 || error == EISCONN)
    // Start out with non-blocking disabled on the <new_stream>.
    new_stream.disable (ACE_NONBLOCK);
  else if (!(error == EWOULDBLOCK || error == ETIMEDOUT))
    new_stream.close ();

  return result;
}