error_code asio_network_provider::start(rpc_channel channel, int port, bool client_only)
{
    if (_acceptor != nullptr)
        return ERR_SERVICE_ALREADY_RUNNING;

    dassert(channel == RPC_CHANNEL_TCP || channel == RPC_CHANNEL_UDP,
            "invalid given channel %s", channel.to_string());

    _address = end_point(boost::asio::ip::host_name().c_str(), port);

    if (!client_only)
    {
        auto v4_addr = boost::asio::ip::address_v4::any(); //(ntohl(_address.ip));
        ::boost::asio::ip::tcp::endpoint ep(v4_addr, _address.port);

        try
        {
            _acceptor.reset(new boost::asio::ip::tcp::acceptor(_io_service, ep, true));
            do_accept();
        }
        catch (boost::system::system_error& err)
        {
            printf("boost asio listen on port %d failed, err: %s\n", port, err.what());
            return ERR_ADDRESS_ALREADY_USED;
        }
    }

    return ERR_OK;
}
server::server(const std::string& address, const std::string& port,
    const std::string& doc_root, std::size_t pool_size)
  : io_service_pool_(pool_size),
    signals_(io_service_pool_.get_io_service()),
    acceptor_(io_service_pool_.get_io_service()),
    connection_manager_(),
    request_handler_(doc_root)
{
  signals_.add(SIGINT);
  signals_.add(SIGTERM);
#if defined(SIGQUIT)
  signals_.add(SIGQUIT);
#endif // defined(SIGQUIT)

  do_await_stop();

  boost::asio::ip::tcp::resolver resolver(acceptor_.get_io_service());
  boost::asio::ip::tcp::endpoint endpoint = *resolver.resolve({ address, port });
  acceptor_.open(endpoint.protocol());
  acceptor_.set_option(boost::asio::ip::tcp::acceptor::reuse_address(1));
  acceptor_.bind(endpoint);
  acceptor_.listen();

  do_accept();
}
/// Construct the server to listen on the specified TCP address and port, and
/// serve up files from the given directory.
explicit server(const std::string& address, const std::string& port,
    const request_handler_type& handler)
  : io_service_(),
    signals_(io_service_),
    acceptor_(io_service_),
    connection_manager_(),
    socket_(io_service_),
    request_handler_(handler)
{
  // Register to handle the signals that indicate when the server should exit.
  // It is safe to register for the same signal multiple times in a program,
  // provided all registration for the specified signal is made through Asio.
  signals_.add(SIGINT);
  signals_.add(SIGTERM);
#if defined(SIGQUIT)
  signals_.add(SIGQUIT);
#endif // defined(SIGQUIT)

  do_await_stop();

  // Open the acceptor with the option to reuse the address (i.e. SO_REUSEADDR).
  boost::asio::ip::tcp::resolver resolver(io_service_);
  boost::asio::ip::tcp::endpoint endpoint = *resolver.resolve({address, port});
  acceptor_.open(endpoint.protocol());
  acceptor_.set_option(boost::asio::ip::tcp::acceptor::reuse_address(true));
  acceptor_.bind(endpoint);
  acceptor_.listen();

  do_accept();
}
explicit
ws_echo_server(
    std::ostream& log,
    kind k = kind::sync)
    : log_(log)
    , work_(ioc_.get_executor())
    , ts_(ioc_)
    , ws_(ts_)
{
    beast::websocket::permessage_deflate pmd;
    pmd.server_enable = true;
    pmd.server_max_window_bits = 9;
    pmd.compLevel = 1;
    ws_.set_option(pmd);

    switch(k)
    {
    case kind::sync:
        t_ = std::thread{[&]{ do_sync(); }};
        break;

    case kind::async:
        t_ = std::thread{[&]{ ioc_.run(); }};
        do_accept();
        break;

    case kind::async_client:
        t_ = std::thread{[&]{ ioc_.run(); }};
        break;
    }
}
server::server(boost::asio::io_service& io_service, short port, thread_pool& pool)
    : _acceptor(io_service, boost::asio::ip::tcp::endpoint(boost::asio::ip::tcp::v4(), port))
    , _socket(io_service)
    , _pool(pool)
{
    do_accept();
}
void HttpServer::asyncAccept(boost::system::error_code ec)
{
    if (!_acceptor.is_open())
    {
        std::cout << " Acceptor is not open to accept any new connection. " << std::endl;
        return;
    }

    if (!ec)
    {
        // Create a new HttpClientConnection using the accepted socket.
        std::cout << " New Client connection created. " << std::endl;
        HttpClientConnection::SmartPtr clientConn(
            new HttpClientConnection(_socket, _clientManager, _reqHandler));
        _clientManager->start(clientConn);
        _socket = boost::shared_ptr<boost::asio::ip::tcp::socket>(
            new boost::asio::ip::tcp::socket(_ioService));
    }
    else
    {
        std::cout << " Error while accepting : " << ec.message() << std::endl;
    }

    // Ready to accept a new request.
    do_accept();
}
server::server(short port)
    : io_service_()
    , acceptor_(io_service_, tcp::endpoint(tcp::v4(), port))
    , socket_(io_service_)
{
    do_accept();
}
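// A minimal sketch (not part of the original snippet) of the do_accept() loop
// that typically pairs with the constructor above, following the stock
// Boost.Asio echo-server pattern; the `session` class wrapping the accepted
// socket is assumed here for illustration.
void server::do_accept()
{
    acceptor_.async_accept(socket_,
        [this](boost::system::error_code ec)
        {
            if (!ec)
                std::make_shared<session>(std::move(socket_))->start();

            // Re-arm the accept regardless of the result of this attempt.
            do_accept();
        });
}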
int do_server(int port, int *ret, int (*cb)(), char *context)
{
    int sock;
    char *name;
    int accept_socket;
    int i;

    if (!init_server(&accept_socket, port))
        return (0);

    if (ret != NULL)
    {
        *ret = accept_socket;
        /* return(1); */
    }

    for (;;)
    {
        if (do_accept(accept_socket, &sock, &name) == 0)
        {
            SHUTDOWN(accept_socket);
            return (0);
        }
        i = (*cb)(name, sock, context);
        if (name != NULL)
            OPENSSL_free(name);
        SHUTDOWN2(sock);
        if (i < 0)
        {
            SHUTDOWN2(accept_socket);
            return (i);
        }
    }
}
static void
set_active (GSocketService *service, gboolean active)
{
  gboolean notify = FALSE;

  active = !!active;

  G_LOCK (active);

  if (active != service->priv->active)
    {
      service->priv->active = active;
      notify = TRUE;

      if (active)
        {
          if (service->priv->outstanding_accept)
            g_cancellable_cancel (service->priv->cancellable);
          else
            do_accept (service);
        }
      else
        {
          if (service->priv->outstanding_accept)
            g_cancellable_cancel (service->priv->cancellable);
        }
    }

  G_UNLOCK (active);

  if (notify)
    g_object_notify (G_OBJECT (service), "active");
}
// Start accepting incoming connections
void run()
{
    if (!acceptor_.is_open())
        return;
    do_accept();
}
server::server(/*const std::string& address, */const std::string& port, const std::string& doc_root)
    : io_service_(),
      signals_(io_service_),
      acceptor_(io_service_),
      connection_manager_(),  // constructs the object that manages connections
      socket_(io_service_),
      request_handler_(doc_root)
{
    // Register to handle the signals that indicate when the server should exit.
    // It is safe to register for the same signal multiple times in a program,
    // provided all registration for the specified signal is made through Asio.
    signals_.add(SIGINT);   // register the signals to exit on
    signals_.add(SIGTERM);
#if defined(SIGQUIT)
    signals_.add(SIGQUIT);
#endif // defined(SIGQUIT)

    // Register the handler to run on exit, in the spirit of signals and slots.
    do_await_stop();

    // Open the acceptor with the option to reuse the address (i.e. SO_REUSEADDR),
    // then bind the port and listen on the socket.
    boost::asio::ip::tcp::resolver resolver(io_service_);
    //boost::asio::ip::tcp::endpoint endpoint = *resolver.resolve({address, port});
    boost::asio::ip::tcp::endpoint endpoint = *resolver.resolve({ boost::asio::ip::tcp::v4(), port });
    acceptor_.open(endpoint.protocol());
    acceptor_.set_option(boost::asio::ip::tcp::acceptor::reuse_address(true));
    acceptor_.bind(endpoint);
    acceptor_.listen();

    std::cout << "service started" << std::endl;

    // Accepted sockets are handed over to connection_manager_.
    do_accept();
}
void mloop()
{
    fd_set rfds;
    fd_set wfds;

    while (loop_flag) {
        FD_ZERO(&rfds);
        FD_ZERO(&wfds);
        rfdset(moption.mcsocket, &rfds);
        wfdset(moption.mcsocket, &wfds);
        cfdset(moption.comm, &rfds, &wfds);

        if (do_select(&rfds, &wfds)) {
            do_pong();
            do_free();
        } else {
            do_pong();
            do_recv();
            do_send();
            do_accept(moption.comm, &rfds);
            do_comexe(moption.comm, &rfds);
            do_exechk(moption.comm);
        }

        if (log_level != moption.loglevel) {
            lprintf(0, "%s: loglevel change %d to %d\n", __func__, moption.loglevel, log_level);
            moption.loglevel = log_level;
        }
    }
}
static coro_sock* sock_accept_inner(coro_sock *sock, struct sockaddr *addr, socklen_t *len)
{
    event_base *base = __g_coroutine_ctx->base;
    uthread_t cur = coro_current_uthread();
    struct event *ev = (struct event *)coro_uthread_get_data(cur);
    if (ev == NULL) {
        ev = event_new(base, sock->sock, EV_READ | EV_PERSIST,
                       sock_raw_accept_event, (void *)__g_coroutine_ctx);
        coro_uthread_set_data(cur, ev, (free_data)event_free);
    }
    event_add(ev, NULL);
    // If the accept was initiated from the main thread, resume the IO coroutine
    // so it can process events for us and deliver the accept notification.
    // If the accept was initiated from a coroutine, switch back to that coroutine.
    // See the switching rules in sock_accept_event_cb: the WAIT_ACCEPT state can
    // only be cleared by sock_raw_accept_event, so control always returns here
    // from sock_raw_accept_event, and the value of resume/yield is therefore
    // the socket fd returned by sock_raw_accept_event.
    int ret = coro_schedule_uthread(cur, 0);
    event_del(ev);
    int s = sock->sock;
    coro_sock *client = NULL;
    if (ret >= 0) {
        int c = do_accept(s, addr, len);
        if (c >= 0) {
            bufferevent *bev = bufferevent_socket_new(base, c, BEV_OPT_CLOSE_ON_FREE);
            client = sock_assign(c, bev);
            bufferevent_setcb(bev, sock_buffer_read_event, sock_buffer_write_event,
                              sock_buffer_error_event, (void *)client);
            bufferevent_enable(bev, EV_READ | EV_WRITE);
        }
    }
    return client;
}
void handle_trigger_io()
{
    struct msg_header hdr;
    struct trigger_service_params params;
    int ret;
    int client_fd;

    client_fd = do_accept(trigger_fd);
    if (client_fd < 0)
        return;
    hdr.len = sizeof(params);
    ret = read(client_fd, &params, sizeof(params));
    if (ret == sizeof(params)) {
        hdr.type = MSG_TRIGGER_SERVICE;
        snprintf(params.request_id.ident, sizeof(params.request_id),
                 "SOCKET%d", client_fd);
        if (libvchan_send(ctrl_vchan, &hdr, sizeof(hdr)) < 0)
            handle_vchan_error("write hdr");
        if (libvchan_send(ctrl_vchan, &params, sizeof(params)) < 0)
            handle_vchan_error("write params");
    }
    if (ret <= 0) {
        close(client_fd);
    }
    /* on success, do not close client_fd - we'll need it to send the connection
     * details later (when dom0 accepts the request) */
}
bool pipeline_acceptor::listen(const tcode::io::ip::address& bind_addr)
{
    if (_acceptor.listen(bind_addr)) {
        do_accept();
        return true;
    }
    return false;
}
int net_loop(int parent)
{
    int nfds;
    struct epoll_event ev;

    nfds = epoll_wait(einfo.epollfd, einfo.events, 100, 100);
    if (nfds < 0)
        return -1;

    for (int loop = 0; loop < nfds; loop++) {
        if (listenfd == einfo.events[loop].data.fd) {
            std::cout << "ready accept isparent:" << parent << std::endl;
            while (!do_accept(listenfd))
                ;
        } else {
            if (einfo.events[loop].events & EPOLLERR) {
                printf("EPOLLERR: client linkdown:fd %u parent %u\n", einfo.events[loop].data.fd, parent);
                close(einfo.events[loop].data.fd);
                continue;
            }
            if (einfo.events[loop].events & EPOLLHUP) {
                printf("EPOLLHUP: client linkdown:fd %u parent %u\n", einfo.events[loop].data.fd, parent);
                close(einfo.events[loop].data.fd);
                continue;
            }
            if (einfo.events[loop].events & EPOLLPRI) {
                printf("EPOLLPRI: client linkdown:fd %u parent %u\n", einfo.events[loop].data.fd, parent);
                close(einfo.events[loop].data.fd);
                continue;
            }
            if (einfo.events[loop].events & EPOLLIN) {
                printf("\nEPOLLIN start:fd=%u parent %u\n", einfo.events[loop].data.fd, parent);
                bool tag = true;
                if (parent) {
                    recv_from_cli(einfo.events[loop].data.fd, tag);
                } else {
                    read_from_parent(einfo.events[loop].data.fd, tag);
                }
                if (tag) {
                    ev.data.fd = einfo.events[loop].data.fd;
                    ev.events = EPOLLOUT | EPOLLET;
                    if (epoll_ctl(einfo.epollfd, EPOLL_CTL_MOD, einfo.events[loop].data.fd, &ev) == -1) {
                        std::cerr << "epoll_ctl2";
                        return -1;
                    }
                }
                printf("EPOLLIN end:\n");
            } else if (einfo.events[loop].events & EPOLLOUT) {
                printf("EPOLLOUT start: fd:%u parent:%u\n", einfo.events[loop].data.fd, parent);
                write(einfo.events[loop].data.fd, "nihao", sizeof("nihao"));
                ev.data.fd = einfo.events[loop].data.fd;
                ev.events = EPOLLIN | EPOLLET;
                if (epoll_ctl(einfo.epollfd, EPOLL_CTL_MOD, einfo.events[loop].data.fd, &ev) == -1) {
                    std::cerr << "epoll_ctl3";
                    return -1;
                }
                printf("EPOLLOUT end:\n");
            }
        }
    }
    return 0;
}
void server::do_accept()
{
    try {
        socket_.reset(new boost::asio::ip::tcp::socket(
            io_service_pool_.get_io_service()));
        acceptor_.async_accept(*socket_,
            [this](boost::system::error_code ec)
            {
                if (!acceptor_.is_open()) {
                    return;
                }
                if (!ec) {
                    connection_manager_.start(std::make_shared<connection>(
                        std::move(*socket_), connection_manager_, request_handler_));
                }
                do_accept();
            });
    }
    catch (std::exception& ex) {
        std::cout << "do_accept error:" << ex.what() << std::endl;
        return;
    }
}
/* handle_connection: handle the connected clients
 * @listenfd: the socket used to accept connections
 */
void handle_connection(int listenfd)
{
    /* the number of ready fds returned by epoll_wait */
    int nready, i;

    /* receive buffer */
    buffer_t recvbuf;
    memset(&recvbuf, 0, sizeof(buffer_t));

    /* set the listenfd to non-block */
    //setnonblock(listenfd);

    /* epollfd set to monitor the related events */
    int epollfd;
    if ((epollfd = epoll_create(EPOLL_SIZE)) < 0) {
        perror_exit("epoll create error");
    }

    /* epoll event array */
    struct epoll_event events[EPOLL_EVENTS];

    /* add the listen socket to the epoll set */
    int state = EPOLLIN;
    add_epoll_event(epollfd, listenfd, state);

    while (1) {
        /* obtain the ready sockets from the epoll set */
        if ((nready = epoll_wait(epollfd, events, EPOLL_EVENTS, INFTIM)) < 0) {
            perror_exit("epoll wait error");
        }

        /* traverse the ready sockets */
        for (i = 0; i < nready; ++i) {
            int fd = events[i].data.fd;
            /* listenfd is ready */
            if (fd == listenfd && (events[i].events & EPOLLIN)) {
                do_accept(listenfd, epollfd);
            }
            /* a connected socket is readable */
            else if (events[i].events & EPOLLIN) {
                do_read(fd, epollfd, &recvbuf);
            }
            /* a connected socket is writable: echo the data back */
            else if (events[i].events & EPOLLOUT) {
                do_write(fd, epollfd, &recvbuf);
            }
        }
    }
}
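/* A hedged sketch of the do_accept() helper invoked above; the real helper is
 * not shown in this snippet. It would accept the pending connection and
 * register it with the epoll set via the same add_epoll_event() used for the
 * listening socket. */
static void do_accept(int listenfd, int epollfd)
{
    struct sockaddr_in cliaddr;
    socklen_t clilen = sizeof(cliaddr);

    int connfd = accept(listenfd, (struct sockaddr *)&cliaddr, &clilen);
    if (connfd < 0) {
        perror("accept error");
        return;
    }

    /* monitor the new connection for readable events */
    add_epoll_event(epollfd, connfd, EPOLLIN);
}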
async_asio_echo_serv(short port, uint32_t packet_size = 64,
                     uint32_t pool_size = std::thread::hardware_concurrency())
    : io_service_pool_(pool_size),
      acceptor_(io_service_pool_.get_first_io_service(),
                ip::tcp::endpoint(ip::tcp::v4(), port)),
      packet_size_(packet_size)
{
    do_accept();
}
void server::acceptor_callback(boost::system::error_code ec)
{
    if (!ec) {
        std::make_shared<session>(std::move(_socket), boost::ref(_pool))->start();
    }
    do_accept();
}
int do_server(int port, int type, int *ret,
              int (*cb)(char *hostname, int s, int stype, unsigned char *context),
              unsigned char *context, int naccept)
{
    int sock;
    char *name = NULL;
    int accept_socket = 0;
    int i;

    if (!init_server(&accept_socket, port, type))
        return (0);

    if (ret != NULL)
    {
        *ret = accept_socket;
        /* return(1); */
    }

    for (;;)
    {
        if (type == SOCK_STREAM)
        {
#ifdef OPENSSL_SSL_DEBUG_BROKEN_PROTOCOL
            if (do_accept(accept_socket, &sock, NULL) == 0)
#else
            if (do_accept(accept_socket, &sock, &name) == 0)
#endif
            {
                SHUTDOWN(accept_socket);
                return (0);
            }
        }
        else
            sock = accept_socket;

        i = (*cb)(name, sock, type, context);
        if (name != NULL)
            OPENSSL_free(name);
        if (type == SOCK_STREAM)
            SHUTDOWN2(sock);
        if (naccept != -1)
            naccept--;
        if (i < 0 || naccept == 0)
        {
            SHUTDOWN2(accept_socket);
            return (i);
        }
    }
}
static void do_forward(Connection *Conn, PCStr(lport), PCStr(peername),
                       int shared, int svsock, int priority, int fromC, int toC)
{
    int cls, svs;
    CStr(host, MaxHostNameLen);
    int port;
    CStr(opt, 256);
    CStr(clntname, 1024);
    CStr(servname, 1024);
    CStr(sockname, 1024);

    if (svsock < 0) {
        SockPrintf(toC, "%s %d no socket to accept\r\n", VER, NO_ACCEPT);
        return;
    }
    SockPrintf(toC, "%s %d forwarding start.\r\n", VER, OK_FORWARD1);
    opt[0] = 0;
    Xsscanf(peername, "%[^:]:%d %s", AVStr(host), &port, AVStr(opt));

    for (;;) {
        cls = do_accept(lport, "", shared, priority, fromC, toC);
        if (cls < 0) {
            if (0 < PollIn(fromC, 1)) {
                if (intcom("", fromC, toC, NULL, NULL) == 0)
                    continue;
                else
                    break;
            }
            SockPrintf(toC, "%s %d could not accept at %s\r\n", VER, NO_ACCEPT, lport);
            break;
        }
        getpairName(cls, AVStr(sockname), AVStr(clntname));
        SockPrintf(toC, "%s %d accepted %s %s\r\n", VER, OK_ACCEPT, sockname, clntname);

        svs = Socket1("VSAPdata", NEWSOCK, VStrANYPORT, host, port, 0, NULL, 0);
        if (svs < 0) {
            SockPrintf(toC, "%s %d could not connect to %s\r\n", VER, NO_CONNECT, peername);
            break;
        } else {
            if (strstr(opt, "TRACE")) {
                getpairName(svs, AVStr(sockname), AVStr(servname));
                SockPrintf(cls, "%s %d %s %s connected.\r\n", VER, OK_CONNECT, sockname, servname);
                SockPrintf(svs, "%s %d %s %s accepted.\r\n", VER, OK_ACCEPT, sockname, clntname);
            }
            relay2_cntl(60 * 1000, cls, svs, svs, cls, fromC, toC, (IFUNCP)intcom, 0);
            close(svs);
            SockPrintf(toC, "%s %d relay done.\r\n", VER, OK_RELAYED);
        }
        close(cls);
    }
    SockPrintf(toC, "%s %d forwarding done.\r\n", VER, OK_FORWARD2);
}
HTTPServer& listen(const std::string& address, const std::string& port, HttpsConfig cfg)
{
    boost::asio::spawn(
        io_service_pool_.get_io_service(),
        [this, address, port, cfg](boost::asio::yield_context yield) {
            do_accept(address, port, cfg, yield);
        });
    return *this;
}
void log_server::do_accept()
{
    _acceptor.async_accept(_socket,
        [this](boost::system::error_code ec)
        {
            if (!ec)
                std::make_shared<session>(std::move(_socket), *this, _id++)->start();

            do_accept();
        });
}
server::server(int tcpport, int udpport)
    : m_acceptor(m_io, tcp::endpoint(boost::asio::ip::tcp::v4(), tcpport))
    , m_tcpsocket(m_io)
{
    m_udpserver = boost::make_shared<udpserver>(m_io, udpport);
    m_roomprovider = boost::make_shared<room_provider>(m_io);
    //m_roomprovider->init_rooms();
    do_accept();
}
int main (int argc, char **argv)
{
    int fd_cli = -1, fd_lis = -1, fd_srv = -1;
    int port = 5190;

    fd_lis = do_listen (&port);
    while (1) {
        printf ("Waiting for connections... (login)\n");
        fd_cli = do_accept (fd_lis);
        fd_srv = do_server ("login.icq.com", 5190);
        do_exchange (fd_cli, fd_srv, port, argc < 2);

        printf ("Waiting for connections... (session)\n");
        fd_cli = do_accept (fd_lis);
        fd_srv = do_server (use_host, use_port);
        do_exchange (fd_cli, fd_srv, port, argc < 2);

        fflush (stdout);
    }
    return 0;
}
void server::do_accept()
{
    m_acceptor.async_accept(m_tcpsocket,
        [this](boost::system::error_code ec)
        {
            if (!ec) {
                boost::make_shared<user_session>(std::move(m_tcpsocket), m_roomprovider)->start();
            }
            do_accept();
        });
}
Server::Server(AllConfig all_config)
    : config{std::move(all_config.server)}
    , io_service{}
    , signals{io_service, SIGINT, SIGTERM}
    , endpoint{config.connection_info.socket}
    , acceptor{io_service, endpoint}
    , socket{io_service}
    , accumulator{std::move(all_config.accumulator)}
{
    do_await_stop();
    do_accept();
}
void handle_new_client()
{
    int fd = do_accept(qrexec_daemon_unix_socket_fd);
    if (fd >= MAX_CLIENTS) {
        fprintf(stderr, "too many clients ?\n");
        exit(1);
    }
    clients[fd].state = CLIENT_CMDLINE;
    buffer_init(&clients[fd].buffer);
    if (fd > max_client_fd)
        max_client_fd = fd;
}
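/* A plausible sketch of the do_accept() wrapper used here and in
 * handle_trigger_io() above; the actual qrexec helper is not part of these
 * snippets. It simply wraps accept() and reports failure with -1. */
static int do_accept(int s)
{
    int fd = accept(s, NULL, NULL);
    if (fd < 0)
        perror("accept");
    return fd;
}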
int main(int argc, char *argv[])
{
    /* Read the configuration file; the number of listeners is dynamic. */
    int n_listeners;
    std::vector<int> port_list;
    std::vector<std::string> cmd_list;
    read_config(std::cin, n_listeners, port_list, cmd_list);

    int* listen_sockfds = new int[n_listeners];
    pollfd* pollfds = new pollfd[n_listeners];

    /* Set up SIGCHLD handler with SA_NOCLDWAIT (option 3) */
    setup_sa_nocldwait();

    /* Set up our listening sockets. */
    setup_listeners(listen_sockfds, n_listeners, port_list);

    /* Set up our pollfds. */
    memset(pollfds, 0, n_listeners * sizeof(struct pollfd));
    for (int i = 0; i < n_listeners; i++) {
        pollfds[i].fd = listen_sockfds[i];
        pollfds[i].events = POLLIN;
    }

    /* Loop infinitely, accepting any connections we get. */
    for (;;) {
        /* Call poll() and handle errors. */
        if (poll(pollfds, n_listeners, -1) == -1) {
            if (errno == EINTR) {
                continue;
            } else {
                perror("poll");
                return 1;
            }
        }

        /* Iterate through fds, finding any that are ready. */
        for (int i = 0; i < n_listeners; i++) {
            if (pollfds[i].revents & POLLIN) {
                /* accept and fork the child process */
                do_accept(listen_sockfds[i], cmd_list[i]);
            }
        }
    }

    /* not reached */
    delete[] listen_sockfds;
    delete[] pollfds;
}
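// A hedged sketch of the do_accept() used in the loop above; the original
// helper is not shown. Per the "accept and fork" comment, it would accept the
// connection, fork, and have the child serve the configured command with the
// socket wired to stdin/stdout; the shell-based `cmd` handling here is
// illustrative only.
static void do_accept(int listen_sockfd, const std::string& cmd)
{
    int connfd = accept(listen_sockfd, nullptr, nullptr);
    if (connfd < 0) {
        perror("accept");
        return;
    }

    pid_t pid = fork();
    if (pid == 0) {
        /* child: wire the socket to stdin/stdout and run the command */
        dup2(connfd, STDIN_FILENO);
        dup2(connfd, STDOUT_FILENO);
        close(connfd);
        execl("/bin/sh", "sh", "-c", cmd.c_str(), (char *)nullptr);
        _exit(1); /* only reached if exec fails */
    }

    /* parent: the child owns the connection now */
    close(connfd);
}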