void NetServer::onLeaveUdpGroup( const NetGroupOp& op )
{
    NetGroupMap::iterator i = m_groups.find( op.groupId );

    if ( i == m_groups.end() )
    {
        LOG( FT_WARN, _T("NetServer::onLeaveUdpGroup> %d not found"), op.groupId );
        return;
    }

    TcpConnection* c = m_tcp.FindById( op.connectionId );

    if ( c != 0 )
    {
        c->SetGroup( 0 );
    }

    NetGroup* group = i->second;
    K_ASSERT( group != 0 );

    group->Leave( op.connectionId );

    LOG( FT_DEBUG,
         _T("NetServer::onLeaveUdpGroup> %d left from %d"),
         op.connectionId,
         group->GetId() );
}
TcpConnection* TcpConnector::connect(const TcpSockAddr& addr, int &ret, long timeout)
{
    int sockfd;

    ret = -1;
    sockfd = socket(addr.ai_family, SOCK_STREAM, 0);
    if (sockfd < 0)
        return 0L;

    if (timeout < 0)
        ret = ::connect(sockfd, (struct sockaddr*)&addr.ai_addr, addr.ai_addrlen);
    else
        ret = timeout_connect(sockfd, (struct sockaddr*)&addr.ai_addr, addr.ai_addrlen, timeout);

    if (ret < 0) {
        close(sockfd);
        return 0L;
    } else {
        TcpConnection *tcon;

        tcon = new TcpConnection;
        if (tcon->set_fd(sockfd) < 0) {
            delete tcon;
            return 0L;
        } else {
            return tcon;
        }
    }
}
void SocketConnList::drain()
{
  // First, let all the Connections prepare for draining.
  ConnectionList::drain();

  // This will block until draining is complete.
  KernelBufferDrainer::instance().monitorSockets(DRAINER_CHECK_FREQ);

  // Handle disconnected sockets.
  const map<ConnectionIdentifier, vector<char> > &discn =
    KernelBufferDrainer::instance().getDisconnectedSockets();
  map<ConnectionIdentifier, vector<char> >::const_iterator it;
  for (it = discn.begin(); it != discn.end(); it++) {
    const ConnectionIdentifier &id = it->first;
    TcpConnection *con =
      (TcpConnection *)SocketConnList::instance().getConnection(id);

    JTRACE("recreating disconnected socket") (id);

    // Reading from the socket, and taking the error, resulted in an
    // implicit close(). We will create a new, broken socket that is
    // not closed.
    con->onError();
  }
}
void NetWork::event_loop()
{
    TRACE(__PRETTY_FUNCTION__);

    // The maximum poll interval is 1 second.
    int timeout = 1000;
    IOEvent events[128];

    while (!_stop) {
        memset(events, 0, sizeof(events));
        int n = _sock_event->get_events(timeout, events, 128);
        for (int i = 0; i < n; i++) {
            IOEvent &ev = events[i];
            TcpConnection *conn = ev.conn;

            // Hold a reference while the event handlers run.
            conn->add_ref();
            if (ev._read_ocurr)
                conn->on_read_event();
            if (ev._write_ocurr) {
                conn->on_write_event();
            }
            conn->release();
        }

        // Periodic (timer-driven) processing.
        time_process(time(NULL));
    }
}
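// The event_loop() above drives everything through _sock_event->get_events().
// A minimal sketch of what that call could look like on top of epoll, assuming
// the TcpConnection pointer was stored in epoll_event.data.ptr when the fd was
// registered; the SockEvent class name, the _epoll_fd member, and this whole
// implementation are illustrative, not taken from the original source:
#include <sys/epoll.h>

int SockEvent::get_events(int timeout_ms, IOEvent *events, int max_events)
{
    struct epoll_event evs[128];

    if (max_events > 128)
        max_events = 128;

    int n = epoll_wait(_epoll_fd, evs, max_events, timeout_ms);
    for (int i = 0; i < n; i++) {
        // The registering side is assumed to have stored the TcpConnection*
        // in data.ptr via epoll_ctl(EPOLL_CTL_ADD/MOD).
        events[i].conn = static_cast<TcpConnection*>(evs[i].data.ptr);
        events[i]._read_ocurr = (evs[i].events & (EPOLLIN | EPOLLHUP | EPOLLERR)) != 0;
        events[i]._write_ocurr = (evs[i].events & EPOLLOUT) != 0;
    }

    // Number of ready connections, 0 on timeout, or -1 on error.
    return n;
}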
TcpConnection* TcpAcceptor::accept(const TcpSockAddr& expect, int &ret, long timeout)
{
    TcpConnection* conn;
    TcpSockAddr peer_sock;

    conn = accept(ret, timeout);
    if (!conn)
        return 0L;

    conn->get_remote_addr(peer_sock);

    if (peer_sock.ai_family == expect.ai_family) {
        if (peer_sock.ai_family == AF_INET) {
            // so dirty ~_~
            if (!memcmp(&(((struct sockaddr_in*)&(peer_sock.ai_addr))->sin_addr),
                        &(((struct sockaddr_in*)&(expect.ai_addr))->sin_addr),
                        sizeof(struct in_addr)))
                return conn;
        } else if (peer_sock.ai_family == AF_INET6) {
            // so dirty ~_~
            if (!memcmp(&(((struct sockaddr_in6*)&(peer_sock.ai_addr))->sin6_addr),
                        &(((struct sockaddr_in6*)&(expect.ai_addr))->sin6_addr),
                        sizeof(struct in6_addr)))
                return conn;
        }
    }

    delete conn;
    return 0L;
}
void runTest()
{
    m_strategy = new OpenConnectionStrategy(this);
    m_client_listener = new ClientTcpListener(this);
    m_server_listener = new ServerTcpListener(this);
    m_evmanager = new EventManager();
    m_client = new TcpConnection(IpAddress("127.0.0.1", 9999));
    m_listener = new TcpListener();

    m_listener->listen(9999);
    m_listener->setListener(this);

    m_client->setEventManager(m_evmanager);
    m_client->setListener(m_client_listener);
    m_client->open();

    m_evmanager->registerClient(m_listener);
    m_evmanager->registerClient(m_client);
    m_evmanager->execute();

    delete m_strategy;
    delete m_client_listener;
    delete m_server_listener;
    delete m_client;
    delete m_server;
    delete m_listener;
    delete m_evmanager;
}
int NetWork::getStatus(int handle)
{
    TRACE(__PRETTY_FUNCTION__);

    int status = NET_STATE_INVALID_HANDLE;
    TcpConnection *conn = _online_user.getconn(handle);
    if (conn != NULL) {
        switch (conn->getstate()) {
        case WAIT_CLOSE:
        case CLOSED:
            status = NET_STATE_CLOSED;
            break;
        // Both CONNECTING and CRYPTREGING are reported as "connecting".
        case CONNECTING:
        case CRYPTREGING:
            status = NET_STATE_CONNECTING;
            break;
        case CONNECTED:
            status = NET_STATE_CONNECTED;
            break;
        default:
            break;
        }
        conn->release();
    }
    return status;
}
TcpConnection* TcpConnector::connect(const Address& addr, int& ret, long timeout)
{
    int sockfd;
    struct addrinfo *res;

    res = addr.addr;
    ret = -1;
    if (res == NULL)
        return 0L;

    // Try each resolved address in the addrinfo list until one connects.
    do {
        sockfd = socket(res->ai_family, res->ai_socktype, res->ai_protocol);
        if (sockfd < 0)
            continue;

        if (timeout < 0) {
            if ((ret = ::connect(sockfd, res->ai_addr, res->ai_addrlen)) == 0)
                break;
        } else {
            if ((ret = timeout_connect(sockfd, res->ai_addr, res->ai_addrlen, timeout)) == 0)
                break;
        }

        close(sockfd);
    } while ((res = res->ai_next) != NULL);

    if (res) {
        TcpConnection *tcon;

        tcon = new TcpConnection;
        if (tcon->set_fd(sockfd) < 0) {
            delete tcon;
            return 0L;
        } else {
            return tcon;
        }
    } else {
        return 0L;
    }
}
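// Both TcpConnector::connect() overloads above delegate the bounded wait to a
// timeout_connect() helper that is not included in this collection. A minimal
// sketch of such a helper, assuming the usual non-blocking connect + select +
// SO_ERROR pattern and a timeout expressed in seconds (as the acceptor's
// select() path suggests); this implementation is illustrative, not the
// original one:
#include <sys/socket.h>
#include <sys/select.h>
#include <fcntl.h>
#include <errno.h>
#include <unistd.h>

static int timeout_connect(int sockfd, const struct sockaddr *addr,
                           socklen_t addrlen, long timeout)
{
    // Switch to non-blocking mode so connect() returns immediately.
    int flags = fcntl(sockfd, F_GETFL, 0);
    if (flags < 0 || fcntl(sockfd, F_SETFL, flags | O_NONBLOCK) < 0)
        return -1;

    int rc = ::connect(sockfd, addr, addrlen);
    if (rc < 0 && errno != EINPROGRESS)
        return -1;

    if (rc != 0) {
        // Wait until the socket becomes writable or the timeout expires.
        fd_set wfds;
        FD_ZERO(&wfds);
        FD_SET(sockfd, &wfds);

        struct timeval tv;
        tv.tv_sec = timeout;
        tv.tv_usec = 0;

        if (select(sockfd + 1, NULL, &wfds, NULL, &tv) <= 0)
            return -1; // timeout or select() error

        // The attempt finished; check whether it actually succeeded.
        int err = 0;
        socklen_t len = sizeof(err);
        if (getsockopt(sockfd, SOL_SOCKET, SO_ERROR, &err, &len) < 0 || err != 0)
            return -1;
    }

    // Restore the original (blocking) mode for the caller.
    fcntl(sockfd, F_SETFL, flags);
    return 0;
}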
void SocketConnList::scanForPreExisting()
{
  // FIXME: Detect stdin/out/err fds to detect duplicates.
  vector<int> fds = jalib::Filesystem::ListOpenFds();
  for (size_t i = 0; i < fds.size(); ++i) {
    int fd = fds[i];
    if (!Util::isValidFd(fd)) continue;
    if (dmtcp_is_protected_fd(fd)) continue;

    string device = jalib::Filesystem::GetDeviceName(fd);

    JTRACE("scanning pre-existing device") (fd) (device);

    if (device == jalib::Filesystem::GetControllingTerm()) {
    } else if (dmtcp_is_bq_file && dmtcp_is_bq_file(device.c_str())) {
    } else if (fd <= 2) {
    } else if (Util::strStartsWith(device, "/")) {
    } else {
      JNOTE("found pre-existing socket... will not be restored") (fd) (device);
      TcpConnection* con = new TcpConnection(0, 0, 0);
      con->markPreExisting();
      add(fd, con);
    }
  }
}
void *thread_fun(void *arg)
{
    TcpConnection *server = (TcpConnection*)arg;
    server->event_loop();
    return NULL;
}
err_t TcpServer::onAccept(tcp_pcb *clientTcp, err_t err)
{
    // Anti DDoS :-)
    if (system_get_free_heap_size() < 6500) {
        debugf("\r\n\r\nCONNECTION DROPPED\r\n\t(%d)\r\n\r\n", system_get_free_heap_size());
        return ERR_MEM;
    }

#ifdef NETWORK_DEBUG
    debugf("onAccept state: %d K=%d", err, totalConnections);
    list_mem();
#endif

    if (err != ERR_OK) {
        //closeTcpConnection(clientTcp, NULL);
        return err;
    }

    TcpConnection* client = createClient(clientTcp);
    if (client == NULL)
        return ERR_MEM;
    client->setTimeOut(timeOut);
    onClient((TcpClient*)client);

    return ERR_OK;
}
bool ListenerWorker::onUpdateTimer(void* args)
{
    UInt32 currTime = TimeUtil::getTimeSec();
    if (currTime - mLastSecurityCheckTime >= 300000) {
        mLastSecurityCheckTime = currTime;

        // Close main connections that have not received data recently.
        for (Set<TcpConnection*>::Iter* iter = mMainConnectionSet.begin(); iter != NULL;) {
            TcpConnection* conn = iter->mValue;
            if (conn) {
                if (currTime - conn->getRecvTimeStamp() >= CLIENT_CONNECTION_SECURITY_CHECK_INTERVAL) {
                    LOG_WARN("Connection [%s:%u] was closed for timeout.",
                             conn->getFromIp().c_str(), conn->getFromPort());
                    LYNX_DEREGISTER_RECEIVED(conn, this, &ListenerWorker::onMainReceived);
                    LYNX_DEREGISTER_CONNECT_BROKEN(conn, this, &ListenerWorker::onMainDisconnected);
                    conn->close();
                    iter = mMainConnectionSet.erase(iter);
                    XDELETE(conn);
                } else {
                    iter = mMainConnectionSet.next(iter);
                }
            } else {
                // Always advance the iterator, even for a null entry,
                // so the loop cannot spin forever.
                iter = mMainConnectionSet.next(iter);
            }
        }

        // Same check for GM connections.
        for (Set<TcpConnection*>::Iter* iter = mGMConnectionSet.begin(); iter != NULL;) {
            TcpConnection* conn = iter->mValue;
            if (conn) {
                if (currTime - conn->getRecvTimeStamp() >= CLIENT_CONNECTION_SECURITY_CHECK_INTERVAL) {
                    LOG_WARN("Connection [%s:%u] was closed for timeout.",
                             conn->getFromIp().c_str(), conn->getFromPort());
                    LYNX_DEREGISTER_RECEIVED(conn, this, &ListenerWorker::onGMReceived);
                    LYNX_DEREGISTER_CONNECT_BROKEN(conn, this, &ListenerWorker::onGMDisconnected);
                    conn->close();
                    iter = mGMConnectionSet.erase(iter);
                    XDELETE(conn);
                } else {
                    iter = mGMConnectionSet.next(iter);
                }
            } else {
                iter = mGMConnectionSet.next(iter);
            }
        }
    }
    return true;
}
err_t TcpConnection::staticOnReceive(void *arg, tcp_pcb *tcp, pbuf *p, err_t err)
{
    // debugf("Static OnReceive buf = %d", tcp_sndbuf(tcp));
    TcpConnection* con = (TcpConnection*)arg;
    err_t ret_err;
    //Serial.println("echo_recv!");

    if (con == NULL) {
        if (p != NULL) {
            /* Inform TCP that we have taken the data. */
            tcp_recved(tcp, p->tot_len);
            pbuf_free(p);
        }
        closeTcpConnection(tcp);
        return ERR_OK;
    } else
        con->sleep = 0;

    if (err != ERR_OK /*&& err != ERR_CLSD && err != ERR_RST*/) {
        debugf("Received ERROR %d", err);
        /* exit and free resources, for unknown reason */
        if (p != NULL) {
            /* Inform TCP that we have taken the data. */
            tcp_recved(tcp, p->tot_len);
            pbuf_free(p);
        }
        closeTcpConnection(tcp); // ??
        con->tcp = NULL;
        con->onError(err);
        //con->close();
        return err == ERR_ABRT ? ERR_ABRT : ERR_OK;
    }

    //if (tcp != NULL && tcp->state == ESTABLISHED) // If active
    /* We have taken the data. */
    if (p != NULL)
        tcp_recved(tcp, p->tot_len);

    err_t res = con->onReceive(p);

    if (p != NULL)
        pbuf_free(p);
    else {
        // A NULL pbuf means the remote side closed the connection.
        con->close();
        closeTcpConnection(tcp);
    }

    con->checkSelfFree();
    //debugf("<staticOnReceive");
    return res;
}
void NetClient::onGroupPrepare( MessagePtr m )
{
    K_ASSERT( m_groupId == 0 );
    K_ASSERT( m_selfTag == 0 );

    m_groupId = 0;
    m_selfTag = 0;

    NmGroupPrepare* gp = static_cast<NmGroupPrepare*>( m.Get() );

    TcpConnection* c = m_tcp.FindById( gp->remote );

    if ( c == 0 )
    {
        LOG( FT_WARN, _T("NetClient::onGroupPrepare> Connection %d not found"), gp->remote );
        return;
    }

    m_udp.Fini();

    m_groupId = gp->groupId;
    m_selfTag = gp->connectionId;

    // init udp with c as a relay connection
    // use same ip:port as TCP connection
    bool rc = m_udp.Init( this,
                          &m_ios,
                          c->GetSocket()->GetAddress(),
                          m_selfTag,
                          gp->sl,
                          gp->challenge,
                          c );

    if ( !rc )
    {
        LOG( FT_WARN, _T("NetClient::onGroupPrepare> Failed to init udp") );
        return;
    }

    NmGroupPrepared* p = new NmGroupPrepared;

    p->remote       = gp->remote;
    p->groupId      = gp->groupId;
    p->connectionId = gp->connectionId;
    p->in           = c->GetSocket()->GetAddress();

    m_tcp.Send( m->remote, MessagePtr( p ) );

    LOG( FT_DEBUG,
         _T("NetClient::onGroupPrepare> Prepared group %d connection %d"),
         gp->groupId,
         gp->connectionId );
}
void NetWork::OnlineUser::clear()
{
    Guard g(_mutex);

    // Release the connections that are waiting to be closed.
    while (!_recycles.empty()) {
        TcpConnection *conn = _recycles.front();
        _recycles.pop_front();
        conn->release();
    }
}
void TcpConnection::staticOnError(void *arg, err_t err)
{
    TcpConnection* con = (TcpConnection*)arg;
    if (con == NULL)
        return;

    con->tcp = NULL; // IMPORTANT. No available connection after error!
    con->onError(err);
    con->checkSelfFree();
    //debugf("<staticOnError");
}
// Send data. This only posts the bytes to the connection's send buffer;
// the return value indicates whether the post succeeded.
bool NetWork::send(int handle, const char *buffer, int len)
{
    bool ret = false;
    TcpConnection *conn = _online_user.getconn(handle);
    if (conn != NULL) {
        ret = conn->deliver_data(buffer, len);
        conn->release();
    }
    return ret;
}
void SocketConnList::scanForPreExisting()
{
  // TODO: This is a hack for when SLURM + MPI are used:
  // when we use the command
  //   srun/ibrun dmtcp_launch a.out
  // inside the SLURM submission script, the MPI launching
  // process will not run under the control of DMTCP. Instead,
  // only the computing processes are. The launching process
  // will create some sockets, and then create the computing
  // processes. Hence the sockets are shared among the created
  // processes at the time when dmtcp_launch is launched. DMTCP
  // will treat these sockets as pre-existing sockets instead of
  // shared sockets.
  //
  // In the future, we should generalize the processing of
  // pre-existing fds. For example, at checkpoint time, determine
  // which sockets are shared, regardless of whether they are
  // pre-existing or not. This can be done by adding an extra round
  // of leader election.
  if (getenv("SLURM_JOBID") || (getenv("SLURM_JOB_ID"))) {
    return;
  }

  // FIXME: Detect stdin/out/err fds to detect duplicates.
  vector<int> fds = jalib::Filesystem::ListOpenFds();
  for (size_t i = 0; i < fds.size(); ++i) {
    int fd = fds[i];
    if (!Util::isValidFd(fd)) {
      continue;
    }
    if (dmtcp_is_protected_fd(fd)) {
      continue;
    }

    string device = jalib::Filesystem::GetDeviceName(fd);

    JTRACE("scanning pre-existing device") (fd) (device);

    if (device == jalib::Filesystem::GetControllingTerm()) {}
    else if (dmtcp_is_bq_file && dmtcp_is_bq_file(device.c_str())) {}
    else if (fd <= 2) {}
    else if (Util::strStartsWith(device.c_str(), "/")) {}
    else {
      JNOTE("found pre-existing socket... will not be restored") (fd) (device);
      TcpConnection *con = new TcpConnection(0, 0, 0);
      con->markPreExisting();
      add(fd, con);
    }
  }
}
TcpServer::~TcpServer()
{
    this->cleanupClosedConnections();

    std::vector<TcpConnection*>::iterator it = currentConnections.begin();
    while (it != currentConnections.end()) {
        TcpConnection* connection = *it;
        connection->stop();
        delete connection;
        // erase() invalidates the iterator; continue from the one it returns.
        it = currentConnections.erase(it);
    }
}
TcpConnection *NetWork::OnlineUser::getconn(int handle)
{
    Guard g(_mutex);

    TcpConnection *conn = NULL;
    ONLINE_USER_ITER iter = _conns.find(handle);
    if (iter != _conns.end()) {
        conn = iter->second;
        // The caller must pair this add_ref() with a release().
        conn->add_ref();
    }
    return conn;
}
static inline void on_write(uv_write_t* req, int status)
{
    TcpConnection::UvWriteData* write_data =
        static_cast<TcpConnection::UvWriteData*>(req->data);
    TcpConnection* connection = write_data->connection;

    // Delete the UvWriteData struct (which includes the uv_req_t and the store char[]).
    std::free(write_data);

    // Just notify the TcpConnection when error.
    if (status)
        connection->onUvWriteError(status);
}
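// on_write() above is only the completion half of a libuv write request: it
// frees the UvWriteData block and reports failures. The matching submission
// half presumably looks roughly like the sketch below, assuming UvWriteData
// embeds the uv_write_t request plus a trailing store[] copy of the payload
// and that the connection keeps its stream handle in a uvHandle member (the
// Write() signature and those member names are illustrative):
#include <uv.h>
#include <cstdlib>
#include <cstring>

void TcpConnection::Write(const uint8_t* data, size_t len)
{
    // Allocate the request header and the payload copy as one block so that
    // on_write() can release everything with a single std::free().
    auto* write_data = static_cast<UvWriteData*>(std::malloc(sizeof(UvWriteData) + len));

    write_data->connection = this;
    write_data->req.data = static_cast<void*>(write_data); // recovered in on_write()
    std::memcpy(write_data->store, data, len);

    uv_buf_t buffer = uv_buf_init(reinterpret_cast<char*>(write_data->store),
                                  static_cast<unsigned int>(len));

    int err = uv_write(&write_data->req,
                       reinterpret_cast<uv_stream_t*>(this->uvHandle),
                       &buffer, 1, on_write);

    if (err != 0) {
        // The request never reached the loop; clean up and report immediately.
        std::free(write_data);
        onUvWriteError(err);
    }
}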
err_t TcpConnection::staticOnSent(void *arg, tcp_pcb *tcp, uint16_t len)
{
    TcpConnection* con = (TcpConnection*)arg;

    if (con == NULL)
        return ERR_OK;
    else
        con->sleep = 0;

    err_t res = con->onSent(len);
    con->checkSelfFree();
    //debugf("<staticOnSent");
    return res;
}
AP_MSG_HANDLER_METHOD(ServerModule, TcpServer_SendSrpc)
{
    String sMsg = pMsg->srpc.toString();

    if (apLog_IsVerbose) {
        apLog_Verbose((LOG_CHANNEL, LOG_CONTEXT, "conn=" ApHandleFormat " send: %s",
                       ApHandlePrintf(pMsg->hConnection), _sz(sMsg)));
    }

    sMsg += "\n";

    TcpConnection* pConnection = findTcpConnection(pMsg->hConnection);
    if (pConnection == 0) {
        throw ApException(LOG_CONTEXT, "findTcpConnection(" ApHandleFormat ") failed",
                          ApHandlePrintf(pMsg->hConnection));
    }

    if (!pConnection->DataOut((unsigned char*)sMsg.c_str(), sMsg.bytes())) {
        throw ApException(LOG_CONTEXT, "Connection " ApHandleFormat " DataOut() failed",
                          ApHandlePrintf(pMsg->hConnection));
    }

    pMsg->apStatus = ApMessage::Ok;
}
TcpConnection* TcpAcceptor::accept(int &ret, long timeout)
{
    struct timeval *ptimeout;
    struct timeval time;
    fd_set r_fdset;
    int conn_fd;
    TcpConnection *conn;

    if (timeout < 0) {
        // A negative timeout means block until a client arrives.
        ptimeout = NULL;
    } else {
        time.tv_sec = timeout;
        time.tv_usec = 0;
        ptimeout = &time;
    }

    // Wait until the listening socket becomes readable or the timeout expires.
    while (1) {
        FD_ZERO(&r_fdset);
        FD_SET(listen_fd, &r_fdset);

        switch (select(listen_fd + 1, &r_fdset, NULL, NULL, ptimeout)) {
        case 0:
            ret = E_TIMEOUT;
            return 0L;
        case -1:
            ret = -1;
            return 0L;
        default:
            if (FD_ISSET(listen_fd, &r_fdset))
                break;
            else
                continue;
        }
        break;
    }

    if ((conn_fd = ::accept(listen_fd, NULL, NULL)) < 0) {
        ret = -1;
        return 0L;
    }

    conn = new TcpConnection;
    if (conn->set_fd(conn_fd) < 0) {
        delete conn;
        return 0L;
    } else {
        return conn;
    }
}
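// A typical caller of the two TcpAcceptor::accept() overloads above would loop
// on the timeout result. A brief usage sketch; the serve() wrapper, the running
// flag, and handle_client() are illustrative, not part of the original source:
void serve(TcpAcceptor &acceptor, volatile bool &running)
{
    int ret = 0;

    while (running) {
        // Wait up to 5 seconds for an incoming connection.
        TcpConnection *conn = acceptor.accept(ret, 5);
        if (conn == 0L) {
            if (ret == E_TIMEOUT)
                continue;    // nobody connected within the window; poll again
            break;           // select()/accept()/set_fd() failed
        }
        handle_client(conn); // the caller owns and eventually deletes conn
    }
}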
// Actively close a connection.
void NetWork::close(int handle)
{
    TRACE(__PRETTY_FUNCTION__);

    TcpConnection *conn = _online_user.getconn(handle);
    if (conn != NULL) {
        // Remove the connection from kqueue/epoll first, so no further events
        // can fire and cause complicated re-entrancy.
        _sock_event->remove_event(conn);

        // Check removeconn()'s return value: when the main thread and a worker
        // thread call close() at the same time, this prevents conn->close()
        // from being called twice.
        if (_online_user.removeconn(handle)) {
            conn->close();
        }
        conn->release();
    }
    return;
}
int NetWork::make_handle()
{
    static int h = 0;
    TcpConnection *conn = NULL;

    h++;
    // Skip handle values that are already in use.
    while ((conn = _online_user.getconn(h)) != NULL) {
        h++;
        if (h >= MAX_INT) {
            h = 0;
        }
        conn->release();
    }
    return h;
}
err_t TcpConnection::staticOnConnected(void *arg, tcp_pcb *tcp, err_t err)
{
    TcpConnection* con = (TcpConnection*)arg;
    if (con == NULL) {
        debugf("OnConnected ABORT");
        //closeTcpConnection(tcp);
        tcp_abort(tcp);
        return ERR_ABRT;
    } else
        debugf("OnConnected");

    err_t res = con->onConnected(err);
    con->checkSelfFree();
    //debugf("<staticOnConnected");
    return res;
}
err_t TcpConnection::staticOnPoll(void *arg, tcp_pcb *tcp)
{
    TcpConnection* con = (TcpConnection*)arg;

    if (con == NULL) {
        closeTcpConnection(tcp);
        return ERR_OK;
    }

    //if (tcp->state != ESTABLISHED)
    //    return ERR_OK;

    con->sleep++;
    err_t res = con->onPoll();
    con->checkSelfFree();
    //debugf("<staticOnPoll");
    return res;
}
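// The static handlers above (staticOnReceive, staticOnSent, staticOnError,
// staticOnConnected, staticOnPoll) follow the lwIP raw-API convention: the
// TcpConnection instance is stored as the pcb's user argument and recovered
// through the void* arg parameter of each callback. A minimal sketch of how
// such callbacks are typically wired to a pcb; the initialize() method name
// and the poll interval are illustrative, not taken from the original source:
#include <lwip/tcp.h>

void TcpConnection::initialize(tcp_pcb* pcb)
{
    this->tcp = pcb;
    tcp_arg(pcb, this);              // hand this object to every callback below
    tcp_recv(pcb, staticOnReceive);  // incoming data, or NULL pbuf on remote close
    tcp_sent(pcb, staticOnSent);     // peer acknowledged previously sent data
    tcp_err(pcb, staticOnError);     // pcb was aborted and is already freed
    tcp_poll(pcb, staticOnPoll, 4);  // periodic callback, every 4 coarse timer ticks

    // For outgoing connections, staticOnConnected would be passed directly to
    // tcp_connect() rather than registered here.
}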
int NetWork::connect(const char *ip, unsigned short port, bool encrypt)
{
    int handle = -1;
    TcpConnection *conn = new TcpConnection(this);

    // if(encrypt) conn->set_encrypt();

    if (conn->connect(ip, port)) {
        if (_online_user.addconn(conn)) {
            _sock_event->add_event(conn, true, true);
            handle = conn->gethandle();
        }
    } else {
        LOGI("NetWork::connect connect fail : %s", strerror(errno));
        // If the connection fails, return the underlying network error code,
        // negated, to the caller.
        handle = (conn->last_sys_errno() * (-1));
        delete conn;
    }
    return handle;
}
void NetServer::onDestroyUdpGroup( const NetGroupOp& op )
{
    NetGroupMap::iterator i = m_groups.find( op.groupId );

    if ( i == m_groups.end() )
    {
        LOG( FT_WARN, _T("NetServer::onDestroyUdpGroup> %d not found"), op.groupId );
        return;
    }

    NetGroup* group = i->second;
    K_ASSERT( group != 0 );

    // make each TcpConnection forget about the group
    const std::vector<uint>& remotes = group->GetRemotes();

    std::vector<uint>::const_iterator ci( remotes.begin() );
    std::vector<uint>::const_iterator ciEnd( remotes.end() );

    for ( ; ci != ciEnd; ++ci )
    {
        TcpConnection* c = m_tcp.FindById( *ci );

        if ( c != 0 )
        {
            c->SetGroup( 0 );
        }
    }

    group->Fini();

    LOG( FT_DEBUG, _T("NetServer::onDestroyUdpGroup> %d destroyed"), group->GetId() );

    delete group;
    m_groups.erase( i );
}