void TcpAcceptor :: run()
{
    PLHead("start accept...");

    m_oSocket.setAcceptTimeout(500);
    m_oSocket.setNonBlocking(true);

    while (true)
    {
        struct pollfd pfd;
        int ret;

        pfd.fd = m_oSocket.getSocketHandle();
        pfd.events = POLLIN;

        //wait up to 500ms for an incoming connection, so the loop
        //can still notice m_bIsEnd even when nobody connects.
        ret = poll(&pfd, 1, 500);

        if (ret != 0 && ret != -1)
        {
            SocketAddress oAddr;
            int fd = -1;
            try
            {
                fd = m_oSocket.acceptfd(&oAddr);
            }
            catch (...)
            {
                fd = -1;
            }

            if (fd >= 0)
            {
                BP->GetNetworkBP()->TcpAcceptFd();

                PLImp("accepted!, fd %d ip %s port %d",
                        fd, oAddr.getHost().c_str(), oAddr.getPort());

                //park the accepted fd in a mutex-protected queue;
                //the event loop picks it up via CreateEvent().
                AcceptData * poData = new AcceptData;
                poData->fd = fd;
                poData->oAddr = oAddr;

                m_oMutex.lock();
                m_oFDQueue.push(poData);
                m_oMutex.unlock();
            }
        }

        ClearEvent();

        if (m_bIsEnd)
        {
            PLHead("TCP.Acceptor [END]");
            return;
        }
    }
}
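//the queue above is the hand-off point between the acceptor thread (producer)
//and the event loop (consumer; see EventLoop::StartLoop below, which calls
//m_poTcpAcceptor->CreateEvent()). a minimal sketch of what that drain step
//could look like; the real body is not shown in this excerpt, and
//m_poEventLoop->AddEvent() is a hypothetical registration hook.
void TcpAcceptor :: CreateEvent()
{
    m_oMutex.lock();
    while (!m_oFDQueue.empty())
    {
        AcceptData * poData = m_oFDQueue.front();
        m_oFDQueue.pop();

        //the event loop takes ownership of the fd from here on.
        m_poEventLoop->AddEvent(poData->fd, poData->oAddr);
        delete poData;
    }
    m_oMutex.unlock();
}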
void EventLoop :: StartLoop()
{
    m_bIsEnd = false;
    while (true)
    {
        BP->GetNetworkBP()->TcpEpollLoop();

        //DealwithTimeout fires any expired timers and may shrink
        //iNextTimeout toward the nearest pending deadline.
        int iNextTimeout = 1000;
        DealwithTimeout(iNextTimeout);

        //PLHead("nexttimeout %d", iNextTimeout);

        //one pass over the poller, blocking at most iNextTimeout ms.
        OneLoop(iNextTimeout);

        //deal with the accepted fds queued by the acceptor thread
        if (m_poTcpAcceptor != nullptr)
        {
            m_poTcpAcceptor->CreateEvent();
        }

        if (m_poTcpClient != nullptr)
        {
            m_poTcpClient->DealWithWrite();
        }

        if (m_bIsEnd)
        {
            PLHead("TCP.EventLoop [END]");
            break;
        }
    }
}
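//shutdown is cooperative: Stop() only raises m_bIsEnd, and StartLoop notices
//it at the bottom of the current iteration, so exit latency is bounded by one
//pass (at most iNextTimeout ms). a minimal sketch, assuming m_bIsEnd is the
//same flag polled above; the real Stop() may also release poller resources.
void EventLoop :: Stop()
{
    m_bIsEnd = true;
}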
void TcpWrite :: Stop()
{
    m_oEventLoop.Stop();
    join();
    PLHead("TcpWriteThread [END]");
}

void TcpRead :: Stop()
{
    m_oEventLoop.Stop();
    join();
    PLHead("TcpReadThread [END]");
}
void TcpIOThread :: Stop()
{
    if (m_bIsStarted)
    {
        //stop the acceptor first so no new connections arrive,
        //then the read threads, then the write threads.
        m_oTcpAcceptor.Stop();
        for (auto & poTcpRead : m_vecTcpRead)
        {
            poTcpRead->Stop();
        }
        for (auto & poTcpWrite : m_vecTcpWrite)
        {
            poTcpWrite->Stop();
        }
    }

    PLHead("TcpIOThread [END]");
}
int PNode :: Init(const Options & oOptions, NetWork *& poNetWork)
{
    int ret = CheckOptions(oOptions);
    if (ret != 0)
    {
        PLErr("CheckOptions fail, ret %d", ret);
        return ret;
    }

    m_iMyNodeID = oOptions.oMyNode.GetNodeID();

    //step1 init logstorage
    LogStorage * poLogStorage = nullptr;
    ret = InitLogStorage(oOptions, poLogStorage);
    if (ret != 0)
    {
        return ret;
    }

    //step2 init network
    ret = InitNetWork(oOptions, poNetWork);
    if (ret != 0)
    {
        return ret;
    }

    //step3 build masterlist
    for (int iGroupIdx = 0; iGroupIdx < oOptions.iGroupCount; iGroupIdx++)
    {
        MasterMgr * poMaster = new MasterMgr(this, iGroupIdx, poLogStorage, oOptions.pMasterChangeCallback);
        assert(poMaster != nullptr);
        m_vecMasterList.push_back(poMaster);

        ret = poMaster->Init();
        if (ret != 0)
        {
            return ret;
        }
    }

    //step4 build grouplist
    for (int iGroupIdx = 0; iGroupIdx < oOptions.iGroupCount; iGroupIdx++)
    {
        Group * poGroup = new Group(poLogStorage, poNetWork, m_vecMasterList[iGroupIdx]->GetMasterSM(), iGroupIdx, oOptions);
        assert(poGroup != nullptr);
        m_vecGroupList.push_back(poGroup);
    }

    //step5 build batchpropose
    if (oOptions.bUseBatchPropose)
    {
        for (int iGroupIdx = 0; iGroupIdx < oOptions.iGroupCount; iGroupIdx++)
        {
            ProposeBatch * poProposeBatch = new ProposeBatch(iGroupIdx, this, &m_oNotifierPool);
            assert(poProposeBatch != nullptr);
            m_vecProposeBatch.push_back(poProposeBatch);
        }
    }

    //step6 init statemachine
    InitStateMachine(oOptions);

    //step7 parallel init group: kick off every group's init first,
    //then collect each result; any failure fails the whole Init.
    for (auto & poGroup : m_vecGroupList)
    {
        poGroup->StartInit();
    }

    for (auto & poGroup : m_vecGroupList)
    {
        int initret = poGroup->GetInitRet();
        if (initret != 0)
        {
            ret = initret;
        }
    }

    if (ret != 0)
    {
        return ret;
    }

    //last step: only start threads once init has fully succeeded.
    //stopping threads is slow, so if init failed after starting them
    //we would waste a long time tearing many threads back down.
    //that is why thread startup comes last.
    for (auto & poGroup : m_vecGroupList)
    {
        //start each group's own thread first.
        poGroup->Start();
    }
    RunMaster(oOptions);
    RunProposeBatch();

    PLHead("OK");

    return 0;
}
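//step7 overlaps the per-group recovery work across groups: StartInit() kicks
//off each group's init concurrently, and GetInitRet() joins it and reports
//the stored result. a minimal sketch of that start-then-join pattern as Group
//might implement it; m_oInitThread and m_iInitRet are hypothetical member
//names, not confirmed by this excerpt.
#include <thread>

void Group :: StartInit()
{
    m_oInitThread = std::thread([this]() {
        //the expensive replay/recovery work runs off the caller's thread.
        m_iInitRet = Init();
    });
}

int Group :: GetInitRet()
{
    //block until this group's init finishes, then hand back its result.
    m_oInitThread.join();
    return m_iInitRet;
}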
DFNetWork :: ~DFNetWork()
{
    PLHead("NetWork Deleted!");
}