void CEPInfo::ErasePendRelatNS(const CNSPeerKey &p_rkey)const { std::set<CNSPeerKey>::iterator jt = m_pendingRelatedNSs.find(p_rkey); if (jt == m_pendingRelatedNSs.end()) { NLOG_ERR("[ErasePendRelatNS]: trying to erase NS(ipv6=%s, port=%d) from pending ep list on ep_module with addr=%s", AddrToStr(p_rkey.GetIPv6Addr()).c_str(), p_rkey.GetIPv6Addr().GetPort(), AddrToStr(m_addr).c_str()); assert(false); } m_pendingRelatedNSs.erase(jt); }
void CEPInfo::EraseRelatedNS(const CNSPeerKey &p_rkey)const { std::map<CNSPeerKey/*NS INFO*/, CNetAddressExt/*SS sourceAddres*/>::iterator jt = m_relatedNSs.find(p_rkey); if (jt == m_relatedNSs.end()) { NLOG_ERR("[EraseRelatedNS]: trying to erase NS(ipv6=%s, port=%d) from related ep list on ep_module with addr=%s", AddrToStr(p_rkey.GetIPv6Addr()).c_str(), p_rkey.GetIPv6Addr().GetPort(), AddrToStr(m_addr).c_str()); assert(false); } m_relatedNSs.erase(jt); }
/// Handles an IPG register/update message: parses the payload, adds the IPG
/// module to the cache if it is new (registering its public tunnel info),
/// refreshes keep-alive and public info, computes the route add/erase deltas
/// and sends the resulting tunnel set/delete commands to the IPG.
void CIPGRegUpdCmd::Execute()
{
	NLOG_INFO("[IPGRegUpdCmd]: received msg = %s", MsgToStr(m_rMsg).c_str());
	CMgmtPOption::OptionIPGatewayInfoData *pOpData1 = NULL;
	std::list<CMgmtPOption::OptionIPRoutesListData> rawRouteInfo;
	CMgmtPOption::OptionKeepAliveIntervalData *pOpData2 = NULL;
	// ProcessMsg extracts the option blocks; on success pOpData1/pOpData2 are
	// dereferenced below without null checks — presumably ProcessMsg
	// guarantees both are set when it returns true; TODO confirm.
	if (!ProcessMsg(&pOpData1, rawRouteInfo, &pOpData2))
	{
		// Only complain if the module is not already cached (an already-known
		// module sending a bad update is silently ignored).
		if (m_rIPGList.find(CIPGInfo (m_rMsg.m_oNetAddr)) == m_rIPGList.end())
			NLOG_ERR("[IPGRegUpdCmd]: MODULE=%s was not added in CACHE because of invalid register/update INFO ", AddrToStr(m_rMsg.m_oNetAddr).c_str());
		return;
	}
	///add module if it doesn't exist
	CIPGInfo key(m_rMsg.m_oNetAddr);
	std::set<CIPGInfo>::iterator i = m_rIPGList.find(key);
	bool isNewIPG = false;
	if (i == m_rIPGList.end())
	{
		///FIRST
		// Register the IPG's public (IPv4/IPv6) info; refuse the module if
		// another one already claimed the same public info.
		CTunnelKey info(pOpData1->m_IPGAddrIPv4, pOpData1->m_szIPGAddrIPv6, 128);
		if (m_rFARAndIPGPubInf.insert(info).second == false)
		{
			NLOG_WARN("[IPGRegUpdCmd]: MODULE=%s was not added in CACHE because of duplicated publicInfo=%s, "
				"skip processing msg = %s", AddrToStr(m_rMsg.m_oNetAddr).c_str(), TunnelKeyToStr(info).c_str(), MsgToStr(m_rMsg).c_str());
			return;
		}
		isNewIPG = true;
		i = m_rIPGList.insert(CIPGInfo(m_rMsg.m_oNetAddr, CModuleInfo::MT_REGISTERED)).first;
		i->SetPendingTimedout(m_rApp.GetConfig().m_nPendingRelationTimedoutS);
	}
	// NOTE(review): casting away the constness of a std::set element; safe
	// only as long as the mutated fields do not participate in the set's
	// ordering — confirm against CIPGInfo's comparator.
	CIPGInfo *pIPGInfo = (CIPGInfo*)&(*i);
	pIPGInfo->SetTimedOut(CModuleInfo::MTT_KEEP_ALIVE, pOpData2->m_IntSec, pOpData2->m_IntUSec);
	pIPGInfo->SetPublicIpgInfo(*pOpData1);
	if(pIPGInfo->GetState() != CModuleInfo::MT_REGISTERED)
		pIPGInfo->SetState(CModuleInfo::MT_REGISTERED);
	///process
	std::list<CMgmtPOption::OptionIPRoutesListData> IPGAddedRoute;
	std::list<CMgmtPOption::OptionIPRoutesListData> IPGEraseRoute;
	ProcessRegIPG(i, isNewIPG, rawRouteInfo, IPGAddedRoute, IPGEraseRoute);
	// NOTE(review): %d with list::size() (size_t) is a format mismatch on
	// LP64 targets; harmless for small counts but worth fixing.
	NLOG_INFO("[IPGRegUpdCmd]: raw(routes=%d), add(routes=%d), erase(routes=%d)", rawRouteInfo.size(), IPGAddedRoute.size(), IPGEraseRoute.size());
	///send result
	if (IPGAddedRoute.size() > 0)
		//SendAddTunnOnIPG(m_rMsg.m_oNetAddr, IPGAddedTun); -- not add only the difference but all
		SendSetTunnOnIPG(i);
	if (IPGEraseRoute.size() > 0)
		SendDelTunnOnIPG(m_rMsg.m_oNetAddr, IPGEraseRoute, m_rApp);
}
void CEPInfo::InsertRelatedNS(const CNSPeerKey &p_rkey, const CNetAddressExt/*SS sourceAddres*/&p_rAddr)const { std::pair<CNSPeerKey/*NS INFO*/, CNetAddressExt/*SS sourceAddres*/> relatedNSElem(p_rkey, p_rAddr); if (m_relatedNSs.insert(relatedNSElem).second == false) { NLOG_ERR("[InsertRelateNS]: NS peerkey (ipv6=%s, port=%d) is already in related NS list on EP!", AddrToStr(p_rkey.GetIPv6Addr()).c_str(), p_rkey.GetIPv6Addr().GetPort()); assert(false); } }
void CEPInfo::MovePendNSToRelNS(const CNSPeerKey &p_rkey, const CNetAddressExt/*SS sourceAddres*/&p_rAddr)const { std::set<CNSPeerKey>::iterator jt = m_pendingRelatedNSs.find(p_rkey); if (jt == m_pendingRelatedNSs.end()) { NLOG_ERR("[MovePendNSToRelSS]: there is no pending NS(ipv6=%s, port=%d) on ep__module_addr=%s", AddrToStr(p_rkey.GetIPv6Addr()).c_str(), p_rkey.GetIPv6Addr().GetPort(), AddrToStr(m_addr).c_str()); assert(false); } std::pair<CNSPeerKey,CNetAddressExt> relatedNS(*jt, p_rAddr); if (m_relatedNSs.insert(relatedNS).second==false) { NLOG_ERR("[MovePendNSToRelNS]: SS peerkey (ipv6=%s, port=%d) in both SS list on EP!", AddrToStr(p_rkey.GetIPv6Addr()).c_str(), p_rkey.GetIPv6Addr().GetPort()); assert(false); } m_pendingRelatedNSs.erase(jt); }
/// Adds an NS peer key to this EP's pending-related-NS list and starts the
/// pending-relation countdown. A duplicate key is a programming error: it is
/// logged and asserted (the countdown is still started, as before).
void CEPInfo::InsertPendRelatNS(const CNSPeerKey &p_rkey)const
{
	const bool freshlyAdded = m_pendingRelatedNSs.insert(p_rkey).second;
	if (!freshlyAdded)
	{
		NLOG_ERR("[InsertPendRelatNS]: NS peerkey (ipv6=%s, port=%d) is already in pending related NS list on EP!",
			AddrToStr(p_rkey.GetIPv6Addr()).c_str(), p_rkey.GetIPv6Addr().GetPort());
		assert(false);
	}
	CCommPendRelat::StartPendRelatCountDown();
}
/// Picks the default tunnel for a FAR: scans the FAR's related IPGs and
/// returns the tunnel key with the minimum load (per SetMinLoad).
/// @param p_FarInfo FAR whose related IPGs are scanned.
/// @return pointer to the least-loaded tunnel key, or NULL when the FAR has
///         no fully-related IPGs (error case, logged + asserted).
/// BUGFIX: when there were no related IPGs, release (NDEBUG) builds used to
/// fall through the empty scan and a second dead assert before returning
/// NULL; the early return makes that path explicit.
const CTunnelKey * CFARDefaultTunnProc::ProcessDefaultTunn(const CFARInfo &p_FarInfo)
{
	if (p_FarInfo.GetIPGsNo() - p_FarInfo.GetPendRelatIPGsNo() == 0)
	{
		NLOG_ERR("[ProcessDefaultTunn]: no tunnel keys on FAR=%s!", AddrToStr(p_FarInfo.GetSourceAddr()).c_str());
		assert(false);
		return NULL;	// nothing to scan; caller must handle NULL
	}
	unsigned int minLoad = MAX_NO;
	const CTunnelKey *pTunnelKeyWithMin = NULL;
	CFARInfo::RelatedIPGsIterator relIPGit = p_FarInfo.BeginRelatedIPGs();
	for(;relIPGit != p_FarInfo.EndRelatedIPGs(); ++relIPGit)
	{
		const CTunnelKey &tunnelKey = relIPGit->first;
		// SetMinLoad updates minLoad and reports whether this key is the new minimum.
		if (SetMinLoad(tunnelKey, minLoad))
			pTunnelKeyWithMin = &tunnelKey;
	}
	assert(pTunnelKeyWithMin != NULL);
	return pTunnelKeyWithMin;
}
void CIPGRegUpdCmd::ProcessRegIPG(IPGInfoListT::iterator p_itIPG, bool p_newIPG, std::list<CMgmtPOption::OptionIPRoutesListData> &p_rawRouteInfo, std::list<CMgmtPOption::OptionIPRoutesListData> &p_iIPGAddedRoute, std::list<CMgmtPOption::OptionIPRoutesListData> &p_rIPGEraseRoute) { const CIPGInfo *pIPGInfo = &(*p_itIPG); //clean-up input of garbage if necessary std::list<CMgmtPOption::OptionIPRoutesListData> routeToDel; CFAR_IPG_Proc::ClearGarbInRouteInfoFromIpg(pIPGInfo->GetPublicIpgInfo(), p_rawRouteInfo, routeToDel); ///it might be newIPG from cache point of view but with already info set on it if (p_newIPG) { //tunnels std::list<CMgmtPOption::OptionIPRoutesListData>::iterator kt = p_rawRouteInfo.begin(); int i = 0; for(; kt != p_rawRouteInfo.end(); ++kt) { CMgmtPOption::OptionIPRoutesListData &routeInf = *kt; NLOG_INFO("DELETEME: ipv4=%s", IPv4ToStr(*(unsigned int*)&routeInf.m_GatewayAddr[12]).c_str()); CTunnelKey farKey(*(unsigned int*)&routeInf.m_GatewayAddr[12], routeInf.m_NetPrefix, routeInf.m_NetPrefixLen); pIPGInfo->InsertPendRelatFAR(farKey); NLOG_INFO("[ProcessRegIPG]: new ipg_source_addr=%s comes with routeInfo[%d]=%s", AddrToStr(pIPGInfo->GetSourceAddr()).c_str(), i, RouteDataToStr(routeInf).c_str()); i++; } } ///inter_module_process m_rFAR_IPG_Proc.ProcessRegIPG(p_itIPG, p_newIPG, m_rFARList, p_rawRouteInfo, p_iIPGAddedRoute, p_rIPGEraseRoute); p_rIPGEraseRoute.insert(p_rIPGEraseRoute.end(), routeToDel.begin(), routeToDel.end()); }
/* do I/O polling */
/*
 * Per-tick driver for all MacTCP connections: finishes driver init, then for
 * each open connection (a) issues/completes an async no-copy receive and
 * delivers data via the callback, and (b) advances the connection's state
 * machine (resolve -> open -> ready -> writing/closing -> closed).
 * Returns NA_PROCESSED when all pending operations completed this pass,
 * NA_NOTPROCESSED while async calls are still in flight, NA_REQCLOSE when
 * the TCP driver could not be initialized.
 * NOTE(review): `win`, `trds` and `j` are unused here; presumably kept for
 * the na_win task-function signature / historical reasons.
 */
short NATCPtask(na_win *win)
{
    tcpinfo *tcp;
    rdsEntry *rds, *trds;
    short result, newstate;
    short processed = NA_PROCESSED;
    na_tcp i;
    tcpwb *wb;
    int j;

    /* finish off driver initialization: */
    if (!tstate->tcp_driver) {
        if (!tcp_checkdriver()) return (NA_NOTPROCESSED);
        if (!tstate->tcp_driver) return (NA_REQCLOSE);
    }
    /* loop through connections */
    for (i = 0; i < MAX_TCPCON; ++i) {
        /* inner do/while re-runs the state machine while the state keeps changing */
        if ((tcp = tstate->tcpbufs[i]) != NULL) do {
            /* read data if we have it */
            if (!tcp->reading && !tcp->rclose && tcp->havedata
                && tcp->state != TCP_CONNECT) {
                tcp->rpb.ioCRefNum = tstate->tcp_driver;
                tcp->rpb.tcpStream = tcp->stream;
                tcp->rpb.csCode = TCPNoCopyRcv;
                tcp->rpb.csParam.receive.rdsPtr = (Ptr) tcp->rrds;
                tcp->rpb.csParam.receive.commandTimeoutValue = 5;
                tcp->rpb.csParam.receive.rdsLength = RDS;
                if (tcp->pushed) {
                    /* pushed data: short timeout, single RDS entry, sync call below */
                    tcp->rpb.csParam.receive.commandTimeoutValue = 1;
                    tcp->rpb.csParam.receive.rdsLength = 1;
                }
                tcp->havedata = 0;
                PBControl((ParmBlkPtr) &tcp->rpb, tcp->pushed ? false : true);
                tcp->reading = 1;
                tcp->pushed = 0;
            }
            if (tcp->reading) {
                /* ioResult == 1 means the async receive is still pending */
                if ((result = tcp->rpb.ioResult) == 1) {
                    processed = NA_NOTPROCESSED;
                } else {
                    tcp->reading = 0;
                    if (result != noErr) {
                        /* commandTimeout is an expected "no data" outcome */
                        if (result != commandTimeout) {
                            (*tcp->callback)(tcp->context, i, NATCP_noread,
                                             result, NULL);
                        }
                    } else {
                        /* deliver each RDS segment; clear NATCP_more on the last */
                        result = NATCP_data | NATCP_more;
                        if (tcp->rpb.csParam.receive.urgentFlag) tcp->urgent = 1;
                        if (tcp->urgent) result |= NATCP_urgent;
                        if (tcp->rpb.csParam.receive.markFlag) tcp->urgent = 0;
                        for (rds = tcp->rrds; rds->length; ++rds) {
                            if (!rds[1].length) result &= ~NATCP_more;
                            (*tcp->callback)(tcp->context, i, result,
                                             rds->length, rds->ptr);
                        }
                        /* give the buffers back to the driver */
                        tcp->rpb.csCode = TCPRcvBfrReturn;
                        PBControl((ParmBlkPtr) &tcp->rpb, false);
                    }
                }
            }
            result = tcp->pb.ioResult;
            newstate = 0;
            switch (tcp->state) {
              case TCP_GETHOST:
                /* local-host name lookup finished */
                if (tcp->dnrdone) {
                    tcp->rclose = 3;
                    newstate = TCP_CLOSED;
                    if (tcp->host.rtnCode != noErr) {
                        (*tcp->callback)(tcp->context, i, NATCP_nohost,
                                         tcp->host.rtnCode, NULL);
                    } else {
                        (*tcp->callback)(tcp->context, i, NATCP_connect,
                                         strlen(tcp->host.cname),
                                         tcp->host.cname);
                        strcpy(tstate->localhost, tcp->host.cname);
                    }
                }
                break;
              case TCP_RESOLVE:
                /* remote-host DNR lookup finished: start the open */
                if (tcp->dnrdone) {
                    if (tcp->host.rtnCode != noErr) {
                        tcp->rclose = 3;
                        newstate = TCP_CLOSED;
                        (*tcp->callback)(tcp->context, i, NATCP_nohost,
                                         tcp->host.rtnCode, NULL);
                    } else if (!tcp->lclose) {
                        memset((void *) &tcp->pb, 0, sizeof (tcp->pb));
                        tcp->pb.ioCRefNum = tstate->tcp_driver;
                        tcp->pb.tcpStream = tcp->stream;
                        tcp->pb.csParam.open.ulpTimeoutValue = 30;
                        tcp->pb.csParam.open.ulpTimeoutAction = 1; /* Abort on timeout */
                        tcp->pb.csParam.open.tosFlags = tstate->TOS;
                        tcp->pb.csParam.open.precedence = tstate->precedence;
                        tcp->pb.csParam.open.validityFlags =
                            timeoutValue|timeoutAction|typeOfService|precedence;
                        tcp->pb.csParam.open.remoteHost = tcp->host.addr[0];
                        if (tcp->server) {
                            /* listen: wait for any remote on our local port */
                            tcp->pb.csCode = TCPPassiveOpen;
                            tcp->pb.csParam.open.commandTimeoutValue = 0;
                            tcp->pb.csParam.open.remotePort = 0;
                            tcp->pb.csParam.open.localPort = tcp->port;
                        } else {
                            tcp->pb.csCode = TCPActiveOpen;
                            tcp->pb.csParam.open.remotePort = tcp->port;
                            tcp->pb.csParam.open.localPort = 0;
                        }
                        PBControl((ParmBlkPtr) &tcp->pb, true);
                        newstate = TCP_CONNECT;
                    }
                }
                break;
              case TCP_CONNECT:
                if (result == 1) {          /* open still pending */
                    processed = NA_NOTPROCESSED;
                    break;
                }
                if (result != noErr) {
                    tcp->rclose = 3;
                    newstate = TCP_CLOSED;
                    (*tcp->callback)(tcp->context, i, NATCP_nocon, result, NULL);
                } else {
                    newstate = TCP_READY;
                    if (tcp->server) {
                        /* passive open: learn the peer's port/address */
                        tcp->port = tcp->pb.csParam.open.remotePort;
                        if (!*tcp->host.cname) {
                            AddrToStr(tcp->pb.csParam.open.remoteHost,
                                      tcp->host.cname);
                        }
                    }
                    (*tcp->callback)(tcp->context, i, NATCP_connect,
                                     tcp->port, tcp->host.cname);
                }
                break;
              case TCP_READY:
                /* Write data if we have it */
                wb = tcp->wb + tcp->wbnum;
                if (wb->rused && (newstate = beginwrite(tcp))) {
                    break;
                }
                /* check if other side wants to close */
                if (tcp->rclose == 1) {
                    tcp->rclose = 2;
                    (*tcp->callback)(tcp->context, i, NATCP_closing, 0, NULL);
                }
                /* check if connection needs closing at this end */
                if (tcp->lclose == 1) {
                    tcp->lclose = 2;
                    tcp->pb.csCode = TCPClose;
                    tcp->pb.csParam.close.validityFlags = 0xC0;
                    tcp->pb.csParam.close.ulpTimeoutValue = 30; /* give 30 secs to close */
                    tcp->pb.csParam.close.ulpTimeoutAction = 0;
                    PBControl((ParmBlkPtr) &tcp->pb, true);
                    newstate = TCP_CLOSING;
                    break;
                }
                /* check if connection closed at both ends */
                if (tcp->rclose == 3) {
                    (*tcp->callback)(tcp->context, i, NATCP_closed,
                                     tcp->reason, NULL);
                    newstate = TCP_CLOSED;
                }
                break;
              case TCP_WRITING:
                if (result == 1) {          /* write still pending */
                    processed = NA_NOTPROCESSED;
                    break;
                }
                /* free the buffer that was just written (rused == -1 marks it) */
                wb = tcp->wb;
                if (wb->rused != -1) ++wb;
                freewb(wb);
                if (result != noErr) {
                    tcp->pushed = 0;
                    (*tcp->callback)(tcp->context, i, NATCP_nowrite,
                                     result, NULL);
                }
                newstate = TCP_READY;
                break;
              case TCP_CLOSING:
                if (result == 1) {          /* close still pending */
                    processed = NA_NOTPROCESSED;
                    break;
                }
                newstate = TCP_READY;
                break;
              case TCP_CLOSED:
                if (!tcp->rclose) break;
                if (!tcp->gethost) {
                    /* release the MacTCP stream (none was opened for gethost) */
                    tcp->pb.csCode = TCPRelease;
                    PBControl((ParmBlkPtr)&tcp->pb, false);
                }
                freewb(tcp->wb);
                freewb(tcp->wb + 1);
                DisposPtr((Ptr) tcp);
                tstate->tcpbufs[i] = NULL;
                break;
            }
            if (newstate) tcp->state = newstate;
        } while (newstate);
    }
    return (processed);
}
/// Initializes the EntryPoint application: logging, signal handlers, config,
/// NMS subscriber link, all worker threads (firmware update, connection,
/// DB reader/writer, cleanup) and the DB management object.
/// @return 1 on success, 0 on failure.
int CEntryPointApp::Init()
{
	if (!CApp::Init (NIVIS_TMP"NMS_EntryPoint.log"))
	{
		NLOG_ERR("[MainApp-Init]: failed!");
		return 0;
	}
	///for debugging purpose
	CSignalsMgr::Install(SIGUSR1); ///on this signal the app will display the subscription servers
	CSignalsMgr::Install(SIGUSR2); ///on this signal app log some internal structures (device cache)
	CSignalsMgr::Install(SIGHUP); ///on this signal app will reload some running parameters from config file
	if (!m_stCfg.Init ())
	{
		NLOG_ERR("[MainApp-Init]: failed to read config!");
		return 0;
	}
	NLOG_INFO("[MainApp-Init]: dispatcher_addr = %s", AddrToStr(m_stCfg.m_oDispAddr).c_str());
	if (!m_oSubscToNMS.Init(m_stCfg.m_oDispAddr, m_stCfg.m_oEPAddr, this, 0/*lifetime*/, m_stCfg.m_nIpv4EPPort))
	{
		NLOG_ERR("[MainApp-Init]: failed to init subscriber to dispatcher_addr = %s!", AddrToStr(m_stCfg.m_oDispAddr).c_str());
		// NOTE(review): returns false in an int function (converts to 0, same
		// as the other failure paths) — inconsistent but harmless.
		return false;
	}
	m_oSubscToNMS.SetLinkPrintHexEnabled ( m_stCfg.m_bMgmtpLinkRawPacketLog );
	m_oSubscToNMS.SetLinkHexLimit ( m_stCfg.m_nMgmtpLinkHexLimit );
	srand(time(NULL));
	m_pPersistentMsgId = CEPPersistentMsgId::Ptr( new CEPPersistentMsgId(&(m_oSubscToNMS.GetPersistentMsgId())) );
	/* Firmware update manager thread*/
	// Kept in a raw local too so it can be stored under its IEPFwUpdateMng
	// interface as well as in the generic thread list.
	CEPFwUpdateMngThread * fwUpdateMng = new CEPFwUpdateMngThread(m_stCfg, m_pAppDbToConnQueue);
	m_pFwUploadMng = IEPFwUpdateMng::Ptr(fwUpdateMng);
	CEPThread* pThread = fwUpdateMng;
	m_threadsList.push_back(CEPThread::Ptr(pThread));
	/* Connection thread for management messages */
	pThread = new CEPConnMgmtThread(m_stCfg, m_pPersistentMsgId, m_NSList, m_pMgmtDbToConnQueue, m_pMgmtConnToDbQueue);
	m_threadsList.push_back(CEPThread::Ptr(pThread));
	/* Connection thread for app messages */
	pThread = new CEPConnAppThread(m_stCfg, m_pAppDbToConnQueue, m_pAppConnToDbQueue);
	m_threadsList.push_back(CEPThread::Ptr(pThread));
	/* Connection thread for node messages */
	pThread = new CEPConnNodeThread(m_pNodeDbToConnQueue, m_pNodeConnToDbQueue, m_pDevCmdTracker, m_stCfg.m_nNetStatLocalPort, m_stCfg.m_BroadcastRspTimedout);
	( ( CEPConnNodeThread* ) pThread )->SetPrintHexEnabled ( m_stCfg.m_bSimpleLinkRawPacketLog );
	( ( CEPConnNodeThread* ) pThread )->SetHexLimit ( m_stCfg.m_nSimpleLinkHexLimit );
	m_threadsList.push_back(CEPThread::Ptr(pThread));
	/* DB threads that read commands and pass them to connection threads */
	for (int i = 0; i < m_stCfg.m_nDBReaderThreads; i++)
	{
		pThread = new CEPDBReaderThread(m_stCfg, m_pPersistentMsgId, m_dbAppCmdTrackList, m_dbTrackList, m_pAppDbToConnQueue, m_pMgmtDbToConnQueue, m_pNodeDbToConnQueue, m_pFwUploadMng);
		m_threadsList.push_back(CEPThread::Ptr(pThread));
	}
	/* DB threads that write management messages recvd from mgmt conn thread */
	for (int i = 0; i < m_stCfg.m_nDBMgmtWriterThreads; i++)
	{
		pThread = new CEPDBMgmtWriterThread(m_stCfg, m_dbTrackList, m_pMgmtConnToDbQueue, m_pMgmtDbToConnQueue);
		m_threadsList.push_back(CEPThread::Ptr(pThread));
	}
	/* DB threads that write app messages recvd from app conn thread */
	for (int i = 0; i < m_stCfg.m_nDBAppWriterThreads; i++)
	{
		pThread = new CEPDBAppWriterThread(m_stCfg, m_dbAppCmdTrackList, m_pAppConnToDbQueue, m_pAppDbToConnQueue, m_pFwUploadMng);
		m_threadsList.push_back(CEPThread::Ptr(pThread));
	}
	/* DB threads that write node messages recvd from node conn thread */
	for (int i = 0; i < m_stCfg.m_nDBNodeWriterThreads; i++)
	{
		pThread = new CEPDBNodeWriterThread(m_stCfg, m_pNodeConnToDbQueue, m_pNodeDbToConnQueue);
		m_threadsList.push_back(CEPThread::Ptr(pThread));
	}
	pThread = new CEPDBCleanupThread(m_stCfg);
	m_threadsList.push_back(CEPThread::Ptr(pThread));
	/*
	 * Threads init
	 */
	CPThreadWrapper::LibInit();
	CEPThread::PtrList::iterator it;
	for (it = m_threadsList.begin(); it != m_threadsList.end(); it++)
	{
		if (*it)
		{
			if (!(*it)->Init())
			{
				NLOG_ERR("[MainApp-Init] thr init failed");
				return 0;
			}
		}
		else
		{
			NLOG_ERR("[MainApp-Init] NULL app link");
			return 0;
		}
	}
	m_dbMngt = CEPDBFactory::Create("MainApp-Init:", m_stCfg);
	return 1;
}
/// Removes a FAR source address from this EP's related-FAR list.
/// @param p_rAddr source address of the FAR to remove.
/// If the address is absent this is a programming error: it is logged and
/// asserted. BUGFIX: previously the code fell through and called
/// erase(end()) — undefined behavior in release (NDEBUG) builds where the
/// assert compiles away; now it returns early instead.
void CEPInfo::EraseRelatedFAR(const CNetAddressExt &p_rAddr)const
{
	std::set<CNetAddressExt>::iterator it = m_relatedFARs.find(p_rAddr);
	if (it == m_relatedFARs.end())
	{
		NLOG_ERR("[EraseRelatedFAR]: related far (source_addr=%s) is not in related FAR list on EP!", AddrToStr(p_rAddr).c_str());
		assert(false);
		return;	// do not erase an end() iterator
	}
	m_relatedFARs.erase(it);
}
/// Adds a FAR source address to this EP's related-FAR list. A duplicate is a
/// programming error: it is logged and asserted (the set is unchanged).
void CEPInfo::InsertRelatFAR(const CNetAddressExt&p_rAddr)const
{
	const bool wasInserted = m_relatedFARs.insert(p_rAddr).second;
	if (!wasInserted)
	{
		NLOG_ERR("[InsertRelatFAR]: related far (source_addr=%s) is already in related FAR list on EP!", AddrToStr(p_rAddr).c_str());
		assert(false);
	}
}