// Registers a (possibly cyclic) timeout.
//  handler     - callback invoked when the timer fires
//  millisecond - timeout interval in milliseconds
//  isCycle     - true to re-arm after every expiry
// Returns the new schedule id, or 0 on failure.
Scheduler::ID Scheduler::RegisterTimeout(Handler&& handler, std::time_t millisecond, bool isCycle) {
    // 0 is the failure sentinel, so a generated id of 0 means something is wrong.
    const auto newId = GenId();
    if (newId == 0) {
        BB_ERROR_LOG("schedule id == 0 ???");
        return 0;
    }

    auto entry = std::make_shared<ScheduleInfo>(io_);
    entry->isCycle = isCycle;
    entry->timeout = std::chrono::milliseconds(millisecond);
    entry->callback = std::move(handler);
    entry->wait.expires_after(entry->timeout);

    // Completion handler for the async wait: an error drops the entry from
    // the map, otherwise the timeout is dispatched.
    entry->waitHandler = [this, newId](const boost::system::error_code& ec) {
        if (ec) {
            maps_.erase(newId);
            BB_ERROR_LOG("schedule[%llu] error:%s", newId, ec.message().c_str());
            return;
        }
        HandleTimeout(newId, false);
    };

    maps_[newId] = entry;
    HandleTimeout(newId, true);  // arm the first wait
    return newId;
}
// Sends a GET request for `key` to the node `from` and returns a future
// that is fulfilled when the response arrives (or failed via HandleTimeout
// if no response arrives within 5 seconds).
ebbrt::Future<ebbrt::Buffer> ebbrt::SimpleRemoteHashTable::Get(NetworkId from, const std::string& key) {
  // Build the request: fixed header followed by the raw key bytes.
  auto buffer = message_manager->Alloc(sizeof(Header) + key.length());
  auto header = reinterpret_cast<Header*>(buffer.data());
  header->op = GET_REQUEST;
  auto op_id = op_id_.fetch_add(1, std::memory_order_relaxed);
  header->op_id = op_id;
  auto keybuf = buffer.data() + sizeof(Header);
  memcpy(keybuf, key.c_str(), key.length());

  // Store the promise BEFORE the request leaves this node: a fast response
  // (or the timeout firing on another core) must be able to find the map
  // entry; emplacing it after Send() races the reply handler.
  auto pair = promise_map_.emplace(std::piecewise_construct,
                                   std::forward_as_tuple(op_id),
                                   std::forward_as_tuple());
  auto future = pair.first->second.GetFuture();

  message_manager->Send(from, ebbid_, buffer);

  // Fail the operation if no response arrives within 5 seconds.
  timer->Wait(std::chrono::seconds{5}, [=]() {
    auto me = EbbRef<SimpleRemoteHashTable>{ebbid_};
    me->HandleTimeout(op_id);
  });

  return future;
}
/*
 * Function: EnumAll
 * Desc: iterate over all in-use nodes and expire the timed-out ones
 * In:
 *      none
 * Out:
 *      none
 * Return code:
 *      0  - success
 *      -1 - failure
 */
int CRealTime::EnumAll()
{
    /* begin enumeration */
    m_timeHash.EnumBegin();

    time_t t_now = time(NULL);
    while (1)
    {
        /* fetch the next node; NULL means the enumeration is exhausted */
        CRealTimeNode * node = (CRealTimeNode *)m_timeHash.EnumNext();
        if (node == NULL)
            break;

        /* node idle longer than m_timeout -> run the timeout handling */
        /* NOTE(review): releasing a node while enumerating - presumably
         * m_timeHash.EnumNext() tolerates this; confirm against the hash
         * implementation. */
        if ((t_now - node->m_use) >= m_timeout)
        {
            HandleTimeout(node->m_key);

            /* hand the node over to the deletion queue */
            m_trash.release(node);
        }
    }

    /* end enumeration */
    m_timeHash.EnumEnd();

    return 0;
}
//------------------------------------------------------- void PersistenceHandler::HandleTimeout() { const auto now = boost::chrono::steady_clock::now(); // Don't iterate through all unless we are due for a write timeout. if (now > m_nextTimeout) { m_nextTimeout = boost::chrono::steady_clock::time_point::max(); for (auto entity = m_toBeWritten.begin(); entity != m_toBeWritten.end(); ++entity) { // Should we write this object? if (entity->second.second && entity->second.first < now) { // The object is dirty and its time to write it try { const Safir::Dob::EntityProxy entityProxy = m_dobConnection.Read(entity->first); m_debug << "Periodic write time ended for entity " << entityProxy.GetEntityId() << std::endl; Write(entityProxy,true); // Set next write time ... entity->second.first = now + m_writePeriod[entity->first.GetTypeId()]; // ...and reset dirty flag entity->second.second = false; } catch(const Safir::Dob::NotFoundException &) { // This could happen if the instance has been deleted but we haven't been told yet // Just fall through and erase it from our internal structure. The delete callback will // take care of this. } } if (entity->second.first < m_nextTimeout) { m_nextTimeout = entity->second.first; } } } m_writeTimer.expires_from_now(boost::chrono::seconds(1)); m_writeTimer.async_wait([this](const boost::system::error_code& error) { if (!error) { HandleTimeout(); } }); }
void CThreadWithRequestsAndTimers::LoopFunction() { // Define constants for events. const int TerminateThread = WAIT_OBJECT_0; const int InputMessage = WAIT_OBJECT_0 + 1; HANDLE WaitHandles[2]; WaitHandles[0] = m_CloseEvent; WaitHandles[1] = m_Queue; int NextTimoutTime = TIMEOUT_TIME; OnThreadStart(); while (true) { LogThreadPerformance(); //wait for close event, queue item or timer that elapsed int Result = WaitForMultipleObjects(2, WaitHandles, FALSE, NextTimoutTime); NextTimoutTime = TIMEOUT_TIME; // reset the next timeout time if (Result == TerminateThread) { HandleThreadClose(); return; // This will terminate the thread } if (Result == InputMessage) //requests are waiting - Start processing them ProcessRequests(); // In any case check if need to process timers if (ShouldProcessTimers() == true) { m_LastProcessTimers = GetTickCount(); CTimingCenter::ProcessTimers(TIMERS_MAX_EXECUTION_TIME, NextTimoutTime); } // Calls the thread's function that should provide the call to OnTimeout() HandleTimeout(); // update the next timeout also according to the timeout val. NextTimoutTime = min(NextTimoutTime, (int)m_TimeoutInMilli); } // while }
//------------------------------------------------------- void PersistenceHandler::Start(bool restore) { if (m_started) { m_debug << "Persistence handling already started."<< std::endl; return; } m_debug << "Starting Persistence handling"<< std::endl; try { m_dobConnection.Open(L"DOPE_SUBSCRIBE", L"0", PERSISTENCE_CONTEXT, nullptr, &m_dispatcher); m_debug << "Opened DOB connection DOPE_SUBSCRIBE"<<std::endl; } catch (Safir::Dob::NotOpenException e) { Safir::Logging::SendSystemLog(Safir::Logging::Critical, L"PersistenceHandler failed to connect to Dob, Maybe Dope is already running?"); throw StartupError(); } PerformStartupChecks(); if (restore) { // Normal startup try { m_debug << "Restoring all stored entities"<< std::endl; RestoreAll(); } catch (const Safir::Dob::AccessDeniedException & e) { throw Safir::Dob::Typesystem::SoftwareViolationException (std::wstring(L"DOSE gave me AccessDeniedException when I was trying to Set persisted data! AccessDeniedException info: ") + e.GetExceptionInfo(),__WFILE__,__LINE__); } StartSubscriptions(); ReportPersistentDataReady(); } else { // Failover startup, don't restore anything. if (Safir::Dob::PersistenceParameters::StandaloneMode()) { // If standalone mode then clear all before starting subscriptions RemoveAll(); } StartSubscriptions(); } // Start timer HandleTimeout(); m_started = true; m_debug << "Persistence handling successfully started"<<std::endl; }