/*
 * Body of the background life-check thread: runs until m_need_exit is set
 * (by the destructor), expiring cache entries whose lifetime has passed.
 *
 * Entries flagged remove_from_disk() have their dnet_id collected while the
 * lock is held; the actual dnet_remove_local() calls happen after the inner
 * loop, outside m_lock — presumably to avoid holding the cache lock across
 * disk I/O (TODO confirm against dnet_remove_local's locking requirements).
 */
void life_check(void) {
	while (!m_need_exit) {
		/* ids whose on-disk copies must be removed, processed out of the lock */
		std::deque<struct dnet_id> remove;

		while (!m_need_exit && !m_lifeset.empty()) {
			size_t time = ::time(NULL);

			boost::mutex::scoped_lock guard(m_lock);

			/* re-check under the lock: set may have been drained concurrently */
			if (m_lifeset.empty())
				break;

			/* m_lifeset is ordered by lifetime: stop at the first live entry */
			life_set_t::iterator it = m_lifeset.begin();
			if (it->lifetime() > time)
				break;

			if (it->remove_from_disk()) {
				struct dnet_id id;
				dnet_setup_id(&id, 0, (unsigned char *)it->id().id);
				id.type = -1;
				remove.push_back(id);
			}

			/* unlinks the element from all cache containers and frees it */
			erase_element(&(*it));
		}

		for (std::deque<struct dnet_id>::iterator it = remove.begin(); it != remove.end(); ++it) {
			dnet_remove_local(m_node, &(*it));
		}

		sleep(1);
	}
}
bool remove(const unsigned char *id) { bool removed = false; bool remove_from_disk = false; boost::mutex::scoped_lock guard(m_lock); iset_t::iterator it = m_set.find(id); if (it != m_set.end()) { remove_from_disk = it->remove_from_disk(); erase_element(&(*it)); removed = true; } guard.unlock(); if (remove_from_disk) { struct dnet_id raw; dnet_setup_id(&raw, 0, (unsigned char *)id); raw.type = -1; dnet_remove_local(m_node, &raw); } return removed; }
void processing_lists_cache::specific_infer_cache_for(const E& typed_handle) { const auto id = typed_handle.get_id(); const auto it = per_entity_cache.try_emplace(id.to_unversioned()); auto& cache = (*it.first).second; all_processing_flags new_flags; if (typed_handle.get_flag(entity_flag::IS_PAST_CONTAGIOUS)) { new_flags.set(processing_subjects::WITH_ENABLED_PAST_CONTAGIOUS); } if (/* cache_existed */ !it.second) { if (cache.recorded_flags == new_flags) { return; } } augs::for_each_enum_except_bounds([&](const processing_subjects key) { auto& list = lists[key]; erase_element(list, id); if (new_flags.test(key)) { list.push_back(id); } }); cache.recorded_flags = new_flags; }
/*
 * Stops the life-check thread and drains every remaining cache element.
 *
 * BUGFIX: the old code copied the front element to the stack
 * (data_t raw = m_lru.front()) and passed the copy's address to
 * erase_element(). As resize() and life_check() show, erase_element()
 * expects a pointer to the container-owned element, which it unlinks from
 * the cache structures and frees — handing it a stack copy leaves the real
 * element in m_lru (so the loop never terminates) and releases a stack
 * object. Take the address of the element in place, consistent with
 * resize().
 */
~cache_t() {
	m_need_exit = true;
	m_lifecheck.join();

	while (!m_lru.empty()) {
		data_t *raw = &m_lru.front();
		erase_element(raw);
	}
}
/*
 * Rolls back the most recent action group from the undo stack.
 *
 * The stack holds per-voxel UndoOp records (pos, before, after), with
 * action-marker sentinels separating groups. Ops are popped until the next
 * marker (or an empty stack) is reached; each op is applied only if the
 * voxel still holds the op's "after" value, and the border-marker list is
 * patched to match the restored BDR bit.
 */
void FastVolume::undo(){
	if(undo_info.size() == 0)return;
	/* the top of the stack is the marker that closed the last group —
	 * drop it so the loop below can pop that group's ops */
	if(is_action_marker(undo_info.back()))undo_info.pop_back(); //remove marker, if any;
	while(undo_info.size() != 0){
		UndoOp cur = undo_info.back(); //check back
		/* hit the marker of the previous group: this group is fully undone */
		if(is_action_marker(cur)) return; //return if marker;
		undo_info.pop_back(); //remove current action from stack
		/* only roll back if the voxel was not changed again afterwards */
		if(mask[cur.pos] == cur.after){
			/* op added a border bit: remove pos from the marker list */
			if((cur.after & BDR) && !(cur.before & BDR))erase_element(markers, cur.pos);
			if(!(cur.after & BDR) && (cur.before & BDR)){ //if there was border and now not - make as it was before;
				/* erase first so pos is not duplicated in markers */
				erase_element(markers, cur.pos);
				markers.push_back(cur.pos);
			};
			mask[cur.pos] = cur.before; //undo, if matches
			//TODO: process markers properly
		};
	};
};
/*
 * Evicts least-recently-used elements until the cache has at least
 * @reserve bytes of free space (or is empty). At least one element is
 * always evicted, matching the original contract — write() only calls
 * this when the cache is about to overflow.
 *
 * BUGFIX: the free-space test used to compute
 * m_max_cache_size - m_cache_size unconditionally. write() can push
 * m_cache_size past m_max_cache_size, and with unsigned arithmetic the
 * subtraction then wraps to a huge value, so the loop broke after
 * evicting a single element and the cache never shrank back under its
 * limit. Guard the subtraction so eviction continues while over the
 * limit.
 */
void resize(size_t reserve) {
	while (!m_lru.empty()) {
		data_t *raw = &m_lru.front();
		/* unlinks from all cache containers, frees raw, updates m_cache_size */
		erase_element(raw);

		/* break early if free space in cache more than requested reserve */
		if ((m_cache_size < m_max_cache_size) &&
				(m_max_cache_size - m_cache_size > reserve))
			break;
	}
}
/*
 * Inserts a new cache entry, replacing any existing entry with the same id.
 *
 * @id               - key of the entry
 * @lifetime         - expiry time; 0 means the entry never expires
 *                     (it is then kept out of m_lifeset)
 * @data, @size      - payload to cache
 * @remove_from_disk - recorded on the entry; honored later by
 *                     remove()/life_check() when the entry is dropped
 *
 * Called with m_lock taken for the whole operation. If the new payload
 * would overflow the cache, LRU elements are evicted first (resize asks
 * for twice the payload as headroom).
 */
void write(const unsigned char *id, size_t lifetime, const char *data, size_t size, bool remove_from_disk) {
	boost::mutex::scoped_lock guard(m_lock);

	/* replace semantics: drop any previous entry under this id */
	iset_t::iterator it = m_set.find(id);
	if (it != m_set.end())
		erase_element(&(*it));

	if (size + m_cache_size > m_max_cache_size)
		resize(size * 2);

	/*
	 * nothing throws exception below this 'new' operator, so there is no try/catch block
	 */
	data_t *raw = new data_t(id, lifetime, data, size, remove_from_disk);

	/*
	 * ownership of raw passes to the containers: they link the element
	 * in place; erase_element() is what eventually unlinks and frees it
	 */
	m_set.insert(*raw);
	m_lru.push_back(*raw);
	if (lifetime)
		m_lifeset.insert(*raw);

	m_cache_size += size;
}