void LocalityFinder::GetLocality(m2::PointD const & pt, string & name)
{
  Cache * working = nullptr;

  // Find a suitable cache whose rect contains the needed point.
  for (auto & cache : m_caches)
  {
    if (cache.m_rect.IsPointInside(pt))
    {
      working = &cache;
      break;
    }
  }

  if (working == nullptr)
  {
    // No cache covers the point: recycle the least used cache.
    size_t minUsage = numeric_limits<size_t>::max();
    for (auto & cache : m_caches)
    {
      if (cache.m_usage < minUsage)
      {
        working = &cache;
        minUsage = cache.m_usage;
      }
    }

    ASSERT(working, ());
    working->Clear();
  }
  // (The remainder of the function, which fills the chosen cache and looks up
  // the locality name, is not included in this excerpt.)
}
void rot(const T& x, const T& y, const T& z, const T& t)
{
    if( t != T(0) && ( x != T(0) || y != T(0) || z != T(0) ) )
    {
        m_quat *= Quaternion<T>(x, y, z, t);
        m_cache.invalidate();
        m_inv_cache.invalidate();
    }
}
Document *Cache::getCache( const Key &key )
{
    Cache *pool = getCachePool( key.cacheKey() );
    if ( pool == NULL )
        return NULL;

    return pool->get( key );
}
pair<double, double *> NetworkEffect::statistic(const Network * pSummationTieNetwork,
    bool needActorStatistics)
{
    this->initializeStatisticCalculation();

    int n = pSummationTieNetwork->n();
    Cache * pCache = this->pCache();
    double statistic = 0;
    double * actorStatistics = 0;
    if (needActorStatistics)
    {
        actorStatistics = new double[n];
    }

    for (int i = 0; i < n; i++)
    {
        pCache->initialize(i);
        this->preprocessEgo(i);
        this->onNextEgo(i);
        if (needActorStatistics)
        {
            actorStatistics[i] = this->egoStatistic(i, pSummationTieNetwork);
            statistic += actorStatistics[i];
        }
        else
        {
            statistic += this->egoStatistic(i, pSummationTieNetwork);
        }
    }

    this->cleanupStatisticCalculation();
    // Rprintf(" %f sum \n ", statistic);

    return make_pair(statistic, actorStatistics);
}
bool Cache::line_make_owner_in_child_caches(Line *line, unsigned child_index)
{
    NVLOG1("%s\tline_make_owner_in_child_caches 0x%lx sharers %lx caller %d\n",
           this->_name.c_str(), line->addr, line->sharers, child_index);

    uint8_t  __attribute__((unused)) line_state_orig   = line->state;
    uint64_t __attribute__((unused)) line_sharers_orig = line->sharers;

    // Evict the line in all but the requesting child cache.
    for (size_t child_i = 0; child_i < _children.size() && line->sharers > 0; child_i++) {
        Cache *child = dynamic_cast<Cache *>(_children[child_i]);
        assert(child != NULL);

        // NVLOG1("%s\tchecking for 0x%lx %lx 1\n", child->_name.c_str(), line->addr, line->sharers);
        if (!bit(line->sharers, child_i))
            continue; // this child is not a sharer, so continue
        // NVLOG1("%s\tchecking for 0x%lx %lx 2\n", child->_name.c_str(), line->addr, line->sharers);
        if (child_i == child_index)
            continue; // skip the child cache that called us
        // NVLOG1("%s\tchecking for 0x%lx %lx 3\n", child->_name.c_str(), line->addr, line->sharers);

        for (Addr line_addr_iter = line->addr;
             line_addr_iter < line->addr + get_line_size();
             line_addr_iter += child->get_line_size()) {
            NVLOG1("%s\tis line sharer, checking segment 0x%lx\n",
                   child->_name.c_str(), line_addr_iter);
            Line *child_line = child->addr2line_internal(line_addr_iter);
            if (!child_line)
                continue;
            NVLOG1("%s\thas segment 0x%lx, evicting\n",
                   child->_name.c_str(), line_addr_iter);
            child->line_evict(child_line);
        }
    }

    line->sharers = 0;
    setbit(line->sharers, child_index);

    NVLOG1("%s\tline 0x%lx\t state %s->%s sharers 0x%lx->0x%lx\n",
           _name.c_str(), line->addr,
           state2str(line_state_orig).c_str(), state2str(line->state).c_str(),
           line_sharers_orig, line->sharers);
    return true;
}
//override
void initialize( const osgDB::Options* dbOptions )
{
    _dbOptions = dbOptions ? osg::clone(dbOptions) : 0L;

    if ( _dbOptions.valid() )
    {
        // Set up a custom caching bin for this source:
        Cache* cache = Cache::get( _dbOptions.get() );
        if ( cache )
        {
            Config optionsConf = _options.getConfig();

            std::string binId = Stringify() << std::hex << hashString(optionsConf.toJSON()) << "_tfs";
            _cacheBin = cache->addBin( binId );

            // Write a metadata record just for reference purposes; we don't actually use it.
            Config metadata = _cacheBin->readMetadata();
            if ( metadata.empty() )
            {
                _cacheBin->writeMetadata( optionsConf );
            }

            if ( _cacheBin.valid() )
            {
                _cacheBin->store( _dbOptions.get() );
            }
        }
    }

    _layerValid = TFSReader::read(_options.url().get(), _dbOptions.get(), _layer);
    if (_layerValid)
    {
        OE_INFO << LC << "Read layer TFS " << _layer._title << " " << _layer._abstract << " "
                << _layer._firstLevel << " " << _layer._maxLevel << " "
                << _layer._extent.toString() << std::endl;
    }
}
void CollectionList::loadNextBatchCachedItems()
{
    Cache *cache = Cache::instance();
    bool done = false;

    for(int i = 0; i < 20; ++i) {
        FileHandle cachedItem(cache->loadNextCachedItem());

        if(cachedItem.isNull()) {
            done = true;
            break;
        }

        // This may have already been created via a loaded playlist.
        if(!m_itemsDict.contains(cachedItem.absFilePath())) {
            CollectionListItem *newItem = new CollectionListItem(this, cachedItem);
            setupItem(newItem);
        }
    }

    SplashScreen::update();

    if(!done) {
        QTimer::singleShot(0, this, SLOT(loadNextBatchCachedItems()));
    }
    else {
        completedLoadingCachedItems();
    }
}
operator TextCache::Result() const {
#ifdef ENABLE_OPENGL
    return texture;
#else
    return { data, width, width, height };
#endif
}
bool DoubleCache::put(Collection& coll, Timestamp timestamp,
                      const vector<TypedValue>& data, CacheTask** ppCacheTask)
{
    bool b = m_current->put(timestamp, data);
    if (!b)
    {
        // The current cache is full: try the other one.
        Cache* cache = (m_current == m_first) ? m_second : m_first;
        b = cache->put(timestamp, data);
        if (!b)
            return false;

        m_filled = m_current;
        m_current = cache;

        // Schedule the filled cache to be written out to storage.
        string filePath;
        Uint offset;
        CacheTask* task = new CacheTask;
        coll.prepareStorageQueueWrite((uint64_t)task, filePath, offset);
        task->set(coll.id(), filePath, offset);

        if (ppCacheTask != NULL)
            *ppCacheTask = task;
        else
            MainQueue::put(task);
    }
    return true;
}
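// DoubleCache::put above follows the classic double-buffering pattern: writes
// go to the current buffer until it fills, then the buffers swap and the full
// one is handed off for a background flush. The following is a minimal,
// self-contained sketch of that pattern only; the names (DoubleBuffer) are
// illustrative, and it simplifies away the failure path DoubleCache has when
// both buffers are full.
#include <vector>

template <typename T>
class DoubleBuffer
{
public:
    explicit DoubleBuffer(size_t capacity) : m_capacity(capacity) {}

    // Returns the filled buffer to flush when a swap happens, else nullptr.
    // (DoubleCache enqueues a CacheTask at this point instead.)
    std::vector<T>* put(const T& item)
    {
        std::vector<T>& cur = m_buffers[m_current];
        if (cur.size() < m_capacity)
        {
            cur.push_back(item);
            return nullptr; // no flush needed yet
        }
        m_current ^= 1;                   // flip to the other buffer
        m_buffers[m_current].clear();     // reuse it for new writes
        m_buffers[m_current].push_back(item);
        return &m_buffers[m_current ^ 1]; // the filled buffer
    }

private:
    size_t m_capacity;
    int m_current = 0;
    std::vector<T> m_buffers[2];
};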
void Cache::line_writer_to_sharer(Line *line, size_t &latency, bool children_only)
{
    // Make this cache (and child caches) only sharers of the line
    // (downgrade from a writer).
    NVLOG1("%s\tline_writer_to_sharer 0x%lx +%ld cycles\n",
           this->_name.c_str(), line->addr, _hit_latency);
    latency += _hit_latency;
    this->stats.writebacks_inc();

    for (size_t child_i = 0; child_i < _children.size(); child_i++) {
        if (!bit(line->sharers, child_i))
            continue; // this child is not a sharer, so continue

        Cache *child = dynamic_cast<Cache *>(_children[child_i]);
        assert(child != NULL);

        for (Addr line_addr_iter = line->addr;
             line_addr_iter < line->addr + get_line_size();
             line_addr_iter += child->get_line_size()) {
            if (line_addr_iter != line->addr) {
                // Also measure the latency for the other line segments.
                NVLOG1("%s\tline_writer_to_sharer 0x%lx +%ld cycles\n",
                       this->_name.c_str(), line_addr_iter, _hit_latency);
                latency += _hit_latency; // response from child to parent
                this->stats.writebacks_inc();
            }
            Line *child_line = child->addr2line_internal(line_addr_iter);
            if (!child_line)
                continue;
            child->line_writer_to_sharer(child_line, latency);
        }
    }

    if (!children_only) {
        this->line_data_writeback(line);
        line->state &= ~(LINE_MOD | LINE_EXC);
        line->state |= LINE_SHR;
    }

    NVLOG1("%s\t0x%lx\t new state %s sharers %lx\n",
           _name.c_str(), line->addr,
           state2str(line->state).c_str(), line->sharers);
}
TEST(InnerNode, serialize)
{
    Options opts;
    opts.comparator = new LexicalComparator();
    opts.inner_node_msg_count = 4;
    opts.inner_node_children_number = 2;
    opts.leaf_node_record_count = 4;

    Directory *dir = new RAMDirectory();
    AIOFile *file = dir->open_aio_file("tree_test");
    Layout *layout = new Layout(file, 0, opts);
    ASSERT_TRUE(layout->init(true));

    Cache *cache = new Cache(opts);
    ASSERT_TRUE(cache->init());

    Tree *tree = new Tree("", opts, cache, layout);
    ASSERT_TRUE(tree->init());

    char buffer[40960];
    Block blk(Slice(buffer, 40960), 0, 0);
    BlockReader reader(&blk);
    BlockWriter writer(&blk);

    InnerNode n1("", NID_START, tree);
    n1.bottom_ = true;
    n1.first_child_ = NID_LEAF_START;
    n1.first_msgbuf_ = new MsgBuf(opts.comparator);
    PUT(*n1.first_msgbuf_, "a", "1");
    PUT(*n1.first_msgbuf_, "b", "1");
    PUT(*n1.first_msgbuf_, "c", "1");
    n1.pivots_.resize(1);
    n1.pivots_[0].key = Slice("d").clone();
    n1.pivots_[0].child = NID_LEAF_START + 1;
    n1.pivots_[0].msgbuf = new MsgBuf(opts.comparator);

    size_t skeleton_size;
    EXPECT_TRUE(n1.write_to(writer, skeleton_size) == true);

    InnerNode n2("", NID_START, tree);
    EXPECT_TRUE(n2.read_from(reader, false) == true);

    EXPECT_TRUE(n2.bottom_ == true);
    EXPECT_EQ(NID_LEAF_START, n2.first_child_);
    EXPECT_TRUE(n2.first_msgbuf_ != NULL);
    EXPECT_EQ(3U, n2.first_msgbuf_->count());
    CHK_MSG(n2.first_msgbuf_->get(0), Put, "a", "1");
    CHK_MSG(n2.first_msgbuf_->get(1), Put, "b", "1");
    CHK_MSG(n2.first_msgbuf_->get(2), Put, "c", "1");
    EXPECT_EQ(1U, n2.pivots_.size());
    EXPECT_EQ("d", n2.pivots_[0].key);
    EXPECT_EQ(NID_LEAF_START + 1, n2.pivots_[0].child);
    EXPECT_TRUE(n2.pivots_[0].msgbuf != NULL);
    EXPECT_EQ(0U, n2.pivots_[0].msgbuf->count());

    delete tree;
    delete cache;
    delete layout;
    delete file;
    delete dir;
    delete opts.comparator;
}
void Cache::line_evict(Line *line)
{
    NVLOG1("%s\tline_evict addr 0x%lx data 0x%lx\n",
           this->_name.c_str(), line->addr, (Addr)line->pdata);

    // Evict the line in all child caches.
    for (size_t child_i = 0; child_i < _children.size() && line->sharers > 0; child_i++) {
        if (!bit(line->sharers, child_i))
            continue; // this child is not a sharer, so continue

        Cache *child = dynamic_cast<Cache *>(_children[child_i]);
        assert(child != NULL);

        for (Addr line_addr_iter = line->addr;
             line_addr_iter < line->addr + get_line_size();
             line_addr_iter += child->get_line_size()) {
            Line *child_line = child->addr2line_internal(line_addr_iter);
            if (child_line == NULL)
                continue;
            child->line_evict(child_line);
        }
    }

#ifdef HAS_HTM
    if (pprocessor) {
        pprocessor->cb_line_evicted(line);
    }
#endif

    if (!_parent_cache) {
        // Notify the processor of the line removal.
#ifdef HAS_HTM
        assert(pprocessor);
        pprocessor->cb_line_evicted(line);
#endif
    }

    this->line_data_writeback(line); // check if there is any data to write back
    free(line->pdata);
    line->pdata = NULL;

    // Remove the line from this cache as well.
    this->line_rm(line);
}
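// The line_* methods above track child-cache sharers in a bitmask (one bit per
// child) via bit()/setbit() helpers that are not shown. A plausible minimal
// definition, assuming the mask is a uint64_t as line_sharers_orig suggests;
// the project's actual helpers may differ:
#include <cstdint>
#include <cstddef>

static inline bool bit(uint64_t mask, size_t i)    { return (mask >> i) & 1ULL; }
static inline void setbit(uint64_t &mask, size_t i) { mask |= (1ULL << i); }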
/**
 * Example of an SQL fields query that returns only the required
 * fields instead of whole key-value pairs.
 *
 * Note that an SQL fields query can only use fields that are
 * listed in the "QueryEntity" bean of the config.
 */
void DoSqlFieldsQueryWithJoin()
{
    Cache<int64_t, Person> cache = Ignition::Get().GetCache<int64_t, Person>(PERSON_CACHE);

    // Execute a query to get the names of all employees.
    std::string sql(
        "select concat(firstName, ' ', lastName), org.name "
        "from Person, \"Organization\".Organization as org "
        "where Person.orgId = org._key");

    QueryFieldsCursor cursor = cache.Query(SqlFieldsQuery(sql));

    // Print the persons' names and their organizations' names.
    std::cout << "Names of all employees and organizations they belong to: " << std::endl;

    // In this particular case each row has two elements: the full name of an
    // employee and the name of the organization they belong to.
    while (cursor.HasNext())
    {
        QueryFieldsRow row = cursor.GetNext();
        std::cout << row.GetNext<std::string>() << ", ";
        std::cout << row.GetNext<std::string>() << std::endl;
    }

    std::cout << std::endl;
}
/**
 * Example of an SQL query that calculates the average salary for a
 * specific organization.
 *
 * Note that an SQL fields query can only use fields that are
 * listed in the "QueryEntity" bean of the config.
 */
void DoSqlQueryWithAggregation()
{
    Cache<int64_t, Person> cache = Ignition::Get().GetCache<int64_t, Person>(PERSON_CACHE);

    // Calculate the average salary of all persons in ApacheIgnite.
    // Note that we also join on the Organization cache.
    std::string sql(
        "select avg(salary) "
        "from Person, \"Organization\".Organization as org "
        "where Person.orgId = org._key "
        "and lower(org.name) = lower(?)");

    SqlFieldsQuery qry(sql);
    qry.AddArgument<std::string>("ApacheIgnite");

    QueryFieldsCursor cursor = cache.Query(qry);

    // Print the average salary for the specific organization.
    std::cout << "Average salary for 'ApacheIgnite' employees: " << std::endl;

    while (cursor.HasNext())
        std::cout << cursor.GetNext().GetNext<double>() << std::endl;

    std::cout << std::endl;
}
/**
 * Example of TEXT queries using Lucene-based indexing of people's resumes.
 *
 * Note that to be able to run this you have to add a FULLTEXT index for the
 * 'resume' field of the Person type. See the config for details.
 */
void DoTextQuery()
{
    Cache<int64_t, Person> cache = Ignition::Get().GetCache<int64_t, Person>(PERSON_CACHE);

    typedef std::vector< CacheEntry<int64_t, Person> > ResVector;

    // Query for all people with "Master" in their resumes.
    ResVector masters;
    cache.Query(TextQuery(PERSON_TYPE, "Master")).GetAll(masters);

    // Query for all people with "Bachelor" in their resumes.
    ResVector bachelors;
    cache.Query(TextQuery(PERSON_TYPE, "Bachelor")).GetAll(bachelors);

    // Print the first result set.
    std::cout << "Following people have 'Master' in their resumes: " << std::endl;
    for (ResVector::const_iterator i = masters.begin(); i != masters.end(); ++i)
        std::cout << i->GetKey() << " : " << i->GetValue().ToString() << std::endl;
    std::cout << std::endl;

    // Print the second result set.
    std::cout << "Following people have 'Bachelor' in their resumes: " << std::endl;
    for (ResVector::const_iterator i = bachelors.begin(); i != bachelors.end(); ++i)
        std::cout << i->GetKey() << " : " << i->GetValue().ToString() << std::endl;
    std::cout << std::endl;
}
/*
 * Execute bulk Put and Get operations.
 */
void PutGetAll(Cache<int, Organization>& cache)
{
    // Create new Organizations to store in the cache.
    Organization org1("Microsoft", Address("1096 Eddy Street, San Francisco, CA", 94109));
    Organization org2("Red Cross", Address("184 Fidler Drive, San Antonio, TX", 78205));

    // Put the created entries into the cache.
    std::map<int, Organization> vals;
    vals[1] = org1;
    vals[2] = org2;
    cache.PutAll(vals);

    // Get the recently created organizations as strongly-typed, fully
    // deserialized instances.
    std::set<int> keys;
    keys.insert(1);
    keys.insert(2);
    std::map<int, Organization> valsFromCache = cache.GetAll(keys);

    std::cout << ">>> Retrieved organization instances from cache: " << std::endl;
    for (std::map<int, Organization>::iterator it = valsFromCache.begin();
         it != valsFromCache.end(); ++it)
        std::cout << it->second.ToString() << std::endl;

    std::cout << std::endl;
}
void Cache::clearByBucket( DB::VBucket *bucket )
{
    for ( uint8_t i = 0; i < Key::lockSize; i++ ) {
        Cache *cache = pool[i];
        cache->wlock();

        std::list<Document*>::iterator it = cache->list.begin();
        while ( it != cache->list.end() ) {
            Document *doc = *it;
            if ( doc->bucket == bucket ) {
                cache->data.erase( doc->getKey().str() );
                cache->list.erase( it++ );

                doc->wlock();
                if ( doc->changed() )
                    doc->save();
                doc->unlock();

                delete doc;
                Status::cacheOutRetain();
                continue;
            }
            ++it;
        }

        cache->unlock();
    }
}
inline void
CertificateCacheTtl::removeAll()
{
    for (Cache::iterator it = m_cache.begin(); it != m_cache.end(); it++)
        m_scheduler.cancelEvent(it->second.second);

    m_cache.clear();
}
inline void
CertificateCacheTtl::remove(const Name& certificateName)
{
    Name name = certificateName.getPrefix(-1);

    Cache::iterator it = m_cache.find(name);
    if (it != m_cache.end())
        m_cache.erase(it);
}
void rot(const vecn<T,3>& n, const T& t)
{
    if( t != T(0) && ( n.x() != T(0) || n.y() != T(0) || n.z() != T(0) ) )
    {
        //assert(n.length());
        m_quat *= Quaternion<T>(n, t);
        m_cache.invalidate();
        m_inv_cache.invalidate();
    }
}
TEST(InnerNode, add_pivot)
{
    Options opts;
    opts.comparator = new LexicalComparator();
    opts.inner_node_children_number = 4;

    Directory *dir = new RAMDirectory();
    AIOFile *file = dir->open_aio_file("tree_test");
    Layout *layout = new Layout(file, 0, opts);
    ASSERT_TRUE(layout->init(true));

    Cache *cache = new Cache(opts);
    ASSERT_TRUE(cache->init());

    Tree *tree = new Tree("", opts, cache, layout);
    ASSERT_TRUE(tree->init());

    InnerNode *n1 = tree->new_inner_node();
    n1->bottom_ = true;
    n1->first_child_ = NID_START + 100;
    n1->first_msgbuf_ = new MsgBuf(opts.comparator);
    n1->msgcnt_ = 0;
    n1->msgbufsz_ = n1->first_msgbuf_->size();

    std::vector<DataNode*> path;
    path.push_back(n1);
    n1->inc_ref();
    n1->write_lock();
    n1->add_pivot("e", NID_START + 101, path);
    EXPECT_EQ(1U, n1->pivots_.size());
    EXPECT_EQ("e", n1->pivots_[0].key);

    path.push_back(n1);
    n1->inc_ref();
    n1->write_lock();
    n1->add_pivot("d", NID_START + 102, path);
    EXPECT_EQ(2U, n1->pivots_.size());
    EXPECT_EQ("d", n1->pivots_[0].key);
    EXPECT_EQ("e", n1->pivots_[1].key);

    path.push_back(n1);
    n1->inc_ref();
    n1->write_lock();
    n1->add_pivot("f", NID_START + 103, path);
    EXPECT_EQ(3U, n1->pivots_.size());
    EXPECT_EQ("d", n1->pivots_[0].key);
    EXPECT_EQ("e", n1->pivots_[1].key);
    EXPECT_EQ("f", n1->pivots_[2].key);

    n1->dec_ref();

    delete tree;
    delete cache;
    delete layout;
    delete file;
    delete dir;
    delete opts.comparator;
}
void Reader::garbageCollect()
{
    images.garbageCollect();
    tiles.garbageCollect();
    sounds.garbageCollect();
    // songs.garbageCollect();
    xmls.garbageCollect();
    texts.garbageCollect();
}
void WebCache::setCapacities(size_t minDeadCapacity,
                             size_t maxDeadCapacity,
                             size_t capacity)
{
    Cache* cache = WebCore::cache();
    if (cache)
        cache->setCapacities(static_cast<unsigned int>(minDeadCapacity),
                             static_cast<unsigned int>(maxDeadCapacity),
                             static_cast<unsigned int>(capacity));
}
static void ClearCacheTimer(uv_timer_t *req)
{
    uint64_t now = time(0);

    // Sweep the cache and clear entries whose expiration time has passed.
    Cache::iterator itc = _cache.begin();
    while (itc != _cache.end()) {
        if (itc->second.expire > 0 && itc->second.expire < now)
            itc->second.clear(0);
        itc++;
    }
}
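// ClearCacheTimer is written as a libuv timer callback. How it gets wired up
// is not shown in this excerpt; a hypothetical registration using the standard
// libuv timer API (uv_timer_init / uv_timer_start) could look like this,
// sweeping once a minute on the default loop:
#include <uv.h>

static uv_timer_t _cacheTimer;

static void StartCacheTimer()
{
    uv_timer_init(uv_default_loop(), &_cacheTimer);
    // First sweep after 60s, then repeat every 60s.
    uv_timer_start(&_cacheTimer, ClearCacheTimer, 60000, 60000);
}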
inline shared_ptr<const IdentityCertificate>
CertificateCacheTtl::getCertificate(const Name& certificateName)
{
    Cache::iterator it = m_cache.find(certificateName);
    if (it != m_cache.end())
        return it->second.first;
    else
        return shared_ptr<IdentityCertificate>();
}
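// The CertificateCacheTtl methods above (removeAll, remove, getCertificate)
// imply that each m_cache entry pairs the cached certificate with a handle to
// its scheduled TTL eviction, so pending evictions can be cancelled. A
// minimal, self-contained sketch of that pattern, using std types in place of
// the project's Name/EventId/scheduler (names here are illustrative only):
#include <chrono>
#include <map>
#include <memory>
#include <string>

template <typename V>
class TtlCache
{
    using Clock = std::chrono::steady_clock;
    // Each entry pairs the value with its expiry time; the original instead
    // pairs the certificate with the EventId of a scheduled eviction.
    std::map<std::string, std::pair<std::shared_ptr<const V>, Clock::time_point>> m_cache;

public:
    void insert(const std::string& key, std::shared_ptr<const V> value,
                std::chrono::seconds ttl)
    {
        m_cache[key] = { value, Clock::now() + ttl };
    }

    std::shared_ptr<const V> get(const std::string& key)
    {
        auto it = m_cache.find(key);
        if (it == m_cache.end() || it->second.second < Clock::now())
            return nullptr; // missing or expired
        return it->second.first;
    }

    void remove(const std::string& key) { m_cache.erase(key); }
    void removeAll() { m_cache.clear(); }
};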
// Returns the IK synthesizer error for the given example, memoized by filename.
IKSyntherError synther_error(MetaData& metadata)
{
    static Cache<IKSyntherError> error_cache;
    return error_cache.get(
        metadata.get_filename(),
        [&]() {
            return do_synther_error(metadata);
        });
}
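// synther_error relies on a Cache<T>::get(key, producer) that computes and
// stores the value on first use. The real Cache<T> is not shown; this is a
// hypothetical stand-in (MemoCache) showing only the memoization contract the
// call above depends on. The actual class may add locking or eviction.
#include <functional>
#include <map>
#include <string>

template <typename T>
class MemoCache
{
public:
    T get(const std::string& key, std::function<T()> producer)
    {
        auto it = m_store.find(key);
        if (it == m_store.end())
            it = m_store.emplace(key, producer()).first; // compute once
        return it->second;
    }

private:
    std::map<std::string, T> m_store;
};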
static void
cache_node_free (gpointer node_data)
{
    CacheNode *node = (CacheNode *) node_data;
    Cache *cache = node->cache;

    cache->free_node (node);
    g_free (node->key);
    g_slice_free1 (cache->node_size, node);
}
std::string Reader::getText(const std::string& name)
{
    StringRef existing = texts.momentaryRequest(name);
    if (existing)
        return *existing.get();

    StringRef result(new std::string(readString(name)));
    texts.momentaryPut(name, result);
    return *result.get();
}
//override
void initialize( const osgDB::Options* dbOptions )
{
    FeatureSource::initialize( dbOptions );

    _dbOptions = dbOptions ? osg::clone(dbOptions) : 0L;
    if ( _dbOptions.valid() )
    {
        // Set up a custom caching bin for this source:
        Cache* cache = Cache::get( _dbOptions.get() );
        if ( cache )
        {
            Config optionsConf = _options.getConfig();

            std::string binId = Stringify() << std::hex << hashString(optionsConf.toJSON()) << "_wfs";
            _cacheBin = cache->addBin( binId );
            _cacheBin->setHashKeys(true);

            // Write a metadata record just for reference purposes; we don't actually use it.
            Config metadata = _cacheBin->readMetadata();
            if ( metadata.empty() )
            {
                _cacheBin->writeMetadata( optionsConf );
            }

            if ( _cacheBin.valid() )
            {
                _cacheBin->apply( _dbOptions.get() );
            }
        }
    }

    std::string capUrl;
    if ( _options.url().isSet() )
    {
        char sep = _options.url()->full().find_first_of('?') == std::string::npos ? '?' : '&';
        capUrl = _options.url()->full() + sep + "SERVICE=WFS&VERSION=1.0.0&REQUEST=GetCapabilities";
    }

    _capabilities = WFSCapabilitiesReader::read( capUrl, _dbOptions.get() );
    if ( !_capabilities.valid() )
    {
        OE_WARN << "[osgEarth::WFS] Unable to read WFS GetCapabilities." << std::endl;
        //return;
    }
    else
    {
        OE_INFO << "[osgEarth::WFS] Got capabilities from " << capUrl << std::endl;
    }
}
void TrlCache::getTranslations(DataText* translations,
                               wxString const& text,
                               wxLanguage lgsrc,
                               wxLanguage lgto)
{
    // If the cache doesn't exist, do nothing.
    if (!ManCache::get().existCache(lgsrc, lgto))
        return;

    Cache cache = ManCache::get().getCache(lgsrc, lgto);
    cache.getData(text, translations);
}