/**
 * Look up the value registered under @p key, creating and registering a
 * default-constructed V when no live entry exists.
 *
 * If the slot exists but its weak_ptr has expired, a removal (Cleanup) is
 * in flight and still owns the slot: wait on the condition variable and
 * retry until the slot is either gone or live again.
 *
 * Fix: bump `waiting` for the duration of the retry loop, consistent with
 * lookup().  We may block in cond.Wait() here just like lookup() does, and
 * presumably the reaper only signals registered waiters — without the
 * counter a waiter in this method could miss its wakeup.
 * NOTE(review): confirm against the Cleanup implementation (not in view).
 *
 * @param key  key to look up or create
 * @return a live shared pointer for @p key (never null)
 */
VPtr lookup_or_create(const K &key) {
  VPtr val;
  list<VPtr> to_release;  // refs dropped here are released after the lock
  {
    Mutex::Locker l(lock);
    ++waiting;  // register as a potential cond.Wait() sleeper (see lookup())
    bool retry = false;
    do {
      retry = false;
      typename map<K, pair<WeakVPtr, V*>, C>::iterator i = weak_refs.find(key);
      if (i != weak_refs.end()) {
        val = i->second.first.lock();
        if (val) {
          lru_add(key, val, &to_release);
          --waiting;
          return val;
        } else {
          // entry present but expired: reaper owns it, wait and retry
          retry = true;
        }
      }
      if (retry)
        cond.Wait(lock);
    } while (retry);
    // No live entry: create one and register it under the key.
    V *new_value = new V();
    VPtr new_val(new_value, Cleanup(this, key));
    weak_refs.insert(make_pair(key, make_pair(new_val, new_value)));
    lru_add(key, new_val, &to_release);
    --waiting;
    return new_val;
  }
}
/**
 * Return a live value whose key is the smallest key >= @p key, or the
 * largest key in the map when nothing is >= @p key.  Returns an empty
 * VPtr when the registry is empty.  If the chosen entry has expired,
 * wait for its removal to finish and retry.
 */
VPtr lower_bound(const K& key) {
  VPtr found;
  list<VPtr> released;  // references dropped outside the lock
  {
    Mutex::Locker guard(lock);
    while (!weak_refs.empty()) {
      typename map<K, WeakVPtr>::iterator it = weak_refs.lower_bound(key);
      if (it == weak_refs.end())
        --it;  // nothing >= key: fall back to the last entry
      found = it->second.lock();
      if (found) {
        lru_add(it->first, found, &released);
        break;
      }
      // Entry expired but not yet reaped: wait for the reaper, retry.
      cond.Wait(lock);
    }
  }
  return found;
}
/**
 * Return a live value for @p key, or an empty VPtr when the key is not
 * registered.  If the entry exists but its weak_ptr has expired, an
 * asynchronous removal is pending: register in `waiting`, sleep on the
 * condition variable, and retry until the slot is gone or live.
 */
VPtr lookup(const K& key) {
  VPtr result;
  list<VPtr> released;  // refs dropped here die after the lock is released
  {
    Mutex::Locker guard(lock);
    ++waiting;
    for (;;) {
      typename map<K, pair<WeakVPtr, V*>, C>::iterator it = weak_refs.find(key);
      if (it == weak_refs.end())
        break;  // not registered: return empty
      result = it->second.first.lock();
      if (result) {
        lru_add(key, result, &released);
        break;
      }
      // Expired but not reaped yet: wait for the reaper, then retry.
      cond.Wait(lock);
    }
    --waiting;
  }
  return result;
}
/*
 * range_alloc - allocate and initialize a new ashmem_range and link it
 * into the unpinned list immediately before @prev_range.
 *
 * @asma:       owning ashmem area
 * @prev_range: existing range; the new range is inserted before it
 * @purged:     initial purge state for the new range
 * @start:      first page of the range
 * @end:        last page of the range
 *
 * Returns 0 on success, -ENOMEM if allocation fails.
 *
 * Fix: the NULL check must come before the memset -- the original
 * zeroed the buffer first and would dereference NULL on allocation
 * failure.
 */
static int range_alloc(struct ashmem_area *asma,
                       struct ashmem_range *prev_range,
                       unsigned int purged, size_t start, size_t end)
{
    struct ashmem_range *range;

    range = kmalloc(sizeof(struct ashmem_range));
    if (!range)
        return -ENOMEM;
    memset(range, 0, sizeof(struct ashmem_range));

    range->asma = asma;
    range->pgstart = start;
    range->pgend = end;
    range->purged = purged;

    list_add_before(&prev_range->unpinned, &range->unpinned);

    /* Unpurged ranges are reclaim candidates and belong on the LRU. */
    if (range_on_lru(range))
        lru_add(range);

    return 0;
}
/***
 * Insert @p value under @p key unless the key is already present, and
 * hand back a reference to whatever the map holds for that key
 * afterwards.  A pre-existing key also gets bumped in the LRU.  When the
 * key already existed, the caller still owns (and must delete) the
 * @p value it offered.
 *
 * @param key     The key to insert
 * @param value   The value that goes with the key
 * @param existed Set to true if the value was already in the
 *                map, false otherwise
 * @return A reference to the map's value for the given key
 */
VPtr add(const K& key, V *value, bool *existed = NULL) {
  VPtr inserted;
  list<VPtr> released;  // refs dropped after the lock goes away
  {
    Mutex::Locker guard(lock);
    // lower_bound doubles as both the existence probe and the insert hint.
    typename map<K, pair<WeakVPtr, V*>, C>::iterator hint =
      weak_refs.lower_bound(key);
    const bool already = (hint != weak_refs.end() && hint->first == key);
    if (existed)
      *existed = already;
    if (already)
      return hint->second.first.lock();
    inserted = VPtr(value, Cleanup(this, key));
    weak_refs.insert(hint, make_pair(key, make_pair(inserted, value)));
    lru_add(key, inserted, &released);
  }
  return inserted;
}
/*
 * Record @oid in the object-status LRU.  A no-op when the object tree
 * has not been initialized.  The key is GC-allocated so the LRU can
 * hold it for as long as it likes.
 */
void object_cache_validate(u64 oid)
{
    u64 *stored;

    if (objectroot == NULL)
        return;

    stored = GC_NEW_ATOMIC(u64);
    assert(stored != NULL);
    *stored = oid;

    /* same pointer serves as both key and value */
    lru_add(object_cache_status, stored, stored);
}
/**
 * Take ownership of @p value, register it under @p key, and push it
 * into the LRU.  Returns the shared reference; the registry keeps only
 * a weak reference plus whatever the LRU pins.
 */
VPtr add(K key, V *value) {
  list<VPtr> released;  // destroyed after the lock scope below
  VPtr ref(value, Cleanup(this, key));
  {
    Mutex::Locker guard(lock);
    weak_refs.insert(make_pair(key, ref));
    lru_add(key, ref, &released);
  }
  return ref;
}
/*
 * Promote element @index in @llist to most-recently-used and stamp it
 * with @key.  If the element is already at the tail, only the key needs
 * refreshing; otherwise unlink it and re-add it.
 *
 * Returns 0 on success, -EINVAL for a NULL list or out-of-range index.
 */
int lru_touch(lru_list_t *llist, index_t index, u_int64_t key)
{
    if (llist == NULL || index >= llist->ll_max)
        return -EINVAL;

    if (llist->ll_tail == index) {
        /* already most recent: just refresh the key */
        llist->ll_elem[index].le_key = key;
        return 0;
    }

    lru_rem(llist, index);
    lru_add(llist, index, key);
    return 0;
}
/*
 * Add physical block @pb to the page cache, keyed by device position
 * (and, when known, inode position).  Takes an extra reference on the
 * block and marks it PBF_INCACHE.
 *
 * Returns OK on success, EINVAL if the block is already cached,
 * ENOMEM if no cache node can be allocated.
 */
int addcache(dev_t dev, u64_t dev_off, ino_t ino, u64_t ino_off,
    struct phys_block *pb)
{
    struct cached_page *cp;
    int bucket;

    if (pb->flags & PBF_INCACHE) {
        printf("VM: already in cache\n");
        return EINVAL;
    }

    if (!SLABALLOC(cp)) {
        printf("VM: no memory for cache node\n");
        return ENOMEM;
    }

    assert(dev != NO_DEV);

#if CACHE_SANITY
    assert(!find_cached_page_bydev(dev, dev_off, ino, ino_off));
#endif

    cp->dev = dev;
    cp->dev_offset = dev_off;
    cp->ino = ino;
    cp->ino_offset = ino_off;
    cp->page = pb;
    pb->refcount++;  /* block also referenced by cache now */
    pb->flags |= PBF_INCACHE;

    /* link into the by-device hash chain */
    bucket = makehash(dev, dev_off);
    cp->hash_next_dev = cache_hash_bydev[bucket];
    cache_hash_bydev[bucket] = cp;

    if (cp->ino != VMC_NO_INODE)
        addcache_byino(cp);

    lru_add(cp);

    return OK;
}
/**
 * Return a live value for @p key, or an empty VPtr if the key is not
 * registered.  If the entry exists but has expired, wait for the
 * pending removal and retry.
 *
 * Fix: the original probed the map twice per iteration —
 * weak_refs.count(key) followed by weak_refs[key] — doing two O(log n)
 * lookups where one suffices.  A single find() preserves behavior
 * exactly (count() guarded operator[], so no default-insert occurred).
 */
VPtr lookup(K key) {
  VPtr val;
  list<VPtr> to_release;  // refs dropped here are released after the lock
  {
    Mutex::Locker l(lock);
    bool retry = false;
    do {
      retry = false;
      typename map<K, WeakVPtr>::iterator i = weak_refs.find(key);
      if (i != weak_refs.end()) {
        val = i->second.lock();
        if (val) {
          lru_add(key, val, &to_release);
        } else {
          // expired but not reaped yet: wait for the reaper, retry
          retry = true;
        }
      }
      if (retry)
        cond.Wait(lock);
    } while (retry);
  }
  return val;
}
/*
 * Refresh a cached page's LRU position: unlink it from the list and
 * re-insert it via lru_add (presumably at the MRU end -- see lru_add).
 */
void cache_lru_touch(struct cached_page *page)
{
    lru_rm(page);
    lru_add(page);
}