/* *
 * slotsmgrttagslot host port timeout slot
 * */
void slotsmgrttagslotCommand(redisClient *c) {
    sds host = c->argv[1]->ptr;
    sds port = c->argv[2]->ptr;
    int timeout, slot;
    if (parse_timeout(c, c->argv[3], &timeout) != 0) {
        return;
    }
    if (parse_slot(c, c->argv[4], &slot) != 0) {
        return;
    }
    dict *d = c->db->hash_slots[slot];
    int succ = 0;
    do {
        const dictEntry *de = dictGetRandomKey(d);
        if (de == NULL) {
            break;
        }
        sds skey = dictGetKey(de);
        robj *key = createStringObject(skey, sdslen(skey));
        succ = slotsmgrttag_command(c, host, port, timeout, key);
        decrRefCount(key);
        if (succ < 0) {
            return;
        }
    } while (0);
    addReplyMultiBulkLen(c, 2);
    addReplyLongLong(c, succ);
    addReplyLongLong(c, dictSize(d));
}
/* Return random element from a non empty set.
 * The returned element can be an int64_t value if the set is encoded
 * as an "intset" blob of integers, or a redis object if the set
 * is a regular set.
 *
 * The caller provides both pointers to be populated with the right
 * object. The return value of the function is the object->encoding
 * field of the object and is used by the caller to check if the
 * int64_t pointer or the redis object pointer was populated.
 *
 * When an object is returned (the set was a real set) the ref count
 * of the object is not incremented so this function can be considered
 * copy on write friendly. */
int setTypeRandomElement(robj *setobj, robj **objele, int64_t *llele) {
    if (setobj->encoding == REDIS_ENCODING_HT) {
        dictEntry *de = dictGetRandomKey(setobj->ptr);
        *objele = dictGetEntryKey(de);
    } else if (setobj->encoding == REDIS_ENCODING_INTSET) {
        *llele = intsetRandom(setobj->ptr);
    } else {
        redisPanic("Unknown set encoding");
    }
    return setobj->encoding;
}
const sds dbGetRandSet(void) {
    dictEntry *entry = NULL;
    sds result = NULL;

    lockRead(sets);
    if (NULL != (entry = dictGetRandomKey(sets))) {
        result = (sds) entry->key;
    }
    unlockRead(sets);

    return result;
}
/* Return random element from a non empty set.
 * The returned element can be an int64_t value if the set is encoded
 * as an "intset" blob of integers, or a redis object if the set
 * is a regular set.
 *
 * The caller provides both pointers to be populated with the right
 * object. The return value of the function is the object->encoding
 * field of the object and is used by the caller to check if the
 * int64_t pointer or the redis object pointer was populated.
 *
 * Note that both the objele and llele pointers should be passed and cannot
 * be NULL since the function will try to defensively populate the non
 * used field with values which are easy to trap if misused.
 *
 * When an object is returned (the set was a real set) the ref count
 * of the object is not incremented so this function can be considered
 * copy on write friendly. */
int setTypeRandomElement(robj *setobj, robj **objele, int64_t *llele) {
    if (setobj->encoding == REDIS_ENCODING_HT) {
        dictEntry *de = dictGetRandomKey(setobj->ptr);
        *objele = dictGetKey(de);
        *llele = -123456789; /* Not needed. Defensive. */
    } else if (setobj->encoding == REDIS_ENCODING_INTSET) {
        *llele = intsetRandom(setobj->ptr);
        *objele = NULL; /* Not needed. Defensive. */
    } else {
        redisPanic("Unknown set encoding");
    }
    return setobj->encoding;
}
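The encoding value returned by setTypeRandomElement is what tells the caller which of the two out-parameters is valid. Below is a minimal standalone sketch of that same return-tag convention in plain C, with no Redis headers; ENC_INT, ENC_PTR and pick_random are illustrative names, not Redis APIs.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

enum { ENC_INT, ENC_PTR };   /* stand-ins for REDIS_ENCODING_INTSET / _HT */

/* Populate either 'll' or 'str' and return a tag saying which one. */
static int pick_random(int use_ints, const char **str, int64_t *ll) {
    if (use_ints) {
        *ll = rand() % 100;
        *str = NULL;            /* not needed, defensive */
        return ENC_INT;
    } else {
        static const char *members[] = { "a", "b", "c" };
        *str = members[rand() % 3];
        *ll = -123456789;       /* not needed, defensive */
        return ENC_PTR;
    }
}

int main(void) {
    const char *str;
    int64_t ll;

    /* The caller must branch on the returned tag, exactly as
     * srandmemberWithCountCommand branches on the encoding. */
    if (pick_random(0, &str, &ll) == ENC_INT)
        printf("%lld\n", (long long) ll);
    else
        printf("%s\n", str);
    return 0;
}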
/* Try to share an object against the shared objects pool */
static robj *tryObjectSharing(robj *o) {
    struct dictEntry *de;
    unsigned long c;

    if (o == NULL || server.shareobjects == 0) return o;
    de = dictFind(server.sharingpool,o);
    if (de) {
        robj *shared = dictGetEntryKey(de);

        c = ((unsigned long) dictGetEntryVal(de))+1;
        dictGetEntryVal(de) = (void*) c;
        incrRefCount(shared);
        decrRefCount(o);
        return shared;
    } else {
        /* Here we are using a stream algorithm: every time an object is
         * shared we increment its count, every time there is a miss we
         * decrement the counter of a random object. If this object reaches
         * zero we remove the object and put the current object instead. */
        if (dictSize(server.sharingpool) >= server.sharingpoolsize) {
            de = dictGetRandomKey(server.sharingpool);
            c = ((unsigned long) dictGetEntryVal(de))-1;
            dictGetEntryVal(de) = (void*) c;
            if (c == 0) {
                dictDelete(server.sharingpool,de->key);
            }
        } else {
            c = 0; /* If the pool is empty we want to add this object */
        }
        if (c == 0) {
            int retval;

            retval = dictAdd(server.sharingpool,o,(void*)1);
            redisAssert(retval == DICT_OK);
            incrRefCount(o);
        }
        return o;
    }
}
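The "stream algorithm" mentioned in that comment is a simple frequency-approximation trick: hits increment a counter, misses decay a random existing entry, and a newcomer only displaces an entry once its counter reaches zero. Here is a toy, self-contained sketch of the idea on a fixed-size array instead of a dict; the pool size, slot struct and observe() helper are made up for illustration.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy fixed-size "sharing pool": value -> hit counter.  On a hit the
 * counter goes up; on a miss a random slot is decremented and the
 * newcomer only enters once a slot has decayed to zero. */
#define POOL_SIZE 4

struct slot { char val[16]; unsigned long count; int used; };
static struct slot pool[POOL_SIZE];

static void observe(const char *v) {
    for (int i = 0; i < POOL_SIZE; i++)
        if (pool[i].used && strcmp(pool[i].val, v) == 0) { pool[i].count++; return; }

    /* Miss: take a free slot if any, otherwise decay a random victim. */
    for (int i = 0; i < POOL_SIZE; i++)
        if (!pool[i].used) { strcpy(pool[i].val, v); pool[i].count = 1; pool[i].used = 1; return; }

    int k = rand() % POOL_SIZE;
    if (--pool[k].count == 0) { strcpy(pool[k].val, v); pool[k].count = 1; }
}

int main(void) {
    const char *stream[] = { "0", "0", "1", "0", "2", "0", "3", "4", "0" };
    for (size_t i = 0; i < sizeof(stream)/sizeof(*stream); i++) observe(stream[i]);
    for (int i = 0; i < POOL_SIZE; i++)
        if (pool[i].used) printf("%s -> %lu\n", pool[i].val, pool[i].count);
    return 0;
}

Frequently shared values (like "0" above) keep a high counter and survive, while one-off values churn through the remaining slots, which is the same effect the sharing pool relies on.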
/* Return a random key, in form of a Redis object.
 * If there are no keys, NULL is returned.
 * Pick a random key from the database.
 * The function makes sure to return keys not already expired. */
robj *dbRandomKey(redisDb *db) {
    struct dictEntry *de;

    while(1) {
        sds key;
        robj *keyobj;

        de = dictGetRandomKey(db->dict);
        if (de == NULL) return NULL;

        key = dictGetKey(de);
        keyobj = createStringObject(key,sdslen(key));
        if (dictFind(db->expires,key)) { // does the key have an expire set?
            if (expireIfNeeded(db,keyobj)) { // the key turned out to be expired
                decrRefCount(keyobj); // drop the refcount of the temporary key object
                continue; /* search for another key. This expired. */
            }
        }
        return keyobj;
    }
}
/* Return a random key, in form of a Redis object.
 * If there are no keys, NULL is returned.
 *
 * The function makes sure to return keys not already expired. */
robj *dbRandomKey(redisDb *db) {
    struct dictEntry *de;

    while(1) {
        sds key;
        robj *keyobj;

        de = dictGetRandomKey(db->dict);
        if (de == NULL) return NULL;

        key = dictGetEntryKey(de);
        keyobj = createStringObject(key,sdslen(key),sdslogiclock(key),sdsversion(key));
        if (dictFind(db->expires,key)) {
            if (expireIfNeeded(db,keyobj)) {
                decrRefCount(keyobj);
                continue; /* search for another key. This expired. */
            }
        }
        return keyobj;
    }
}
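Both dbRandomKey variants above use the same rejection-sampling pattern: draw a random entry and retry if it turns out to be expired. A minimal standalone sketch of that loop follows (plain C, illustrative data); unlike the real function, which can rely on expireIfNeeded() removing expired keys and therefore loops unconditionally, the sketch bounds the number of retries.

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

struct kv { const char *key; long expire_at; };   /* 0 = no expire */

/* Keep sampling until we hit a key that is not already expired;
 * give up after a bounded number of tries instead of looping forever. */
static const char *random_live_key(const struct kv *tab, int n, long now) {
    for (int tries = 0; tries < 100; tries++) {
        const struct kv *e = &tab[rand() % n];
        if (e->expire_at == 0 || e->expire_at > now) return e->key;
    }
    return NULL;
}

int main(void) {
    struct kv tab[] = {
        { "session:1", 10 },   /* already expired at now=100 */
        { "config",     0 },   /* no TTL */
        { "cache:x",  500 },
    };
    srand((unsigned) time(NULL));
    printf("%s\n", random_live_key(tab, 3, 100));
    return 0;
}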
void srandmemberWithCountCommand(redisClient *c) {
    long l;
    unsigned long count, size;
    int uniq = 1;
    robj *set, *ele;
    int64_t llele;
    int encoding;
    dict *d;

    if (getLongFromObjectOrReply(c,c->argv[2],&l,NULL) != REDIS_OK) return;
    if (l >= 0) {
        count = (unsigned) l;
    } else {
        /* A negative count means: return the same elements multiple times
         * (i.e. don't remove the extracted element after every extraction). */
        count = -l;
        uniq = 0;
    }

    if ((set = lookupKeyReadOrReply(c,c->argv[1],shared.emptymultibulk))
        == NULL || checkType(c,set,REDIS_SET)) return;
    size = setTypeSize(set);

    /* If count is zero, serve it ASAP to avoid special cases later. */
    if (count == 0) {
        addReply(c,shared.emptymultibulk);
        return;
    }

    /* CASE 1: The count was negative, so the extraction method is just:
     * "return N random elements" sampling the whole set every time.
     * This case is trivial and can be served without auxiliary data
     * structures. */
    if (!uniq) {
        addReplyMultiBulkLen(c,count);
        while(count--) {
            encoding = setTypeRandomElement(set,&ele,&llele);
            if (encoding == REDIS_ENCODING_INTSET) {
                addReplyBulkLongLong(c,llele);
            } else {
                addReplyBulk(c,ele);
            }
        }
        return;
    }

    /* CASE 2:
     * The number of requested elements is greater than the number of
     * elements inside the set: simply return the whole set. */
    if (count >= size) {
        sunionDiffGenericCommand(c,c->argv+1,1,NULL,REDIS_OP_UNION);
        return;
    }

    /* For CASE 3 and CASE 4 we need an auxiliary dictionary. */
    d = dictCreate(&setDictType,NULL);

    /* CASE 3:
     * The number of elements inside the set is not greater than
     * SRANDMEMBER_SUB_STRATEGY_MUL times the number of requested elements.
     * In this case we create a set from scratch with all the elements, and
     * subtract random elements to reach the requested number of elements.
     *
     * This is done because if the number of requested elements is just
     * a bit less than the number of elements in the set, the natural approach
     * used into CASE 4 is highly inefficient. */
    if (count*SRANDMEMBER_SUB_STRATEGY_MUL > size) {
        setTypeIterator *si;

        /* Add all the elements into the temporary dictionary. */
        si = setTypeInitIterator(set);
        while((encoding = setTypeNext(si,&ele,&llele)) != -1) {
            int retval = DICT_ERR;

            if (encoding == REDIS_ENCODING_INTSET) {
                retval = dictAdd(d,createStringObjectFromLongLong(llele),NULL);
            } else if (ele->encoding == REDIS_ENCODING_RAW) {
                retval = dictAdd(d,dupStringObject(ele),NULL);
            } else if (ele->encoding == REDIS_ENCODING_INT) {
                retval = dictAdd(d,
                    createStringObjectFromLongLong((long)ele->ptr),NULL);
            }
            redisAssert(retval == DICT_OK);
        }
        setTypeReleaseIterator(si);
        redisAssert(dictSize(d) == size);

        /* Remove random elements to reach the right count. */
        while(size > count) {
            dictEntry *de;

            de = dictGetRandomKey(d);
            dictDelete(d,dictGetKey(de));
            size--;
        }
    }

    /* CASE 4: We have a big set compared to the requested number of elements.
     * In this case we can simply get random elements from the set and add
     * to the temporary set, trying to eventually get enough unique elements
     * to reach the specified count. */
    else {
        unsigned long added = 0;

        while(added < count) {
            encoding = setTypeRandomElement(set,&ele,&llele);
            if (encoding == REDIS_ENCODING_INTSET) {
                ele = createStringObjectFromLongLong(llele);
            } else if (ele->encoding == REDIS_ENCODING_RAW) {
                ele = dupStringObject(ele);
            } else if (ele->encoding == REDIS_ENCODING_INT) {
                ele = createStringObjectFromLongLong((long)ele->ptr);
            }
            /* Try to add the object to the dictionary. If it already exists
             * free it, otherwise increment the number of objects we have
             * in the result dictionary. */
            if (dictAdd(d,ele,NULL) == DICT_OK)
                added++;
            else
                decrRefCount(ele);
        }
    }

    /* CASE 3 & 4: send the result to the user. */
    {
        dictIterator *di;
        dictEntry *de;

        addReplyMultiBulkLen(c,count);
        di = dictGetIterator(d);
        while((de = dictNext(di)) != NULL)
            addReplyBulk(c,dictGetKey(de));
        dictReleaseIterator(di);
        dictRelease(d);
    }
}
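The CASE 3 / CASE 4 split is the interesting part: when the requested count is close to the set size, copying everything and knocking out random survivors is cheaper than sampling until enough unique elements are found. The following self-contained sketch applies the same threshold to picking k distinct indexes out of n; SUB_STRATEGY_MUL and pick_distinct are illustrative names, not Redis identifiers.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define SUB_STRATEGY_MUL 3   /* same role as SRANDMEMBER_SUB_STRATEGY_MUL */

/* Pick 'k' distinct indexes out of 'n' using the same two strategies:
 * if k*MUL > n, start from all indexes and knock random ones out;
 * otherwise sample random indexes until k distinct ones are collected. */
static void pick_distinct(int n, int k, int *out) {
    char *in = calloc(n, 1);
    int have;

    if (k * SUB_STRATEGY_MUL > n) {
        memset(in, 1, n);                       /* CASE 3: start with everything... */
        for (have = n; have > k; ) {
            int r = rand() % n;
            if (in[r]) { in[r] = 0; have--; }   /* ...remove random survivors */
        }
    } else {
        for (have = 0; have < k; ) {            /* CASE 4: accumulate random picks */
            int r = rand() % n;
            if (!in[r]) { in[r] = 1; have++; }
        }
    }
    for (int i = 0, j = 0; i < n; i++)
        if (in[i]) out[j++] = i;
    free(in);
}

int main(void) {
    int out[5];
    pick_distinct(100, 5, out);                 /* large set, few picks: CASE 4 path */
    for (int i = 0; i < 5; i++) printf("%d ", out[i]);
    printf("\n");
    return 0;
}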
int freeMemoryIfNeeded(void) {
    size_t mem_reported, mem_used, mem_tofree, mem_freed;
    int slaves = listLength(server.slaves);
    mstime_t latency, eviction_latency;
    long long delta;

    /* Check if we are over the memory usage limit. If we are not, no need
     * to subtract the slaves output buffers. We can just return ASAP. */
    mem_reported = zmalloc_used_memory();
    if (mem_reported <= server.maxmemory) return C_OK;

    /* Remove the size of slaves output buffers and AOF buffer from the
     * count of used memory. */
    mem_used = mem_reported;
    if (slaves) {
        listIter li;
        listNode *ln;

        listRewind(server.slaves,&li);
        while((ln = listNext(&li))) {
            client *slave = listNodeValue(ln);
            unsigned long obuf_bytes = getClientOutputBufferMemoryUsage(slave);
            if (obuf_bytes > mem_used)
                mem_used = 0;
            else
                mem_used -= obuf_bytes;
        }
    }
    if (server.aof_state != AOF_OFF) {
        mem_used -= sdslen(server.aof_buf);
        mem_used -= aofRewriteBufferSize();
    }

    /* Check if we are still over the memory limit. */
    if (mem_used <= server.maxmemory) return C_OK;

    /* Compute how much memory we need to free. */
    mem_tofree = mem_used - server.maxmemory;
    mem_freed = 0;

    if (server.maxmemory_policy == MAXMEMORY_NO_EVICTION)
        goto cant_free; /* We need to free memory, but policy forbids. */

    latencyStartMonitor(latency);
    while (mem_freed < mem_tofree) {
        int j, k, i, keys_freed = 0;
        static int next_db = 0;
        sds bestkey = NULL;
        int bestdbid;
        redisDb *db;
        dict *dict;
        dictEntry *de;

        if (server.maxmemory_policy == MAXMEMORY_ALLKEYS_LRU ||
            server.maxmemory_policy == MAXMEMORY_VOLATILE_LRU)
        {
            struct evictionPoolEntry *pool = EvictionPoolLRU;

            while(bestkey == NULL) {
                unsigned long total_keys = 0, keys;

                /* We don't want to make local-db choices when expiring keys,
                 * so to start populate the eviction pool sampling keys from
                 * every DB. */
                for (i = 0; i < server.dbnum; i++) {
                    db = server.db+i;
                    dict = (server.maxmemory_policy == MAXMEMORY_ALLKEYS_LRU) ?
                            db->dict : db->expires;
                    if ((keys = dictSize(dict)) != 0) {
                        evictionPoolPopulate(i, dict, db->dict, pool);
                        total_keys += keys;
                    }
                }
                if (!total_keys) break; /* No keys to evict. */

                /* Go backward from best to worst element to evict. */
                for (k = EVPOOL_SIZE-1; k >= 0; k--) {
                    if (pool[k].key == NULL) continue;
                    bestdbid = pool[k].dbid;

                    if (server.maxmemory_policy == MAXMEMORY_ALLKEYS_LRU) {
                        de = dictFind(server.db[pool[k].dbid].dict,
                            pool[k].key);
                    } else {
                        de = dictFind(server.db[pool[k].dbid].expires,
                            pool[k].key);
                    }

                    /* Remove the entry from the pool. */
                    if (pool[k].key != pool[k].cached)
                        sdsfree(pool[k].key);
                    pool[k].key = NULL;
                    pool[k].idle = 0;

                    /* If the key exists, it is our pick. Otherwise it is
                     * a ghost and we need to try the next element. */
                    if (de) {
                        bestkey = dictGetKey(de);
                        break;
                    } else {
                        /* Ghost... Iterate again. */
                    }
                }
            }
        }

        /* volatile-random and allkeys-random policy */
        else if (server.maxmemory_policy == MAXMEMORY_ALLKEYS_RANDOM ||
                 server.maxmemory_policy == MAXMEMORY_VOLATILE_RANDOM)
        {
            /* When evicting a random key, we try to evict a key for
             * each DB, so we use the static 'next_db' variable to
             * incrementally visit all DBs. */
            for (i = 0; i < server.dbnum; i++) {
                j = (++next_db) % server.dbnum;
                db = server.db+j;
                dict = (server.maxmemory_policy == MAXMEMORY_ALLKEYS_RANDOM) ?
                        db->dict : db->expires;
                if (dictSize(dict) != 0) {
                    de = dictGetRandomKey(dict);
                    bestkey = dictGetKey(de);
                    bestdbid = j;
                    break;
                }
            }
        }

        /* volatile-ttl */
        else if (server.maxmemory_policy == MAXMEMORY_VOLATILE_TTL) {
            long bestttl = 0; /* Initialized to avoid warning. */

            /* In this policy we scan a single DB per iteration (visiting
             * a different DB per call), expiring the key with the smallest
             * TTL among the few sampled.
             *
             * Note that this algorithm makes local-DB choices, and should
             * use a pool and code more similar to the one used in the
             * LRU eviction policies in the future. */
            for (i = 0; i < server.dbnum; i++) {
                j = (++next_db) % server.dbnum;
                db = server.db+j;
                dict = db->expires;
                if (dictSize(dict) != 0) {
                    for (k = 0; k < server.maxmemory_samples; k++) {
                        sds thiskey;
                        long thisttl;

                        de = dictGetRandomKey(dict);
                        thiskey = dictGetKey(de);
                        thisttl = (long) dictGetVal(de);

                        /* Keys expiring sooner (smaller unix timestamp) are
                         * better candidates for deletion */
                        if (bestkey == NULL || thisttl < bestttl) {
                            bestkey = thiskey;
                            bestttl = thisttl;
                            bestdbid = j;
                        }
                    }
                }
            }
        }

        /* Finally remove the selected key. */
        if (bestkey) {
            db = server.db+bestdbid;
            robj *keyobj = createStringObject(bestkey,sdslen(bestkey));
            propagateExpire(db,keyobj,server.lazyfree_lazy_eviction);
            /* We compute the amount of memory freed by db*Delete() alone.
             * It is possible that actually the memory needed to propagate
             * the DEL in AOF and replication link is greater than the one
             * we are freeing removing the key, but we can't account for
             * that otherwise we would never exit the loop.
             *
             * AOF and Output buffer memory will be freed eventually so
             * we only care about memory used by the key space. */
            delta = (long long) zmalloc_used_memory();
            latencyStartMonitor(eviction_latency);
            if (server.lazyfree_lazy_eviction)
                dbAsyncDelete(db,keyobj);
            else
                dbSyncDelete(db,keyobj);
            latencyEndMonitor(eviction_latency);
            latencyAddSampleIfNeeded("eviction-del",eviction_latency);
            latencyRemoveNestedEvent(latency,eviction_latency);
            delta -= (long long) zmalloc_used_memory();
            mem_freed += delta;
            server.stat_evictedkeys++;
            notifyKeyspaceEvent(NOTIFY_EVICTED, "evicted",
                keyobj, db->id);
            decrRefCount(keyobj);
            keys_freed++;

            /* When the memory to free starts to be big enough, we may
             * start spending so much time here that is impossible to
             * deliver data to the slaves fast enough, so we force the
             * transmission here inside the loop. */
            if (slaves) flushSlavesOutputBuffers();
        }

        if (!keys_freed) {
            latencyEndMonitor(latency);
            latencyAddSampleIfNeeded("eviction-cycle",latency);
            goto cant_free; /* nothing to free... */
        }
    }
    latencyEndMonitor(latency);
    latencyAddSampleIfNeeded("eviction-cycle",latency);
    return C_OK;

cant_free:
    /* We are here if we are not able to reclaim memory. There is only one
     * last thing we can try: check if the lazyfree thread has jobs in queue
     * and wait... */
    while(bioPendingJobsOfType(BIO_LAZY_FREE)) {
        if (((mem_reported - zmalloc_used_memory()) + mem_freed) >= mem_tofree)
            break;
        usleep(1000);
    }
    return C_ERR;
}
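The volatile-ttl branch above boils down to "probe a handful of random entries and keep the one expiring soonest". A minimal standalone sketch of that sampling loop follows (plain C; the entry struct, SAMPLES and pick_victim are illustrative, with rand()%n standing in for dictGetRandomKey()).

#include <stdio.h>
#include <stdlib.h>

#define SAMPLES 5   /* plays the role of server.maxmemory_samples */

struct entry { const char *key; long expire_at; };

/* Return the index of the sampled entry expiring soonest. */
static int pick_victim(const struct entry *tab, int n) {
    int best = -1;
    long best_ttl = 0;
    for (int k = 0; k < SAMPLES; k++) {
        int r = rand() % n;                 /* random probe */
        if (best == -1 || tab[r].expire_at < best_ttl) {
            best = r;
            best_ttl = tab[r].expire_at;
        }
    }
    return best;
}

int main(void) {
    struct entry tab[] = {
        { "a", 1700000300 }, { "b", 1700000100 },
        { "c", 1700000500 }, { "d", 1700000200 },
    };
    int v = pick_victim(tab, 4);
    printf("evict %s (expires at %ld)\n", tab[v].key, tab[v].expire_at);
    return 0;
}

Because only SAMPLES entries are inspected, the choice is approximate rather than the global minimum, which is exactly the trade-off the comment acknowledges when it says a shared pool should eventually replace this per-DB sampling.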
/* Try to swap an object that's a good candidate for swapping.
 * Returns REDIS_OK if the object was swapped, REDIS_ERR if it's not possible
 * to swap any object at all.
 *
 * If 'usethreads' is true, Redis will try to swap the object in background
 * using I/O threads. */
int vmSwapOneObject(int usethreads) {
    int j, i;
    struct dictEntry *best = NULL;
    double best_swappability = 0;
    redisDb *best_db = NULL;
    robj *val;
    sds key;

    for (j = 0; j < server.dbnum; j++) {
        redisDb *db = server.db+j;
        /* Why is maxtries set to 100?
         * Because this way (usually) we'll find 1 object even if just 1% - 2%
         * are swappable objects */
        int maxtries = 100;

        if (dictSize(db->dict) == 0) continue;
        for (i = 0; i < 5; i++) {
            dictEntry *de;
            double swappability;

            if (maxtries) maxtries--;
            de = dictGetRandomKey(db->dict);
            val = dictGetEntryVal(de);
            /* Only swap objects that are currently in memory.
             *
             * Also don't swap shared objects: not a good idea in general and
             * we need to ensure that the main thread does not touch the
             * object while the I/O thread is using it, but we can't
             * control other keys without adding additional mutex. */
            if (val->storage != REDIS_VM_MEMORY || val->refcount != 1) {
                if (maxtries) i--; /* don't count this try */
                continue;
            }
            swappability = computeObjectSwappability(val);
            if (!best || swappability > best_swappability) {
                best = de;
                best_swappability = swappability;
                best_db = db;
            }
        }
    }
    if (best == NULL) return REDIS_ERR;
    key = dictGetEntryKey(best);
    val = dictGetEntryVal(best);

    redisLog(REDIS_DEBUG,"Key with best swappability: %s, %f",
        key, best_swappability);

    /* Swap it */
    if (usethreads) {
        robj *keyobj = createStringObject(key,sdslen(key));
        vmSwapObjectThreaded(keyobj,val,best_db);
        decrRefCount(keyobj);
        return REDIS_OK;
    } else {
        vmpointer *vp;

        if ((vp = vmSwapObjectBlocking(val)) != NULL) {
            dictGetEntryVal(best) = vp;
            return REDIS_OK;
        } else {
            return REDIS_ERR;
        }
    }
}
/* How good a candidate is this object for swapping?
 * The better candidate it is, the greater the returned value.
 *
 * Currently we try to perform a fast estimation of the object size in
 * memory, and combine it with aging information.
 *
 * Basically swappability = idle-time * log(estimated size)
 *
 * Bigger objects are preferred over smaller objects, but not
 * proportionally, this is why we use the logarithm. This algorithm is
 * just a first try and will probably be tuned later. */
double computeObjectSwappability(robj *o) {
    /* actual age can be >= minage, but not < minage. As we use wrapping
     * 21 bit clocks with minutes resolution for the LRU. */
    time_t minage = estimateObjectIdleTime(o);
#ifdef _WIN32
    ssize_t asize = 0, elesize;
#else
    long asize = 0, elesize;
#endif
    robj *ele;
    list *l;
    listNode *ln;
    dict *d;
    struct dictEntry *de;

    if (minage <= 0) return 0;
    switch(o->type) {
    case REDIS_STRING:
        if (o->encoding != REDIS_ENCODING_RAW) {
            asize = sizeof(*o);
        } else {
#ifdef _WIN32
            asize = sdslen(o->ptr)+sizeof(*o)+sizeof(size_t)*2;
#else
            asize = sdslen(o->ptr)+sizeof(*o)+sizeof(long)*2;
#endif
        }
        break;
    case REDIS_LIST:
        if (o->encoding == REDIS_ENCODING_ZIPLIST) {
            asize = sizeof(*o)+ziplistBlobLen(o->ptr);
        } else {
            l = o->ptr;
            ln = listFirst(l);
            asize = sizeof(list);
            if (ln) {
                ele = ln->value;
                elesize = (ele->encoding == REDIS_ENCODING_RAW) ?
                                (sizeof(*o)+sdslen(ele->ptr)) : sizeof(*o);
                asize += (sizeof(listNode)+elesize)*listLength(l);
            }
        }
        break;
    case REDIS_SET:
        if (o->encoding == REDIS_ENCODING_INTSET) {
            intset *is = o->ptr;
            asize = sizeof(*is)+is->encoding*is->length;
        } else {
            d = o->ptr;
            asize = sizeof(dict)+(sizeof(struct dictEntry*)*dictSlots(d));
            if (dictSize(d)) {
                de = dictGetRandomKey(d);
                ele = dictGetEntryKey(de);
                elesize = (ele->encoding == REDIS_ENCODING_RAW) ?
                                (sizeof(*o)+sdslen(ele->ptr)) : sizeof(*o);
                asize += (sizeof(struct dictEntry)+elesize)*dictSize(d);
            }
        }
        break;
    case REDIS_ZSET:
        if (o->encoding == REDIS_ENCODING_ZIPLIST) {
            asize = sizeof(*o)+(ziplistBlobLen(o->ptr) / 2);
        } else {
            d = ((zset*)o->ptr)->dict;
            asize = sizeof(zset)+(sizeof(struct dictEntry*)*dictSlots(d));
            if (dictSize(d)) {
                de = dictGetRandomKey(d);
                ele = dictGetEntryKey(de);
                elesize = (ele->encoding == REDIS_ENCODING_RAW) ?
                                (sizeof(*o)+sdslen(ele->ptr)) : sizeof(*o);
                asize += (sizeof(struct dictEntry)+elesize)*dictSize(d);
                asize += sizeof(zskiplistNode)*dictSize(d);
            }
        }
        break;
    case REDIS_HASH:
        if (o->encoding == REDIS_ENCODING_ZIPMAP) {
            unsigned char *p = zipmapRewind((unsigned char*)o->ptr);
            unsigned int len = zipmapLen((unsigned char*)o->ptr);
            unsigned int klen, vlen;
            unsigned char *key, *val;

            if ((p = zipmapNext(p,&key,&klen,&val,&vlen)) == NULL) {
                klen = 0;
                vlen = 0;
            }
            asize = len*(klen+vlen+3);
        } else if (o->encoding == REDIS_ENCODING_HT) {
            d = o->ptr;
            asize = sizeof(dict)+(sizeof(struct dictEntry*)*dictSlots(d));
            if (dictSize(d)) {
                de = dictGetRandomKey(d);
                ele = dictGetEntryKey(de);
                elesize = (ele->encoding == REDIS_ENCODING_RAW) ?
                                (sizeof(*o)+sdslen(ele->ptr)) : sizeof(*o);
                ele = dictGetEntryVal(de);
                elesize = (ele->encoding == REDIS_ENCODING_RAW) ?
                                (sizeof(*o)+sdslen(ele->ptr)) : sizeof(*o);
                asize += (sizeof(struct dictEntry)+elesize)*dictSize(d);
            }
        }
        break;
    }
    return (double)minage*log(1+(double)asize);
}
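The scoring formula in the comment, swappability = idle-time * log(estimated size), is easy to sanity-check in isolation: making an object roughly k times bigger only adds about log(k) to the size factor, while making it k times older multiplies the whole score by k, so age dominates size. A tiny standalone check with made-up numbers (not Redis code):

#include <math.h>
#include <stdio.h>

/* swappability = idle-time * log(1 + estimated-size-in-bytes) */
static double swappability(double idle_seconds, double est_size) {
    return idle_seconds * log(1.0 + est_size);
}

int main(void) {
    /* A small object idle for an hour beats a huge object touched recently:
     * size only enters logarithmically, age enters linearly. */
    printf("small+old : %.1f\n", swappability(3600, 64));        /* approx. 15028 */
    printf("huge+fresh: %.1f\n", swappability(60, 1024*1024));   /* approx. 832 */
    return 0;
}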
int freeMemoryIfNeeded(void) {
    size_t mem_reported, mem_used, mem_tofree, mem_freed;
    mstime_t latency, eviction_latency;
    long long delta;
    int slaves = listLength(server.slaves);

    /* When clients are paused the dataset should be static not just from the
     * POV of clients not being able to write, but also from the POV of
     * expires and evictions of keys not being performed. */
    if (clientsArePaused()) return C_OK;

    /* Check if we are over the memory usage limit. If we are not, no need
     * to subtract the slaves output buffers. We can just return ASAP. */
    mem_reported = zmalloc_used_memory();
    if (mem_reported <= server.maxmemory) return C_OK;

    /* Remove the size of slaves output buffers and AOF buffer from the
     * count of used memory. */
    mem_used = mem_reported;
    size_t overhead = freeMemoryGetNotCountedMemory();
    mem_used = (mem_used > overhead) ? mem_used-overhead : 0;

    /* Check if we are still over the memory limit. */
    if (mem_used <= server.maxmemory) return C_OK;

    /* Compute how much memory we need to free. */
    mem_tofree = mem_used - server.maxmemory;
    mem_freed = 0;

    if (server.maxmemory_policy == MAXMEMORY_NO_EVICTION)
        goto cant_free; /* We need to free memory, but policy forbids. */

    latencyStartMonitor(latency);
    while (mem_freed < mem_tofree) {
        int j, k, i, keys_freed = 0;
        static int next_db = 0;
        sds bestkey = NULL;
        int bestdbid;
        redisDb *db;
        dict *dict;
        dictEntry *de;

        if (server.maxmemory_policy & (MAXMEMORY_FLAG_LRU|MAXMEMORY_FLAG_LFU) ||
            server.maxmemory_policy == MAXMEMORY_VOLATILE_TTL)
        {
            struct evictionPoolEntry *pool = EvictionPoolLRU;

            while(bestkey == NULL) {
                unsigned long total_keys = 0, keys;

                /* We don't want to make local-db choices when expiring keys,
                 * so to start populate the eviction pool sampling keys from
                 * every DB. */
                for (i = 0; i < server.dbnum; i++) {
                    db = server.db+i;
                    dict = (server.maxmemory_policy & MAXMEMORY_FLAG_ALLKEYS) ?
                            db->dict : db->expires;
                    if ((keys = dictSize(dict)) != 0) {
                        evictionPoolPopulate(i, dict, db->dict, pool);
                        total_keys += keys;
                    }
                }
                if (!total_keys) break; /* No keys to evict. */

                /* Go backward from best to worst element to evict. */
                for (k = EVPOOL_SIZE-1; k >= 0; k--) {
                    if (pool[k].key == NULL) continue;
                    bestdbid = pool[k].dbid;

                    if (server.maxmemory_policy & MAXMEMORY_FLAG_ALLKEYS) {
                        de = dictFind(server.db[pool[k].dbid].dict,
                            pool[k].key);
                    } else {
                        de = dictFind(server.db[pool[k].dbid].expires,
                            pool[k].key);
                    }

                    /* Remove the entry from the pool. */
                    if (pool[k].key != pool[k].cached)
                        sdsfree(pool[k].key);
                    pool[k].key = NULL;
                    pool[k].idle = 0;

                    /* If the key exists, it is our pick. Otherwise it is
                     * a ghost and we need to try the next element. */
                    if (de) {
                        bestkey = dictGetKey(de);
                        break;
                    } else {
                        /* Ghost... Iterate again. */
                    }
                }
            }
        }

        /* volatile-random and allkeys-random policy */
        else if (server.maxmemory_policy == MAXMEMORY_ALLKEYS_RANDOM ||
                 server.maxmemory_policy == MAXMEMORY_VOLATILE_RANDOM)
        {
            /* When evicting a random key, we try to evict a key for
             * each DB, so we use the static 'next_db' variable to
             * incrementally visit all DBs. */
            for (i = 0; i < server.dbnum; i++) {
                j = (++next_db) % server.dbnum;
                db = server.db+j;
                dict = (server.maxmemory_policy == MAXMEMORY_ALLKEYS_RANDOM) ?
                        db->dict : db->expires;
                if (dictSize(dict) != 0) {
                    de = dictGetRandomKey(dict);
                    bestkey = dictGetKey(de);
                    bestdbid = j;
                    break;
                }
            }
        }

        /* Finally remove the selected key. */
        if (bestkey) {
            db = server.db+bestdbid;
            robj *keyobj = createStringObject(bestkey,sdslen(bestkey));
            propagateExpire(db,keyobj,server.lazyfree_lazy_eviction);
            /* We compute the amount of memory freed by db*Delete() alone.
             * It is possible that actually the memory needed to propagate
             * the DEL in AOF and replication link is greater than the one
             * we are freeing removing the key, but we can't account for
             * that otherwise we would never exit the loop.
             *
             * AOF and Output buffer memory will be freed eventually so
             * we only care about memory used by the key space. */
            delta = (long long) zmalloc_used_memory();
            latencyStartMonitor(eviction_latency);
            if (server.lazyfree_lazy_eviction)
                dbAsyncDelete(db,keyobj);
            else
                dbSyncDelete(db,keyobj);
            latencyEndMonitor(eviction_latency);
            latencyAddSampleIfNeeded("eviction-del",eviction_latency);
            latencyRemoveNestedEvent(latency,eviction_latency);
            delta -= (long long) zmalloc_used_memory();
            mem_freed += delta;
            server.stat_evictedkeys++;
            notifyKeyspaceEvent(NOTIFY_EVICTED, "evicted",
                keyobj, db->id);
            decrRefCount(keyobj);
            keys_freed++;

            /* When the memory to free starts to be big enough, we may
             * start spending so much time here that is impossible to
             * deliver data to the slaves fast enough, so we force the
             * transmission here inside the loop. */
            if (slaves) flushSlavesOutputBuffers();

            /* Normally our stop condition is the ability to release
             * a fixed, pre-computed amount of memory. However when we
             * are deleting objects in another thread, it's better to
             * check, from time to time, if we already reached our target
             * memory, since the "mem_freed" amount is computed only
             * across the dbAsyncDelete() call, while the thread can
             * release the memory all the time. */
            if (server.lazyfree_lazy_eviction && !(keys_freed % 16)) {
                overhead = freeMemoryGetNotCountedMemory();
                mem_used = zmalloc_used_memory();
                mem_used = (mem_used > overhead) ? mem_used-overhead : 0;
                if (mem_used <= server.maxmemory) {
                    mem_freed = mem_tofree;
                }
            }
        }

        if (!keys_freed) {
            latencyEndMonitor(latency);
            latencyAddSampleIfNeeded("eviction-cycle",latency);
            goto cant_free; /* nothing to free... */
        }
    }
    latencyEndMonitor(latency);
    latencyAddSampleIfNeeded("eviction-cycle",latency);
    return C_OK;

cant_free:
    /* We are here if we are not able to reclaim memory. There is only one
     * last thing we can try: check if the lazyfree thread has jobs in queue
     * and wait... */
    while(bioPendingJobsOfType(BIO_LAZY_FREE)) {
        if (((mem_reported - zmalloc_used_memory()) + mem_freed) >= mem_tofree)
            break;
        usleep(1000);
    }
    return C_ERR;
}
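The "re-check every 16 frees" block near the end is worth isolating: when deletion is deferred to a background thread, the running per-delete estimate undercounts what was really released, so the loop periodically consults actual usage and stops early. Below is a toy standalone sketch of that pattern with made-up accounting numbers; none of the names or figures come from Redis.

#include <stdio.h>

/* Toy accounting: each delete is "accounted" as 4 units freed, but the
 * (simulated) background lazyfree thread actually releases 8 per delete,
 * so real usage falls faster than the running estimate. */
static long usage = 300, limit = 100;

static long delete_one_async(void) { usage -= 8; return 4; }

int main(void) {
    long to_free = usage - limit;   /* pre-computed target: 200 units */
    long freed = 0;
    int keys_freed = 0;

    while (freed < to_free) {
        freed += delete_one_async();
        keys_freed++;

        /* Every 16 deletions, consult the real usage instead of the
         * per-delete estimate; stop early once we are under the limit. */
        if (!(keys_freed % 16) && usage <= limit) {
            freed = to_free;        /* force the loop to terminate */
        }
    }
    printf("stopped after %d deletions, usage now %ld\n", keys_freed, usage);
    return 0;
}

With the periodic check the loop stops after 32 deletions instead of the 50 the estimate alone would require, which mirrors why freeMemoryIfNeeded sets mem_freed = mem_tofree once the real usage is back under maxmemory.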