// 撤销对这个客户端的所有 WATCH // 清除 EXEC dirty FLAG 的任务由调用者完成 void unwatchAllKeys(redisClient *c) { listIter li; listNode *ln; // 没有 WATCHED KEY ,直接返回 if (listLength(c->watched_keys) == 0) return; listRewind(c->watched_keys,&li); while((ln = listNext(&li))) { list *clients; watchedKey *wk; /* Lookup the watched key -> clients list and remove the client * from the list */ // 将当前客户端从监视 KEY 的链表中移除 wk = listNodeValue(ln); clients = dictFetchValue(wk->db->watched_keys, wk->key); redisAssertWithInfo(c,NULL,clients != NULL); listDelNode(clients,listSearchKey(clients,c)); /* Kill the entry at all if this was the only client */ // 如果监视 KEY 的只有这个客户端 // 那么将链表从字典中删除 if (listLength(clients) == 0) dictDelete(wk->db->watched_keys, wk->key); /* Remove this watched key from the client->watched list */ // 还需要将 KEY 从 client->watched_keys 链表中移除 listDelNode(c->watched_keys,ln); decrRefCount(wk->key); zfree(wk); } }
/* Unwatch every key this client is watching. The caller is responsible
 * for clearing the EXEC dirty flag. */
void unwatchAllKeys(redisClient *c) {
    listIter iter;
    listNode *node;

    if (listLength(c->watched_keys) == 0) return;

    listRewind(c->watched_keys,&iter);
    while ((node = listNext(&iter)) != NULL) {
        watchedKey *wk = listNodeValue(node);

        /* Drop this client from the db-side key -> watchers mapping. */
        list *watchers = dictFetchValue(wk->db->watched_keys, wk->key);
        redisAssert(watchers != NULL);
        listDelNode(watchers,listSearchKey(watchers,c));

        /* No watchers left for this key: remove the dict entry too. */
        if (listLength(watchers) == 0)
            dictDelete(wk->db->watched_keys, wk->key);

        /* Finally drop the entry from the client-side list. */
        listDelNode(c->watched_keys,node);
        decrRefCount(wk->key);
        zfree(wk);
    }
}
/* Remove the 'key' from the list of blocked keys for a given client.
 *
 * The function returns 1 when there are no longer blocking keys after
 * the current one was removed (and the client can be unblocked). */
int dontWaitForSwappedKey(redisClient *c, robj *key) {
    list *l;
    listNode *ln;
    listIter li;
    struct dictEntry *de;

    /* The key object might be destroyed when deleted from the c->io_keys
     * list (and the "key" argument is physically the same object as the
     * object inside the list), so we need to protect it. */
    incrRefCount(key);

    /* Remove the key from the list of keys this client is waiting for. */
    listRewind(c->io_keys,&li);
    while ((ln = listNext(&li)) != NULL) {
        if (equalStringObjects(ln->value,key)) {
            listDelNode(c->io_keys,ln);
            break;
        }
    }
    /* The key must be present in c->io_keys, otherwise the caller broke
     * the invariant that this client is waiting for it. */
    redisAssert(ln != NULL);

    /* Remove the client form the key => waiting clients map. */
    de = dictFind(c->db->io_keys,key);
    redisAssert(de != NULL);
    l = dictGetEntryVal(de);
    ln = listSearchKey(l,c);
    redisAssert(ln != NULL);
    listDelNode(l,ln);
    /* Drop the mapping entirely when no client is waiting for this key. */
    if (listLength(l) == 0)
        dictDelete(c->db->io_keys,key);

    decrRefCount(key);
    /* 1 -> the client waits for no more keys and can be unblocked. */
    return listLength(c->io_keys) == 0;
}
/* Unsubscribe a client from a pattern. Returns 1 if the operation succeeded, or
 * 0 if the client was not subscribed to the specified pattern. */
int pubsubUnsubscribePattern(redisClient *c, robj *pattern, int notify) {
    listNode *ln;
    pubsubPattern pat;
    int retval = 0;

    incrRefCount(pattern); /* Protect the object. May be the same we remove */
    if ((ln = listSearchKey(c->pubsub_patterns,pattern)) != NULL) {
        retval = 1;
        /* Remove the pattern from the client's subscription list. */
        listDelNode(c->pubsub_patterns,ln);
        /* Remove the (client,pattern) pair from the server-wide list. */
        pat.client = c;
        pat.pattern = pattern;
        ln = listSearchKey(server.pubsub_patterns,&pat);
        listDelNode(server.pubsub_patterns,ln);
    }
    /* Notify the client */
    if (notify) {
        addReply(c,shared.mbulkhdr[3]);
        addReply(c,shared.punsubscribebulk);
        addReplyBulk(c,pattern);
        /* Number of channels + patterns the client is still subscribed to. */
        addReplyLongLong(c,dictSize(c->pubsub_channels)+
                       listLength(c->pubsub_patterns));
    }
    decrRefCount(pattern);
    return retval;
}
// 取消客户端对所有的键的监视,清理 EXEC dirty 标识状态由调用者决定 void unwatchAllKeys(client *c) { listIter li; listNode *ln; // 如果客户端没有监视key则直接返回 if (listLength(c->watched_keys) == 0) return; listRewind(c->watched_keys,&li); // 遍历客户端监视的key while((ln = listNext(&li))) { list *clients; watchedKey *wk; /* Lookup the watched key -> clients list and remove the client * from the list */ wk = listNodeValue(ln); // 从数据库中的watched_keys字典中查找出监视key的client clients = dictFetchValue(wk->db->watched_keys, wk->key); serverAssertWithInfo(c,NULL,clients != NULL); // 从client的链表中删除当前client节点 listDelNode(clients,listSearchKey(clients,c)); /* Kill the entry at all if this was the only client */ // 如果client链表为空,标识给key没有被监视 if (listLength(clients) == 0) // 从数据库的watched_keys中删除该key dictDelete(wk->db->watched_keys, wk->key); /* Remove this watched key from the client->watched list */ // 从客户端的watched_keys中删除该节点 listDelNode(c->watched_keys,ln); decrRefCount(wk->key); zfree(wk); } }
/* LTRIM key start end
 *
 * Trim the list stored at 'key' so it only keeps the elements inside the
 * inclusive [start,end] range (negative indexes count from the tail).
 * Replies +OK; the key is deleted when the resulting list is empty.
 * This build is hash-slot aware: db operations take a slot number. */
void ltrimCommand(redisClient *c) {
    robj *o;
    long start, end, llen, j, ltrim, rtrim;
    list *list;
    listNode *ln;
    int slotnum = keyHashSlot(c->argv[1]->ptr, sdslen(c->argv[1]->ptr));

    if ((getLongFromObjectOrReply(c, c->argv[2], &start, NULL) != REDIS_OK) ||
        (getLongFromObjectOrReply(c, c->argv[3], &end, NULL) != REDIS_OK)) return;

    /* A missing key is a no-op that still replies +OK (shared.ok). */
    if ((o = lookupKeyWriteOrReply(c,c->argv[1],shared.ok,slotnum)) == NULL ||
        checkType(c,o,REDIS_LIST)) return;
    llen = listTypeLength(o);

    /* convert negative indexes */
    if (start < 0) start = llen+start;
    if (end < 0) end = llen+end;
    if (start < 0) start = 0;

    /* Invariant: start >= 0, so this test will be true when end < 0.
     * The range is empty when start > end or start >= length. */
    if (start > end || start >= llen) {
        /* Out of range start or start > end result in empty list */
        ltrim = llen;
        rtrim = 0;
    } else {
        if (end >= llen) end = llen-1;
        /* ltrim/rtrim are how many elements to drop from each end. */
        ltrim = start;
        rtrim = llen-end-1;
    }

    /* Remove list elements to perform the trim */
    if (o->encoding == REDIS_ENCODING_ZIPLIST) {
        o->ptr = ziplistDeleteRange(o->ptr,0,ltrim);
        o->ptr = ziplistDeleteRange(o->ptr,-rtrim,rtrim);
    } else if (o->encoding == REDIS_ENCODING_LINKEDLIST) {
        list = o->ptr;
        for (j = 0; j < ltrim; j++) {
            ln = listFirst(list);
            listDelNode(list,ln);
        }
        for (j = 0; j < rtrim; j++) {
            ln = listLast(list);
            listDelNode(list,ln);
        }
    } else {
        redisPanic("Unknown list encoding");
    }

    notifyKeyspaceEvent(REDIS_NOTIFY_LIST,"ltrim",c->argv[1],c->db->id);
    /* Empty result: remove the key and fire the "del" event. */
    if (listTypeLength(o) == 0) {
        dbDelete(c->db,c->argv[1],slotnum);
        notifyKeyspaceEvent(REDIS_NOTIFY_GENERIC,"del",c->argv[1],c->db->id);
    }
    signalModifiedKey(c->db,c->argv[1],slotnum);
    server.dirty++;
    addReply(c,shared.ok);
}
/* release memory allocated for copy on write during background save */ void cowBkgdSaveReset() { int j; listNode *ln; if (server.cowDictCopied != NULL) { for (j = 0; j < server.dbnum; j++) { if (server.cowSaveDb[j].dict != NULL) { /* restore normal dictionary destructors */ restore_dictobj(server.db[j].dict, &server.cowSaveDbExt[j]); server.cowSaveDb[j].dict = NULL; } if (server.cowSaveDbExt[j].dictArray != NULL) { cowReleaseDictArray(server.cowSaveDbExt[j].dictArray); server.cowSaveDbExt[j].dictArray = NULL; } if (server.cowSaveDb[j].expires != NULL && server.cowSaveDb[j].expires != server.db[j].expires) { dictRelease(server.cowSaveDb[j].expires); server.cowSaveDb[j].expires = NULL; } } } server.cowCurIters.curDbDictIter = NULL; server.cowCurIters.curObjDictIter = NULL; server.cowCurIters.curObjZDictIter = NULL; server.cowCurIters.curObjListIter = NULL; /* cleanup table of copied items */ if (server.cowDictCopied != NULL) { dictRelease(server.cowDictCopied); server.cowDictCopied = NULL; } if (server.cowDictConverted != NULL) { dictRelease(server.cowDictConverted); server.cowDictConverted = NULL; } /* delete all deferred items */ redisLog(REDIS_NOTICE, "cowBkgdSaveReset deleting %d SDS and %d obj items", listLength(deferSdsDelete), listLength(deferObjDelete)); while ( (ln = listFirst(deferSdsDelete)) != NULL) { sdsfree((sds)(ln->value)); listDelNode(deferSdsDelete, ln); } while ( (ln = listFirst(deferObjDelete)) != NULL) { if (ln->value != NULL) { decrRefCount(ln->value); } listDelNode(deferObjDelete, ln); } }
void *IOThreadEntryPoint(void *arg) { iojob *j; listNode *ln; REDIS_NOTUSED(arg); pthread_detach(pthread_self()); while(1) { /* Get a new job to process */ lockThreadedIO(); if (listLength(server.io_newjobs) == 0) { /* No new jobs in queue, exit. */ redisLog(REDIS_DEBUG,"Thread %ld exiting, nothing to do", (long) pthread_self()); server.io_active_threads--; unlockThreadedIO(); return NULL; } ln = listFirst(server.io_newjobs); j = ln->value; listDelNode(server.io_newjobs,ln); /* Add the job in the processing queue */ j->thread = pthread_self(); listAddNodeTail(server.io_processing,j); ln = listLast(server.io_processing); /* We use ln later to remove it */ unlockThreadedIO(); redisLog(REDIS_DEBUG,"Thread %ld got a new job (type %d): %p about key '%s'", (long) pthread_self(), j->type, (void*)j, (char*)j->key->ptr); /* Process the Job */ if (j->type == REDIS_IOJOB_LOAD) { vmpointer *vp = (vmpointer*)j->id; j->val = vmReadObjectFromSwap(j->page,vp->vtype); } else if (j->type == REDIS_IOJOB_PREPARE_SWAP) { j->pages = rdbSavedObjectPages(j->val); } else if (j->type == REDIS_IOJOB_DO_SWAP) { if (vmWriteObjectOnSwap(j->val,j->page) == REDIS_ERR) j->canceled = 1; } /* Done: insert the job into the processed queue */ redisLog(REDIS_DEBUG,"Thread %ld completed the job: %p (key %s)", (long) pthread_self(), (void*)j, (char*)j->key->ptr); lockThreadedIO(); listDelNode(server.io_processing,ln); listAddNodeTail(server.io_processed,j); unlockThreadedIO(); /* Signal the main thread there is new stuff to process */ redisAssert(write(server.io_ready_pipe_write,"x",1) == 1); } return NULL; /* never reached */ }
/* LTRIM key start end
 *
 * Trim the list stored at 'key' to the inclusive [start,end] range
 * (negative indexes count from the tail); delete the key when the
 * resulting list is empty, and reply +OK.
 *
 * NOTE(review): start/end are parsed with atoi(), so non-numeric input
 * silently becomes 0 and out-of-range values are undefined — newer
 * versions of this command validate via getLongFromObjectOrReply(). */
void ltrimCommand(redisClient *c) {
    robj *o;
    int start = atoi(c->argv[2]->ptr);
    int end = atoi(c->argv[3]->ptr);
    int llen;
    int j, ltrim, rtrim;
    list *list;
    listNode *ln;

    /* A missing key is a no-op that still replies +OK (shared.ok). */
    if ((o = lookupKeyWriteOrReply(c,c->argv[1],shared.ok)) == NULL ||
        checkType(c,o,REDIS_LIST)) return;
    llen = listTypeLength(o);

    /* convert negative indexes */
    if (start < 0) start = llen+start;
    if (end < 0) end = llen+end;
    if (start < 0) start = 0;

    /* Invariant: start >= 0, so this test will be true when end < 0.
     * The range is empty when start > end or start >= length. */
    if (start > end || start >= llen) {
        /* Out of range start or start > end result in empty list */
        ltrim = llen;
        rtrim = 0;
    } else {
        if (end >= llen) end = llen-1;
        /* ltrim/rtrim are how many elements to drop from each end. */
        ltrim = start;
        rtrim = llen-end-1;
    }

    /* Remove list elements to perform the trim */
    if (o->encoding == REDIS_ENCODING_ZIPLIST) {
        o->ptr = ziplistDeleteRange(o->ptr,0,ltrim);
        o->ptr = ziplistDeleteRange(o->ptr,-rtrim,rtrim);
    } else if (o->encoding == REDIS_ENCODING_LINKEDLIST) {
        list = o->ptr;
        for (j = 0; j < ltrim; j++) {
            ln = listFirst(list);
            listDelNode(list,ln);
        }
        for (j = 0; j < rtrim; j++) {
            ln = listLast(list);
            listDelNode(list,ln);
        }
    } else {
        redisPanic("Unknown list encoding");
    }

    if (listTypeLength(o) == 0) dbDelete(c->db,c->argv[1]);
    signalModifiedKey(c->db,c->argv[1]);
    server.dirty++;
    addReply(c,shared.ok);
}
/* Free the clients of a worker event loop (myid != 0) that have been
 * idle longer than el->maxidletime seconds. The client list is scanned
 * in order and the scan stops at the first non-expired client — this
 * assumes el->clients is kept ordered by last interaction (oldest
 * first); TODO confirm against the code that maintains the list.
 *
 * Returns the number of clients freed (0 on the main loop). */
int closeTimedoutClients(aeEventLoop *el) {
    if(el->myid != 0) {
        httpClient *c;
        int deletedNodes = 0;
        time_t now = time(NULL);
        listIter li;
        listNode *ln;

        listRewind(el->clients,&li);
        while ((ln = listNext(&li)) != NULL) {
            c = listNodeValue(ln);
            if (el->maxidletime && (now - c->lastinteraction > el->maxidletime)) {
                /* the client is waiting for reply */
                if (c->blocked) {
                    /* This situation happens when request_handler time exceeds client timeout.
                     * Client timeout is typically 30 seconds and
                     * Request_handler rarely consumes more than 1 second.
                     * This rare case has a very small role in overall performance. */
                    /* 'ceNode' instead of 'ln': the original declared an
                     * inner 'ln' that shadowed the loop variable. */
                    listNode *ceNode = listSearchKey(c->ceList,c);
                    if (ceNode) listDelNode(c->ceList,ceNode);
                }
                freeClient(c);
                deletedNodes++;
            } else break;
        }
        return deletedNodes;
    }
    return 0;
}
/* Walk the pending AIO queue and try to (re)submit each request.
 * Successfully submitted entries are removed from the queue; a return
 * of -2 from aio_submit() aborts the pass (presumably "try again
 * later" — confirm against aio_submit()'s contract). */
void aio_submit_queue() {
    int rv;
    struct aiocb *cb;

    if (0 == listLength(aio_queue))
        return;

    listIter *it;
    listNode *nd;
    it = listGetIterator(aio_queue, AL_START_HEAD);
    while ((nd = listNext(it))) {
        cb = (struct aiocb *)nd->value;
        cb->retries += 1;
        rv = aio_submit(cb);
        if (0 == rv) {
            /* Submitted: drop it from the retry queue. Safe while
             * iterating because listNext() already advanced. */
            listDelNode(aio_queue, nd);
        } else if (-2 == rv) {
            break;
        }
    }
    /* The original had an unreferenced 'end:' label here (unused-label
     * warning); control simply falls through to the cleanup. */
    zfree(it);
}
void msg_put(struct msg *msg) { listNode *node; struct mbuf *mbuf; log_debug(LOG_VVERB, "put msg %p id %"PRIu64"", msg, msg->id); while (listLength(msg->data) > 0) { node = listFirst(msg->data); mbuf = listNodeValue(node); listDelNode(msg->data, node); mbuf_put(mbuf); } listRelease(msg->data); msg->data = NULL; if (msg->frag_seq) { rmt_free(msg->frag_seq); msg->frag_seq = NULL; } if (msg->keys) { msg->keys->nelem = 0; /* a hack here */ array_destroy(msg->keys); msg->keys = NULL; } msg->mb = NULL; }
/* Unblock a client that's waiting in a blocking operation such as BLPOP */
void unblockClientWaitingData(redisClient *c) {
    dictEntry *de;
    list *l;
    int j;

    redisAssert(c->blocking_keys != NULL);
    /* The client may wait for multiple keys, so unblock it for every key. */
    for (j = 0; j < c->blocking_keys_num; j++) {
        /* Remove this client from the list of clients waiting for this key. */
        de = dictFind(c->db->blocking_keys,c->blocking_keys[j]);
        redisAssert(de != NULL);
        l = dictGetEntryVal(de);
        listDelNode(l,listSearchKey(l,c));
        /* If the list is empty we need to remove it to avoid wasting memory */
        if (listLength(l) == 0)
            dictDelete(c->db->blocking_keys,c->blocking_keys[j]);
        decrRefCount(c->blocking_keys[j]);
    }
    /* Cleanup the client structure */
    zfree(c->blocking_keys);
    c->blocking_keys = NULL;
    c->flags &= (~REDIS_BLOCKED);
    server.blpop_blocked_clients--;
    /* We want to process data if there is some command waiting
     * in the input buffer. Note that this is safe even if
     * unblockClientWaitingData() gets called from freeClient() because
     * freeClient() will be smart enough to call this function
     * *after* c->querybuf was set to NULL. */
    if (c->querybuf && sdslen(c->querybuf) > 0) processInputBuffer(c);
}
//解阻塞一个正在阻塞中的client void unblockClientWaitingData(client *c) { dictEntry *de; dictIterator *di; list *l; serverAssertWithInfo(c,NULL,dictSize(c->bpop.keys) != 0); //创建一个字典的迭代器,指向的是造成client阻塞的键所组成的字典 di = dictGetIterator(c->bpop.keys); /* The client may wait for multiple keys, so unblock it for every key. */ //因为client可能被多个key所阻塞,所以要遍历所有的键 while((de = dictNext(di)) != NULL) { robj *key = dictGetKey(de); //获得key对象 /* Remove this client from the list of clients waiting for this key. */ //根据key找到对应的列表类型值,值保存着被阻塞的client,从中找c->db->blocking_keys中寻找 l = dictFetchValue(c->db->blocking_keys,key); serverAssertWithInfo(c,key,l != NULL); // 将阻塞的client从列表中移除 listDelNode(l,listSearchKey(l,c)); /* If the list is empty we need to remove it to avoid wasting memory */ //如果当前列表为空了,则从c->db->blocking_keys中将key删除 if (listLength(l) == 0) dictDelete(c->db->blocking_keys,key); } dictReleaseIterator(di); //释放迭代器 /* Cleanup the client structure */ //清空bpop.keys的所有节点 dictEmpty(c->bpop.keys,NULL); //如果保存有新添加的元素,则应该释放 if (c->bpop.target) { decrRefCount(c->bpop.target); c->bpop.target = NULL; } }
/* Free a client and every resource it owns: buffers, socket events,
 * the parsed JSON documents, and the client structure itself. */
void freeClient(vuiClient *c) {
    listNode *ln;
    listIter *it;
    cJSON *json;

    /* Free the query buffer */
    sdsfree(c->querybuf);
    sdsfree(c->querymsg);
    sdsfree(c->res.body);
    sdsfree(c->res.reason);
    sdsfree(c->res.buf);

    /* Close socket, unregister events, and remove list of replies and
     * accumulated arguments. */
    if (c->fd != -1) {
        aeDeleteFileEvent(server.el,c->fd,AE_READABLE);
        aeDeleteFileEvent(server.el,c->fd,AE_WRITABLE);
        close(c->fd);
    }

    /* Delete every parsed JSON document before releasing the list. */
    it = listGetIterator(c->jsons, AL_START_HEAD);
    while((ln = listNext(it))) {
        json = (cJSON *)ln->value;
        cJSON_Delete(json);
        listDelNode(c->jsons, ln);
    }
    listRelease(c->jsons);
    listReleaseIterator(it);
    zfree(c);
    /* NOTE(review): assumes at most one live client at a time — the
     * global pointer is cleared unconditionally; confirm with callers. */
    server.client = NULL;
}
/* Remove a document's contribution from the full-text index: tokenize
 * the document again, and for every non-stopword term decrement the
 * term frequency of the (term, doc) posting, dropping the posting node
 * when its frequency reaches zero. */
static void fts_index_del(fts_t *fts, fts_doc_t *doc)
{
    int i, len, nonstopwords;
    sds *terms;

    terms = sds_tokenize(doc->doc->ptr, &len, &nonstopwords);
    if (!terms)
        return;

    for (i = 0; i < len; i++) {
        sds term = terms[i];
        list *idx;
        listNode *ln;

        /* Tokenizer emits empty strings for stopwords; skip them. */
        if (sdslen(term) == 0) {
            sdsfree(term);
            continue;
        }

        /* The posting list for this term must exist, since the doc was
         * previously indexed with the same tokenization. */
        idx = dict_get(fts->index, term);
        assert(idx);

        ln = listSearchKey(idx, doc);
        assert(ln);

        index_item_t *idi = ln->value;
        idi->tf--;
        /* NOTE(review): assumes the list's free callback releases the
         * index_item_t, and an emptied posting list is intentionally
         * left in fts->index — confirm both against the list setup. */
        if (!idi->tf)
            listDelNode(idx, ln);
        sdsfree(term);
    }
    rr_free(terms);
    fts->len -= doc->len;
}
//操作线程运行的函数。根据操作类型从任务队列中取出任务并调用相关函数执行 void *bioProcessBackgroundJobs(void *arg) { struct bio_job *job; unsigned long type = (unsigned long) arg; sigset_t sigset; /* Make the thread killable at any time, so that bioKillThreads() * can work reliably. */ //设置属性使线程可以在任意时候被杀死 pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL); pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); pthread_mutex_lock(&bio_mutex[type]); /* Block SIGALRM so we are sure that only the main thread will * receive the watchdog signal. */ //阻塞信号量SIGALRM sigemptyset(&sigset); sigaddset(&sigset, SIGALRM); if (pthread_sigmask(SIG_BLOCK, &sigset, NULL)) redisLog(REDIS_WARNING, "Warning: can't mask SIGALRM in bio.c thread: %s", strerror(errno)); while(1) { listNode *ln; /* The loop always starts with the lock hold. */ //任务队列为空,等待新的任务的添加 if (listLength(bio_jobs[type]) == 0) { //注意,wait的时候会将mutex释放,否则会有死锁 pthread_cond_wait(&bio_condvar[type],&bio_mutex[type]); continue; } /* Pop the job from the queue. */ //从队列中取出一个任务 ln = listFirst(bio_jobs[type]); job = ln->value; /* It is now possible to unlock the background system as we know have * a stand alone job structure to process.*/ //对bio_jobs操作结束,可以解锁 pthread_mutex_unlock(&bio_mutex[type]); /* Process the job accordingly to its type. */ if (type == REDIS_BIO_CLOSE_FILE) { //操作是close close((long)job->arg1); } else if (type == REDIS_BIO_AOF_FSYNC) { //操作是fsync aof_fsync((long)job->arg1); } else { redisPanic("Wrong job type in bioProcessBackgroundJobs()."); } zfree(job); /* Lock again before reiterating the loop, if there are no longer * jobs to process we'll block again in pthread_cond_wait(). */ pthread_mutex_lock(&bio_mutex[type]); //从队列删除执行完的任务,将pending值减1 listDelNode(bio_jobs[type],ln); bio_pending[type]--; } }
/* Unblock a client blocked in an operation such as BLPOP: detach it
 * from the waiters list of every key it is blocked on, then reset its
 * blocking state and queue it for reprocessing. */
void unblockClientWaitingData(redisClient *c) {
    dictEntry *entry;
    dictIterator *iter;

    redisAssertWithInfo(c,NULL,dictSize(c->bpop.keys) != 0);

    /* The client may be blocked on several keys: visit each of them. */
    iter = dictGetIterator(c->bpop.keys);
    while ((entry = dictNext(iter)) != NULL) {
        robj *key = dictGetKey(entry);

        /* Remove the client from the waiters list for this key. */
        list *waiters = dictFetchValue(c->db->blocking_keys,key);
        redisAssertWithInfo(c,key,waiters != NULL);
        listDelNode(waiters,listSearchKey(waiters,c));

        /* Drop empty lists so they don't waste memory. */
        if (listLength(waiters) == 0)
            dictDelete(c->db->blocking_keys,key);
    }
    dictReleaseIterator(iter);

    /* Reset the client's blocking state. */
    dictEmpty(c->bpop.keys,NULL);
    if (c->bpop.target) {
        decrRefCount(c->bpop.target);
        c->bpop.target = NULL;
    }
    c->flags &= ~REDIS_BLOCKED;
    c->flags |= REDIS_UNBLOCKED;
    server.bpop_blocked_clients--;
    listAddNodeTail(server.unblocked_clients,c);
}
/* Unblock a client that's waiting in a blocking operation such as BLPOP */
void unblockClientWaitingData(redisClient *c) {
    dictEntry *de;
    list *l;
    int j;

    redisAssertWithInfo(c,NULL,c->bpop.keys != NULL);
    /* The client may wait for multiple keys, so unblock it for every key. */
    for (j = 0; j < c->bpop.count; j++) {
        /* Remove this client from the list of clients waiting for this key. */
        de = dictFind(c->db->blocking_keys,c->bpop.keys[j]);
        redisAssertWithInfo(c,c->bpop.keys[j],de != NULL);
        l = dictGetVal(de);
        listDelNode(l,listSearchKey(l,c));
        /* If the list is empty we need to remove it to avoid wasting memory */
        if (listLength(l) == 0)
            dictDelete(c->db->blocking_keys,c->bpop.keys[j]);
        decrRefCount(c->bpop.keys[j]);
    }

    /* Cleanup the client structure */
    zfree(c->bpop.keys);
    c->bpop.keys = NULL;
    if (c->bpop.target) decrRefCount(c->bpop.target);
    c->bpop.target = NULL;
    /* Move the client from the blocked to the unblocked state and queue
     * it so pending input is processed on the next event loop cycle. */
    c->flags &= ~REDIS_BLOCKED;
    c->flags |= REDIS_UNBLOCKED;
    server.bpop_blocked_clients--;
    listAddNodeTail(server.unblocked_clients,c);
}
/* Glue small consecutive reply objects into a single buffer (up to
 * GLUEREPLY_UP_TO bytes) to reduce the number of write(2) calls. */
static void glueReplyBuffersIfNeeded(redisClient *c) {
    int copylen = 0;
    char buf[GLUEREPLY_UP_TO];
    listNode *ln;
    listIter li;
    robj *o;

    listRewind(c->reply,&li);
    while((ln = listNext(&li))) {
        int objlen;

        o = ln->value;
        objlen = sdslen(o->ptr);
        if (copylen + objlen <= GLUEREPLY_UP_TO) {
            /* Object fits: append its bytes and drop it from the list. */
            memcpy(buf+copylen,o->ptr,objlen);
            copylen += objlen;
            listDelNode(c->reply,ln);
        } else {
            /* First object alone exceeds the limit: nothing to glue. */
            if (copylen == 0) return;
            break;
        }
    }
    /* The consumed objects were removed from the head of the reply list:
     * prepend a single glued object holding their concatenated payload
     * (any objects left after an early break stay behind it). */
    o = createObject(REDIS_STRING,sdsnewlen(buf,copylen));
    listAddNodeHead(c->reply,o);
}
/* Allocate a new object of the given type wrapping 'ptr', reusing a
 * node from the shared free list when available (the free list is
 * protected by a mutex only when VM threading is enabled). */
robj *createObject(int type, void *ptr) {
    robj *o;

    if (server.vm_enabled) pthread_mutex_lock(&server.obj_freelist_mutex);
    if (listLength(server.objfreelist)) {
        /* Reuse a previously released object from the free list. */
        listNode *head = listFirst(server.objfreelist);

        o = listNodeValue(head);
        listDelNode(server.objfreelist,head);
        if (server.vm_enabled) pthread_mutex_unlock(&server.obj_freelist_mutex);
    } else {
        /* Free list empty: unlock first, then fall back to malloc. */
        if (server.vm_enabled) pthread_mutex_unlock(&server.obj_freelist_mutex);
        o = zmalloc(sizeof(*o));
    }
    o->type = type;
    o->encoding = REDIS_ENCODING_RAW;
    o->ptr = ptr;
    o->refcount = 1;
    if (server.vm_enabled) {
        /* Note that this code may run in the context of an I/O thread
         * and accessing server.lruclock in theory is an error
         * (no locks). But in practice this is safe, and even if we read
         * garbage Redis will not fail. */
        o->lru = server.lruclock;
        o->storage = REDIS_VM_MEMORY;
    }
    return o;
}
/* Unblock a client that's waiting in a blocking operation such as BLPOP.
 * You should never call this function directly, but unblockClient() instead. */
void unblockClientWaitingData(redisClient *c) {
    dictEntry *de;
    dictIterator *di;
    list *l;

    redisAssertWithInfo(c,NULL,dictSize(c->bpop.keys) != 0);
    di = dictGetIterator(c->bpop.keys);
    /* The client may wait for multiple keys, so unblock it for every key. */
    while((de = dictNext(di)) != NULL) {
        robj *key = dictGetKey(de);
        /* Slot-aware build: c->db is an array indexed by the key's hash
         * slot, so each key may live in a different db structure. */
        redisDb *db = &(c->db)[keyHashSlot(key->ptr, sdslen(key->ptr))];

        /* Remove this client from the list of clients waiting for this key. */
        l = dictFetchValue(db->blocking_keys,key);
        redisAssertWithInfo(c,key,l != NULL);
        listDelNode(l,listSearchKey(l,c));
        /* If the list is empty we need to remove it to avoid wasting memory */
        if (listLength(l) == 0)
            dictDelete(db->blocking_keys,key);
    }
    dictReleaseIterator(di);

    /* Cleanup the client structure */
    dictEmpty(c->bpop.keys,NULL);
    if (c->bpop.target) {
        decrRefCount(c->bpop.target);
        c->bpop.target = NULL;
    }
}
void freeClient(void *vc) { ugClient *c = (ugClient *) vc; listNode *node = listSearchKey(server.clients, c); if (node) { listDelNode(server.clients, node); } }
/* Push a new entry into the slow log when the command's execution time
 * crosses the configured threshold, then trim the log so it never grows
 * beyond the configured maximum length. */
void slowlogPushEntryIfNeeded(robj **argv, int argc, long long duration) {
    /* A negative threshold means the slow log is disabled. */
    if (server.slowlog_log_slower_than < 0)
        return;

    if (duration >= server.slowlog_log_slower_than)
        listAddNodeHead(server.slowlog,
                        slowlogCreateEntry(argv,argc,duration));

    /* Evict the oldest entries until the configured cap is respected. */
    while (listLength(server.slowlog) > server.slowlog_max_len) {
        listNode *oldest = listLast(server.slowlog);
        listDelNode(server.slowlog,oldest);
    }
}
/* Free an HTTP client: unregister its events, close its socket, release
 * everything it owns and unlink it from the event loop's client list. */
void freeClient(httpClient *c) {
    aeDeleteFileEvent(c->el,c->fd);
    close(c->fd);
    /* Release memory */
    if(c->ip) free(c->ip);
    replyFree(c->rep);
    requestFree(c->req);
    /* Unlink from the owning event loop's client list, if linked. */
    if(c->elNode) listDelNode(c->el->clients,c->elNode);
    free(c);
}
/* Free a client: run its blocking-state destructor if blocked, release
 * buffers and socket resources, and unlink it from every server-side
 * list it appears in before freeing the structure. */
void freeClient(client *c) {
    listNode *ln;

    /* If the client was blocked, let its registered callback free the
     * blocking payload first. */
    if (c->flags & CLIENT_BLOCKED) {
        if (c->bfree)
            c->bfree(c->bpop.data);
    }

    /* If this is marked as current client unset it */
    if (server.current_client == c) server.current_client = NULL;

    /* Free the query buffer */
    sdsfree(c->querybuf);
    c->querybuf = NULL;

    /* Close socket, unregister events, and remove list of replies and
     * accumulated arguments. */
    if (c->fd != -1) {
        aeDeleteFileEvent(server.el,c->fd,AE_READABLE);
        aeDeleteFileEvent(server.el,c->fd,AE_WRITABLE);
        close(c->fd);
    }

    freeClientArgv(c);

    /* Remove from the list of clients */
    if (c->fd != -1) {
        ln = listSearchKey(server.clients,c);
        serverAssert(ln != NULL);
        listDelNode(server.clients,ln);
    }

    /* If this client was scheduled for async freeing we need to remove it
     * from the queue. */
    if (c->flags & CLIENT_CLOSE_ASAP) {
        ln = listSearchKey(server.clients_to_close,c);
        serverAssert(ln != NULL);
        listDelNode(server.clients_to_close,ln);
    }

    /* Release other dynamically allocated client structure fields,
     * and finally release the client structure itself. */
    free(c->argv);
    free(c);
}
void freeClientsInAsyncFreeQueue(void) { while (listLength(server.clients_to_close)) { listNode *ln = listFirst(server.clients_to_close); client *c = listNodeValue(ln); c->flags &= ~CLIENT_CLOSE_ASAP; freeClient(c); listDelNode(server.clients_to_close,ln); } }
/* Body of a background I/O worker thread: waits for jobs of the given
 * type, executes them outside the queue lock, and removes each job from
 * the queue once completed. */
void *bioProcessBackgroundJobs(void *arg) {
    struct bio_job *job;
    unsigned long type = (unsigned long) arg;
    sigset_t sigset;

    /* Make the thread killable at any time, so that bioKillThreads()
     * can work reliably. */
    pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
    pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);

    pthread_mutex_lock(&bio_mutex[type]);
    /* Block SIGALRM so we are sure that only the main thread will
     * receive the watchdog signal. */
    sigemptyset(&sigset);
    sigaddset(&sigset, SIGALRM);
    if (pthread_sigmask(SIG_BLOCK, &sigset, NULL))
        serverLog(LL_WARNING,
            "Warning: can't mask SIGALRM in bio.c thread: %s", strerror(errno));

    while(1) {
        listNode *ln;

        /* The loop always starts with the lock hold. */
        if (listLength(bio_jobs[type]) == 0) {
            /* pthread_cond_wait() releases the mutex while waiting. */
            pthread_cond_wait(&bio_newjob_cond[type],&bio_mutex[type]);
            continue;
        }
        /* Pop the job from the queue. */
        ln = listFirst(bio_jobs[type]);
        job = ln->value;
        /* It is now possible to unlock the background system as we know have
         * a stand alone job structure to process.*/
        pthread_mutex_unlock(&bio_mutex[type]);

        /* Process the job accordingly to its type. */
        if (type == BIO_CLOSE_FILE) {
            close((long)job->arg1);
        } else if (type == BIO_AOF_FSYNC) {
            aof_fsync((long)job->arg1);
        } else {
            serverPanic("Wrong job type in bioProcessBackgroundJobs().");
        }
        zfree(job);

        /* Unblock threads blocked on bioWaitStepOfType() if any. */
        pthread_cond_broadcast(&bio_step_cond[type]);

        /* Lock again before reiterating the loop, if there are no longer
         * jobs to process we'll block again in pthread_cond_wait(). */
        pthread_mutex_lock(&bio_mutex[type]);
        listDelNode(bio_jobs[type],ln);
        bio_pending[type]--;
    }
}
/* Unsubscribe a client from a pattern. Returns 1 if the operation
 * succeeded, or 0 if the client was not subscribed to the specified
 * pattern. */
int pubsubUnsubscribePattern(redisClient *c, robj *pattern, int notify) {
    listNode *ln;
    pubsubPattern pat;
    int retval = 0;

    incrRefCount(pattern); /* Protect the object. May be the same we remove */
    /* First check that the client actually subscribes to this pattern. */
    if ((ln = listSearchKey(c->pubsub_patterns,pattern)) != NULL) {
        retval = 1;
        /* Remove the pattern from the client's subscription list. */
        listDelNode(c->pubsub_patterns,ln);
        /* Fill a pubsubPattern struct to search the server-wide list. */
        pat.client = c;
        pat.pattern = pattern;
        /* Locate and remove the (client,pattern) pair server-side. */
        ln = listSearchKey(server.pubsub_patterns,&pat);
        listDelNode(server.pubsub_patterns,ln);
    }
    /* Notify the client */
    if (notify) {
        addReply(c,shared.mbulkhdr[3]);
        /* The "punsubscribe" string. */
        addReply(c,shared.punsubscribebulk);
        /* The pattern that was unsubscribed. */
        addReplyBulk(c,pattern);
        /* Number of channels and patterns the client still subscribes
         * to after this unsubscription. */
        addReplyLongLong(c,dictSize(c->pubsub_channels)+
                       listLength(c->pubsub_patterns));
    }
    decrRefCount(pattern);
    return retval;
}
/* Delete a cache entry by key. An entry is only removed once the master
 * has replied (ce->val set); until then it must stay alive. */
void cacheDelete(ccache* c, sds key) {
    cacheEntry *ce = (cacheEntry *)dictFetchValue(c->data,key);

    /* Master reply by setting val to an object.
     * We do not delete cache entry until the master reply */
    if (ce == NULL || ce->val == NULL)
        return;

    cacheSendMessage(c,sdsdup(key),CACHE_REQUEST_OLD);
    listDelNode(c->accesslist,ce->ln);
    dictDelete(c->data,key);
}