Example #1
/* *
 * do migrate multi key-value(s) for {slotsmgrt/slotsmgrtone} with tag commands
 * return value:
 *    -1 - error happens
 *   >=0 - # of success migration
 * */
static int
slotsmgrttag_command(redisClient *c, sds host, sds port, int timeout, robj *key) {
    int taglen;
    void *tag = slots_tag(key->ptr, &taglen);
    if (tag == NULL) {
        return slotsmgrtone_command(c, host, port, timeout, key);
    }

    int fd = slotsmgrt_get_socket(c, host, port, timeout);
    if (fd == -1) {
        return -1;
    }

    list *l = listCreate();
    listSetFreeMethod(l, decrRefCountVoid);
    do {
        uint32_t crc;
        int slot = slots_num(key->ptr, &crc);
        dict *d = c->db->hash_slots[slot];
        long long cursor = 0;
        void *args[] = {l, tag, &taglen, (void *)(long)crc};
        do {
            cursor = dictScan(d, cursor, slotsScanSdsKeyTagCallback, args);
        } while (cursor != 0);
    } while (0);

    int max = listLength(l);
    if (max == 0) {
        listRelease(l);
        return 0;
    }

    robj **keys = zmalloc(sizeof(robj *) * max);
    robj **vals = zmalloc(sizeof(robj *) * max);

    int n = 0;
    for (int i = 0; i < max; i ++) {
        listNode *head = listFirst(l);
        robj *key = listNodeValue(head);
        robj *val = lookupKeyWrite(c->db, key);
        if (val != NULL) {
            keys[n] = key;
            vals[n] = val;
            n ++;
            incrRefCount(key);
        }
        listDelNode(l, head);
    }

    int ret = 0;
    if (n != 0) {
        if (slotsmgrt(c, host, port, fd, c->db->id, timeout, keys, vals, n) != 0) {
            slotsmgrt_close_socket(host, port);
            ret = -1;
        } else {
            slotsremove(c, keys, n, 1);
            ret = n;
        }
    }

    listRelease(l);
    for (int i = 0; i < n; i ++) {
        decrRefCount(keys[i]);
    }
    zfree(keys);
    zfree(vals);
    return ret;
}
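The header comment above defines the return contract (-1 on error, otherwise the number of migrated keys). A minimal caller sketch, assuming only the standard Redis reply helpers; the wrapper name and reply text are hypothetical, not taken from the original source:

/* Hypothetical caller: turn the documented return values into a client reply. */
static void example_migrate_one_tag(redisClient *c, sds host, sds port,
                                    int timeout, robj *key) {
    int ret = slotsmgrttag_command(c, host, port, timeout, key);
    if (ret < 0) {
        addReplyError(c, "migration failed");   /* -1: an error happened */
    } else {
        addReplyLongLong(c, ret);               /* >= 0: # of migrated keys */
    }
}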
Example #2
int luaRedisGenericCommand(lua_State *lua, int raise_error) {
    int j, argc = lua_gettop(lua);
    struct redisCommand *cmd;
    redisClient *c = server.lua_client;
    sds reply;

    /* Cached across calls. */
    static robj **argv = NULL;
    static int argv_size = 0;
    static robj *cached_objects[LUA_CMD_OBJCACHE_SIZE];
    static size_t cached_objects_len[LUA_CMD_OBJCACHE_SIZE];
    static int inuse = 0;   /* Recursive calls detection. */

    /* By using Lua debug hooks it is possible to trigger a recursive call
     * to luaRedisGenericCommand(), which normally should never happen.
     * To make this function reentrant is futile and makes it slower, but
     * we should at least detect such a misuse, and abort. */
    if (inuse) {
        char *recursion_warning =
            "luaRedisGenericCommand() recursive call detected. "
            "Are you doing funny stuff with Lua debug hooks?";
        redisLog(REDIS_WARNING,"%s",recursion_warning);
        luaPushError(lua,recursion_warning);
        return 1;
    }
    inuse++;

    /* Require at least one argument */
    if (argc == 0) {
        luaPushError(lua,
            "Please specify at least one argument for redis.call()");
        inuse--;
        return 1;
    }

    /* Build the arguments vector */
    if (argv_size < argc) {
        argv = zrealloc(argv,sizeof(robj*)*argc);
        argv_size = argc;
    }

    for (j = 0; j < argc; j++) {
        char *obj_s;
        size_t obj_len;
        char dbuf[64];

        if (lua_type(lua,j+1) == LUA_TNUMBER) {
            /* We can't use lua_tolstring() for number -> string conversion
             * since Lua uses a format specifier that loses precision. */
            lua_Number num = lua_tonumber(lua,j+1);

            obj_len = snprintf(dbuf,sizeof(dbuf),"%.17g",(double)num);
            obj_s = dbuf;
        } else {
            obj_s = (char*)lua_tolstring(lua,j+1,&obj_len);
            if (obj_s == NULL) break; /* Not a string. */
        }

        /* Try to use a cached object. */
        if (j < LUA_CMD_OBJCACHE_SIZE && cached_objects[j] &&
            cached_objects_len[j] >= obj_len)
        {
            char *s = cached_objects[j]->ptr;
            struct sdshdr *sh = (void*)(s-(sizeof(struct sdshdr)));

            argv[j] = cached_objects[j];
            cached_objects[j] = NULL;
            memcpy(s,obj_s,obj_len+1);
            sh->free += sh->len - obj_len;
            sh->len = obj_len;
        } else {
            argv[j] = createStringObject(obj_s, obj_len);
        }
    }

    /* Check if one of the arguments passed by the Lua script
     * is not a string or an integer (lua_isstring() return true for
     * integers as well). */
    if (j != argc) {
        j--;
        while (j >= 0) {
            decrRefCount(argv[j]);
            j--;
        }
        luaPushError(lua,
            "Lua redis() command arguments must be strings or integers");
        inuse--;
        return 1;
    }

    /* Setup our fake client for command execution */
    c->argv = argv;
    c->argc = argc;

    /* Command lookup */
    cmd = lookupCommand(argv[0]->ptr);
    if (!cmd || ((cmd->arity > 0 && cmd->arity != argc) ||
                   (argc < -cmd->arity)))
    {
        if (cmd)
            luaPushError(lua,
                "Wrong number of args calling Redis command From Lua script");
        else
            luaPushError(lua,"Unknown Redis command called from Lua script");
        goto cleanup;
    }
    c->cmd = cmd;

    /* There are commands that are not allowed inside scripts. */
    if (cmd->flags & REDIS_CMD_NOSCRIPT) {
        luaPushError(lua, "This Redis command is not allowed from scripts");
        goto cleanup;
    }

    /* Write commands are forbidden against read-only slaves, or if a
     * command marked as non-deterministic was already called in the context
     * of this script. */
    if (cmd->flags & REDIS_CMD_WRITE) {
        if (server.lua_random_dirty) {
            luaPushError(lua,
                "Write commands not allowed after non deterministic commands");
            goto cleanup;
        } else if (server.masterhost && server.repl_slave_ro &&
                   !server.loading &&
                   !(server.lua_caller->flags & REDIS_MASTER))
        {
            luaPushError(lua, shared.roslaveerr->ptr);
            goto cleanup;
        } else if (server.stop_writes_on_bgsave_err &&
                   server.saveparamslen > 0 &&
                   server.lastbgsave_status == REDIS_ERR)
        {
            luaPushError(lua, shared.bgsaveerr->ptr);
            goto cleanup;
        }
    }

    /* If we reached the memory limit configured via maxmemory, commands that
     * could enlarge the memory usage are not allowed, but only if this is the
     * first write in the context of this script, otherwise we can't stop
     * in the middle. */
    if (server.maxmemory && server.lua_write_dirty == 0 &&
        (cmd->flags & REDIS_CMD_DENYOOM))
    {
        if (freeMemoryIfNeeded() == REDIS_ERR) {
            luaPushError(lua, shared.oomerr->ptr);
            goto cleanup;
        }
    }

    if (cmd->flags & REDIS_CMD_RANDOM) server.lua_random_dirty = 1;
    if (cmd->flags & REDIS_CMD_WRITE) server.lua_write_dirty = 1;

    /* If this is a Redis Cluster node, we need to make sure Lua is not
     * trying to access non-local keys, with the exception of commands
     * received from our master. */
    if (server.cluster_enabled && !(server.lua_caller->flags & REDIS_MASTER)) {
        /* Duplicate relevant flags in the lua client. */
        c->flags &= ~(REDIS_READONLY|REDIS_ASKING);
        c->flags |= server.lua_caller->flags & (REDIS_READONLY|REDIS_ASKING);
        if (getNodeByQuery(c,c->cmd,c->argv,c->argc,NULL,NULL) !=
                           server.cluster->myself)
        {
            luaPushError(lua,
                "Lua script attempted to access a non local key in a "
                "cluster node");
            goto cleanup;
        }
    }

    /* Run the command */
    call(c,REDIS_CALL_SLOWLOG | REDIS_CALL_STATS);

    /* Convert the result of the Redis command into a suitable Lua type.
     * The first thing we need is to create a single string from the client
     * output buffers. */
    if (listLength(c->reply) == 0 && c->bufpos < REDIS_REPLY_CHUNK_BYTES) {
        /* This is a fast path for the common case of a reply inside the
         * client static buffer. Don't create an SDS string but just use
         * the client buffer directly. */
        c->buf[c->bufpos] = '\0';
        reply = c->buf;
        c->bufpos = 0;
    } else {
        reply = sdsnewlen(c->buf,c->bufpos);
        c->bufpos = 0;
        while(listLength(c->reply)) {
            robj *o = listNodeValue(listFirst(c->reply));

            reply = sdscatlen(reply,o->ptr,sdslen(o->ptr));
            listDelNode(c->reply,listFirst(c->reply));
        }
    }
    if (raise_error && reply[0] != '-') raise_error = 0;
    redisProtocolToLuaType(lua,reply);
    /* Sort the output array if needed, assuming it is a non-null multi bulk
     * reply as expected. */
    if ((cmd->flags & REDIS_CMD_SORT_FOR_SCRIPT) &&
        (reply[0] == '*' && reply[1] != '-')) {
            luaSortArray(lua);
    }
    if (reply != c->buf) sdsfree(reply);
    c->reply_bytes = 0;

cleanup:
    /* Clean up. Command code may have changed argv/argc so we use the
     * argv/argc of the client instead of the local variables. */
    for (j = 0; j < c->argc; j++) {
        robj *o = c->argv[j];

        /* Try to cache the object in the cached_objects array.
         * The object must be small, SDS-encoded, and with refcount = 1
         * (we must be the only owner) for us to cache it. */
        if (j < LUA_CMD_OBJCACHE_SIZE &&
            o->refcount == 1 &&
            (o->encoding == REDIS_ENCODING_RAW ||
             o->encoding == REDIS_ENCODING_EMBSTR) &&
            sdslen(o->ptr) <= LUA_CMD_OBJCACHE_MAX_LEN)
        {
            struct sdshdr *sh = (void*)(((char*)(o->ptr))-(sizeof(struct sdshdr)));

            if (cached_objects[j]) decrRefCount(cached_objects[j]);
            cached_objects[j] = o;
            cached_objects_len[j] = sh->free + sh->len;
        } else {
            decrRefCount(o);
        }
    }

    if (c->argv != argv) {
        zfree(c->argv);
        argv = NULL;
        argv_size = 0;
    }

    if (raise_error) {
        /* If we are here we should have an error in the stack, in the
         * form of a table with an "err" field. Extract the string to
         * return the plain error. */
        lua_pushstring(lua,"err");
        lua_gettable(lua,-2);
        inuse--;
        return lua_error(lua);
    }
    inuse--;
    return 1;
}
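The "%.17g" conversion above matters because Lua's default number-to-string format (typically "%.14g") can drop significant digits of an IEEE-754 double, while 17 significant digits are enough to round-trip any double. A standalone illustration, independent of the Redis source:

#include <stdio.h>

int main(void) {
    double x = 0.1 + 0.2;                        /* not exactly representable */
    char lossy[64], exact[64];
    snprintf(lossy, sizeof(lossy), "%.14g", x);  /* "0.3"                     */
    snprintf(exact, sizeof(exact), "%.17g", x);  /* "0.30000000000000004"     */
    printf("%%.14g -> %s\n%%.17g -> %s\n", lossy, exact);
    return 0;
}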
Example #3
int main(int argc, char **argv){

	//check usage
	if(argc != 4){
		fprintf(stderr, "usage: \"./pagerank\" damping_factor diffPR maxIterations");
		return EXIT_FAILURE;
	}

	//initialise i/o
	FILE *fin = fopen("collection.txt","r");
	FILE *fout = fopen("pagerankList.txt","w");
	double d, diffPR;
	int maxIterations;
	slist urls = newList((void*)strdup, free);
	d = atof(argv[1]);
	diffPR = atof(argv[2]);
	maxIterations = atoi(argv[3]);

	//read collection.txt
	char buffer[BUFF_SIZE];
	while(fscanf(fin, "%s", buffer) != EOF){
		assert(buffer[0]);
		listEnter(urls, buffer);
	}
	
	int len = listLength(urls);
	Url *aurls = malloc(len * sizeof(url));
	Graph g = newGraph(len);
	Hashmap m = newHashmap((len*3)/2);
	
	//convert llist to array for faster reading
	int i;
	for(i = 0; i < len; i++){
		aurls[i] = malloc(sizeof(url));
		aurls[i]->name = (char*)readList(urls);
		aurls[i]->pRank = (1/(double)len);
		aurls[i]->notActuallyZero = 1;
		mapInsert(m, aurls[i]->name, i);
		listNext(urls);
	}
	listReset(urls);

	//build graph
	for(i = 0; i < len; i++){
		strcpy(buffer, aurls[i]->name);
		strcat(buffer, ".txt");
		FILE *webpage = fopen(buffer, "r");
		char *seen = calloc(len, sizeof(char));
		int nOutgoingLinks = 0;

		fscanf(webpage, "#start Section-1");
		while (fscanf(webpage, "%s", buffer) != EOF){
			if(!strcmp(buffer, "#end")){
				break;
			}

			int j = mapSearch(m, buffer);
			if(j != -1 && j != i && !seen[j]){
				insertEdge(g,j,i);
				seen[j] = 1;
				nOutgoingLinks++;
			}
		}

		if(!nOutgoingLinks){
			int j;
			for(j = 0; j < len; j++){
				if(j != i) {
					insertEdge(g, j, i);
				}
			}
			nOutgoingLinks = len - 1;
			aurls[i]->notActuallyZero = 0;
		}
		aurls[i]->outdeg = nOutgoingLinks;
		fclose(webpage);
		free(seen);
	}
	
	dropMap(m);

	//calculate pagerank
	double diff = diffPR, *newPRanks = malloc(len * sizeof(double));
	for(i = 0; i < maxIterations && diff > diffPR - EPS; i++){
		diff = 0;
		
		int j;
		for(j = 0; j < len; j++){
			double sum = 0;

			slist inUrls;
			for(inUrls = GetAdjacencies(g, j); hasNext(inUrls); listNext(inUrls)){
				int val = *(int*)readList(inUrls);
				sum += aurls[val]->pRank / aurls[val]->outdeg;
			}
			listReset(inUrls);

			sum *= d;
			sum += (1 - d)/len;
			diff += fabs(aurls[j]->pRank - sum);
			newPRanks[j] = sum;
		}
		for(j = 0; j < len; j++) {
			aurls[j]->pRank = newPRanks[j];
		}
	}
	free(newPRanks);			

	mergesort((void**)aurls, len, urlComp, 1);
	print(aurls, fout, len);
	
	for(i = 0; i < len; i++){
		free(aurls[i]);
	}

	free(aurls);
	fclose(fin);
	fclose(fout);
	freeList(urls);
	dropGraph(g);
	return EXIT_SUCCESS;
}
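The inner loop above is the standard PageRank update: for each page j, PR'(j) = (1 - d)/N + d * sum over the pages i linking to j of PR(i)/outdeg(i), iterated until the summed absolute change drops to diffPR or maxIterations is reached. A minimal self-contained sketch of one update pass over a dense adjacency matrix (names and representation are illustrative, not the project's own types):

#include <math.h>

/* adj[i*n + j] != 0 means page i links to page j. Returns the summed
 * absolute change so the caller can loop while it stays above diffPR. */
static double pagerank_pass(int n, const int *adj, const int *outdeg,
                            const double *pr, double *pr_next, double d) {
    double diff = 0.0;
    for (int j = 0; j < n; j++) {
        double sum = 0.0;
        for (int i = 0; i < n; i++)
            if (adj[i * n + j] && outdeg[i] > 0)
                sum += pr[i] / outdeg[i];
        pr_next[j] = (1.0 - d) / n + d * sum;
        diff += fabs(pr_next[j] - pr[j]);
    }
    return diff;
}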
Example #4
int main(int argc, char *argv[])
{
    List list;
    Data *data[6];

    int i, errors = 0;

    for (i = 0; i < 6; i++) {
        data[i] = calloc(1, sizeof(Data));
    }

    listInitialize(&list);

    TEST_INT(listLength(&list), 0);
    TEST_INT(listIsEmpty(&list),  TRUE);

    listAppendTail(&list, data[0]);
    listAppendTail(&list, data[1]);
    listAppendTail(&list, data[2]);
    listAppendTail(&list, data[3]);

    TEST_INT(listLength(&list), 4);
    TEST_INT(listIsEmpty(&list),  FALSE);

    TEST_PTR(listHead(&list), data[0]);
    TEST_PTR(listTail(&list), data[3]);

    TEST_PTR(listNext(data[0]), data[1]);
    TEST_PTR(listNext(data[1]), data[2]);
    TEST_PTR(listNext(data[2]), data[3]);
    TEST_PTR(listNext(data[3]), NULL);

    TEST_PTR(listPrev(data[3]), data[2]);
    TEST_PTR(listPrev(data[2]), data[1]);
    TEST_PTR(listPrev(data[1]), data[0]);
    TEST_PTR(listPrev(data[0]), NULL);

    TEST_PTR(listContaining(data[0]), &list);
    TEST_PTR(listContaining(data[1]), &list);
    TEST_PTR(listContaining(data[2]), &list);
    TEST_PTR(listContaining(data[3]), &list);

    listRemove(&list, data[0]);
    listRemove(&list, data[1]);
    listRemove(&list, data[2]);
    listRemove(&list, data[3]);

    TEST_INT(listLength(&list), 0);
    TEST_INT(listIsEmpty(&list),  TRUE);

    TEST_PTR(listContaining(data[0]), NULL);
    TEST_PTR(listContaining(data[1]), NULL);
    TEST_PTR(listContaining(data[2]), NULL);
    TEST_PTR(listContaining(data[3]), NULL);

    listInsertHead(&list, data[3]);
    listInsertHead(&list, data[2]);
    listInsertHead(&list, data[1]);
    listInsertHead(&list, data[0]);

    TEST_INT(listLength(&list), 4);
    TEST_INT(listIsEmpty(&list),  FALSE);

    TEST_PTR(listHead(&list), data[0]);
    TEST_PTR(listTail(&list), data[3]);

    TEST_PTR(listNext(data[0]), data[1]);
    TEST_PTR(listNext(data[1]), data[2]);
    TEST_PTR(listNext(data[2]), data[3]);
    TEST_PTR(listNext(data[3]), NULL);

    TEST_PTR(listPrev(data[3]), data[2]);
    TEST_PTR(listPrev(data[2]), data[1]);
    TEST_PTR(listPrev(data[1]), data[0]);
    TEST_PTR(listPrev(data[0]), NULL);

    TEST_PTR(listRemoveHead(&list), data[0]);
    TEST_PTR(listRemoveHead(&list), data[1]);
    TEST_PTR(listRemoveTail(&list), data[3]);
    TEST_PTR(listRemoveTail(&list), data[2]);

    TEST_INT(listLength(&list), 0);
    TEST_INT(listIsEmpty(&list),  TRUE);

    listAppendTail(&list, data[0]);
    listAppendTail(&list, data[3]);
    listAppend(&list, data[1], data[0]);
    listInsert(&list, data[2], data[3]);

    TEST_PTR(listRemoveHead(&list), data[0]);
    TEST_PTR(listRemoveHead(&list), data[1]);
    TEST_PTR(listRemoveTail(&list), data[3]);
    TEST_PTR(listRemoveTail(&list), data[2]);

    data[0]->i = 3;
    data[1]->i = 4;
    data[2]->i = 5;
    data[3]->i = 1;
    data[4]->i = 2;
    data[5]->i = 3;

    listAppendTail(&list, data[0]);
    listAppendTail(&list, data[1]);
    listAppendTail(&list, data[2]);
    listAppendTail(&list, data[3]);
    listAppendTail(&list, data[4]);
    listAppendTail(&list, data[5]);

    listSort(&list, cmp);

    TEST_PTR(listRemoveHead(&list), data[3]);
    TEST_PTR(listRemoveHead(&list), data[4]);
    TEST_PTR(listRemoveHead(&list), data[0]);
    TEST_PTR(listRemoveHead(&list), data[5]);
    TEST_PTR(listRemoveHead(&list), data[1]);
    TEST_PTR(listRemoveHead(&list), data[2]);

    exit(errors);
}
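The harness relies on TEST_INT/TEST_PTR macros and a local errors counter that becomes the process exit status. The original macros are not shown; one plausible definition (hypothetical) consistent with how they are used above:

#include <stdio.h>

/* Hypothetical test macros: compare actual vs. expected and count failures
 * in the enclosing 'errors' variable. */
#define TEST_INT(expr, expected) do {                                   \
    long long _v = (expr);                                              \
    if (_v != (long long)(expected)) {                                  \
        fprintf(stderr, "%s:%d: %s == %lld, expected %lld\n",           \
                __FILE__, __LINE__, #expr, _v, (long long)(expected));  \
        errors++;                                                       \
    }                                                                   \
} while (0)

#define TEST_PTR(expr, expected) do {                                   \
    const void *_p = (expr);                                            \
    if (_p != (const void *)(expected)) {                               \
        fprintf(stderr, "%s:%d: %s is not %s\n",                        \
                __FILE__, __LINE__, #expr, #expected);                  \
        errors++;                                                       \
    }                                                                   \
} while (0)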
Example #5
/* Publish a message.
 *
 * Send the message to every client subscribed to the channel, and to every
 * client subscribed to a pattern that matches the channel.
 *
 * pubsubPublishMessage() is the implementation function behind the PUBLISH
 * command: calling it is equivalent to executing PUBLISH, and the
 * notifications received by clients subscribed to database notifications
 * are emitted from here. */
int pubsubPublishMessage(robj *channel, robj *message) {
    int receivers = 0;
    dictEntry *de;
    listNode *ln;
    listIter li;

    /* Send to clients listening for that channel */
    // Fetch the list of clients subscribed to the channel
    // and deliver the message to them
    de = dictFind(server.pubsub_channels,channel);
    if (de) {
        list *list = dictGetVal(de);
        listNode *ln;
        listIter li;

        // Walk the client list, sending the message to each client
        listRewind(list,&li);
        while ((ln = listNext(&li)) != NULL) {
            redisClient *c = ln->value;

            // Reply to the client.
            // Example:
            // 1) "message"
            // 2) "xxx"
            // 3) "hello"
            addReply(c,shared.mbulkhdr[3]);
            // "message" 字符串
            addReply(c,shared.messagebulk);
            // The source channel
            addReplyBulk(c,channel);
            // The message payload
            addReplyBulk(c,message);

            // Count the receiving client
            receivers++;
        }
    }

    /* Send to clients listening to matching channels */
    // Also send the message to clients subscribed to patterns matching the channel
    if (listLength(server.pubsub_patterns)) {

        // Walk the pattern list
        listRewind(server.pubsub_patterns,&li);
        channel = getDecodedObject(channel);
        while ((ln = listNext(&li)) != NULL) {

            // Fetch the pubsubPattern
            pubsubPattern *pat = ln->value;

            // If the channel matches the pattern, send the message to
            // every client subscribed to that pattern
            if (stringmatchlen((char*)pat->pattern->ptr,
                                sdslen(pat->pattern->ptr),
                                (char*)channel->ptr,
                                sdslen(channel->ptr),0)) {

                // Reply to the client.
                // Example:
                // 1) "pmessage"
                // 2) "*"
                // 3) "xxx"
                // 4) "hello"
                addReply(pat->client,shared.mbulkhdr[4]);
                addReply(pat->client,shared.pmessagebulk);
                addReplyBulk(pat->client,pat->pattern);
                addReplyBulk(pat->client,channel);
                addReplyBulk(pat->client,message);

                // Count the receiving client
                receivers++;
            }
        }

        decrRefCount(channel);
    }

    // Return the number of receivers
    return receivers;
}
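On the wire, the reply assembled above (shared.mbulkhdr[3], the "message" bulk, the channel, the payload) reaches a subscriber as a three-element multi bulk. Using the channel and payload from the example in the comments ("xxx" / "hello"), the RESP encoding looks like this, with every line terminated by CRLF:

*3
$7
message
$3
xxx
$5
hello

Pattern deliveries use the four-element "pmessage" form instead, with the matching pattern inserted before the channel.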
Example #6
/* This command implements SCAN, HSCAN and SSCAN commands.
 * If object 'o' is passed, then it must be a Hash or Set object, otherwise
 * if 'o' is NULL the command will operate on the dictionary associated with
 * the current database.
 *
 * When 'o' is not NULL the function assumes that the first argument in
 * the client arguments vector is a key so it skips it before iterating
 * in order to parse options.
 *
 * In the case of a Hash object the function returns both the field and value
 * of every element on the Hash. */
void scanGenericCommand(redisClient *c, robj *o, unsigned long cursor) {
    int i, j;
    list *keys = listCreate();
    listNode *node, *nextnode;
    long count = 10;
    sds pat;
    int patlen, use_pattern = 0;
    dict *ht;

    /* Object must be NULL (to iterate keys names), or the type of the object
     * must be Set, Sorted Set, or Hash. */
    redisAssert(o == NULL || o->type == REDIS_SET || o->type == REDIS_HASH ||
                o->type == REDIS_ZSET);

    /* Set i to the first option argument. The previous one is the cursor. */
    i = (o == NULL) ? 2 : 3; /* Skip the key argument if needed. */

    /* Step 1: Parse options. */
    while (i < c->argc) {
        j = c->argc - i;
        if (!strcasecmp(c->argv[i]->ptr, "count") && j >= 2) {
            if (getLongFromObjectOrReply(c, c->argv[i+1], &count, NULL)
                != REDIS_OK)
            {
                goto cleanup;
            }

            if (count < 1) {
                addReply(c,shared.syntaxerr);
                goto cleanup;
            }

            i += 2;
        } else if (!strcasecmp(c->argv[i]->ptr, "match") && j >= 2) {
            pat = c->argv[i+1]->ptr;
            patlen = sdslen(pat);

            /* The pattern always matches if it is exactly "*", so it is
             * equivalent to disabling it. */
            use_pattern = !(pat[0] == '*' && patlen == 1);

            i += 2;
        } else {
            addReply(c,shared.syntaxerr);
            goto cleanup;
        }
    }

    /* Step 2: Iterate the collection.
     *
     * Note that if the object is encoded with a ziplist, intset, or any other
     * representation that is not a hash table, we are sure that it is also
     * composed of a small number of elements. So to avoid taking state we
     * just return everything inside the object in a single call, setting the
     * cursor to zero to signal the end of the iteration. */

    /* Handle the case of a hash table. */
    ht = NULL;
    if (o == NULL) {
        ht = c->db->dict;
    } else if (o->type == REDIS_SET && o->encoding == REDIS_ENCODING_HT) {
        ht = o->ptr;
    } else if (o->type == REDIS_HASH && o->encoding == REDIS_ENCODING_HT) {
        ht = o->ptr;
        count *= 2; /* We return key / value for this type. */
    } else if (o->type == REDIS_ZSET && o->encoding == REDIS_ENCODING_SKIPLIST) {
        zset *zs = o->ptr;
        ht = zs->dict;
        count *= 2; /* We return key / value for this type. */
    }

    if (ht) {
        void *privdata[2];
        /* We set the max number of iterations to ten times the specified
         * COUNT, so if the hash table is in a pathological state (very
         * sparsely populated) we avoid to block too much time at the cost
         * of returning no or very few elements. */
        long maxiterations = count*10;

        /* We pass two pointers to the callback: the list to which it will
         * add new elements, and the object containing the dictionary so that
         * it is possible to fetch more data in a type-dependent way. */
        privdata[0] = keys;
        privdata[1] = o;
        do {
            cursor = dictScan(ht, cursor, scanCallback, privdata);
        } while (cursor &&
              maxiterations-- &&
              listLength(keys) < (unsigned long)count);
    } else if (o->type == REDIS_SET) {
        int pos = 0;
        int64_t ll;

        while(intsetGet(o->ptr,pos++,&ll))
            listAddNodeTail(keys,createStringObjectFromLongLong(ll));
        cursor = 0;
    } else if (o->type == REDIS_HASH || o->type == REDIS_ZSET) {
        unsigned char *p = ziplistIndex(o->ptr,0);
        unsigned char *vstr;
        unsigned int vlen;
        long long vll;

        while(p) {
            ziplistGet(p,&vstr,&vlen,&vll);
            listAddNodeTail(keys,
                (vstr != NULL) ? createStringObject((char*)vstr,vlen) :
                                 createStringObjectFromLongLong(vll));
            p = ziplistNext(o->ptr,p);
        }
        cursor = 0;
    } else {
        redisPanic("Not handled encoding in SCAN.");
    }

    /* Step 3: Filter elements. */
    node = listFirst(keys);
    while (node) {
        robj *kobj = listNodeValue(node);
        nextnode = listNextNode(node);
        int filter = 0;

        /* Filter element if it does not match the pattern. */
        if (!filter && use_pattern) {
            if (kobj->encoding == REDIS_ENCODING_INT) {
                char buf[REDIS_LONGSTR_SIZE];
                int len;

                redisAssert(kobj->encoding == REDIS_ENCODING_INT);
                len = ll2string(buf,sizeof(buf),(long)kobj->ptr);
                if (!stringmatchlen(pat, patlen, buf, len, 0)) filter = 1;
            } else {
                if (!stringmatchlen(pat, patlen, kobj->ptr, sdslen(kobj->ptr), 0))
                    filter = 1;
            }
        }

        /* Filter element if it is an expired key. */
        if (!filter && o == NULL && expireIfNeeded(c->db, kobj)) filter = 1;

        /* Remove the element and its associated value if needed. */
        if (filter) {
            decrRefCount(kobj);
            listDelNode(keys, node);
        }

        /* If this is a hash or a sorted set, we have a flat list of
         * key-value elements, so if this element was filtered, remove the
         * value, or skip it if it was not filtered: we only match keys. */
        if (o && (o->type == REDIS_ZSET || o->type == REDIS_HASH)) {
            node = nextnode;
            nextnode = listNextNode(node);
            if (filter) {
                kobj = listNodeValue(node);
                decrRefCount(kobj);
                listDelNode(keys, node);
            }
        }
        node = nextnode;
    }

    /* Step 4: Reply to the client. */
    addReplyMultiBulkLen(c, 2);
    addReplyBulkLongLong(c,cursor);

    addReplyMultiBulkLen(c, listLength(keys));
    while ((node = listFirst(keys)) != NULL) {
        robj *kobj = listNodeValue(node);
        addReplyBulk(c, kobj);
        decrRefCount(kobj);
        listDelNode(keys, node);
    }

cleanup:
    listSetFreeMethod(keys,decrRefCountVoid);
    listRelease(keys);
}
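From the client's point of view, the cursor returned in the first reply element is fed back into the next SCAN call until the server returns 0, which is the guarantee the dictScan() loop above provides. A minimal sketch of that loop using the hiredis client library (illustrative only; error handling omitted):

#include <stdio.h>
#include <stdlib.h>
#include <hiredis/hiredis.h>

void scan_all(redisContext *ctx, const char *pattern) {
    unsigned long long cursor = 0;
    do {
        redisReply *r = redisCommand(ctx, "SCAN %llu MATCH %s COUNT 100",
                                     cursor, pattern);
        /* element[0] is the next cursor, element[1] the batch of keys. */
        cursor = strtoull(r->element[0]->str, NULL, 10);
        for (size_t i = 0; i < r->element[1]->elements; i++)
            printf("%s\n", r->element[1]->element[i]->str);
        freeReplyObject(r);
    } while (cursor != 0);
}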
Example #7
void syncCommand(redisClient *c) {
    /* ignore SYNC if already slave or in monitor mode */
    if (c->flags & REDIS_SLAVE) return;

    /* Refuse SYNC requests if we are a slave but the link with our master
     * is not ok... */
    if (server.masterhost && server.repl_state != REDIS_REPL_CONNECTED) {
        addReplyError(c,"Can't SYNC while not connected with my master");
        return;
    }

    /* SYNC can't be issued when the server has pending data to send to
     * the client about already issued commands. We need a fresh reply
     * buffer registering the differences between the BGSAVE and the current
     * dataset, so that we can copy to other slaves if needed. */
    if (listLength(c->reply) != 0) {
        addReplyError(c,"SYNC is invalid with pending input");
        return;
    }

    redisLog(REDIS_NOTICE,"Slave ask for synchronization");
    /* Here we need to check if there is a background saving operation
     * in progress, or if it is required to start one */
    if (server.rdb_child_pid != -1) {
        /* Ok a background save is in progress. Let's check if it is a good
         * one for replication, i.e. if there is another slave that is
         * registering differences since the server forked to save */
        redisClient *slave;
        listNode *ln;
        listIter li;

        listRewind(server.slaves,&li);
        while((ln = listNext(&li))) {
            slave = ln->value;
            if (slave->replstate == REDIS_REPL_WAIT_BGSAVE_END) break;
        }
        if (ln) {
            /* Perfect, the server is already registering differences for
             * another slave. Set the right state, and copy the buffer. */
            copyClientOutputBuffer(c,slave);
            c->replstate = REDIS_REPL_WAIT_BGSAVE_END;
            redisLog(REDIS_NOTICE,"Waiting for end of BGSAVE for SYNC");
        } else {
            /* No way, we need to wait for the next BGSAVE in order to
             * register differences */
            c->replstate = REDIS_REPL_WAIT_BGSAVE_START;
            redisLog(REDIS_NOTICE,"Waiting for next BGSAVE for SYNC");
        }
    } else {
        /* Ok we don't have a BGSAVE in progress, let's start one */
        redisLog(REDIS_NOTICE,"Starting BGSAVE for SYNC");
        if (rdbSaveBackground(server.rdb_filename) != REDIS_OK) {
            redisLog(REDIS_NOTICE,"Replication failed, can't BGSAVE");
            addReplyError(c,"Unable to perform background save");
            return;
        }
        c->replstate = REDIS_REPL_WAIT_BGSAVE_END;
    }
    c->repldbfd = -1;
    c->flags |= REDIS_SLAVE;
    c->slaveseldb = 0;
    listAddNodeTail(server.slaves,c);
    return;
}
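The replstate handling above follows the usual old-style SYNC sequence: the slave is parked in REDIS_REPL_WAIT_BGSAVE_START when no usable BGSAVE exists yet, or in REDIS_REPL_WAIT_BGSAVE_END when attached to an ongoing BGSAVE and accumulating the diff in its output buffer; once the RDB file is ready it is streamed to the slave, which then goes online. A rough summary (the later states are recalled from the same era of the code base and are not shown in this snippet):

WAIT_BGSAVE_START --(BGSAVE starts)--> WAIT_BGSAVE_END --(RDB ready, bulk sent)--> online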
Example #8
void rsql_resetFakeClient(struct redisClient *c) {
    /* Discard the reply objects list from the fake client */
    while(listLength(c->reply))
        listDelNode(c->reply, listFirst(c->reply));

}
Example #9
/* Implementation of the EXEC command: execute the queued transaction commands. */
void execCommand(client *c) {
    int j;
    robj **orig_argv;
    int orig_argc;
    struct redisCommand *orig_cmd;
    int must_propagate = 0; /* Need to propagate MULTI/EXEC to AOF / slaves? */

    if (!(c->flags & CLIENT_MULTI)) {
        addReplyError(c,"EXEC without MULTI");
        return;
    }

    /* Check if we need to abort the EXEC because:
     * 1) Some WATCHed key was touched.
     * 2) There was a previous error while queueing commands.
     * A failed EXEC in the first case returns a multi bulk nil object
     * (technically it is not an error but a special behavior), while
     * in the second an EXECABORT error is returned. */
    if (c->flags & (CLIENT_DIRTY_CAS|CLIENT_DIRTY_EXEC)) {
        addReply(c, c->flags & CLIENT_DIRTY_EXEC ? shared.execaborterr :
                                                  shared.nullmultibulk);
        discardTransaction(c);      /* Abort the transaction */
        goto handle_monitor;
    }

    /* Exec all the queued commands, one by one */
    unwatchAllKeys(c);      /* Release the watched keys before executing the transaction */
    orig_argv = c->argv;
    orig_argc = c->argc;
    orig_cmd = c->cmd;
    addReplyMultiBulkLen(c,c->mstate.count);        /* One reply per queued command */
    for (j = 0; j < c->mstate.count; j++) {
        c->argc = c->mstate.commands[j].argc;
        c->argv = c->mstate.commands[j].argv;
        c->cmd = c->mstate.commands[j].cmd;

        /* Propagate a MULTI request once we encounter the first write op.
         * This way we'll deliver the MULTI/..../EXEC block as a whole and
         * both the AOF and the replication link will have the same consistency
         * and atomicity guarantees. */
        if (!must_propagate && !(c->cmd->flags & CMD_READONLY)) {
            execCommandPropagateMulti(c);
            must_propagate = 1;
        }

        /* Execute the queued command */
        call(c,CMD_CALL_FULL);

        /* Commands may alter argc/argv, restore mstate. */
        c->mstate.commands[j].argc = c->argc;
        c->mstate.commands[j].argv = c->argv;
        c->mstate.commands[j].cmd = c->cmd;
    }
    c->argv = orig_argv;
    c->argc = orig_argc;
    c->cmd = orig_cmd;
    discardTransaction(c);
    /* Make sure the EXEC command will be propagated as well if MULTI
     * was already propagated. */
    if (must_propagate) server.dirty++;

handle_monitor:
    /* Send EXEC to clients waiting data from MONITOR. We do it here
     * since the natural order of commands execution is actually:
     * MULTI, EXEC, ... commands inside transaction ...
     * Instead EXEC is flagged as CMD_SKIP_MONITOR in the command
     * table, and we do it here with correct ordering.
     *
     * The commands between MULTI and EXEC are only queued, not executed,
     * so their execution is fed to the monitoring clients here. */
    if (listLength(server.monitors) && !server.loading)
        replicationFeedMonitors(c,server.monitors,c->db->id,c->argv,c->argc);
}
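An illustrative MULTI/EXEC session as seen from redis-cli, matching the flow above (commands are queued, then executed and answered in a single multi bulk by EXEC; the values shown assume the key did not exist beforehand):

redis> MULTI
OK
redis> INCR counter
QUEUED
redis> GET counter
QUEUED
redis> EXEC
1) (integer) 1
2) "1"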
Example #10
File: vm.c  Project: ambakshi/redis
/* How good a candidate is this object for swapping?
 * The better candidate it is, the greater the returned value.
 *
 * Currently we try to perform a fast estimation of the object size in
 * memory, and combine it with aging information.
 *
 * Basically swappability = idle-time * log(estimated size)
 *
 * Bigger objects are preferred over smaller objects, but not
 * proportionally, this is why we use the logarithm. This algorithm is
 * just a first try and will probably be tuned later. */
double computeObjectSwappability(robj *o) {
    /* actual age can be >= minage, but not < minage. As we use wrapping
     * 21 bit clocks with minutes resolution for the LRU. */
    time_t minage = estimateObjectIdleTime(o);
#ifdef _WIN32
    ssize_t asize = 0, elesize;
#else
    long asize = 0, elesize;
#endif
    robj *ele;
    list *l;
    listNode *ln;
    dict *d;
    struct dictEntry *de;

    if (minage <= 0) return 0;
    switch(o->type) {
    case REDIS_STRING:
        if (o->encoding != REDIS_ENCODING_RAW) {
            asize = sizeof(*o);
        } else {
#ifdef _WIN32
            asize = sdslen(o->ptr)+sizeof(*o)+sizeof(size_t)*2;
#else
            asize = sdslen(o->ptr)+sizeof(*o)+sizeof(long)*2;
#endif
        }
        break;
    case REDIS_LIST:
        if (o->encoding == REDIS_ENCODING_ZIPLIST) {
            asize = sizeof(*o)+ziplistBlobLen(o->ptr);
        } else {
            l = o->ptr;
            ln = listFirst(l);
            asize = sizeof(list);
            if (ln) {
                ele = ln->value;
                elesize = (ele->encoding == REDIS_ENCODING_RAW) ?
                                (sizeof(*o)+sdslen(ele->ptr)) : sizeof(*o);
                asize += (sizeof(listNode)+elesize)*listLength(l);
            }
        }
        break;
    case REDIS_SET:
        if (o->encoding == REDIS_ENCODING_INTSET) {
            intset *is = o->ptr;
            asize = sizeof(*is)+is->encoding*is->length;
        } else {
            d = o->ptr;
            asize = sizeof(dict)+(sizeof(struct dictEntry*)*dictSlots(d));
            if (dictSize(d)) {
                de = dictGetRandomKey(d);
                ele = dictGetEntryKey(de);
                elesize = (ele->encoding == REDIS_ENCODING_RAW) ?
                                (sizeof(*o)+sdslen(ele->ptr)) : sizeof(*o);
                asize += (sizeof(struct dictEntry)+elesize)*dictSize(d);
            }
        }
        break;
    case REDIS_ZSET:
        if (o->encoding == REDIS_ENCODING_ZIPLIST) {
            asize = sizeof(*o)+(ziplistBlobLen(o->ptr) / 2);
        } else {
            d = ((zset*)o->ptr)->dict;
            asize = sizeof(zset)+(sizeof(struct dictEntry*)*dictSlots(d));
            if (dictSize(d)) {
                de = dictGetRandomKey(d);
                ele = dictGetEntryKey(de);
                elesize = (ele->encoding == REDIS_ENCODING_RAW) ?
                                (sizeof(*o)+sdslen(ele->ptr)) : sizeof(*o);
                asize += (sizeof(struct dictEntry)+elesize)*dictSize(d);
                asize += sizeof(zskiplistNode)*dictSize(d);
            }
        }
        break;
    case REDIS_HASH:
        if (o->encoding == REDIS_ENCODING_ZIPMAP) {
            unsigned char *p = zipmapRewind((unsigned char*)o->ptr);
            unsigned int len = zipmapLen((unsigned char*)o->ptr);
            unsigned int klen, vlen;
            unsigned char *key, *val;

            if ((p = zipmapNext(p,&key,&klen,&val,&vlen)) == NULL) {
                klen = 0;
                vlen = 0;
            }
            asize = len*(klen+vlen+3);
        } else if (o->encoding == REDIS_ENCODING_HT) {
            d = o->ptr;
            asize = sizeof(dict)+(sizeof(struct dictEntry*)*dictSlots(d));
            if (dictSize(d)) {
                de = dictGetRandomKey(d);
                ele = dictGetEntryKey(de);
                elesize = (ele->encoding == REDIS_ENCODING_RAW) ?
                                (sizeof(*o)+sdslen(ele->ptr)) : sizeof(*o);
                ele = dictGetEntryVal(de);
                elesize = (ele->encoding == REDIS_ENCODING_RAW) ?
                                (sizeof(*o)+sdslen(ele->ptr)) : sizeof(*o);
                asize += (sizeof(struct dictEntry)+elesize)*dictSize(d);
            }
        }
        break;
    }
    return (double)minage*log(1+(double)asize);
}
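Plugging numbers into the returned expression makes the "bigger, but only logarithmically" preference concrete. With log() being C's natural logarithm, two objects idle for the same 100 time units compare as follows:

swappability = idle_time * log(1 + estimated_size)

    size = 1024 bytes:  100 * log(1025) ~= 100 * 6.93 ~= 693
    size =   64 bytes:  100 * log(65)   ~= 100 * 4.17 ~= 417

The 16x larger object is preferred, but its score is less than twice as high.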
Example #11
File: vm.c  Project: ambakshi/redis
/* Every time a thread finishes a Job, it writes a byte into the write side
 * of a unix pipe in order to "awake" the main thread, and this function
 * is called.
 *
 * Note that this is called both by the event loop, when an I/O thread
 * sends a byte in the notification pipe, and is also directly called from
 * waitEmptyIOJobsQueue().
 *
 * In the latter case we don't want to swap more, so we use the
 * "privdata" argument setting it to a not NULL value to signal this
 * condition. */
void vmThreadedIOCompletedJob(aeEventLoop *el, int fd, void *privdata,
            int mask)
{
    char buf[1];
    int retval, processed = 0, toprocess = -1, trytoswap = 1;
    REDIS_NOTUSED(el);
    REDIS_NOTUSED(mask);
    REDIS_NOTUSED(privdata);

    if (privdata != NULL) trytoswap = 0; /* check the comments above... */

    /* For every byte we read in the read side of the pipe, there is one
     * I/O job completed to process. */
#ifndef _WIN32
    while((retval = read(fd,buf,1)) == 1) {
#else
    DWORD pipe_is_on = 0;

    while (1) {
        retval = 0;
        /*Windows fix: We need to peek pipe, since read would block. */
        if (!PeekNamedPipe((HANDLE) _get_osfhandle(fd), NULL, 0, NULL, &pipe_is_on, NULL)) {
           redisLog(REDIS_DEBUG,"PeekReadPipe failed %s", strerror(GetLastError()));
           break;
        }

        /* No data on pipe */
        if (!pipe_is_on)
            break;

        if ((retval = read(fd,buf,1)) != 1)
            break;
#endif
        iojob *j;
        listNode *ln;
        struct dictEntry *de;

        /* Get the processed element (the oldest one) */
        lockThreadedIO();
        redisLog(REDIS_DEBUG,"Processing I/O completed job");
        redisAssert(listLength(server.io_processed) != 0);
        if (toprocess == -1) {
            toprocess = (listLength(server.io_processed)*REDIS_MAX_COMPLETED_JOBS_PROCESSED)/100;
            if (toprocess <= 0) toprocess = 1;
        }
        ln = listFirst(server.io_processed);
        j = ln->value;
        listDelNode(server.io_processed,ln);
        unlockThreadedIO();
        /* If this job is marked as canceled, just ignore it */
        if (j->canceled) {
            freeIOJob(j);
            continue;
        }
        /* Post process it in the main thread, as there are things we
         * can do just here to avoid race conditions and/or invasive locks */
        redisLog(REDIS_DEBUG,"COMPLETED Job type: %d, ID %p, key: %s", j->type, (void*)j->id, (unsigned char*)j->key->ptr);
        de = dictFind(j->db->dict,j->key->ptr);
        redisAssert(de != NULL);
        if (j->type == REDIS_IOJOB_LOAD) {
            redisDb *db;
            vmpointer *vp = dictGetEntryVal(de);

            /* Key loaded, bring it at home */
            vmMarkPagesFree(vp->page,vp->usedpages);
            redisLog(REDIS_DEBUG, "VM: object %s loaded from disk (threaded)",
                (unsigned char*) j->key->ptr);
            server.vm_stats_swapped_objects--;
            server.vm_stats_swapins++;
            dictGetEntryVal(de) = j->val;
            incrRefCount(j->val);
            db = j->db;
            /* Handle clients waiting for this key to be loaded. */
            handleClientsBlockedOnSwappedKey(db,j->key);
            freeIOJob(j);
            zfree(vp);
        } else if (j->type == REDIS_IOJOB_PREPARE_SWAP) {
            /* Now we know the amount of pages required to swap this object.
             * Let's find some space for it, and queue this task again
             * rebranded as REDIS_IOJOB_DO_SWAP. */
            if (!vmCanSwapOut() ||
                vmFindContiguousPages(&j->page,j->pages) == REDIS_ERR)
            {
                /* Ooops... no space or we can't swap as there is
                 * a fork()ed Redis trying to save stuff on disk. */
                j->val->storage = REDIS_VM_MEMORY; /* undo operation */
                freeIOJob(j);
            } else {
                /* Note that we need to mark this pages as used now,
                 * if the job will be canceled, we'll mark them as freed
                 * again. */
                vmMarkPagesUsed(j->page,j->pages);
                j->type = REDIS_IOJOB_DO_SWAP;
                lockThreadedIO();
                queueIOJob(j);
                unlockThreadedIO();
            }
        } else if (j->type == REDIS_IOJOB_DO_SWAP) {
            vmpointer *vp;

            /* Key swapped. We can finally free some memory. */
            if (j->val->storage != REDIS_VM_SWAPPING) {
                vmpointer *vp = (vmpointer*) j->id;
                printf("storage: %d\n",vp->storage);
                printf("key->name: %s\n",(char*)j->key->ptr);
                printf("val: %p\n",(void*)j->val);
                printf("val->type: %d\n",j->val->type);
                printf("val->ptr: %s\n",(char*)j->val->ptr);
            }
            redisAssert(j->val->storage == REDIS_VM_SWAPPING);
            vp = createVmPointer(j->val);
            vp->page = j->page;
            vp->usedpages = j->pages;
            dictGetEntryVal(de) = vp;
            /* Fix the storage otherwise decrRefCount will attempt to
             * remove the associated I/O job */
            j->val->storage = REDIS_VM_MEMORY;
            decrRefCount(j->val);
            redisLog(REDIS_DEBUG,
                "VM: object %s swapped out at %lld (%lld pages) (threaded)",
                (unsigned char*) j->key->ptr,
                (unsigned long long) j->page, (unsigned long long) j->pages);
            server.vm_stats_swapped_objects++;
            server.vm_stats_swapouts++;
            freeIOJob(j);
            /* Put a few more swap requests in queue if we are still
             * out of memory */
            if (trytoswap && vmCanSwapOut() &&
                zmalloc_used_memory() > server.vm_max_memory)
            {
                int more = 1;
                while(more) {
                    lockThreadedIO();
                    more = listLength(server.io_newjobs) <
                            (unsigned) server.vm_max_threads;
                    unlockThreadedIO();
                    /* Don't waste CPU time if swappable objects are rare. */
                    if (vmSwapOneObjectThreaded() == REDIS_ERR) {
                        trytoswap = 0;
                        break;
                    }
                }
            }
        }
        processed++;
        if (processed == toprocess) return;
    }
    if (retval < 0 && errno != EAGAIN) {
        redisLog(REDIS_WARNING,
            "WARNING: read(2) error in vmThreadedIOCompletedJob() %s",
            strerror(errno));
    }
}

void lockThreadedIO(void) {
    pthread_mutex_lock(&server.io_mutex);
}
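The producer side of the mechanism described in the comment is the mirror image of this handler: after finishing a job, an I/O thread moves it to the processed list and writes one byte into the pipe so the event loop wakes up. A minimal sketch of that side (the io_ready_pipe_write field name is an assumption, not verified against the original file):

#include <unistd.h>

/* Hypothetical producer: hand a completed job back to the main thread. */
static void notify_job_done(iojob *j) {
    char b = 'x';

    lockThreadedIO();
    listAddNodeTail(server.io_processed, j);   /* oldest-first queue drained above */
    unlockThreadedIO();

    /* One byte per completed job; the event loop reads it and ends up in
     * vmThreadedIOCompletedJob(). A short write is tolerable here because
     * the processed queue is drained independently of the byte count. */
    if (write(server.io_ready_pipe_write, &b, 1) != 1) {
        /* EAGAIN can be ignored. */
    }
}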
Example #12
int
main(int argc, char **argv)
{
    int argn;
    FT_Error ftrc;
    int rc, ll = 0;
    char prefix[NPREFIX];

    ProgramName = argv[0];
    encodingPrefix = NULL;
    exclusionSuffix = NULL;

    if(getcwd(prefix, NPREFIX - 1) == NULL) {
        perror("Couldn't get cwd");
        exit(1);
    }
    if(prefix[strlen(prefix) - 1] != '/')
        strcat(prefix, "/");
    encodingPrefix = dsprintf("%s", prefix);

    outfilename = NULL;

    encodings = makeList((char **)encodings_array, countof(encodings_array), NULL, 0);

    extra_encodings = makeList((char**)extra_encodings_array,
                               countof(extra_encodings_array),
                               NULL, 0);
    doBitmaps = 0;
    doISO10646_1_encoding = 1;
    doScalable = 1;
    onlyEncodings = 0;
    relative = 0;
    reencodeLegacy = 1;
    encodingsToDo = NULL;

    argn = 1;
    while(argn < argc) {
        if(argv[argn][0] == '\0' || argv[argn][0] != '-')
            break;
        if(argv[argn][1] == '-') {
            argn++;
            break;
        } else if (strcmp(argv[argn], "-x") == 0) {
            if(argn >= argc - 1) {
                missing_arg("-x");
            }
            exclusionSuffix = argv[argn + 1];
            argn += 2;
        } else if(strcmp(argv[argn], "-a") == 0) {
            if(argn >= argc - 1) {
                missing_arg("-a");
            }
            makeList(&argv[argn + 1], 1, encodings, 0);
            argn += 2;
        } else if(strcmp(argv[argn], "-p") == 0) {
            if(argn >= argc - 1) {
                missing_arg("-p");
            }
            if(strlen(argv[argn + 1]) > NPREFIX - 1) {
                fprintf(stderr, "%s: argument to -p cannot be longer than "
                        "%d characters\n", ProgramName, NPREFIX - 1);
                usage();
            }
            free(encodingPrefix);
            encodingPrefix = dsprintf("%s", argv[argn + 1]);
            argn += 2;
        } else if(strcmp(argv[argn], "-e") == 0) {
            if(argn >= argc - 1) {
                missing_arg("-e");
            }
            rc = readEncodings(encodingsToDo, argv[argn + 1]);
            if(rc < 0)
                exit(1);
            argn += 2;
        } else if(strcmp(argv[argn], "-b") == 0) {
            doBitmaps = 1;
            argn++;
        } else if(strcmp(argv[argn], "-u") == 0) {
            doISO10646_1_encoding = 0;
            argn++;
        } else if(strcmp(argv[argn], "-U") == 0) {
            doISO10646_1_encoding = 1;
            argn++;
        } else if(strcmp(argv[argn], "-s") == 0) {
            doScalable = 0;
            argn++;
        } else if(strcmp(argv[argn], "-n") == 0) {
            onlyEncodings = 1;
            argn++;
        } else if(strcmp(argv[argn], "-r") == 0) {
            relative = 1;
            argn++;
        } else if(strcmp(argv[argn], "-l") == 0) {
            reencodeLegacy = !reencodeLegacy;
            argn++;
        } else if(strcmp(argv[argn], "-o") == 0) {
            if(argn >= argc - 1) {
                missing_arg("-o");
            }
            outfilename = argv[argn + 1];
            argn += 2;
        } else if(strcmp(argv[argn], "-f") == 0) {
            if(argn >= argc - 1) {
                missing_arg("-f");
            }
            bigEncodingFuzz = atof(argv[argn + 1]) / 100.0;
            argn += 2;
	} else if (strcmp(argv[argn], "-v") == 0) {
	    printf("%s\n", PACKAGE_STRING);
	    exit(0);
	} else {
            usage();
        }
    }

    if(outfilename == NULL) {
        if(doBitmaps)
            outfilename = "fonts.dir";
        else
            outfilename = "fonts.scale";
    }

    ftrc = FT_Init_FreeType(&ft_library);
    if(ftrc) {
        fprintf(stderr, "Could not initialise FreeType library: %d\n", ftrc);
        exit(1);
    }

    ll = listLength(encodingsToDo);

    if (argn == argc)
        doDirectory(".", ll, encodingsToDo);
    else
        while(argn < argc) {
            doDirectory(argv[argn], ll, encodingsToDo);
            argn++;
        }
    return 0;
}
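Based purely on the option parsing above (-b switches to bitmap indexing and the default output file becomes fonts.dir, -o overrides the output name, the remaining arguments are directories, and "." is used when none are given), typical invocations would look like the following; the installed binary name is assumed to be mkfontscale:

# index scalable fonts in the current directory into fonts.scale
mkfontscale

# index bitmap fonts of a specific directory into fonts.dir
mkfontscale -b -o fonts.dir /usr/share/fonts/misc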
Example #13
void execCommand(redisClient *c) {
    int j;
    robj **orig_argv;
    int orig_argc;
    struct redisCommand *orig_cmd;

    if (!(c->flags & REDIS_MULTI)) {
        addReplyError(c,"EXEC without MULTI");
        return;
    }

    /* Check if we need to abort the EXEC if some WATCHed key was touched.
     * A failed EXEC will return a multi bulk nil object. */
    if (c->flags & REDIS_DIRTY_CAS) {
        freeClientMultiState(c);
        initClientMultiState(c);
        c->flags &= ~(REDIS_MULTI|REDIS_DIRTY_CAS);
        unwatchAllKeys(c);
        addReply(c,shared.nullmultibulk);
        goto handle_monitor;
    }

    /* Replicate a MULTI request now that we are sure the block is executed.
     * This way we'll deliver the MULTI/..../EXEC block as a whole and
     * both the AOF and the replication link will have the same consistency
     * and atomicity guarantees. */
    execCommandReplicateMulti(c);

    /* Exec all the queued commands */
    unwatchAllKeys(c); /* Unwatch ASAP otherwise we'll waste CPU cycles */
    orig_argv = c->argv;
    orig_argc = c->argc;
    orig_cmd = c->cmd;
    addReplyMultiBulkLen(c,c->mstate.count);
    for (j = 0; j < c->mstate.count; j++) {
        c->argc = c->mstate.commands[j].argc;
        c->argv = c->mstate.commands[j].argv;
        c->cmd = c->mstate.commands[j].cmd;
        call(c,REDIS_CALL_FULL);

        /* Commands may alter argc/argv, restore mstate. */
        c->mstate.commands[j].argc = c->argc;
        c->mstate.commands[j].argv = c->argv;
        c->mstate.commands[j].cmd = c->cmd;
    }
    c->argv = orig_argv;
    c->argc = orig_argc;
    c->cmd = orig_cmd;
    freeClientMultiState(c);
    initClientMultiState(c);
    c->flags &= ~(REDIS_MULTI|REDIS_DIRTY_CAS);
    /* Make sure the EXEC command is always replicated / AOF, since we
     * always send the MULTI command (we can't know beforehand if the
     * next operations will contain at least a modification to the DB). */
    server.dirty++;

handle_monitor:
    /* Send EXEC to clients waiting data from MONITOR. We do it here
     * since the natural order of commands execution is actually:
     * MULTI, EXEC, ... commands inside transaction ...
     * Instead EXEC is flagged as REDIS_CMD_SKIP_MONITOR in the command
     * table, and we do it here with correct ordering. */
    if (listLength(server.monitors) && !server.loading)
        replicationFeedMonitors(c,server.monitors,c->db->id,c->argv,c->argc);
}
Example #14
// The watch mechanism is triggered in touchWatchedKey(), which decides whether
// to set REDIS_DIRTY_CAS. Whether the queued transaction commands are executed
// or the transaction is aborted depends on whether any watched key set
// REDIS_DIRTY_CAS; see execCommand. Keys are unwatched in unwatchAllKeys().
void execCommand(redisClient *c) {
    int j;
    robj **orig_argv;
    int orig_argc;
    struct redisCommand *orig_cmd;
    int must_propagate = 0; /* Need to propagate MULTI/EXEC to AOF / slaves? */

    // The client is not in a transaction
    if (!(c->flags & REDIS_MULTI)) {
        addReplyError(c,"EXEC without MULTI");
        return;
    }

    /* Check if we need to abort the EXEC because:
     *
     * 1) Some WATCHed key was touched.
     *
     * 2) There was a previous error while queueing commands.
     *    (Note: this behavior dates from 2.6.4; before that, commands that
     *    failed to queue were handled silently.)
     *
     * A failed EXEC in the first case returns a multi bulk nil object
     * (technically it is not an error but a special behavior), while
     * in the second an EXECABORT error is returned.
     *
     * Atomicity: the transaction is atomic in the sense that the server
     * treats the queued operations as a single unit: either all of them
     * are executed, or none of them is. */
    if (c->flags & (REDIS_DIRTY_CAS|REDIS_DIRTY_EXEC)) { // A command failed to queue, or a WATCHed key was modified between WATCH and EXEC

        addReply(c, c->flags & REDIS_DIRTY_EXEC ? shared.execaborterr :
                                                  shared.nullmultibulk);

        // Abort the transaction
        discardTransaction(c);

        goto handle_monitor;
    }

    /* Exec all the queued commands */
    // Safety is guaranteed at this point, so stop watching all of the client's keys
    unwatchAllKeys(c); /* Unwatch ASAP otherwise we'll waste CPU cycles */

    // Commands inside the transaction may modify the command and its
    // arguments while executing, so back them up first in order to
    // propagate the commands correctly
    orig_argv = c->argv;
    orig_argc = c->argc;
    orig_cmd = c->cmd;

    addReplyMultiBulkLen(c,c->mstate.count);

    // Execute the commands in the transaction
    for (j = 0; j < c->mstate.count; j++) {

        // Redis commands must be executed in the context of a client, so
        // load the queued command and its arguments into the client
        c->argc = c->mstate.commands[j].argc;
        c->argv = c->mstate.commands[j].argv;
        c->cmd = c->mstate.commands[j].cmd;

        /* Propagate a MULTI request once we encounter the first write op.
         *
         * This way we'll deliver the MULTI/..../EXEC block as a whole and
         * both the AOF and the replication link will have the same consistency
         * and atomicity guarantees. */
        if (!must_propagate && !(c->cmd->flags & REDIS_CMD_READONLY)) {

            // Propagate the MULTI command
            execCommandPropagateMulti(c);

            // Flag it so MULTI is only propagated once
            must_propagate = 1;
        }

        // Execute the command
        call(c,REDIS_CALL_FULL);

        /* Commands may alter argc/argv, restore mstate. */
        // The command and its arguments may be rewritten during execution
        // (for example SPOP is rewritten as SREM), so the queued command
        // and arguments are updated here to keep slaves and the AOF consistent
        c->mstate.commands[j].argc = c->argc;
        c->mstate.commands[j].argv = c->argv;
        c->mstate.commands[j].cmd = c->cmd;
    }

    // Restore the original command and arguments
    c->argv = orig_argv;
    c->argc = orig_argc;
    c->cmd = orig_cmd;

    // Clean up the transaction state
    discardTransaction(c);

    /* Make sure the EXEC command will be propagated as well if MULTI
     * was already propagated. */
    // Mark the server dirty to make sure EXEC is propagated as well
    if (must_propagate) server.dirty++;

handle_monitor:
    /* Send EXEC to clients waiting data from MONITOR. We do it here
     * since the natural order of commands execution is actually:
     * MULTI, EXEC, ... commands inside transaction ...
     * Instead EXEC is flagged as REDIS_CMD_SKIP_MONITOR in the command
     * table, and we do it here with correct ordering. */
    if (listLength(server.monitors) && !server.loading)
        replicationFeedMonitors(c,server.monitors,c->db->id,c->argv,c->argc);
}
Example #15
/* Return the number of channels + patterns a client is subscribed to. */
int clientSubscriptionsCount(client *c) {
    return dictSize(c->pubsub_channels)+
           listLength(c->pubsub_patterns);
}
Example #16
File: evict.c  Project: Xwuming/misc
int freeMemoryIfNeeded(void) {
    size_t mem_reported, mem_used, mem_tofree, mem_freed;
    int slaves = listLength(server.slaves);
    mstime_t latency, eviction_latency;
    long long delta;

    /* Check if we are over the memory usage limit. If we are not, no need
     * to subtract the slaves output buffers. We can just return ASAP. */
    mem_reported = zmalloc_used_memory();
    if (mem_reported <= server.maxmemory) return C_OK;

    /* Remove the size of slaves output buffers and AOF buffer from the
     * count of used memory. */
    mem_used = mem_reported;
    if (slaves) {
        listIter li;
        listNode *ln;

        listRewind(server.slaves,&li);
        while((ln = listNext(&li))) {
            client *slave = listNodeValue(ln);
            unsigned long obuf_bytes = getClientOutputBufferMemoryUsage(slave);
            if (obuf_bytes > mem_used)
                mem_used = 0;
            else
                mem_used -= obuf_bytes;
        }
    }
    if (server.aof_state != AOF_OFF) {
        mem_used -= sdslen(server.aof_buf);
        mem_used -= aofRewriteBufferSize();
    }

    /* Check if we are still over the memory limit. */
    if (mem_used <= server.maxmemory) return C_OK;

    /* Compute how much memory we need to free. */
    mem_tofree = mem_used - server.maxmemory;
    mem_freed = 0;

    if (server.maxmemory_policy == MAXMEMORY_NO_EVICTION)
        goto cant_free; /* We need to free memory, but policy forbids. */

    latencyStartMonitor(latency);
    while (mem_freed < mem_tofree) {
        int j, k, i, keys_freed = 0;
        static int next_db = 0;
        sds bestkey = NULL;
        int bestdbid;
        redisDb *db;
        dict *dict;
        dictEntry *de;

        if (server.maxmemory_policy == MAXMEMORY_ALLKEYS_LRU ||
            server.maxmemory_policy == MAXMEMORY_VOLATILE_LRU)
        {
            struct evictionPoolEntry *pool = EvictionPoolLRU;

            while(bestkey == NULL) {
                unsigned long total_keys = 0, keys;

                /* We don't want to make local-db choices when expiring keys,
                 * so to start populate the eviction pool sampling keys from
                 * every DB. */
                for (i = 0; i < server.dbnum; i++) {
                    db = server.db+i;
                    dict = (server.maxmemory_policy == MAXMEMORY_ALLKEYS_LRU) ?
                            db->dict : db->expires;
                    if ((keys = dictSize(dict)) != 0) {
                        evictionPoolPopulate(i, dict, db->dict, pool);
                        total_keys += keys;
                    }
                }
                if (!total_keys) break; /* No keys to evict. */

                /* Go backward from best to worst element to evict. */
                for (k = EVPOOL_SIZE-1; k >= 0; k--) {
                    if (pool[k].key == NULL) continue;
                    bestdbid = pool[k].dbid;

                    if (server.maxmemory_policy == MAXMEMORY_ALLKEYS_LRU) {
                        de = dictFind(server.db[pool[k].dbid].dict,
                            pool[k].key);
                    } else {
                        de = dictFind(server.db[pool[k].dbid].expires,
                            pool[k].key);
                    }

                    /* Remove the entry from the pool. */
                    if (pool[k].key != pool[k].cached)
                        sdsfree(pool[k].key);
                    pool[k].key = NULL;
                    pool[k].idle = 0;

                    /* If the key exists, it is our pick. Otherwise it is
                     * a ghost and we need to try the next element. */
                    if (de) {
                        bestkey = dictGetKey(de);
                        break;
                    } else {
                        /* Ghost... Iterate again. */
                    }
                }
            }
        }

        /* volatile-random and allkeys-random policy */
        else if (server.maxmemory_policy == MAXMEMORY_ALLKEYS_RANDOM ||
                 server.maxmemory_policy == MAXMEMORY_VOLATILE_RANDOM)
        {
            /* When evicting a random key, we try to evict a key for
             * each DB, so we use the static 'next_db' variable to
             * incrementally visit all DBs. */
            for (i = 0; i < server.dbnum; i++) {
                j = (++next_db) % server.dbnum;
                db = server.db+j;
                dict = (server.maxmemory_policy == MAXMEMORY_ALLKEYS_RANDOM) ?
                        db->dict : db->expires;
                if (dictSize(dict) != 0) {
                    de = dictGetRandomKey(dict);
                    bestkey = dictGetKey(de);
                    bestdbid = j;
                    break;
                }
            }
        }

        /* volatile-ttl */
        else if (server.maxmemory_policy == MAXMEMORY_VOLATILE_TTL) {
            long bestttl = 0; /* Initialized to avoid warning. */

            /* In this policy we scan a single DB per iteration (visiting
             * a different DB per call), expiring the key with the smallest
             * TTL among the few sampled.
             *
             * Note that this algorithm makes local-DB choices, and should
             * use a pool and code more similar to the one used in the
             * LRU eviction policies in the future. */
            for (i = 0; i < server.dbnum; i++) {
                j = (++next_db) % server.dbnum;
                db = server.db+j;
                dict = db->expires;
                if (dictSize(dict) != 0) {
                    for (k = 0; k < server.maxmemory_samples; k++) {
                        sds thiskey;
                        long thisttl;

                        de = dictGetRandomKey(dict);
                        thiskey = dictGetKey(de);
                        thisttl = (long) dictGetVal(de);

                        /* Keys expiring sooner (smaller unix timestamp) are
                         * better candidates for deletion */
                        if (bestkey == NULL || thisttl < bestttl) {
                            bestkey = thiskey;
                            bestttl = thisttl;
                            bestdbid = j;
                        }
                    }
                }
            }
        }

        /* Finally remove the selected key. */
        if (bestkey) {
            db = server.db+bestdbid;
            robj *keyobj = createStringObject(bestkey,sdslen(bestkey));
            propagateExpire(db,keyobj,server.lazyfree_lazy_eviction);
            /* We compute the amount of memory freed by db*Delete() alone.
             * It is possible that actually the memory needed to propagate
             * the DEL in AOF and replication link is greater than the one
             * we are freeing removing the key, but we can't account for
             * that otherwise we would never exit the loop.
             *
             * AOF and Output buffer memory will be freed eventually so
             * we only care about memory used by the key space. */
            delta = (long long) zmalloc_used_memory();
            latencyStartMonitor(eviction_latency);
            if (server.lazyfree_lazy_eviction)
                dbAsyncDelete(db,keyobj);
            else
                dbSyncDelete(db,keyobj);
            latencyEndMonitor(eviction_latency);
            latencyAddSampleIfNeeded("eviction-del",eviction_latency);
            latencyRemoveNestedEvent(latency,eviction_latency);
            delta -= (long long) zmalloc_used_memory();
            mem_freed += delta;
            server.stat_evictedkeys++;
            notifyKeyspaceEvent(NOTIFY_EVICTED, "evicted",
                keyobj, db->id);
            decrRefCount(keyobj);
            keys_freed++;

            /* When the memory to free starts to be big enough, we may
             * start spending so much time here that it is impossible to
             * deliver data to the slaves fast enough, so we force the
             * transmission here inside the loop. */
            if (slaves) flushSlavesOutputBuffers();
        }

        if (!keys_freed) {
            latencyEndMonitor(latency);
            latencyAddSampleIfNeeded("eviction-cycle",latency);
            goto cant_free; /* nothing to free... */
        }
    }
    latencyEndMonitor(latency);
    latencyAddSampleIfNeeded("eviction-cycle",latency);
    return C_OK;

cant_free:
    /* We are here if we are not able to reclaim memory. There is only one
     * last thing we can try: check if the lazyfree thread has jobs in queue
     * and wait... */
    while(bioPendingJobsOfType(BIO_LAZY_FREE)) {
        if (((mem_reported - zmalloc_used_memory()) + mem_freed) >= mem_tofree)
            break;
        usleep(1000);
    }
    return C_ERR;
}
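A hedged aside on how this function is typically consumed: the sketch below shows a hypothetical helper (not part of the code above) that refuses a memory-hungry command when eviction cannot reclaim enough memory, in the spirit of the server's command-dispatch OOM check.

/* Hypothetical sketch: reject a command flagged CMD_DENYOOM when we are over
 * the maxmemory limit and freeMemoryIfNeeded() could not reclaim enough. */
static int rejectCommandIfOOM(client *c) {
    if (server.maxmemory && (c->cmd->flags & CMD_DENYOOM)) {
        if (freeMemoryIfNeeded() == C_ERR) {
            addReply(c, shared.oomerr);
            return 1; /* caller should skip executing the command */
        }
    }
    return 0; /* safe to proceed */
}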
Exemplo n.º 17
0
int luaRedisGenericCommand(lua_State *lua, int raise_error) {
    int j, argc = lua_gettop(lua);
    struct redisCommand *cmd;
    robj **argv;
    redisClient *c = server.lua_client;
    sds reply;

    // At least one argument is required
    /* Require at least one argument */
    if (argc == 0) {
        luaPushError(lua,
            "Please specify at least one argument for redis.call()");
        return 1;
    }

    // Build the argument vector
    /* Build the arguments vector */
    argv = zmalloc(sizeof(robj*)*argc);
    for (j = 0; j < argc; j++) {
        if (!lua_isstring(lua,j+1)) break;
        argv[j] = createStringObject((char*)lua_tostring(lua,j+1),
                                     lua_strlen(lua,j+1));
    }

    /* Check if one of the arguments passed by the Lua script
     * is not a string or an integer (lua_isstring() return true for
     * integers as well). */
    if (j != argc) {
        j--;
        while (j >= 0) {
            decrRefCount(argv[j]);
            j--;
        }
        zfree(argv);
        luaPushError(lua,
            "Lua redis() command arguments must be strings or integers");
        return 1;
    }

    // Install the arguments on the fake client
    /* Setup our fake client for command execution */
    c->argv = argv;
    c->argc = argc;

    // Look up the command
    /* Command lookup */
    cmd = lookupCommand(argv[0]->ptr);
    if (!cmd || ((cmd->arity > 0 && cmd->arity != argc) ||
                   (argc < -cmd->arity)))
    {
        if (cmd)
            luaPushError(lua,
                "Wrong number of args calling Redis command From Lua script");
        else
            luaPushError(lua,"Unknown Redis command called from Lua script");
        goto cleanup;
    }

    /* There are commands that are not allowed inside scripts. */
    if (cmd->flags & REDIS_CMD_NOSCRIPT) {
        luaPushError(lua, "This Redis command is not allowed from scripts");
        goto cleanup;
    }

    /* Write commands are forbidden against read-only slaves, or if a
     * command marked as non-deterministic was already called in the context
     * of this script. */
    // Commands that perform writes
    if (cmd->flags & REDIS_CMD_WRITE) {
        // If a non-deterministic command such as RANDOMKEY has already been
        // executed in this Lua script, write commands are not allowed
        if (server.lua_random_dirty) {
            luaPushError(lua,
                "Write commands not allowed after non deterministic commands");
            goto cleanup;

        // If this server is a read-only slave and is not loading data,
        // write commands are not allowed
        } else if (server.masterhost && server.repl_slave_ro &&
                   !server.loading &&
                   !(server.lua_caller->flags & REDIS_MASTER))
        {
            luaPushError(lua, shared.roslaveerr->ptr);
            goto cleanup;

        // If the server is configured to refuse writes when BGSAVE fails,
        // and the last BGSAVE did fail, write commands are not allowed.
        // The server.stop_writes_on_bgsave_err option means that, by default,
        // write commands are rejected while BGSAVE persistence is failing.
        // When clients see their writes fail, they can promptly notice that
        // persistence on the Redis server is broken.
        } else if (server.stop_writes_on_bgsave_err &&
                   server.saveparamslen > 0 &&
                   server.lastbgsave_status == REDIS_ERR)
        {
            luaPushError(lua, shared.bgsaveerr->ptr);
            goto cleanup;
        }
    }

    // If memory usage exceeds the configured limit, run the eviction policy
    /* If we reached the memory limit configured via maxmemory, commands that
     * could enlarge the memory usage are not allowed, but only if this is the
     * first write in the context of this script, otherwise we can't stop
     * in the middle. */
    if (server.maxmemory && server.lua_write_dirty == 0 &&
        (cmd->flags & REDIS_CMD_DENYOOM))
    {
        if (freeMemoryIfNeeded() == REDIS_ERR) {
            luaPushError(lua, shared.oomerr->ptr);
            goto cleanup;
        }
    }

    // Flag that a random / write command has been executed
    if (cmd->flags & REDIS_CMD_RANDOM) server.lua_random_dirty = 1;
    if (cmd->flags & REDIS_CMD_WRITE) server.lua_write_dirty = 1;

    // Execute the command.
    // c is really a fake client; it exists so that the ordinary command
    // handlers can be reused. For example, the handler for GET is:
    // void getCommand(redisClient *c);
    /* Run the command */
    c->cmd = cmd;
    call(c,REDIS_CALL_SLOWLOG | REDIS_CALL_STATS);

    /* Convert the result of the Redis command into a suitable Lua type.
     * The first thing we need is to create a single string from the client
     * output buffers. */
    // Copy the command result out of the redisClient
    reply = sdsempty();
    if (c->bufpos) {
        reply = sdscatlen(reply,c->buf,c->bufpos);
        c->bufpos = 0;
    }
    // Drain the remaining result from the redisClient's reply list
    while(listLength(c->reply)) {
        robj *o = listNodeValue(listFirst(c->reply));

        reply = sdscatlen(reply,o->ptr,sdslen(o->ptr));
        listDelNode(c->reply,listFirst(c->reply));
    }
    if (raise_error && reply[0] != '-') raise_error = 0;
    redisProtocolToLuaType(lua,reply);
    /* Sort the output array if needed, assuming it is a non-null multi bulk
     * reply as expected. */
    if ((cmd->flags & REDIS_CMD_SORT_FOR_SCRIPT) &&
        (reply[0] == '*' && reply[1] != '-')) {
            luaSortArray(lua);
    }
    sdsfree(reply);
    c->reply_bytes = 0;

cleanup:
    /* Clean up. Command code may have changed argv/argc so we use the
     * argv/argc of the client instead of the local variables. */
    for (j = 0; j < c->argc; j++)
        decrRefCount(c->argv[j]);
    zfree(c->argv);

    if (raise_error) {
        /* If we are here we should have an error in the stack, in the
         * form of a table with an "err" field. Extract the string to
         * return the plain error. */
        lua_pushstring(lua,"err");
        lua_gettable(lua,-2);
        return lua_error(lua);
    }
    return 1;
}
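For context, luaRedisGenericCommand() is normally reached through two thin wrappers registered as redis.call() and redis.pcall(); the sketch below is reconstructed from memory and should be treated as an assumption rather than a verbatim excerpt.

/* Hedged sketch: redis.call() raises command errors as Lua errors, while
 * redis.pcall() returns them to the script as a table with an "err" field.
 * Both simply delegate to luaRedisGenericCommand(). */
int luaRedisCallCommand(lua_State *lua) {
    return luaRedisGenericCommand(lua, 1);   /* raise_error = 1 */
}

int luaRedisPCallCommand(lua_State *lua) {
    return luaRedisGenericCommand(lua, 0);   /* raise_error = 0 */
}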
Exemplo n.º 18
0
/* Remove all the entries from the current slow log. */
void slowlogReset(void) {
    while (listLength(server.slowlog) > 0)
        listDelNode(server.slowlog,listLast(server.slowlog));
}
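As an illustration of where this helper is used, here is a minimal hedged sketch of a SLOWLOG RESET handler; the wrapper function name is hypothetical, and in the real server this logic sits inside the SLOWLOG command dispatcher.

/* Hypothetical sketch: the SLOWLOG RESET subcommand empties the log and
 * acknowledges with +OK. */
static void slowlogResetSubcommand(client *c) {
    slowlogReset();
    addReply(c, shared.ok);
}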
Exemplo n.º 19
0
// EXEC command implementation
void execCommand(client *c) {
    int j;
    robj **orig_argv;
    int orig_argc;
    struct redisCommand *orig_cmd;
    // Flag: has MULTI been propagated yet?
    int must_propagate = 0; /* Need to propagate MULTI/EXEC to AOF / slaves? */
    // If the client is not in a transaction, reply with an error and return
    if (!(c->flags & CLIENT_MULTI)) {
        addReplyError(c,"EXEC without MULTI");
        return;
    }

    /* Check if we need to abort the EXEC because:
     * 1) Some WATCHed key was touched.
     * 2) There was a previous error while queueing commands.
     * A failed EXEC in the first case returns a multi bulk nil object
     * (technically it is not an error but a special behavior), while
     * in the second an EXECABORT error is returned. */
    // Check whether EXEC must be aborted because:
    /*
        1. A WATCHed key was modified.
        2. An error occurred while queueing commands.
    */
    // The first case returns a null multi bulk reply, the second an
    // EXECABORT error.
    if (c->flags & (CLIENT_DIRTY_CAS|CLIENT_DIRTY_EXEC)) {
        // Reply with the appropriate error
        addReply(c, c->flags & CLIENT_DIRTY_EXEC ? shared.execaborterr :
                                                  shared.nullmultibulk);
        // Discard the transaction
        discardTransaction(c);
        // Jump to the monitor-handling code
        goto handle_monitor;
    }

    /* Exec all the queued commands */
    // Execute the commands in the queue.
    // They are now safe to run, so stop watching all of the client's keys first.
    unwatchAllKeys(c); /* Unwatch ASAP otherwise we'll waste CPU cycles */
    // Back up the EXEC command (argv/argc/cmd)
    orig_argv = c->argv;
    orig_argc = c->argc;
    orig_cmd = c->cmd;
    // Reply with the number of commands in the transaction
    addReplyMultiBulkLen(c,c->mstate.count);
    // Execute every queued transaction command
    for (j = 0; j < c->mstate.count; j++) {
        // Install the current transaction command on the client
        c->argc = c->mstate.commands[j].argc;
        c->argv = c->mstate.commands[j].argv;
        c->cmd = c->mstate.commands[j].cmd;

        /* Propagate a MULTI request once we encounter the first write op.
         * This way we'll deliver the MULTI/..../EXEC block as a whole and
         * both the AOF and the replication link will have the same consistency
         * and atomicity guarantees. */
        // When the first write command is reached, propagate the transaction state
        if (!must_propagate && !(c->cmd->flags & CMD_READONLY)) {
            // Send a MULTI command to all slaves and to the AOF file
            execCommandPropagateMulti(c);
            // Mark MULTI as propagated
            must_propagate = 1;
        }
        // Execute the command
        call(c,CMD_CALL_FULL);

        /* Commands may alter argc/argv, restore mstate. */
        // The command may have been modified; store it back into the transaction queue
        c->mstate.commands[j].argc = c->argc;
        c->mstate.commands[j].argv = c->argv;
        c->mstate.commands[j].cmd = c->cmd;
    }
    // Restore the original command and arguments
    c->argv = orig_argv;
    c->argc = orig_argc;
    c->cmd = orig_cmd;
    // Clear the transaction state
    discardTransaction(c);
    /* Make sure the EXEC command will be propagated as well if MULTI
     * was already propagated. */
    // If MULTI was propagated, a write command was executed, so bump the dirty counter
    if (must_propagate) server.dirty++;

handle_monitor:
    /* Send EXEC to clients waiting data from MONITOR. We do it here
     * since the natural order of commands execution is actually:
     * MULTI, EXEC, ... commands inside transaction ...
     * Instead EXEC is flagged as CMD_SKIP_MONITOR in the command
     * table, and we do it here with correct ordering. */
    // If the server has monitors attached and is not loading a file
    if (listLength(server.monitors) && !server.loading)
        // Feed the command arguments to the monitors
        replicationFeedMonitors(c,server.monitors,c->db->id,c->argv,c->argc);
}
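For completeness, a hedged sketch of the MULTI counterpart that puts the client into the transaction state checked at the top of execCommand(); treat it as an illustrative reconstruction rather than a verbatim excerpt.

/* Hedged sketch: MULTI only flags the client; subsequent commands are queued
 * until EXEC or DISCARD is received. */
void multiCommand(client *c) {
    if (c->flags & CLIENT_MULTI) {
        addReplyError(c, "MULTI calls can not be nested");
        return;
    }
    c->flags |= CLIENT_MULTI;
    addReply(c, shared.ok);
}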
Exemplo n.º 20
0
/*
 * Process background jobs.
 */
void *bioProcessBackgroundJobs(void *arg) {
    struct bio_job *job;
    unsigned long type = (unsigned long) arg;
    sigset_t sigset;

    /* Make the thread killable at any time, so that bioKillThreads()
     * can work reliably. */
    pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
    pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);

    pthread_mutex_lock(&bio_mutex[type]);
    /* Block SIGALRM so we are sure that only the main thread will
     * receive the watchdog signal. */
    sigemptyset(&sigset);
    sigaddset(&sigset, SIGALRM);
    if (pthread_sigmask(SIG_BLOCK, &sigset, NULL))
        redisLog(REDIS_WARNING,
            "Warning: can't mask SIGALRM in bio.c thread: %s", strerror(errno));

    while(1) {
        listNode *ln;

        /* The loop always starts with the lock held. */
        if (listLength(bio_jobs[type]) == 0) {
            pthread_cond_wait(&bio_condvar[type],&bio_mutex[type]);
            continue;
        }

        /* Pop the job from the queue.
         *
         * Take (but do not yet remove) the first job in the queue.
         */
        ln = listFirst(bio_jobs[type]);
        job = ln->value;

        /* It is now possible to unlock the background system as we now have
         * a stand alone job structure to process.*/
        pthread_mutex_unlock(&bio_mutex[type]);

        /* Process the job accordingly to its type. */
        // Execute the job
        if (type == REDIS_BIO_CLOSE_FILE) {
            close((long)job->arg1);

        } else if (type == REDIS_BIO_AOF_FSYNC) {
            aof_fsync((long)job->arg1);

        } else {
            redisPanic("Wrong job type in bioProcessBackgroundJobs().");
        }

        zfree(job);

        /* Lock again before reiterating the loop, if there are no longer
         * jobs to process we'll block again in pthread_cond_wait(). */
        pthread_mutex_lock(&bio_mutex[type]);
        // Remove the completed job from the queue and decrement the pending counter
        listDelNode(bio_jobs[type],ln);
        bio_pending[type]--;
    }
}
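The producer side of this queue is worth seeing next to the consumer loop above; below is a hedged sketch of how a job is typically enqueued and the worker woken up (the array names follow the code above, while the exact signature and job fields are assumptions).

/* Hedged sketch: enqueue a background job of the given type and signal the
 * worker thread waiting on the condition variable. */
void bioCreateBackgroundJob(int type, void *arg1, void *arg2, void *arg3) {
    struct bio_job *job = zmalloc(sizeof(*job)); /* job layout assumed */

    job->arg1 = arg1;
    job->arg2 = arg2;
    job->arg3 = arg3;
    pthread_mutex_lock(&bio_mutex[type]);
    listAddNodeTail(bio_jobs[type], job);
    bio_pending[type]++;
    pthread_cond_signal(&bio_condvar[type]);
    pthread_mutex_unlock(&bio_mutex[type]);
}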
Exemplo n.º 21
0
        /** @brief Return the length of the arglist.
         */
	size_t size() const {
	    return listLength(list());
	}
Exemplo n.º 22
0
int SimulationResults_filterSimulationResults(const char *inFile, const char *outFile, void *vars, int numberOfIntervals)
{
  const char *msg[5] = {"","","","",""};
  void *tmp;
  if (UNKNOWN_PLOT == SimulationResultsImpl__openFile(inFile, &simresglob)) {
    return 0;
  }
  vars = mmc_mk_cons(mmc_mk_scon("time"),vars);
  switch (simresglob.curFormat) {
  case MATLAB4: {
    int numToFilter = listLength(vars);
    int i, j;
    int numUnique = 0;
    int numUniqueParam = 1;
    int longestName = 0;
    int longestDesc = 0;
    ModelicaMatVariable_t **mat_var = omc_alloc_interface.malloc(numToFilter*sizeof(ModelicaMatVariable_t*));
    int *indexes = (int*) omc_alloc_interface.malloc(simresglob.matReader.nvar*sizeof(int)); /* Need it to be zeros; note that the actual number of indexes is smaller */
    int *parameter_indexes = (int*) omc_alloc_interface.malloc(simresglob.matReader.nparam*sizeof(int)); /* Need it to be zeros; note that the actual number of indexes is smaller */
    int *indexesToOutput = NULL;
    int *parameter_indexesToOutput = NULL;
    FILE *fout = NULL;
    char *tmp;
    double start_stop[2] = {0};
    double start = omc_matlab4_startTime(&simresglob.matReader);
    double stop = omc_matlab4_stopTime(&simresglob.matReader);
    parameter_indexes[0] = 1; /* time */
    omc_matlab4_read_all_vals(&simresglob.matReader);
    if (endsWith(outFile,".csv")) {
      double **vals = omc_alloc_interface.malloc(sizeof(double*)*numToFilter);
      FILE *fout = NULL;
      for (i=0; i<numToFilter; i++) {
        const char *var = MMC_STRINGDATA(MMC_CAR(vars));
        vars = MMC_CDR(vars);
        mat_var[i] = omc_matlab4_find_var(&simresglob.matReader, var);
        if (mat_var[i] == NULL) {
          msg[0] = SystemImpl__basename(inFile);
          msg[1] = var;
          c_add_message(NULL,-1, ErrorType_scripting, ErrorLevel_error, gettext("Could not read variable %s in file %s."), msg, 2);
          return 0;
        }
        if (mat_var[i]->isParam) {
          msg[0] = var;
          c_add_message(NULL,-1, ErrorType_scripting, ErrorLevel_error, gettext("Could not filter parameter %s since the output format is CSV (only variables are allowed)."), msg, 1);
          return 0;
        } else {
          vals[i] = omc_matlab4_read_vals(&simresglob.matReader, mat_var[i]->index);
        }
      }
      fout = fopen(outFile, "w");
      fprintf(fout, "time");
      for (i=1; i<numToFilter; i++) {
        fprintf(fout, ",\"%s\"", mat_var[i]->name);
      }
      fprintf(fout, ",nrows=%d\n", simresglob.matReader.nrows);
      for (i=0; i<simresglob.matReader.nrows; i++) {
        fprintf(fout, "%.15g", vals[0][i]);
        for (j=1; j<numToFilter; j++) {
          fprintf(fout, ",%.15g", vals[j][i]);
        }
        fprintf(fout, "\n");
      }
      fclose(fout);
      return 1;
    } /* Not CSV */

    for (i=0; i<numToFilter; i++) {
      const char *var = MMC_STRINGDATA(MMC_CAR(vars));
      vars = MMC_CDR(vars);
      mat_var[i] = omc_matlab4_find_var(&simresglob.matReader,var);
      if (mat_var[i] == NULL) {
        msg[0] = SystemImpl__basename(inFile);
        msg[1] = var;
        c_add_message(NULL,-1, ErrorType_scripting, ErrorLevel_error, gettext("Could not read variable %s in file %s."), msg, 2);
        return 0;
      }
      if (mat_var[i]->isParam) {
        /* Store the old index in the array */
        if (0==parameter_indexes[abs(mat_var[i]->index)-1]++) {
          numUniqueParam++;
        }
      } else {
        /* Store the old index in the array */
        if (0==indexes[abs(mat_var[i]->index)-1]++) {
          numUnique++;
        }
      }
      longestName = intMax(longestName, strlen(mat_var[i]->name));
      longestDesc = intMax(longestDesc, strlen(mat_var[i]->descr));
    }
    /* Create the list of variable indexes to output */
    indexesToOutput = omc_alloc_interface.malloc_atomic(numUnique * sizeof(int));
    parameter_indexesToOutput = omc_alloc_interface.malloc_atomic(numUniqueParam * sizeof(int));
    j=0;
    for (i=0; i<simresglob.matReader.nvar; i++) {
      if (indexes[i]) {
        indexesToOutput[j++] = i+1;
      }
      /* indexes becomes the lookup table from old index to new index */
      indexes[i] = j;
    }
    j=0;
    for (i=0; i<simresglob.matReader.nparam; i++) {
      if (parameter_indexes[i]) {
        parameter_indexesToOutput[j++] = i+1;
      }
      /* indexes becomes the lookup table from old index to new index */
      parameter_indexes[i] = j;
    }
    fout = fopen(outFile, "wb");
    if (fout == NULL) {
      return failedToWriteToFile(outFile);
    }
    /* Matrix list: "Aclass" "name" "description" "dataInfo" "data_1" "data_2" */
    if (writeMatVer4AclassNormal(fout)) {
      return failedToWriteToFile(outFile);
    }
    if (writeMatVer4MatrixHeader(fout, "name", numToFilter, longestName, sizeof(int8_t))) {
      return failedToWriteToFile(outFile);
    }

    tmp = omc_alloc_interface.malloc(numToFilter*longestName);
    for (i=0; i<numToFilter; i++) {
      int len = strlen(mat_var[i]->name);
      for (j=0; j<len; j++) {
        tmp[numToFilter*j+i] = mat_var[i]->name[j];
      }
    }
    if (1 != fwrite(tmp, numToFilter*longestName, 1, fout)) {
      return failedToWriteToFile(outFile);
    }
    GC_free(tmp);

    if (writeMatVer4MatrixHeader(fout, "description", numToFilter, longestDesc, sizeof(int8_t))) {
      return failedToWriteToFile(outFile);
    }

    tmp = omc_alloc_interface.malloc(numToFilter*longestDesc);
    for (i=0; i<numToFilter; i++) {
      int len = strlen(mat_var[i]->descr);
      for (j=0; j<len; j++) {
        tmp[numToFilter*j+i] = mat_var[i]->descr[j];
      }
    }
    if (1 != fwrite(tmp, numToFilter*longestDesc, 1, fout)) {
      return failedToWriteToFile(outFile);
    }
    GC_free(tmp);

    if (writeMatVer4MatrixHeader(fout, "dataInfo", numToFilter, 4, sizeof(int32_t))) {
      return failedToWriteToFile(outFile);
    }
    for (i=0; i<numToFilter; i++) {
      int32_t x = mat_var[i]->isParam ? 1 : 2; /* data_1 or data_2 */
      if (1 != fwrite(&x, sizeof(int32_t), 1, fout)) {
        return failedToWriteToFile(outFile);
      }
    }
    for (i=0; i<numToFilter; i++) {
      int32_t x = (mat_var[i]->index < 0 ? -1 : 1) * (mat_var[i]->isParam ? parameter_indexes[abs(mat_var[i]->index)-1] : indexes[abs(mat_var[i]->index)-1]);
      if (1 != fwrite(&x, sizeof(int32_t), 1, fout)) {
        return failedToWriteToFile(outFile);
      }
    }
    for (i=0; i<numToFilter; i++) {
      int32_t x = 0; /* linear interpolation */
      if (1 != fwrite(&x, sizeof(int32_t), 1, fout)) {
        return failedToWriteToFile(outFile);
      }
    }
    for (i=0; i<numToFilter; i++) {
      int32_t x = -1; /* not defined outside the time interval */
      if (1 != fwrite(&x, sizeof(int32_t), 1, fout)) {
        return failedToWriteToFile(outFile);
      }
    }

    if (writeMatVer4MatrixHeader(fout, "data_1", 2, numUniqueParam, sizeof(double))) {
      return failedToWriteToFile(outFile);
    }
    start = omc_matlab4_startTime(&simresglob.matReader);
    stop = omc_matlab4_stopTime(&simresglob.matReader);
    start_stop[0]=start;
    start_stop[1]=stop;

    if (1 != fwrite(start_stop, sizeof(double)*2, 1, fout)) {
      return failedToWriteToFile(outFile);
    }

    for (i=1; i<numUniqueParam; i++) {
      int paramIndex = parameter_indexesToOutput[i];
      double d[2] = {simresglob.matReader.params[abs(paramIndex)-1],0};
      d[1] = d[0];
      if (1!=fwrite(d, sizeof(double)*2, 1, fout)) {
        return failedToWriteToFile(outFile);
      }
    }

    if (numberOfIntervals) {
      double *timevals = omc_matlab4_read_vals(&simresglob.matReader, 1);
      int last_found=0;
      int nevents=0, neventpoints=0;
      for (i=1; i<numberOfIntervals; i++) {
        double t = start + (stop-start)*((double)i)/numberOfIntervals;
        while (timevals[j]<=t) {
          if (timevals[j]==timevals[j+1]) {
            while (timevals[j]==timevals[j+1]) {
              j++;
              neventpoints++;
            }
            nevents++;
          }
          j++;
        }
      }
      msg[4] = inFile;
      GC_asprintf((char**)msg+3, "%d", simresglob.matReader.nrows);
      GC_asprintf((char**)msg+2, "%d", numberOfIntervals);
      GC_asprintf((char**)msg+1, "%d", nevents);
      GC_asprintf((char**)msg+0, "%d", neventpoints);
      c_add_message(NULL,-1, ErrorType_scripting, ErrorLevel_notification, gettext("Resampling %s from %s points to %s points, removing %s events stored in %s points.\n"), msg, 5);
    }

    if (writeMatVer4MatrixHeader(fout, "data_2", numberOfIntervals ? numberOfIntervals+1 : simresglob.matReader.nrows, numUnique, sizeof(double))) {
      return failedToWriteToFile(outFile);
    }
    for (i=0; i<numUnique; i++) {
      double *vals = NULL;
      int nrows;
      if (numberOfIntervals) {
        omc_matlab4_read_all_vals(&simresglob.matReader);
        nrows = numberOfIntervals+1;
        vals = omc_alloc_interface.malloc_atomic(sizeof(double)*nrows);
        for (j=0; j<=numberOfIntervals; j++) {
          double t = j==numberOfIntervals ? stop : start + (stop-start)*((double)j)/numberOfIntervals;
          ModelicaMatVariable_t var = {0};
          var.name="";
          var.descr="";
          var.isParam=0;
          var.index=indexesToOutput[i];
          if (omc_matlab4_val(vals+j, &simresglob.matReader, &var, t)) {
            msg[2] = inFile;
            GC_asprintf((char**)msg+1, "%d", indexesToOutput[i]);
            GC_asprintf((char**)msg+0, "%.15g", t);
            c_add_message(NULL,-1, ErrorType_scripting, ErrorLevel_error, gettext("Resampling %s failed to get variable %s at time %s.\n"), msg, 3);
            return 0;
          }
        }
      } else {
        vals = omc_matlab4_read_vals(&simresglob.matReader, indexesToOutput[i]);
        nrows = simresglob.matReader.nrows;
      }
      if (1!=fwrite(vals, sizeof(double)*nrows, 1, fout)) {
        return failedToWriteToFile(outFile);
      }
      if (numberOfIntervals) {
        GC_free(vals);
      }
    }
    fclose(fout);
    return 1;
  }
  default:
    msg[0] = PlotFormatStr[simresglob.curFormat];
    c_add_message(NULL,-1, ErrorType_scripting, ErrorLevel_error, gettext("filterSimulationResults not implemented for plot format: %s\n"), msg, 1);
    return 0;
  }
}
Exemplo n.º 23
0
/* Unsubscribe a client from a channel. Returns 1 if the operation succeeded, or
 * 0 if the client was not subscribed to the specified channel.
 *
 * Client c unsubscribes from channel 'channel'.
 *
 * Returns 1 if unsubscribing succeeded, or 0 if it failed because the client
 * was not subscribed to the channel.
 */
int pubsubUnsubscribeChannel(redisClient *c, robj *channel, int notify) {
    dictEntry *de;
    list *clients;
    listNode *ln;
    int retval = 0;

    /* Remove the channel from the client -> channels hash table */
    // Remove the channel from the client->channels dictionary
    incrRefCount(channel); /* channel may be just a pointer to the same object
                            we have in the hash tables. Protect it... */
    // Diagram:
    // before:
    // {
    //  'channel-x': NULL,
    //  'channel-y': NULL,
    //  'channel-z': NULL,
    // }
    // after unsubscribing channel-y:
    // {
    //  'channel-x': NULL,
    //  'channel-z': NULL,
    // }
    if (dictDelete(c->pubsub_channels,channel) == DICT_OK) {

        // The channel was removed, so the client was subscribed to it; continue below

        retval = 1;
        /* Remove the client from the channel -> clients list hash table */
        // Remove the client from the channel->clients list
        // Diagram:
        // before:
        // {
        //  'channel-x' : [c1, c2, c3],
        // }
        // after c2 unsubscribes from channel-x:
        // {
        //  'channel-x' : [c1, c3]
        // }
        de = dictFind(server.pubsub_channels,channel);
        redisAssertWithInfo(c,NULL,de != NULL);
        clients = dictGetVal(de);
        ln = listSearchKey(clients,c);
        redisAssertWithInfo(c,NULL,ln != NULL);
        listDelNode(clients,ln);

        // If the clients list becomes empty after removing the client,
        // delete the channel key entirely
        // Diagram:
        // before
        // {
        //  'channel-x' : [c1]
        // }
        // after c1 unsubscribes from channel-x
        // then also delete 'channel-x' key in dict
        // {
        //  // nothing here
        // }
        if (listLength(clients) == 0) {
            /* Free the list and the associated hash entry if this was the
             * last client, so that creating (and abandoning) millions of
             * channels via Redis PUBSUB does not leak memory. */
            dictDelete(server.pubsub_channels,channel);
        }
    }

    /* Notify the client */
    // Notify the client
    if (notify) {
        addReply(c,shared.mbulkhdr[3]);
        // "ubsubscribe" 字符串
        addReply(c,shared.unsubscribebulk);
        // The channel that was unsubscribed
        addReplyBulk(c,channel);
        // The total number of channels and patterns the client is still subscribed to
        addReplyLongLong(c,dictSize(c->pubsub_channels)+
                       listLength(c->pubsub_patterns));

    }

    decrRefCount(channel); /* it is finally safe to release it */

    return retval;
}
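A hedged sketch of the UNSUBSCRIBE command handler that drives this helper, either for the channels passed as arguments or for all subscribed channels; reconstructed for illustration, so treat the details (e.g. pubsubUnsubscribeAllChannels) as assumptions.

/* Hedged sketch: UNSUBSCRIBE with no arguments drops every channel
 * subscription, otherwise only the listed channels. */
void unsubscribeCommand(redisClient *c) {
    if (c->argc == 1) {
        pubsubUnsubscribeAllChannels(c, 1);
    } else {
        int j;

        for (j = 1; j < c->argc; j++)
            pubsubUnsubscribeChannel(c, c->argv[j], 1);
    }
}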
Exemplo n.º 24
0
void syncCommand(redisClient *c) {
	// Handle the Redis SYNC command
    /* ignore SYNC if already slave or in monitor mode */
    if (c->flags & REDIS_SLAVE) return;

    /* Refuse SYNC requests if we are a slave but the link with our master
     * is not ok... */
    if (server.masterhost && server.repl_state != REDIS_REPL_CONNECTED) {
        addReplyError(c,"Can't SYNC while not connected with my master");
        return;
    }

    /* SYNC can't be issued when the server has pending data to send to
     * the client about already issued commands. We need a fresh reply
     * buffer registering the differences between the BGSAVE and the current
     * dataset, so that we can copy to other slaves if needed. */
    if (listLength(c->reply) != 0) { // If the server still has unsent data for this client, do not handle SYNC yet
        addReplyError(c,"SYNC is invalid with pending input");
        return;
    }

    redisLog(REDIS_NOTICE,"Slave ask for synchronization");
    /* Here we need to check if there is a background saving operation
     * in progress, or if it is required to start one */
    if (server.rdb_child_pid != -1) {
        /* Ok a background save is in progress. Let's check if it is a good
         * one for replication, i.e. if there is another slave that is
         * registering differences since the server forked to save */
        redisClient *slave;
        listNode *ln;
        listIter li;

        listRewind(server.slaves,&li);
		// An RDB save is already in progress, so check whether some slave is
		// already waiting for that save to finish. If so, we can simply copy its
		// accumulated, not-yet-sent write backlog and attach this new slave behind
		// it; once the whole RDB file has been sent, that accumulated backlog is
		// sent to the new slave as well. In other words, the new slave piggybacks
		// on the ongoing save.
        while((ln = listNext(&li))) {
            slave = ln->value;
            if (slave->replstate == REDIS_REPL_WAIT_BGSAVE_END) break;
        }
        if (ln) { // Another slave already triggered this BGSAVE, so we can catch up
        // by copying that peer's output buffer. Once a client registers as a slave,
        // the master appends new write commands to this buffer in replicationFeedSlaves();
        // it just cannot be sent yet because no write event has been registered.
            /* Perfect, the server is already registering differences for
             * another slave. Set the right state, and copy the buffer. */
            copyClientOutputBuffer(c,slave); // Copy the data queued for the earlier slave, so it will also be sent to this new slave.
            c->replstate = REDIS_REPL_WAIT_BGSAVE_END;
            redisLog(REDIS_NOTICE,"Waiting for end of BGSAVE for SYNC");
        } else {
            /* No way, we need to wait for the next BGSAVE in order to
             * register differences */
        // No other client issued SYNC during the current background save, so we simply wait for the next BGSAVE.
            c->replstate = REDIS_REPL_WAIT_BGSAVE_START;
            redisLog(REDIS_NOTICE,"Waiting for next BGSAVE for SYNC");
        }
    } else { // No background save in progress, so start a BGSAVE now.
        /* Ok we don't have a BGSAVE in progress, let's start one */
        redisLog(REDIS_NOTICE,"Starting BGSAVE for SYNC");
        if (rdbSaveBackground(server.rdb_filename) != REDIS_OK) {
            redisLog(REDIS_NOTICE,"Replication failed, can't BGSAVE");
            addReplyError(c,"Unable to perform background save");
            return;
        }
        c->replstate = REDIS_REPL_WAIT_BGSAVE_END;
    }
// SYNC only tries to start the RDB snapshot; the actual data transfer happens in updateSlavesWaitingBgsave() once the snapshot completes.
    if (server.repl_disable_tcp_nodelay)
        anetDisableTcpNoDelay(NULL, c->fd); /* Non critical if it fails. */
    c->repldbfd = -1;
    c->flags |= REDIS_SLAVE; // Success: mark the client as a slave
    c->slaveseldb = 0;
    listAddNodeTail(server.slaves,c);
    return;
}
Exemplo n.º 25
0
/* PUBSUB command for Pub/Sub introspection. */
void pubsubCommand(redisClient *c) {

    // PUBSUB CHANNELS [pattern] subcommand
    if (!strcasecmp(c->argv[1]->ptr,"channels") &&
        (c->argc == 2 || c->argc ==3))
    {
        /* PUBSUB CHANNELS [<pattern>] */
        // Check whether the request supplied a pattern argument;
        // if not, use NULL
        sds pat = (c->argc == 2) ? NULL : c->argv[2]->ptr;

        // Create an iterator over the pubsub_channels dictionary.
        // Its keys are channels and its values are lists holding all the
        // clients subscribed to the corresponding channel.
        dictIterator *di = dictGetIterator(server.pubsub_channels);
        dictEntry *de;
        long mblen = 0;
        void *replylen;

        replylen = addDeferredMultiBulkLength(c);
        // Walk every channel entry in the dictionary
        while((de = dictNext(di)) != NULL) {

            // Take the channel, which is the key of this dictionary entry
            robj *cobj = dictGetKey(de);
            sds channel = cobj->ptr;

            // As an aside: the iterator walks dictionary entries, so the
            // channel (the key) is fetched from each entry with dictGetKey()

            // If no pattern was given, list every channel found;
            // otherwise list only the channels matching the pattern
            if (!pat || stringmatchlen(pat, sdslen(pat),
                                       channel, sdslen(channel),0))
            {
                // Emit the channel to the client
                addReplyBulk(c,cobj);
                mblen++;
            }
        }
        // Release the dictionary iterator
        dictReleaseIterator(di);
        setDeferredMultiBulkLength(c,replylen,mblen);

    // PUBSUB NUMSUB [channel-1 channel-2 ... channel-N] subcommand
    } else if (!strcasecmp(c->argv[1]->ptr,"numsub") && c->argc >= 2) {
        /* PUBSUB NUMSUB [Channel_1 ... Channel_N] */
        int j;

        addReplyMultiBulkLen(c,(c->argc-2)*2);
        for (j = 2; j < c->argc; j++) {

            // c->argv[j] is the N-th channel name given by the client.
            // The keys of pubsub_channels are channel names and the values
            // are lists of all the subscribers of that channel, so
            // dictFetchValue() returns the clients subscribed to c->argv[j].
            list *l = dictFetchValue(server.pubsub_channels,c->argv[j]);

            addReplyBulk(c,c->argv[j]);
            // Reply with the length of the list, which is the number of
            // subscribers of the channel. For example, if a channel has
            // three subscribers the list length is 3, and 3 is what the
            // client receives.
            addReplyBulkLongLong(c,l ? listLength(l) : 0);
        }

    // PUBSUB NUMPAT subcommand
    } else if (!strcasecmp(c->argv[1]->ptr,"numpat") && c->argc == 2) {
        /* PUBSUB NUMPAT */

        // The pubsub_patterns list holds every pattern subscribed to on the
        // server, so its length is the number of subscribed patterns.
        addReplyLongLong(c,listLength(server.pubsub_patterns));

    // Error handling
    } else {
        addReplyErrorFormat(c,
            "Unknown PUBSUB subcommand or wrong number of arguments for '%s'",
            (char*)c->argv[1]->ptr);
    }
}
Exemplo n.º 26
0
int stackLength(Stack * stack)
{
    return(listLength(stack -> top));
}
Exemplo n.º 27
0
/* This function should be called by Redis every time a single command,
 * a MULTI/EXEC block, or a Lua script, terminated its execution after
 * being called by a client.
 *
 * All the keys with at least one client blocked that received at least
 * one new element via some PUSH operation are accumulated into
 * the server.ready_keys list. This function will run the list and will
 * serve clients accordingly. Note that the function will iterate again and
 * again, since as a result of serving BRPOPLPUSH we can have new blocking
 * clients to serve because of the PUSH side of BRPOPLPUSH. */
void handleClientsBlockedOnLists(void) {
    while(listLength(server.ready_keys) != 0) {
        list *l;

        /* Point server.ready_keys to a fresh list and save the current one
         * locally. This way as we run the old list we are free to call
         * signalListAsReady() that may push new elements in server.ready_keys
         * when handling clients blocked into BRPOPLPUSH. */
        l = server.ready_keys;
        server.ready_keys = listCreate();

        while(listLength(l) != 0) {
            robj *o;
            listNode *ln = listFirst(l);
            readyList *rl = ln->value;

            /* First of all remove this key from db->ready_keys so that
             * we can safely call signalListAsReady() against this key. */
            dictDelete(rl->db->ready_keys,rl->key);

            /* If the key exists and it's a list, serve blocked clients
             * with data. */
            o = lookupKeyWrite(rl->db,rl->key);
            if (o != NULL && o->type == REDIS_LIST) {
                dictEntry *de;

                /* We serve clients in the same order they blocked for
                 * this key, from the first blocked to the last. */
                de = dictFind(rl->db->blocking_keys,rl->key);
                if (de) {
                    list *clients = dictGetVal(de);
                    int numclients = listLength(clients);

                    while(numclients--) {
                        listNode *clientnode = listFirst(clients);
                        redisClient *receiver = clientnode->value;
                        robj *dstkey = receiver->bpop.target;
                        int where = (receiver->lastcmd &&
                                     receiver->lastcmd->proc == blpopCommand) ?
                                    REDIS_HEAD : REDIS_TAIL;
                        robj *value = listTypePop(o,where);

                        if (value) {
                            /* Protect receiver->bpop.target, that will be
                             * freed by the next unblockClientWaitingData()
                             * call. */
                            if (dstkey) incrRefCount(dstkey);
                            unblockClientWaitingData(receiver);

                            if (serveClientBlockedOnList(receiver,
                                rl->key,dstkey,rl->db,value,
                                where) == REDIS_ERR)
                            {
                                /* If we failed serving the client we need
                                 * to also undo the POP operation. */
                                    listTypePush(o,value,where);
                            }

                            if (dstkey) decrRefCount(dstkey);
                            decrRefCount(value);
                        } else {
                            break;
                        }
                    }
                }
                
                if (listTypeLength(o) == 0) dbDelete(rl->db,rl->key);
                /* We don't call signalModifiedKey() as it was already called
                 * when an element was pushed on the list. */
            }

            /* Free this item. */
            decrRefCount(rl->key);
            zfree(rl);
            listDelNode(l,ln);
        }
        listRelease(l); /* We have the new list on place at this point. */
    }
}
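The server.ready_keys entries consumed above are produced on the push side; below is a hedged sketch of that producer, reconstructed for illustration (readyList and the db->ready_keys / db->blocking_keys fields follow the code above, the rest is an assumption).

/* Hedged sketch: called after a PUSH. Queue the key into server.ready_keys
 * exactly once if at least one client is blocked on it. */
void signalListAsReady(redisDb *db, robj *key) {
    readyList *rl;

    /* No clients blocked on this key? Nothing to do. */
    if (dictFind(db->blocking_keys, key) == NULL) return;

    /* Key already signaled? Do not queue it twice. */
    if (dictFind(db->ready_keys, key) != NULL) return;

    /* Queue the key for handleClientsBlockedOnLists(). */
    rl = zmalloc(sizeof(*rl));
    rl->key = key;
    rl->db = db;
    incrRefCount(key);
    listAddNodeTail(server.ready_keys, rl);

    /* Track the key in db->ready_keys too, keeping the duplicate check O(1). */
    incrRefCount(key);
    dictAdd(db->ready_keys, key, NULL);
}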
Exemplo n.º 28
0
int _installWriteEvent(redisClient *c) {
    if (c->fd <= 0) return REDIS_ERR;
    if (c->bufpos == 0 && listLength(c->reply) == 0 &&
        (c->replstate == REDIS_REPL_NONE ||
         c->replstate == REDIS_REPL_ONLINE) &&
        aeCreateFileEvent(server.el, c->fd, AE_WRITABLE,
        sendReplyToClient, c) == AE_ERR) return REDIS_ERR;
    return REDIS_OK;
}

/* Create a duplicate of the last object in the reply list when
 * it is not exclusively owned by the reply list. */
robj *dupLastObjectIfNeeded(list *reply) {
    robj *new, *cur;
    listNode *ln;
    redisAssert(listLength(reply) > 0);
    ln = listLast(reply);
    cur = listNodeValue(ln);
    if (cur->refcount > 1) {
        new = dupStringObject(cur);
        decrRefCount(cur);
        listNodeValue(ln) = new;
    }
    return listNodeValue(ln);
}

/* -----------------------------------------------------------------------------
 * Low level functions to add more data to output buffers.
 * -------------------------------------------------------------------------- */

int _addReplyToBuffer(redisClient *c, char *s, size_t len) {
Exemplo n.º 29
0
/* Replay the append log file. On success REDIS_OK is returned. On non fatal
 * error (the append only file is zero-length) REDIS_ERR is returned. On
 * fatal error an error message is logged and the program exits. */
int loadAppendOnlyFile(char *filename) {
    struct redisClient *fakeClient;
    FILE *fp = fopen(filename,"r");
    struct redis_stat sb;
    int old_aof_state = server.aof_state;
    long loops = 0;

    if (fp && redis_fstat(fileno(fp),&sb) != -1 && sb.st_size == 0) {
        server.aof_current_size = 0;
        fclose(fp);
        return REDIS_ERR;
    }

    if (fp == NULL) {
        redisLog(REDIS_WARNING,"Fatal error: can't open the append log file for reading: %s",strerror(errno));
        exit(1);
    }

    /* Temporarily disable AOF, to prevent EXEC from feeding a MULTI
     * to the same file we're about to read. */
    server.aof_state = REDIS_AOF_OFF;

    fakeClient = createFakeClient();
    startLoading(fp);

    while(1) {
        int argc, j;
        unsigned long len;
        robj **argv;
        char buf[128];
        sds argsds;
        struct redisCommand *cmd;

        /* Serve the clients from time to time */
        if (!(loops++ % 1000)) {
            loadingProgress(ftello(fp));
            aeProcessEvents(server.el, AE_FILE_EVENTS|AE_DONT_WAIT);
        }

        if (fgets(buf,sizeof(buf),fp) == NULL) {
            if (feof(fp))
                break;
            else
                goto readerr;
        }
        if (buf[0] != '*') goto fmterr;
        argc = atoi(buf+1);
        if (argc < 1) goto fmterr;

        argv = zmalloc(sizeof(robj*)*argc);
        for (j = 0; j < argc; j++) {
            if (fgets(buf,sizeof(buf),fp) == NULL) goto readerr;
            if (buf[0] != '$') goto fmterr;
            len = strtol(buf+1,NULL,10);
            argsds = sdsnewlen(NULL,len);
            if (len && fread(argsds,len,1,fp) == 0) goto fmterr;
            argv[j] = createObject(REDIS_STRING,argsds);
            if (fread(buf,2,1,fp) == 0) goto fmterr; /* discard CRLF */
        }

        /* Command lookup */
        cmd = lookupCommand(argv[0]->ptr);
        if (!cmd) {
            redisLog(REDIS_WARNING,"Unknown command '%s' reading the append only file", argv[0]->ptr);
            exit(1);
        }
        /* Run the command in the context of a fake client */
        fakeClient->argc = argc;
        fakeClient->argv = argv;
        cmd->proc(fakeClient);

        /* The fake client should not have a reply */
        redisAssert(fakeClient->bufpos == 0 && listLength(fakeClient->reply) == 0);
        /* The fake client should never get blocked */
        redisAssert((fakeClient->flags & REDIS_BLOCKED) == 0);

        /* Clean up. Command code may have changed argv/argc so we use the
         * argv/argc of the client instead of the local variables. */
        for (j = 0; j < fakeClient->argc; j++)
            decrRefCount(fakeClient->argv[j]);
        zfree(fakeClient->argv);
    }

    /* This point can only be reached when EOF is reached without errors.
     * If the client is in the middle of a MULTI/EXEC, log error and quit. */
    if (fakeClient->flags & REDIS_MULTI) goto readerr;

    fclose(fp);
    freeFakeClient(fakeClient);
    server.aof_state = old_aof_state;
    stopLoading();
    aofUpdateCurrentSize();
    server.aof_rewrite_base_size = server.aof_current_size;
    return REDIS_OK;

readerr:
    if (feof(fp)) {
        redisLog(REDIS_WARNING,"Unexpected end of file reading the append only file");
    } else {
        redisLog(REDIS_WARNING,"Unrecoverable error reading the append only file: %s", strerror(errno));
    }
    exit(1);
fmterr:
    redisLog(REDIS_WARNING,"Bad file format reading the append only file: make a backup of your AOF file, then use ./redis-check-aof --fix <filename>");
    exit(1);
}
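For context on when this replay runs, here is a hedged sketch of the startup path that prefers the AOF over the RDB file when AOF is enabled; details are reconstructed from memory and simplified, so treat them as assumptions.

/* Hedged sketch: at startup, load the dataset from the AOF if enabled,
 * otherwise fall back to the RDB file. */
void loadDataFromDisk(void) {
    long long start = ustime();

    if (server.aof_state == REDIS_AOF_ON) {
        if (loadAppendOnlyFile(server.aof_filename) == REDIS_OK)
            redisLog(REDIS_NOTICE, "DB loaded from append only file: %.3f seconds",
                (float)(ustime()-start)/1000000);
    } else {
        if (rdbLoad(server.rdb_filename) == REDIS_OK) {
            redisLog(REDIS_NOTICE, "DB loaded from disk: %.3f seconds",
                (float)(ustime()-start)/1000000);
        } else if (errno != ENOENT) {
            redisLog(REDIS_WARNING, "Fatal error loading the DB: %s. Exiting.",
                strerror(errno));
            exit(1);
        }
    }
}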
Exemplo n.º 30
0
// Execute all the commands queued in the transaction
void execCommand(redisClient *c) {
    int j;
    robj **orig_argv;
    int orig_argc;
    struct redisCommand *orig_cmd;
    int must_propagate = 0; /* Need to propagate MULTI/EXEC to AOF / slaves? */

    // The MULTI flag must be set
    if (!(c->flags & REDIS_MULTI)) {
        addReplyError(c,"EXEC without MULTI");
        return;
    }

    // Cases in which the transaction is aborted:
    // 1. A WATCHed key was modified
    // 2. An error occurred while queueing commands
    /* Check if we need to abort the EXEC because:
     * 1) Some WATCHed key was touched.
     * 2) There was a previous error while queueing commands.
     * A failed EXEC in the first case returns a multi bulk nil object
     * (technically it is not an error but a special behavior), while
     * in the second an EXECABORT error is returned. */
    if (c->flags & (REDIS_DIRTY_CAS|REDIS_DIRTY_EXEC)) {
        addReply(c, c->flags & REDIS_DIRTY_EXEC ? shared.execaborterr :
                 shared.nullmultibulk);
        discardTransaction(c);
        goto handle_monitor;
    }

    // Execute all the queued commands
    /* Exec all the queued commands */
    unwatchAllKeys(c); /* Unwatch ASAP otherwise we'll waste CPU cycles */

    // Save the current command context; it is restored after all the queued commands have run.
    orig_argv = c->argv;
    orig_argc = c->argc;
    orig_cmd = c->cmd;

    addReplyMultiBulkLen(c,c->mstate.count);

    for (j = 0; j < c->mstate.count; j++) {
        // Make the queued command the client's current command
        c->argc = c->mstate.commands[j].argc;
        c->argv = c->mstate.commands[j].argv;
        c->cmd = c->mstate.commands[j].cmd;

        // On the first write command, the MULTI command must be written to the AOF file
        /* Propagate a MULTI request once we encounter the first write op.
         * This way we'll deliver the MULTI/..../EXEC block as a whole and
         * both the AOF and the replication link will have the same consistency
         * and atomicity guarantees. */
        if (!must_propagate && !(c->cmd->flags & REDIS_CMD_READONLY)) {
            execCommandPropagateMulti(c);
            must_propagate = 1;
        }

        // Execute it via call()
        call(c,REDIS_CALL_FULL);

        // (Annotator's note: these assignments look redundant)
        /* Commands may alter argc/argv, restore mstate. */
        c->mstate.commands[j].argc = c->argc;
        c->mstate.commands[j].argv = c->argv;
        c->mstate.commands[j].cmd = c->cmd;
    }

    // Restore the original command context
    c->argv = orig_argv;
    c->argc = orig_argc;
    c->cmd = orig_cmd;

    // The transaction is done; clean up its state, such as the command queue and the client flags
    discardTransaction(c);
    /* Make sure the EXEC command will be propagated as well if MULTI
     * was already propagated. */
    if (must_propagate) server.dirty++;

// Monitor-related handling; covered later
handle_monitor:
    /* Send EXEC to clients waiting data from MONITOR. We do it here
     * since the natural order of commands execution is actually:
     * MULTI, EXEC, ... commands inside transaction ...
     * Instead EXEC is flagged as REDIS_CMD_SKIP_MONITOR in the command
     * table, and we do it here with correct ordering. */
    if (listLength(server.monitors) && !server.loading)
        replicationFeedMonitors(c,server.monitors,c->db->id,c->argv,c->argc);
}
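To round out the transaction commands shown above, a hedged sketch of DISCARD, which throws the queued commands away instead of executing them; an illustrative reconstruction, not a verbatim excerpt.

/* Hedged sketch: DISCARD aborts the transaction and clears its state. */
void discardCommand(redisClient *c) {
    if (!(c->flags & REDIS_MULTI)) {
        addReplyError(c, "DISCARD without MULTI");
        return;
    }
    discardTransaction(c);
    addReply(c, shared.ok);
}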