Code Example #1
File: db.c Project: Xwuming/misc
/* Overwrite an existing key with a new value. Incrementing the reference
 * count of the new value is up to the caller.
 * This function does not modify the expire time of the existing key.
 *
 * The program is aborted if the key was not already present. */
void dbOverwrite(redisDb *db, robj *key, robj *val) {
    dictEntry *de = dictFind(db->dict,key->ptr);

    serverAssertWithInfo(NULL,key,de != NULL);
    dictReplace(db->dict, key->ptr, val);
}
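Since dbOverwrite() leaves reference counting to the caller, the typical calling pattern looks roughly like the following. This is a hedged sketch, not code from the project above; it assumes the usual Redis convention that the dict keeps the reference returned by createStringObject().

/* Hypothetical caller sketch: the dict keeps the reference to 'val',
 * so the caller does not incrRefCount() it on the dict's behalf. */
robj *val = createStringObject("new-value", 9);
if (dictFind(db->dict, key->ptr) != NULL)
    dbOverwrite(db, key, val);   /* key exists: replace value, keep its expire */
else
    dbAdd(db, key, val);         /* key missing: add it instead */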
Code Example #2
File: dict.c Project: 343829084/redis-storage
/* dictReplaceRaw() is simply a version of dictAddRaw() that always
 * returns the hash entry of the specified key, even if the key already
 * exists and can't be added (in that case the entry of the already
 * existing key is returned.)
 *
 * See dictAddRaw() for more information. */
dictEntry *dictReplaceRaw(dict *d, void *key) {
    dictEntry *entry = dictFind(d,key);

    return entry ? entry : dictAddRaw(d,key);
}
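For context, a sketch of how such a get-or-create helper is typically used (modeled on how Redis sets key expires; it assumes the dictSetSignedIntegerVal() macro is available in this dict implementation, and 'when' is a hypothetical expire time):

/* Hypothetical sketch: fetch or create the entry for the key, then store
 * a signed integer value in it, whether or not the key already existed. */
dictEntry *de = dictReplaceRaw(db->expires, key->ptr);
dictSetSignedIntegerVal(de, when);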
Code Example #3
File: dict.c Project: sijianwudi/varlib
void *dictFetchValue(dict *d, const void *key) {
    dictEntry *he;

    he = dictFind(d,key);
    return he ? dictGetEntryVal(he) : NULL;
}
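One caveat worth keeping in mind: a NULL return from dictFetchValue() cannot distinguish a missing key from a key stored with a NULL value. A minimal sketch of a caller that cares about the difference:

/* Hypothetical caller sketch. */
void *v = dictFetchValue(d, key);
if (v == NULL && dictFind(d, key) == NULL) {
    /* the key is truly absent, not just mapped to NULL */
}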
Code Example #4
File: vm.c Project: ambakshi/redis
/* This function makes the client 'c' wait for the key 'key' to be loaded.
 * If there is not already a job loading the key, it is created.
 * The key is added to the io_keys list in the client structure, and also
 * in the hash table mapping swapped keys to waiting clients, that is,
 * server.io_waited_keys. */
int waitForSwappedKey(redisClient *c, robj *key) {
    struct dictEntry *de;
    robj *o;
    list *l;

    /* If the key does not exist or is already in RAM we don't need to
     * block the client at all. */
    de = dictFind(c->db->dict,key->ptr);
    if (de == NULL) return 0;
    o = dictGetEntryVal(de);
    if (o->storage == REDIS_VM_MEMORY) {
        return 0;
    } else if (o->storage == REDIS_VM_SWAPPING) {
        /* We were swapping the key, undo it! */
        vmCancelThreadedIOJob(o);
        return 0;
    }

    /* OK: the key is either swapped, or being loaded just now. */

    /* Add the key to the list of keys this client is waiting for.
     * This maps clients to keys they are waiting for. */
    listAddNodeTail(c->io_keys,key);
    incrRefCount(key);

    /* Add the client to the swapped keys => clients waiting map. */
    de = dictFind(c->db->io_keys,key);
    if (de == NULL) {
        int retval;

        /* For every key we take a list of clients blocked for it */
        l = listCreate();
        retval = dictAdd(c->db->io_keys,key,l);
        incrRefCount(key);
        redisAssert(retval == DICT_OK);
    } else {
        l = dictGetEntryVal(de);
    }
    listAddNodeTail(l,c);

    /* Are we already loading the key from disk? If not create a job */
    if (o->storage == REDIS_VM_SWAPPED) {
        iojob *j;
        vmpointer *vp = (vmpointer*)o;

        o->storage = REDIS_VM_LOADING;
        j = zmalloc(sizeof(*j));
        j->type = REDIS_IOJOB_LOAD;
        j->db = c->db;
        j->id = (robj*)vp;
        j->key = key;
        incrRefCount(key);
        j->page = vp->page;
        j->val = NULL;
        j->canceled = 0;
        j->thread = (pthread_t) -1;

        lockThreadedIO();
        queueIOJob(j);
        unlockThreadedIO();
    }
    return 1;
}
Code Example #5
File: db.c Project: xinmingyao/libredis.a
int removeXExpire(redisDb *db, robj *key) {
    if (dictFind(db->dict,key->ptr) == NULL) {
        return DICT_ERR;
    }
    return dictDelete(db->expires,key->ptr) == DICT_OK;
}
Code Example #6
File: t_list.c Project: 0nix/tictactoe-node
/* This function should be called by Redis every time a single command,
 * a MULTI/EXEC block, or a Lua script, terminated its execution after
 * being called by a client.
 *
 * All the keys with at least one client blocked that received at least
 * one new element via some PUSH operation are accumulated into
 * the server.ready_keys list. This function will run the list and will
 * serve clients accordingly. Note that the function will iterate again and
 * again: as a result of serving BRPOPLPUSH we can have new blocked clients
 * to serve because of the PUSH side of BRPOPLPUSH. */
void handleClientsBlockedOnLists(void) {
    while(listLength(server.ready_keys) != 0) {
        list *l;

        /* Point server.ready_keys to a fresh list and save the current one
         * locally. This way as we run the old list we are free to call
         * signalListAsReady() that may push new elements in server.ready_keys
         * when handling clients blocked into BRPOPLPUSH. */
        l = server.ready_keys;
        server.ready_keys = listCreate();

        while(listLength(l) != 0) {
            listNode *ln = listFirst(l);
            readyList *rl = ln->value;

            /* First of all remove this key from db->ready_keys so that
             * we can safely call signalListAsReady() against this key. */
            dictDelete(rl->db->ready_keys,rl->key);

            /* If the key exists and it's a list, serve blocked clients
             * with data. */
            robj *o = lookupKeyWrite(rl->db,rl->key);
            if (o != NULL && o->type == REDIS_LIST) {
                dictEntry *de;

                /* We serve clients in the same order they blocked for
                 * this key, from the first blocked to the last. */
                de = dictFind(rl->db->blocking_keys,rl->key);
                if (de) {
                    list *clients = dictGetVal(de);
                    int numclients = listLength(clients);

                    while(numclients--) {
                        listNode *clientnode = listFirst(clients);
                        redisClient *receiver = clientnode->value;
                        robj *dstkey = receiver->bpop.target;
                        int where = (receiver->lastcmd &&
                                     receiver->lastcmd->proc == blpopCommand) ?
                                    REDIS_HEAD : REDIS_TAIL;
                        robj *value = listTypePop(o,where);

                        if (value) {
                            /* Protect receiver->bpop.target, that will be
                             * freed by the next unblockClientWaitingData()
                             * call. */
                            if (dstkey) incrRefCount(dstkey);
                            unblockClientWaitingData(receiver);

                            if (serveClientBlockedOnList(receiver,
                                rl->key,dstkey,rl->db,value,
                                where) == REDIS_ERR)
                            {
                                /* If we failed serving the client we need
                                 * to also undo the POP operation. */
                                    listTypePush(o,value,where);
                            }

                            if (dstkey) decrRefCount(dstkey);
                            decrRefCount(value);
                        } else {
                            break;
                        }
                    }
                }
                
                if (listTypeLength(o) == 0) dbDelete(rl->db,rl->key);
                /* We don't call signalModifiedKey() as it was already called
                 * when an element was pushed on the list. */
            }

            /* Free this item. */
            decrRefCount(rl->key);
            zfree(rl);
            listDelNode(l,ln);
        }
        listRelease(l); /* We have the new list on place at this point. */
    }
}
Code Example #7
File: evict.c Project: kguidi10/sample_app
int freeMemoryIfNeeded(void) {
    size_t mem_reported, mem_used, mem_tofree, mem_freed;
    mstime_t latency, eviction_latency;
    long long delta;
    int slaves = listLength(server.slaves);

    /* When clients are paused the dataset should be static not just from the
     * POV of clients not being able to write, but also from the POV of
     * expires and evictions of keys not being performed. */
    if (clientsArePaused()) return C_OK;

    /* Check if we are over the memory usage limit. If we are not, no need
     * to subtract the slaves output buffers. We can just return ASAP. */
    mem_reported = zmalloc_used_memory();
    if (mem_reported <= server.maxmemory) return C_OK;

    /* Remove the size of slaves output buffers and AOF buffer from the
     * count of used memory. */
    mem_used = mem_reported;
    size_t overhead = freeMemoryGetNotCountedMemory();
    mem_used = (mem_used > overhead) ? mem_used-overhead : 0;

    /* Check if we are still over the memory limit. */
    if (mem_used <= server.maxmemory) return C_OK;

    /* Compute how much memory we need to free. */
    mem_tofree = mem_used - server.maxmemory;
    mem_freed = 0;

    if (server.maxmemory_policy == MAXMEMORY_NO_EVICTION)
        goto cant_free; /* We need to free memory, but policy forbids. */

    latencyStartMonitor(latency);
    while (mem_freed < mem_tofree) {
        int j, k, i, keys_freed = 0;
        static int next_db = 0;
        sds bestkey = NULL;
        int bestdbid;
        redisDb *db;
        dict *dict;
        dictEntry *de;

        if (server.maxmemory_policy & (MAXMEMORY_FLAG_LRU|MAXMEMORY_FLAG_LFU) ||
            server.maxmemory_policy == MAXMEMORY_VOLATILE_TTL)
        {
            struct evictionPoolEntry *pool = EvictionPoolLRU;

            while(bestkey == NULL) {
                unsigned long total_keys = 0, keys;

                /* We don't want to make local-db choices when expiring keys,
                 * so to start, populate the eviction pool sampling keys from
                 * every DB. */
                for (i = 0; i < server.dbnum; i++) {
                    db = server.db+i;
                    dict = (server.maxmemory_policy & MAXMEMORY_FLAG_ALLKEYS) ?
                            db->dict : db->expires;
                    if ((keys = dictSize(dict)) != 0) {
                        evictionPoolPopulate(i, dict, db->dict, pool);
                        total_keys += keys;
                    }
                }
                if (!total_keys) break; /* No keys to evict. */

                /* Go backward from best to worst element to evict. */
                for (k = EVPOOL_SIZE-1; k >= 0; k--) {
                    if (pool[k].key == NULL) continue;
                    bestdbid = pool[k].dbid;

                    if (server.maxmemory_policy & MAXMEMORY_FLAG_ALLKEYS) {
                        de = dictFind(server.db[pool[k].dbid].dict,
                            pool[k].key);
                    } else {
                        de = dictFind(server.db[pool[k].dbid].expires,
                            pool[k].key);
                    }

                    /* Remove the entry from the pool. */
                    if (pool[k].key != pool[k].cached)
                        sdsfree(pool[k].key);
                    pool[k].key = NULL;
                    pool[k].idle = 0;

                    /* If the key exists, it is our pick. Otherwise it is
                     * a ghost and we need to try the next element. */
                    if (de) {
                        bestkey = dictGetKey(de);
                        break;
                    } else {
                        /* Ghost... Iterate again. */
                    }
                }
            }
        }

        /* volatile-random and allkeys-random policy */
        else if (server.maxmemory_policy == MAXMEMORY_ALLKEYS_RANDOM ||
                 server.maxmemory_policy == MAXMEMORY_VOLATILE_RANDOM)
        {
            /* When evicting a random key, we try to evict a key for
             * each DB, so we use the static 'next_db' variable to
             * incrementally visit all DBs. */
            for (i = 0; i < server.dbnum; i++) {
                j = (++next_db) % server.dbnum;
                db = server.db+j;
                dict = (server.maxmemory_policy == MAXMEMORY_ALLKEYS_RANDOM) ?
                        db->dict : db->expires;
                if (dictSize(dict) != 0) {
                    de = dictGetRandomKey(dict);
                    bestkey = dictGetKey(de);
                    bestdbid = j;
                    break;
                }
            }
        }

        /* Finally remove the selected key. */
        if (bestkey) {
            db = server.db+bestdbid;
            robj *keyobj = createStringObject(bestkey,sdslen(bestkey));
            propagateExpire(db,keyobj,server.lazyfree_lazy_eviction);
            /* We compute the amount of memory freed by db*Delete() alone.
             * It is possible that actually the memory needed to propagate
             * the DEL in AOF and replication link is greater than the memory
             * we are freeing by removing the key, but we can't account for
             * that, otherwise we would never exit the loop.
             *
             * AOF and Output buffer memory will be freed eventually so
             * we only care about memory used by the key space. */
            delta = (long long) zmalloc_used_memory();
            latencyStartMonitor(eviction_latency);
            if (server.lazyfree_lazy_eviction)
                dbAsyncDelete(db,keyobj);
            else
                dbSyncDelete(db,keyobj);
            latencyEndMonitor(eviction_latency);
            latencyAddSampleIfNeeded("eviction-del",eviction_latency);
            latencyRemoveNestedEvent(latency,eviction_latency);
            delta -= (long long) zmalloc_used_memory();
            mem_freed += delta;
            server.stat_evictedkeys++;
            notifyKeyspaceEvent(NOTIFY_EVICTED, "evicted",
                keyobj, db->id);
            decrRefCount(keyobj);
            keys_freed++;

            /* When the memory to free starts to be big enough, we may
             * start spending so much time here that it is impossible to
             * deliver data to the slaves fast enough, so we force the
             * transmission here inside the loop. */
            if (slaves) flushSlavesOutputBuffers();

            /* Normally our stop condition is the ability to release
             * a fixed, pre-computed amount of memory. However when we
             * are deleting objects in another thread, it's better to
             * check, from time to time, if we already reached our target
             * memory, since the "mem_freed" amount is computed only
             * across the dbAsyncDelete() call, while the thread can
             * release the memory all the time. */
            if (server.lazyfree_lazy_eviction && !(keys_freed % 16)) {
                overhead = freeMemoryGetNotCountedMemory();
                mem_used = zmalloc_used_memory();
                mem_used = (mem_used > overhead) ? mem_used-overhead : 0;
                if (mem_used <= server.maxmemory) {
                    mem_freed = mem_tofree;
                }
            }
        }

        if (!keys_freed) {
            latencyEndMonitor(latency);
            latencyAddSampleIfNeeded("eviction-cycle",latency);
            goto cant_free; /* nothing to free... */
        }
    }
    latencyEndMonitor(latency);
    latencyAddSampleIfNeeded("eviction-cycle",latency);
    return C_OK;

cant_free:
    /* We are here if we are not able to reclaim memory. There is only one
     * last thing we can try: check if the lazyfree thread has jobs in queue
     * and wait... */
    while(bioPendingJobsOfType(BIO_LAZY_FREE)) {
        if (((mem_reported - zmalloc_used_memory()) + mem_freed) >= mem_tofree)
            break;
        usleep(1000);
    }
    return C_ERR;
}
Code Example #8
File: dyn_gossip.c Project: amimimor/dynomite
static rstatus_t
gossip_add_node_if_absent(struct server_pool *sp,
		struct string *dc,
		struct string *rack,
		struct string *address,
		struct string *ip,
		struct string *port,
		struct dyn_token *token,
		uint8_t state,
		uint64_t timestamp)
{
	rstatus_t status;
	bool rack_existed = false;

	log_debug(LOG_VERB, "gossip_add_node_if_absent          : '%.*s'", address->len, address->data);

	struct gossip_dc * g_dc = dictFetchValue(gn_pool.dict_dc, dc);
	if (g_dc == NULL) {
		log_debug(LOG_VERB, "We don't have this datacenter? '%.*s' ", dc->len, dc->data);
		g_dc = array_push(&gn_pool.datacenters);
		gossip_dc_init(g_dc, dc);
		dictAdd(gn_pool.dict_dc, &g_dc->name, g_dc);
	} else {
		log_debug(LOG_VERB, "We got a datacenter in dict for '%.*s' ", dc->len, dc->data);
	}

	struct gossip_rack *g_rack = dictFetchValue(g_dc->dict_rack, rack);
	if (g_rack == NULL) {
		log_debug(LOG_VERB, "We don't have this rack? '%.*s' ", rack->len, rack->data);
		g_rack = array_push(&g_dc->racks);
		gossip_rack_init(g_rack, dc, rack);
		dictAdd(g_dc->dict_rack, &g_rack->name, g_rack);
	} else {
		log_debug(LOG_VERB, "We got a rack for '%.*s' ", rack->len, rack->data);
	}

	struct string *token_str = token_to_string(token);
	struct node *g_node = dictFetchValue(g_rack->dict_token_nodes, token_str);

	if (g_node == NULL) { //never existed
		log_debug(LOG_VERB, "Node not found!  We need to add it");
		log_debug(LOG_VERB, "adding node : dc[%.*s]", dc->len, dc->data);
		log_debug(LOG_VERB, "adding node : g_rack[%.*s]", g_rack->name.len, g_rack->name.data);
		log_debug(LOG_VERB, "adding node : address[%.*s]", address->len, address->data);
		log_debug(LOG_VERB, "adding node : ip[%.*s]", ip->len, ip->data);
		log_debug(LOG_VERB, "adding node : port[%.*s]", port->len, port->data);
		log_debug(LOG_VERB, "suggested state : %d", state);
		//print_dyn_token(token, 6);
		gossip_add_node(sp, dc, g_rack, address, ip, port, token, state);
	} else if (dictFind(g_rack->dict_name_nodes, ip) != NULL) {
		log_debug(LOG_VERB, "Node found");
		if (!g_node->is_local) {  //don't update myself here
			if (string_compare(&g_node->name, ip) != 0) {
				log_debug(LOG_WARN, "Replacing an existing token with new info");
				gossip_replace_node(sp, g_node, address, ip, state);
			} else {  //update state
				gossip_update_state(sp, g_node, state, timestamp);
			}
		}
	} else {
		log_debug(LOG_WARN, "Replacing an existing token with new IP or address");
		gossip_replace_node(sp, g_node, address, ip, state);
		dictAdd(g_rack->dict_name_nodes, &g_node->name, g_node);
	}

	//free token_str
	string_deinit(token_str);
	dn_free(token_str);
	return 0;
}
Code Example #9
File: dyn_gossip.c Project: amimimor/dynomite
static rstatus_t
gossip_pool_each_init(void *elem, void *data)
{
	rstatus_t status;
	struct server_pool *sp = elem;

	gn_pool.ctx = sp->ctx;
	gn_pool.name = &sp->name;
	gn_pool.idx = sp->idx;
	gn_pool.g_interval = sp->g_interval;

	//dictDisableResize();
	gn_pool.dict_dc = dictCreate(&string_table_dict_type, NULL);

	gossip_set_seeds_provider(&sp->seed_provider);

	uint32_t n_dc = array_n(&sp->datacenters);
	if (n_dc == 0)
		return DN_OK;

	if (n_dc > 0) {
		status = array_init(&gn_pool.datacenters, n_dc, sizeof(struct gossip_dc));
		if (status != DN_OK) {
			return status;
		}
	}

	//add racks and datacenters
	uint32_t dc_cnt = array_n(&sp->datacenters);
	uint32_t dc_index;
	for(dc_index = 0; dc_index < dc_cnt; dc_index++) {
		struct datacenter *dc = array_get(&sp->datacenters, dc_index);
		uint32_t rack_cnt = array_n(&dc->racks);
		uint32_t rack_index;
		for(rack_index = 0; rack_index < rack_cnt; rack_index++) {
			struct rack *rack = array_get(&dc->racks, rack_index);

			if (dictFind(gn_pool.dict_dc, rack->dc) == NULL) {
				struct gossip_dc *g_dc = array_push(&gn_pool.datacenters);
				gossip_dc_init(g_dc, rack->dc);
				dictAdd(gn_pool.dict_dc, &g_dc->name, g_dc);
			}

			struct gossip_dc *g_dc = dictFetchValue(gn_pool.dict_dc, rack->dc);
			if (dictFind(g_dc->dict_rack, rack->name) == NULL) {
				log_debug(LOG_VERB, "What?? No rack in Dict for rack         : '%.*s'", rack->name.len, rack->name.data);
				struct gossip_rack *g_rack = array_push(&g_dc->racks);
				gossip_rack_init(g_rack, rack->dc, rack->name);
				dictAdd(g_dc->dict_rack, &g_rack->name, g_rack);
			}
		}
	}

	uint32_t i, nelem;
	for (i = 0, nelem = array_n(&sp->peers); i < nelem; i++) {
		struct server *peer = array_get(&sp->peers, i);
		struct gossip_dc *g_dc = dictFetchValue(gn_pool.dict_dc, &peer->dc);
		struct gossip_rack *g_rack = dictFetchValue(g_dc->dict_rack, &peer->rack);
		struct node *gnode = array_push(&g_rack->nodes);

		node_init(gnode);

		string_copy(&gnode->dc, peer->dc.data, peer->dc.len);
		string_copy(&gnode->rack, g_rack->name.data, g_rack->name.len);
		string_copy(&gnode->name, peer->name.data, peer->name.len);
		string_copy(&gnode->pname, peer->pname.data, peer->pname.len); //ignore the port for now
		gnode->port = peer->port;
		gnode->is_local = peer->is_local;


		if (i == 0) { //Don't override its own state
			gnode->state = sp->ctx->dyn_state;  //likely it is JOINING state
			gnode->ts = (uint64_t)time(NULL);
			current_node = gnode;
			char *b_address = get_broadcast_address(sp);
			string_deinit(&gnode->name);
			string_copy(&gnode->name, b_address, dn_strlen(b_address));
		} else {
			gnode->state = DOWN;
			gnode->ts = 1010101;  //make this to be a very aged ts
		}

		struct dyn_token *ptoken = array_get(&peer->tokens, 0);
		copy_dyn_token(ptoken, &gnode->token);

		//copy socket stuffs

		g_rack->nnodes++;
		//add into dicts
		dictAdd(g_rack->dict_name_nodes, &gnode->name, gnode);
		dictAdd(g_rack->dict_token_nodes, token_to_string(&gnode->token), gnode);

		node_count++;

	}

	//gossip_debug();

	status = gossip_start(sp);
	if (status != DN_OK) {
		goto error;
	}

	return DN_OK;

	error:
	gossip_destroy(sp);
	return DN_OK;

}
Code Example #10
File: db.c Project: abcdef506819/myblog
/* Overwrite an existing key with a new value. Incrementing the reference
 * count of the new value is up to the caller.
 * This function does not modify the expire time of the existing key.
 *
 * The program is aborted if the key was not already present. */
void dbOverwrite(redisDb *db, robj *key, robj *val) {
    struct dictEntry *de = dictFind(db->dict,key->ptr);

    redisAssert(de != NULL);
    dictReplace(db->dict, key->ptr, val);
}
Code Example #11
// This function is called by Redis every time it finishes executing a single
// command, a MULTI/EXEC block, or a Lua script.
// Every key that has at least one blocked client and that received a PUSH is
// added to server.ready_keys; this function serves those blocked clients.
void handleClientsBlockedOnLists(void) {
    // Keep looping over server.ready_keys while it still holds keys whose
    // clients must be unblocked.
    while(listLength(server.ready_keys) != 0) {
        list *l;

        /* Point server.ready_keys to a fresh list and save the current one
         * locally. This way as we run the old list we are free to call
         * signalListAsReady() that may push new elements in server.ready_keys
         * when handling clients blocked into BRPOPLPUSH. */
        // Save the current server.ready_keys list locally
        l = server.ready_keys;
        // and replace it with a fresh, empty list
        server.ready_keys = listCreate();

        // While the old list still contains ready keys
        while(listLength(l) != 0) {
            listNode *ln = listFirst(l);    // head node of the list
            readyList *rl = ln->value;      // node value: a readyList structure

            /* First of all remove this key from db->ready_keys so that
             * we can safely call signalListAsReady() against this key. */
            // Remove the ready key from rl->db->ready_keys
            dictDelete(rl->db->ready_keys,rl->key);

            /* If the key exists and it's a list, serve blocked clients
             * with data. */
            // Look up the value of the ready key
            robj *o = lookupKeyWrite(rl->db,rl->key);
            // The value object must be a list
            if (o != NULL && o->type == OBJ_LIST) {
                dictEntry *de;

                /* We serve clients in the same order they blocked for
                 * this key, from the first blocked to the last. */
                // blocking_keys is a dict: its keys are the keys clients blocked
                // on, and each value is a list of the blocked clients.
                // Fetch the clients blocked on this key.
                de = dictFind(rl->db->blocking_keys,rl->key);
                // If there are blocked clients
                if (de) {
                    // Get the value of the dict entry: the list of clients
                    list *clients = dictGetVal(de);
                    // and its length
                    int numclients = listLength(clients);

                    // Iterate over all nodes of the list
                    while(numclients--) {
                        // First client node of the list
                        listNode *clientnode = listFirst(clients);
                        // The node value is a client
                        client *receiver = clientnode->value;
                        // The destination key for the PUSH (used by BRPOPLPUSH),
                        // stored in bpop.target
                        robj *dstkey = receiver->bpop.target;
                        // Decide from which side to pop, depending on the
                        // blocking command that was used
                        int where = (receiver->lastcmd &&
                                     receiver->lastcmd->proc == blpopCommand) ?
                                    LIST_HEAD : LIST_TAIL;
                        // Pop an element from the list
                        robj *value = listTypePop(o,where);

                        // The pop succeeded
                        if (value) {
                            /* Protect receiver->bpop.target, that will be
                             * freed by the next unblockClient()
                             * call. */
                            // Increment the refcount of dstkey to protect it;
                            // it is released by unblockClient()
                            if (dstkey) incrRefCount(dstkey);
                            // Unblock the client
                            unblockClient(receiver);

                            // Push the value to the client blocked on this key
                            if (serveClientBlockedOnList(receiver,
                                rl->key,dstkey,rl->db,value,
                                where) == C_ERR)
                            {
                                /* If we failed serving the client we need
                                 * to also undo the POP operation. */
                                    // If serving the client failed, push the popped value back
                                    listTypePush(o,value,where);
                            }
                            // Release dstkey and value
                            if (dstkey) decrRefCount(dstkey);
                            decrRefCount(value);
                        } else {
                            break;
                        }
                    }
                }

                // If all elements were popped, delete the key from the database
                if (listTypeLength(o) == 0) {
                    dbDelete(rl->db,rl->key);
                }
                /* We don't call signalModifiedKey() as it was already called
                 * when an element was pushed on the list. */
            }

            /* Free this item. */
            // Free this item
            decrRefCount(rl->key);
            zfree(rl);
            listDelNode(l,ln);
        }
        // Release the old ready_keys list; a new one was created above
        listRelease(l); /* We have the new list on place at this point. */
    }
}
Code Example #12
File: scripting.c Project: jiangguang5201314/ZNginx
void scriptCommand(redisClient *c)
{
    if (c->argc == 2 && !strcasecmp(c->argv[1]->ptr,"flush"))
    {
        scriptingReset();
        addReply(c,shared.ok);
        server.dirty++; /* Replicating this command is a good idea. */
    }
    else if (c->argc >= 2 && !strcasecmp(c->argv[1]->ptr,"exists"))
    {
        int j;

        addReplyMultiBulkLen(c, c->argc-2);
        for (j = 2; j < c->argc; j++)
        {
            if (dictFind(server.lua_scripts,c->argv[j]->ptr))
                addReply(c,shared.cone);
            else
                addReply(c,shared.czero);
        }
    }
    else if (c->argc == 3 && !strcasecmp(c->argv[1]->ptr,"load"))
    {
        char funcname[43];
        sds sha;

        // Build the function name
        funcname[0] = 'f';
        funcname[1] = '_';
        // Compute the SHA1 of the script body
        sha1hex(funcname+2,c->argv[2]->ptr,sdslen(c->argv[2]->ptr));
        sha = sdsnewlen(funcname+2,40);
        // Look up the script in the dict by its SHA1
        if (dictFind(server.lua_scripts,sha) == NULL)
        {
            // If the script is not yet defined, create the function in Lua
            if (luaCreateFunction(c,server.lua,funcname,c->argv[2])
                    == REDIS_ERR)
            {
                sdsfree(sha);
                return;
            }
        }
        // Reply with the SHA1 of the script
        addReplyBulkCBuffer(c,funcname+2,40);
        sdsfree(sha);
    }
    else if (c->argc == 2 && !strcasecmp(c->argv[1]->ptr,"kill"))
    {
        if (server.lua_caller == NULL)
        {
            addReplySds(c,sdsnew("-NOTBUSY No scripts in execution right now.\r\n"));
        }
        else if (server.lua_write_dirty)
        {
            addReplySds(c,sdsnew("-UNKILLABLE Sorry the script already executed write commands against the dataset. You can either wait the script termination or kill the server in an hard way using the SHUTDOWN NOSAVE command.\r\n"));
        }
        else
        {
            server.lua_kill = 1;
            addReply(c,shared.ok);
        }
    }
    else
    {
        addReplyError(c, "Unknown SCRIPT subcommand or wrong # of args.");
    }
}
Code Example #13
File: topbase.c Project: cinience/saker
struct ProcessInfo *findProcess(pid_t pid) {
    dictEntry *de = dictFind(server.process, &pid);
    if (de) return dictGetEntryVal(de);
    return NULL;
}
Code Example #14
static int
conf_parse(rmt_conf *cf)
{
    int ret;
    conf_pool *cp;
    dict *orgs, *org;
    dictEntry *de;
    sds key;
    
    if(cf == NULL){
        return RMT_ERROR;
    }

    orgs = cf->organizations;
    if(orgs == NULL){
        return RMT_ERROR;
    }

    //source pool
    key = sdsnew(CONF_ORGANIZATION_NAME_SOURCE);
    de = dictFind(orgs, key);
    if(de == NULL){
        log_error("ERROR: Can not find %s organization in conf file %s", 
            CONF_ORGANIZATION_NAME_SOURCE, cf->fname);
        sdsfree(key);
        return RMT_ERROR;
    }

    org = dictGetVal(de);
    if(org == NULL){
        log_error("ERROR: Dict %s entry value is NULL", dictGetKey(de));
        sdsfree(key);
        return RMT_ERROR;
    }

    cp = &cf->source_pool;
    ret = conf_parse_conf_pool(cp, org);
    if(ret != RMT_OK){
        log_error("ERROR: source pool conf parse error");
        sdsfree(key);
        return RMT_ERROR;
    }

    //target pool
    key = sdscpy(key, CONF_ORGANIZATION_NAME_TARGET);
    de = dictFind(orgs, key);
    if(de == NULL){
        log_error("ERROR: Can not find %s organization in conf file %s", 
            CONF_ORGANIZATION_NAME_TARGET, cf->fname);
        sdsfree(key);
        return RMT_ERROR;
    }

    org = dictGetVal(de);
    if(org == NULL){
        log_error("ERROR: Dict %s entry value is NULL", dictGetKey(de));
        sdsfree(key);
        return RMT_ERROR;
    }

    cp = &cf->target_pool;
    ret = conf_parse_conf_pool(cp, org);
    if(ret != RMT_OK){
        log_error("ERROR: target pool conf parse error");
        sdsfree(key);
        return RMT_ERROR;
    }

    //common
    key = sdscpy(key, CONF_ORGANIZATION_NAME_COMMAN);
    de = dictFind(orgs, key);
    if(de == NULL){
        log_error("ERROR: Can not find %s organization in conf file %s", 
            CONF_ORGANIZATION_NAME_COMMAN, cf->fname);
        sdsfree(key);
        return RMT_ERROR;
    }

    org = dictGetVal(de);
    if(org == NULL){
        log_error("ERROR: Dict %s entry value is NULL", dictGetKey(de));
        sdsfree(key);
        return RMT_ERROR;
    }

    ret = conf_parse_conf_common(cf, org);
    if(ret != RMT_OK){
        log_error("ERROR: common conf parse error");
        sdsfree(key);
        return RMT_ERROR;
    }    

    sdsfree(key);
    
    return RMT_OK;
}
Code Example #15
File: db.c Project: Xwuming/misc
int dbExists(redisDb *db, robj *key) {
    return dictFind(db->dict,key->ptr) != NULL;
}
Code Example #16
File: ack.c Project: lamby/pkg-disque
/* This function is called by cluster.c every time we receive a GOTACK message
 * about a job we know. */
void gotAckReceived(clusterNode *sender, job *job, int known) {
    /* A dummy ACK is an acknowledged job that we created just because a client
     * sent us an ACKJOB about a job we were not aware of. */
    int dummy_ack = dictSize(job->nodes_delivered) == 0;

    serverLog(LL_VERBOSE,"RECEIVED GOTACK FROM %.40s FOR JOB %.48s",
        sender->name, job->id);

    /* We should never receive a GOTACK for a job which is not acknowledged,
     * but it is more robust to handle it explicitly. */
    if (job->state != JOB_STATE_ACKED) return;

    /* If this is a dummy ACK, and we reached a node that knows about this job,
     * it's up to it to perform the garbage collection, so we can forget about
     * this job and reclaim memory. */
    if (dummy_ack && known) {
        serverLog(LL_VERBOSE,"Deleting %.48s: authoritative node reached",
            job->id);
        unregisterJob(job);
        freeJob(job);
        return;
    }

    /* If the sender knows about the job, or if we have the sender in the list
     * of nodes that may have the job (even if it no longer remembers about
     * the job), we do two things:
     *
     * 1) Add the node to the list of nodes_delivered. It is likely already
     *    there... so this should be useless, but is a good invariant
     *    to enforce.
     * 2) Add the node to the list of nodes that acknowledged the ACK. */
    if (known || dictFind(job->nodes_delivered,sender->name) != NULL) {
        dictAdd(job->nodes_delivered,sender->name,sender);
        /* job->nodes_confirmed exists if we started a job garbage collection,
         * but we may receive GOTACK messages in other conditions sometimes,
         * since we reply with SETACK to QUEUED and WILLQUEUE if the job is
         * acknowledged but we have not yet started the GC. So we need to test
         * if the hash table actually exists. */
        if (job->nodes_confirmed)
            dictAdd(job->nodes_confirmed,sender->name,sender);
    }

    /* If our job is actually a dummy ACK, we are still interested in collecting
     * all the nodes in the cluster that reported they don't have a clue:
     * eventually if everybody in the cluster has no clue, we can safely remove
     * the dummy ACK. */
    if (!known && dummy_ack) {
        dictAdd(job->nodes_confirmed,sender->name,sender);
        if (dictSize(job->nodes_confirmed) >= dictSize(server.cluster->nodes))
        {
            serverLog(LL_VERBOSE,
                "Deleting %.48s: dummy ACK not known cluster-wide",
                job->id);
            unregisterJob(job);
            freeJob(job);
            return;
        }
    }

    /* Finally the common case: our SETACK reached everybody. Broadcast
     * a DELJOB to all the nodes involved, and delete the job. */
    if (!dummy_ack && job->nodes_confirmed &&
         dictSize(job->nodes_confirmed) >= dictSize(job->nodes_delivered))
    {
        serverLog(LL_VERBOSE,
            "Deleting %.48s: All nodes involved acknowledged the job",
            job->id);
        clusterBroadcastDelJob(job);
        unregisterJob(job);
        freeJob(job);
        return;
    }
}
Code Example #17
File: db.c Project: Xwuming/misc
int removeExpire(redisDb *db, robj *key) {
    /* An expire may only be removed if there is a corresponding entry in the
     * main dict. Otherwise, the key will never be freed. */
    serverAssertWithInfo(NULL,key,dictFind(db->dict,key->ptr) != NULL);
    return dictDelete(db->expires,key->ptr) == DICT_OK;
}
Code Example #18
File: t_zset.c Project: DJHartley/redis
/* This generic command implements both ZADD and ZINCRBY.
 * scoreval is the score if the operation is a ZADD (doincrement == 0) or
 * the increment if the operation is a ZINCRBY (doincrement == 1). */
void zaddGenericCommand(redisClient *c, robj *key, robj *ele, double scoreval, int doincrement) {
    robj *zsetobj;
    zset *zs;
    double *score;

    zsetobj = lookupKeyWrite(c->db,key);
    if (zsetobj == NULL) {
        zsetobj = createZsetObject();
        dbAdd(c->db,key,zsetobj);
    } else {
        if (zsetobj->type != REDIS_ZSET) {
            addReply(c,shared.wrongtypeerr);
            return;
        }
    }
    zs = zsetobj->ptr;

    /* Ok now since we implement both ZADD and ZINCRBY here the code
     * needs to handle the two different conditions. It's all about setting
     * '*score', that is, the new score to set, to the right value. */
    score = zmalloc(sizeof(double));
    if (doincrement) {
        dictEntry *de;

        /* Read the old score. If the element was not present, start from 0. */
        de = dictFind(zs->dict,ele);
        if (de) {
            double *oldscore = dictGetEntryVal(de);
            *score = *oldscore + scoreval;
        } else {
            *score = scoreval;
        }
        if (isnan(*score)) {
            addReplySds(c,
                sdsnew("-ERR resulting score is not a number (NaN)\r\n"));
            zfree(score);
            /* Note that we don't need to check if the zset may be empty and
             * should be removed here, as we can only obtain NaN as score if
             * there was already an element in the sorted set. */
            return;
        }
    } else {
        *score = scoreval;
    }

    /* What follows is a simple remove and re-insert operation that is common
     * to both ZADD and ZINCRBY... */
    if (dictAdd(zs->dict,ele,score) == DICT_OK) {
        /* case 1: New element */
        incrRefCount(ele); /* added to hash */
        zslInsert(zs->zsl,*score,ele);
        incrRefCount(ele); /* added to skiplist */
        touchWatchedKey(c->db,c->argv[1]);
        server.dirty++;
        if (doincrement)
            addReplyDouble(c,*score);
        else
            addReply(c,shared.cone);
    } else {
        dictEntry *de;
        double *oldscore;

        /* case 2: Score update operation */
        de = dictFind(zs->dict,ele);
        redisAssert(de != NULL);
        oldscore = dictGetEntryVal(de);
        if (*score != *oldscore) {
            int deleted;

            /* Remove and insert the element in the skip list with new score */
            deleted = zslDelete(zs->zsl,*oldscore,ele);
            redisAssert(deleted != 0);
            zslInsert(zs->zsl,*score,ele);
            incrRefCount(ele);
            /* Update the score in the hash table */
            dictReplace(zs->dict,ele,score);
            touchWatchedKey(c->db,c->argv[1]);
            server.dirty++;
        } else {
            zfree(score);
        }
        if (doincrement)
            addReplyDouble(c,*score);
        else
            addReply(c,shared.czero);
    }
}
Code Example #19
File: evict.c Project: kguidi10/sample_app
void evictionPoolPopulate(int dbid, dict *sampledict, dict *keydict, struct evictionPoolEntry *pool) {
    int j, k, count;
    dictEntry *samples[server.maxmemory_samples];

    count = dictGetSomeKeys(sampledict,samples,server.maxmemory_samples);
    for (j = 0; j < count; j++) {
        unsigned long long idle;
        sds key;
        robj *o;
        dictEntry *de;

        de = samples[j];
        key = dictGetKey(de);

        /* If the dictionary we are sampling from is not the main
         * dictionary (but the expires one) we need to lookup the key
         * again in the key dictionary to obtain the value object. */
        if (server.maxmemory_policy != MAXMEMORY_VOLATILE_TTL) {
            if (sampledict != keydict) de = dictFind(keydict, key);
            o = dictGetVal(de);
        }

        /* Calculate the idle time according to the policy. This is called
         * idle just because the code initially handled LRU, but is in fact
         * just a score where a higher score means a better candidate. */
        if (server.maxmemory_policy & MAXMEMORY_FLAG_LRU) {
            idle = estimateObjectIdleTime(o);
        } else if (server.maxmemory_policy & MAXMEMORY_FLAG_LFU) {
            /* When we use an LRU policy, we sort the keys by idle time
             * so that we expire keys starting from greater idle time.
             * However when the policy is an LFU one, we have a frequency
             * estimation, and we want to evict keys with lower frequency
             * first. So inside the pool we put objects using the inverted
             * frequency, subtracting the actual frequency from the maximum
             * frequency of 255. */
            idle = 255-LFUDecrAndReturn(o);
        } else if (server.maxmemory_policy == MAXMEMORY_VOLATILE_TTL) {
            /* In this case the sooner the expire the better. */
            idle = ULLONG_MAX - (long)dictGetVal(de);
        } else {
            serverPanic("Unknown eviction policy in evictionPoolPopulate()");
        }

        /* Insert the element inside the pool.
         * First, find the first empty bucket or the first populated
         * bucket that has an idle time smaller than our idle time. */
        k = 0;
        while (k < EVPOOL_SIZE &&
               pool[k].key &&
               pool[k].idle < idle) k++;
        if (k == 0 && pool[EVPOOL_SIZE-1].key != NULL) {
            /* Can't insert if the element is < the worst element we have
             * and there are no empty buckets. */
            continue;
        } else if (k < EVPOOL_SIZE && pool[k].key == NULL) {
            /* Inserting into empty position. No setup needed before insert. */
        } else {
            /* Inserting in the middle. Now k points to the first element
             * greater than the element to insert.  */
            if (pool[EVPOOL_SIZE-1].key == NULL) {
                /* Free space on the right? Insert at k shifting
                 * all the elements from k to end to the right. */

                /* Save SDS before overwriting. */
                sds cached = pool[EVPOOL_SIZE-1].cached;
                memmove(pool+k+1,pool+k,
                    sizeof(pool[0])*(EVPOOL_SIZE-k-1));
                pool[k].cached = cached;
            } else {
                /* No free space on right? Insert at k-1 */
                k--;
                /* Shift all elements on the left of k (included) to the
                 * left, so we discard the element with smaller idle time. */
                sds cached = pool[0].cached; /* Save SDS before overwriting. */
                if (pool[0].key != pool[0].cached) sdsfree(pool[0].key);
                memmove(pool,pool+1,sizeof(pool[0])*k);
                pool[k].cached = cached;
            }
        }

        /* Try to reuse the cached SDS string allocated in the pool entry,
         * because allocating and deallocating this object is costly
         * (according to the profiler, not my fantasy. Remember:
         * premature optimizbla bla bla bla. */
        int klen = sdslen(key);
        if (klen > EVPOOL_CACHED_SDS_SIZE) {
            pool[k].key = sdsdup(key);
        } else {
            memcpy(pool[k].cached,key,klen+1);
            sdssetlen(pool[k].cached,klen);
            pool[k].key = pool[k].cached;
        }
        pool[k].idle = idle;
        pool[k].dbid = dbid;
    }
}
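The LFU branch above inverts the 8-bit frequency counter so the pool can keep treating a higher score as a better eviction candidate. A standalone illustration of that mapping (a sketch, not code from the example):

/* Hypothetical helper mirroring the inversion used above: a cold key
 * (counter 0) scores 255, a hot key (counter 255) scores 0. */
static unsigned long long lfu_counter_to_score(unsigned char counter) {
    return 255 - counter;
}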
Code Example #20
File: t_zset.c Project: DJHartley/redis
void zunionInterGenericCommand(redisClient *c, robj *dstkey, int op) {
    int i, j, setnum;
    int aggregate = REDIS_AGGR_SUM;
    zsetopsrc *src;
    robj *dstobj;
    zset *dstzset;
    dictIterator *di;
    dictEntry *de;
    int touched = 0;

    /* expect setnum input keys to be given */
    setnum = atoi(c->argv[2]->ptr);
    if (setnum < 1) {
        addReplySds(c,sdsnew("-ERR at least 1 input key is needed for ZUNIONSTORE/ZINTERSTORE\r\n"));
        return;
    }

    /* test if the expected number of keys would overflow */
    if (3+setnum > c->argc) {
        addReply(c,shared.syntaxerr);
        return;
    }

    /* read keys to be used for input */
    src = zmalloc(sizeof(zsetopsrc) * setnum);
    for (i = 0, j = 3; i < setnum; i++, j++) {
        robj *obj = lookupKeyWrite(c->db,c->argv[j]);
        if (!obj) {
            src[i].dict = NULL;
        } else {
            if (obj->type == REDIS_ZSET) {
                src[i].dict = ((zset*)obj->ptr)->dict;
            } else if (obj->type == REDIS_SET) {
                src[i].dict = (obj->ptr);
            } else {
                zfree(src);
                addReply(c,shared.wrongtypeerr);
                return;
            }
        }

        /* default all weights to 1 */
        src[i].weight = 1.0;
    }

    /* parse optional extra arguments */
    if (j < c->argc) {
        int remaining = c->argc - j;

        while (remaining) {
            if (remaining >= (setnum + 1) && !strcasecmp(c->argv[j]->ptr,"weights")) {
                j++; remaining--;
                for (i = 0; i < setnum; i++, j++, remaining--) {
                    if (getDoubleFromObjectOrReply(c,c->argv[j],&src[i].weight,
                            "weight value is not a double") != REDIS_OK)
                    {
                        zfree(src);
                        return;
                    }
                }
            } else if (remaining >= 2 && !strcasecmp(c->argv[j]->ptr,"aggregate")) {
                j++; remaining--;
                if (!strcasecmp(c->argv[j]->ptr,"sum")) {
                    aggregate = REDIS_AGGR_SUM;
                } else if (!strcasecmp(c->argv[j]->ptr,"min")) {
                    aggregate = REDIS_AGGR_MIN;
                } else if (!strcasecmp(c->argv[j]->ptr,"max")) {
                    aggregate = REDIS_AGGR_MAX;
                } else {
                    zfree(src);
                    addReply(c,shared.syntaxerr);
                    return;
                }
                j++; remaining--;
            } else {
                zfree(src);
                addReply(c,shared.syntaxerr);
                return;
            }
        }
    }

    /* sort sets from the smallest to largest, this will improve our
     * algorithm's performance */
    qsort(src,setnum,sizeof(zsetopsrc),qsortCompareZsetopsrcByCardinality);

    dstobj = createZsetObject();
    dstzset = dstobj->ptr;

    if (op == REDIS_OP_INTER) {
        /* skip going over all entries if the smallest zset is NULL or empty */
        if (src[0].dict && dictSize(src[0].dict) > 0) {
            /* precondition: as src[0].dict is non-empty and the zsets are ordered
             * from small to large, all src[i > 0].dict are non-empty too */
            di = dictGetIterator(src[0].dict);
            while((de = dictNext(di)) != NULL) {
                double *score = zmalloc(sizeof(double)), value;
                *score = src[0].weight * zunionInterDictValue(de);

                for (j = 1; j < setnum; j++) {
                    dictEntry *other = dictFind(src[j].dict,dictGetEntryKey(de));
                    if (other) {
                        value = src[j].weight * zunionInterDictValue(other);
                        zunionInterAggregate(score, value, aggregate);
                    } else {
                        break;
                    }
                }

                /* skip entry when not present in every source dict */
                if (j != setnum) {
                    zfree(score);
                } else {
                    robj *o = dictGetEntryKey(de);
                    dictAdd(dstzset->dict,o,score);
                    incrRefCount(o); /* added to dictionary */
                    zslInsert(dstzset->zsl,*score,o);
                    incrRefCount(o); /* added to skiplist */
                }
            }
            dictReleaseIterator(di);
        }
    } else if (op == REDIS_OP_UNION) {
        for (i = 0; i < setnum; i++) {
            if (!src[i].dict) continue;

            di = dictGetIterator(src[i].dict);
            while((de = dictNext(di)) != NULL) {
                /* skip key when already processed */
                if (dictFind(dstzset->dict,dictGetEntryKey(de)) != NULL) continue;

                double *score = zmalloc(sizeof(double)), value;
                *score = src[i].weight * zunionInterDictValue(de);

                /* because the zsets are sorted by size, it's only possible
                 * for sets at larger indices to hold this entry */
                for (j = (i+1); j < setnum; j++) {
                    dictEntry *other = dictFind(src[j].dict,dictGetEntryKey(de));
                    if (other) {
                        value = src[j].weight * zunionInterDictValue(other);
                        zunionInterAggregate(score, value, aggregate);
                    }
                }

                robj *o = dictGetEntryKey(de);
                dictAdd(dstzset->dict,o,score);
                incrRefCount(o); /* added to dictionary */
                zslInsert(dstzset->zsl,*score,o);
                incrRefCount(o); /* added to skiplist */
            }
            dictReleaseIterator(di);
        }
    } else {
        /* unknown operator */
        redisAssert(op == REDIS_OP_INTER || op == REDIS_OP_UNION);
    }

    if (dbDelete(c->db,dstkey)) {
        touchWatchedKey(c->db,dstkey);
        touched = 1;
        server.dirty++;
    }
    if (dstzset->zsl->length) {
        dbAdd(c->db,dstkey,dstobj);
        addReplyLongLong(c, dstzset->zsl->length);
        if (!touched) touchWatchedKey(c->db,dstkey);
        server.dirty++;
    } else {
        decrRefCount(dstobj);
        addReply(c, shared.czero);
    }
    zfree(src);
}
Code Example #21
File: slots.c Project: 107192468/codis
/* *
 * slotscheck
 * */
void
slotscheckCommand(redisClient *c) {
    sds bug = NULL;
    int i;
    for (i = 0; i < HASH_SLOTS_SIZE && bug == NULL; i ++) {
        dict *d = c->db->hash_slots[i];
        if (dictSize(d) == 0) {
            continue;
        }
        list *l = listCreate();
        listSetFreeMethod(l, decrRefCountVoid);
        unsigned long cursor = 0;
        do {
            cursor = dictScan(d, cursor, slotsScanSdsKeyCallback, l);
            while (1) {
                listNode *head = listFirst(l);
                if (head == NULL) {
                    break;
                }
                robj *key = listNodeValue(head);
                if (lookupKey(c->db, key) == NULL) {
                    if (bug == NULL) {
                        bug = sdsdup(key->ptr);
                    }
                }
                listDelNode(l, head);
            }
        } while (cursor != 0 && bug == NULL);
        listRelease(l);
    }
    if (bug != NULL) {
        addReplyErrorFormat(c, "step 1, miss = '%s'", bug);
        sdsfree(bug);
        return;
    }
    do {
        dict *d = c->db->dict;
        if (dictSize(d) == 0) {
            break;
        }
        list *l = listCreate();
        listSetFreeMethod(l, decrRefCountVoid);
        unsigned long cursor = 0;
        do {
            cursor = dictScan(d, cursor, slotsScanSdsKeyCallback, l);
            while (1) {
                listNode *head = listFirst(l);
                if (head == NULL) {
                    break;
                }
                robj *key = listNodeValue(head);
                int slot = slots_num(key->ptr, NULL);
                if (dictFind(c->db->hash_slots[slot], key->ptr) == NULL) {
                    if (bug == NULL) {
                        bug = sdsdup(key->ptr);
                    }
                }
                listDelNode(l, head);
            }
        } while (cursor != 0 && bug == NULL);
        listRelease(l);
    } while (0);
    if (bug != NULL) {
        addReplyErrorFormat(c, "step 2, miss = '%s'", bug);
        sdsfree(bug);
        return;
    }
    addReply(c, shared.ok);
}
Code Example #22
File: object.c Project: huangyue/redis-2.2.8
/* This is a helper function for the DEBUG command. We need to look up keys
 * without any modification of LRU or other parameters. */
robj *objectCommandLookup(redisClient *c, robj *key) {
    dictEntry *de;

    if ((de = dictFind(c->db->dict,key->ptr)) == NULL) return NULL;
    return (robj*) dictGetEntryVal(de);
}
Code Example #23
File: vm.c Project: ambakshi/redis
/* Every time a thread finishes a job, it writes a byte into the write side
 * of a Unix pipe in order to "awake" the main thread, and this function
 * is called.
 *
 * Note that this is called both by the event loop, when an I/O thread
 * sends a byte in the notification pipe, and is also directly called from
 * waitEmptyIOJobsQueue().
 *
 * In the latter case we don't want to swap more, so we use the
 * "privdata" argument setting it to a not NULL value to signal this
 * condition. */
void vmThreadedIOCompletedJob(aeEventLoop *el, int fd, void *privdata,
            int mask)
{
    char buf[1];
    int retval, processed = 0, toprocess = -1, trytoswap = 1;
    REDIS_NOTUSED(el);
    REDIS_NOTUSED(mask);
    REDIS_NOTUSED(privdata);

    if (privdata != NULL) trytoswap = 0; /* check the comments above... */

    /* For every byte we read in the read side of the pipe, there is one
     * I/O job completed to process. */
#ifndef _WIN32
    while((retval = read(fd,buf,1)) == 1) {
#else
    DWORD pipe_is_on = 0;

    while (1) {
        retval = 0;
        /*Windows fix: We need to peek pipe, since read would block. */
        if (!PeekNamedPipe((HANDLE) _get_osfhandle(fd), NULL, 0, NULL, &pipe_is_on, NULL)) {
           redisLog(REDIS_DEBUG,"PeekReadPipe failed %s", strerror(GetLastError()));
           break;
        }

        /* No data on pipe */
        if (!pipe_is_on)
            break;

        if ((retval = read(fd,buf,1)) != 1)
            break;
#endif
        iojob *j;
        listNode *ln;
        struct dictEntry *de;

        /* Get the processed element (the oldest one) */
        lockThreadedIO();
        redisLog(REDIS_DEBUG,"Processing I/O completed job");
        redisAssert(listLength(server.io_processed) != 0);
        if (toprocess == -1) {
            toprocess = (listLength(server.io_processed)*REDIS_MAX_COMPLETED_JOBS_PROCESSED)/100;
            if (toprocess <= 0) toprocess = 1;
        }
        ln = listFirst(server.io_processed);
        j = ln->value;
        listDelNode(server.io_processed,ln);
        unlockThreadedIO();
        /* If this job is marked as canceled, just ignore it */
        if (j->canceled) {
            freeIOJob(j);
            continue;
        }
        /* Post process it in the main thread, as there are things we
         * can do just here to avoid race conditions and/or invasive locks */
        redisLog(REDIS_DEBUG,"COMPLETED Job type: %d, ID %p, key: %s", j->type, (void*)j->id, (unsigned char*)j->key->ptr);
        de = dictFind(j->db->dict,j->key->ptr);
        redisAssert(de != NULL);
        if (j->type == REDIS_IOJOB_LOAD) {
            redisDb *db;
            vmpointer *vp = dictGetEntryVal(de);

            /* Key loaded, bring it at home */
            vmMarkPagesFree(vp->page,vp->usedpages);
            redisLog(REDIS_DEBUG, "VM: object %s loaded from disk (threaded)",
                (unsigned char*) j->key->ptr);
            server.vm_stats_swapped_objects--;
            server.vm_stats_swapins++;
            dictGetEntryVal(de) = j->val;
            incrRefCount(j->val);
            db = j->db;
            /* Handle clients waiting for this key to be loaded. */
            handleClientsBlockedOnSwappedKey(db,j->key);
            freeIOJob(j);
            zfree(vp);
        } else if (j->type == REDIS_IOJOB_PREPARE_SWAP) {
            /* Now we know the amount of pages required to swap this object.
             * Let's find some space for it, and queue this task again
             * rebranded as REDIS_IOJOB_DO_SWAP. */
            if (!vmCanSwapOut() ||
                vmFindContiguousPages(&j->page,j->pages) == REDIS_ERR)
            {
                /* Ooops... no space or we can't swap as there is
                 * a fork()ed Redis trying to save stuff on disk. */
                j->val->storage = REDIS_VM_MEMORY; /* undo operation */
                freeIOJob(j);
            } else {
                /* Note that we need to mark these pages as used now;
                 * if the job gets canceled, we'll mark them as freed
                 * again. */
                vmMarkPagesUsed(j->page,j->pages);
                j->type = REDIS_IOJOB_DO_SWAP;
                lockThreadedIO();
                queueIOJob(j);
                unlockThreadedIO();
            }
        } else if (j->type == REDIS_IOJOB_DO_SWAP) {
            vmpointer *vp;

            /* Key swapped. We can finally free some memory. */
            if (j->val->storage != REDIS_VM_SWAPPING) {
                vmpointer *vp = (vmpointer*) j->id;
                printf("storage: %d\n",vp->storage);
                printf("key->name: %s\n",(char*)j->key->ptr);
                printf("val: %p\n",(void*)j->val);
                printf("val->type: %d\n",j->val->type);
                printf("val->ptr: %s\n",(char*)j->val->ptr);
            }
            redisAssert(j->val->storage == REDIS_VM_SWAPPING);
            vp = createVmPointer(j->val);
            vp->page = j->page;
            vp->usedpages = j->pages;
            dictGetEntryVal(de) = vp;
            /* Fix the storage otherwise decrRefCount will attempt to
             * remove the associated I/O job */
            j->val->storage = REDIS_VM_MEMORY;
            decrRefCount(j->val);
            redisLog(REDIS_DEBUG,
                "VM: object %s swapped out at %lld (%lld pages) (threaded)",
                (unsigned char*) j->key->ptr,
                (unsigned long long) j->page, (unsigned long long) j->pages);
            server.vm_stats_swapped_objects++;
            server.vm_stats_swapouts++;
            freeIOJob(j);
            /* Put a few more swap requests in the queue if we are still
             * over the memory limit. */
            if (trytoswap && vmCanSwapOut() &&
                zmalloc_used_memory() > server.vm_max_memory)
            {
                int more = 1;
                while(more) {
                    lockThreadedIO();
                    more = listLength(server.io_newjobs) <
                            (unsigned) server.vm_max_threads;
                    unlockThreadedIO();
                    /* Don't waste CPU time if swappable objects are rare. */
                    if (vmSwapOneObjectThreaded() == REDIS_ERR) {
                        trytoswap = 0;
                        break;
                    }
                }
            }
        }
        processed++;
        if (processed == toprocess) return;
    }
    if (retval < 0 && errno != EAGAIN) {
        redisLog(REDIS_WARNING,
            "WARNING: read(2) error in vmThreadedIOCompletedJob() %s",
            strerror(errno));
    }
}
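
The REDIS_IOJOB_LOAD branch above finishes by calling handleClientsBlockedOnSwappedKey() to wake up the clients that were blocked waiting for the key to be loaded. As a rough, non-authoritative sketch of that counterpart, assuming the db->io_keys mapping, dontWaitForSwappedKey() and server.io_ready_clients exist as in the classic vm.c layout:

/* Sketch: walk the clients blocked on 'key' in db->io_keys and move the
 * ones that are no longer waiting for any swapped key into
 * server.io_ready_clients, so the main loop can resume their commands. */
void handleClientsBlockedOnSwappedKey(redisDb *db, robj *key) {
    struct dictEntry *de;
    list *l;
    listNode *ln;
    int len;

    de = dictFind(db->io_keys,key);
    if (de == NULL) return;

    l = dictGetEntryVal(de);
    len = listLength(l);
    /* Don't loop on listLength(l) directly: the list may be released
     * when its last element is removed. */
    while (len--) {
        ln = listFirst(l);
        redisClient *c = ln->value;

        if (dontWaitForSwappedKey(c,key)) {
            /* The client is not waiting for any other swapped key:
             * it is ready to be served again. */
            listAddNodeTail(server.io_ready_clients,c);
        }
    }
}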

void lockThreadedIO(void) {
    pthread_mutex_lock(&server.io_mutex);
}
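
For symmetry with lockThreadedIO() above, the matching unlock is simply the release of the same mutex; a minimal sketch, assuming the server.io_mutex field used above:

void unlockThreadedIO(void) {
    /* Release the global I/O mutex taken by lockThreadedIO(). */
    pthread_mutex_unlock(&server.io_mutex);
}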
Code example #24
0
File: blocked.c  Project: charpty/redis
/* This function should be called by Redis every time a single command,
 * a MULTI/EXEC block, or a Lua script terminates its execution after
 * being called by a client.
 *
 * All the keys with at least one client blocked that received at least
 * one new element via some PUSH/XADD operation are accumulated into
 * the server.ready_keys list. This function will run the list and will
 * serve clients accordingly. Note that the function will iterate again
 * and again: as a result of serving BRPOPLPUSH, we can have new blocking
 * clients to serve because of the PUSH side of BRPOPLPUSH. */
void handleClientsBlockedOnKeys(void) {
    while(listLength(server.ready_keys) != 0) {
        list *l;

        /* Point server.ready_keys to a fresh list and save the current one
         * locally. This way as we run the old list we are free to call
         * signalKeyAsReady() that may push new elements in server.ready_keys
         * when handling clients blocked into BRPOPLPUSH. */
        l = server.ready_keys;
        server.ready_keys = listCreate();

        while(listLength(l) != 0) {
            listNode *ln = listFirst(l);
            readyList *rl = ln->value;

            /* First of all remove this key from db->ready_keys so that
             * we can safely call signalKeyAsReady() against this key. */
            dictDelete(rl->db->ready_keys,rl->key);

            /* Serve clients blocked on list key. */
            robj *o = lookupKeyWrite(rl->db,rl->key);
            if (o != NULL && o->type == OBJ_LIST) {
                dictEntry *de;

                /* We serve clients in the same order they blocked for
                 * this key, from the first blocked to the last. */
                de = dictFind(rl->db->blocking_keys,rl->key);
                if (de) {
                    list *clients = dictGetVal(de);
                    int numclients = listLength(clients);

                    while(numclients--) {
                        listNode *clientnode = listFirst(clients);
                        client *receiver = clientnode->value;

                        if (receiver->btype != BLOCKED_LIST) {
                            /* Put it on the tail, so that at the next call
                             * we won't run into it again. */
                            listDelNode(clients,clientnode);
                            listAddNodeTail(clients,receiver);
                            continue;
                        }

                        robj *dstkey = receiver->bpop.target;
                        int where = (receiver->lastcmd &&
                                     receiver->lastcmd->proc == blpopCommand) ?
                                    LIST_HEAD : LIST_TAIL;
                        robj *value = listTypePop(o,where);

                        if (value) {
                            /* Protect receiver->bpop.target, which will be
                             * freed by the next unblockClient() call. */
                            if (dstkey) incrRefCount(dstkey);
                            unblockClient(receiver);

                            if (serveClientBlockedOnList(receiver,
                                rl->key,dstkey,rl->db,value,
                                where) == C_ERR)
                            {
                                /* If we failed serving the client we need
                                 * to also undo the POP operation. */
                                listTypePush(o,value,where);
                            }

                            if (dstkey) decrRefCount(dstkey);
                            decrRefCount(value);
                        } else {
                            break;
                        }
                    }
                }

                if (listTypeLength(o) == 0) {
                    dbDelete(rl->db,rl->key);
                    notifyKeyspaceEvent(NOTIFY_GENERIC,"del",rl->key,rl->db->id);
                }
                /* We don't call signalModifiedKey() as it was already called
                 * when an element was pushed on the list. */
            }

            /* Serve clients blocked on stream key. */
            else if (o != NULL && o->type == OBJ_STREAM) {
                dictEntry *de = dictFind(rl->db->blocking_keys,rl->key);
                stream *s = o->ptr;

                /* We need to provide the new data that arrived on the stream
                 * to all the clients that are waiting for an offset smaller
                 * than the current top item. */
                if (de) {
                    list *clients = dictGetVal(de);
                    listNode *ln;
                    listIter li;
                    listRewind(clients,&li);

                    while((ln = listNext(&li))) {
                        client *receiver = listNodeValue(ln);
                        if (receiver->btype != BLOCKED_STREAM) continue;
                        streamID *gt = dictFetchValue(receiver->bpop.keys,
                                                      rl->key);
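                        /* Serve this client only if the stream's last ID is
                         * strictly greater than the ID it blocked on. */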
                        if (s->last_id.ms > gt->ms ||
                            (s->last_id.ms == gt->ms &&
                             s->last_id.seq > gt->seq))
                        {
                            streamID start = *gt;
                            start.seq++; /* Can't overflow, it's a uint64_t. */

                            /* If we blocked in the context of a consumer
                             * group, we need to resolve the group and
                             * consumer here. */
                            streamCG *group = NULL;
                            streamConsumer *consumer = NULL;
                            if (receiver->bpop.xread_group) {
                                group = streamLookupCG(s,
                                        receiver->bpop.xread_group->ptr);
                                /* In theory, if the group is not found we
                                 * just perform the read without the group;
                                 * in practice, when the group (or the key
                                 * itself, which triggers removal of the
                                 * group) is deleted, we check for blocked
                                 * clients and send them an error. */
                            }
                            if (group) {
                                consumer = streamLookupConsumer(group,
                                           receiver->bpop.xread_consumer->ptr,
                                           1);
                            }

                            /* Note that after we unblock the client, 'gt'
                             * and other receiver->bpop stuff are no longer
                             * valid, so we must do the setup above before
                             * this call. */
                            unblockClient(receiver);

                            /* Emit the two-element sub-array consisting of
                             * the name of the stream and the data we
                             * extracted from it, wrapped in a single-item
                             * array since we have just one key. */
                            addReplyMultiBulkLen(receiver,1);
                            addReplyMultiBulkLen(receiver,2);
                            addReplyBulk(receiver,rl->key);

                            streamPropInfo pi = {
                                rl->key,
                                receiver->bpop.xread_group
                            };
                            streamReplyWithRange(receiver,s,&start,NULL,
                                                 receiver->bpop.xread_count,
                                                 0, group, consumer, 0, &pi);
                        }
                    }
                }
            }

            /* Free this item. */
            decrRefCount(rl->key);
            zfree(rl);
            listDelNode(l,ln);
        }
        listRelease(l); /* We have the new list on place at this point. */
    }
}
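
handleClientsBlockedOnKeys() only consumes server.ready_keys; the entries are produced when a write command pushes data into a key that has blocked clients. As a hedged sketch of that producer side, a readyList struct and a signalKeyAsReady() along these lines (the exact layout and placement are assumptions, not the literal source):

/* One entry of server.ready_keys: a key in a given DB that received data
 * while at least one client was blocked on it. */
typedef struct readyList {
    redisDb *db;
    robj *key;
} readyList;

/* Sketch: queue 'key' into server.ready_keys the first time it becomes
 * ready, skipping keys nobody is blocked on and keys already signaled. */
void signalKeyAsReady(redisDb *db, robj *key) {
    readyList *rl;

    /* No clients blocking for this key? No need to queue it. */
    if (dictFind(db->blocking_keys,key) == NULL) return;

    /* Key already signaled? No need to queue it again. */
    if (dictFind(db->ready_keys,key) != NULL) return;

    /* Queue the key into the global list of ready keys. */
    rl = zmalloc(sizeof(*rl));
    rl->key = key;
    rl->db = db;
    incrRefCount(key);
    listAddNodeTail(server.ready_keys,rl);

    /* Also remember it in db->ready_keys so the same key is not queued
     * twice, which would make the scan above quadratic. */
    incrRefCount(key);
    serverAssert(dictAdd(db->ready_keys,key,NULL) == DICT_OK);
}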