Example #1
// Expand the dictionary when needed; the check that decides whether expansion is required happens here
static int _dictExpandIfNeeded(dict *d)
{
    /* Incremental rehashing already in progress. Return. */
    // If a rehash is already in progress, return immediately
    if (dictIsRehashing(d)) return DICT_OK;

    /* If the hash table is empty expand it to the initial size. */
    // First element being added: expand to the initial size
    if (d->ht[0].size == 0) return dictExpand(d, DICT_HT_INITIAL_SIZE);

    /* If we reached the 1:1 ratio, and we are allowed to resize the hash
     * table (global setting) or we should avoid it but the ratio between
     * elements/buckets is over the "safe" threshold, we resize doubling
     * the number of buckets. */
    // If the resize flag is set, expand as soon as the USED/BUCKETS ratio reaches 1;
    // otherwise expand only once the ratio exceeds dict_force_resize_ratio
    if (d->ht[0].used >= d->ht[0].size &&
        (dict_can_resize ||
         d->ht[0].used/d->ht[0].size > dict_force_resize_ratio))
    {
        // The expansion doubles the number of buckets
        return dictExpand(d, ((d->ht[0].size > d->ht[0].used) ?
                                    d->ht[0].size : d->ht[0].used)*2);
    }
    return DICT_OK;
}
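In short, the table first grows on the first insertion, and afterwards doubles based on the element count once the USED/BUCKETS ratio reaches 1 (or, when resizing is disabled, exceeds dict_force_resize_ratio, which is 5 in stock Redis). A minimal standalone sketch of just that decision, not the Redis source:

#include <stdio.h>

#define DICT_HT_INITIAL_SIZE 4

/* Standalone sketch of the growth decision above: returns the target
 * bucket count for a table with `size` buckets holding `used` entries. */
static unsigned long next_size(unsigned long size, unsigned long used,
                               int can_resize, unsigned long force_ratio)
{
    if (size == 0) return DICT_HT_INITIAL_SIZE;      /* first insertion */
    if (used >= size && (can_resize || used / size > force_ratio))
        return used * 2;                             /* grow based on element count */
    return size;                                     /* no expansion needed */
}

int main(void) {
    printf("%lu\n", next_size(0, 0, 1, 5));   /* 4  : empty table */
    printf("%lu\n", next_size(4, 4, 1, 5));   /* 8  : 1:1 ratio, resize allowed */
    printf("%lu\n", next_size(4, 4, 0, 5));   /* 4  : resize disabled, ratio fine */
    printf("%lu\n", next_size(4, 24, 0, 5));  /* 48 : ratio > 5 forces growth */
    return 0;
}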
Example #2
File: dict.c Project: snoplus/orca
/* Expand the hash table if needed */
static int _dictExpandIfNeeded(dict *ht) {
    /* If the hash table is empty expand it to the initial size,
     * if the table is "full" double its size. */
    if (ht->size == 0)
        return dictExpand(ht, DICT_HT_INITIAL_SIZE);
    if (ht->used == ht->size)
        return dictExpand(ht, ht->size*2);
    return DICT_OK;
}
Example #3
/* Expand the hash table if needed */
static int _dictExpandIfNeeded(dict *d)
{
    /* If the hash table is empty expand it to the initial size,
     * if the table is "full" double its size. */
    if (dictIsRehashing(d)) return DICT_OK;
    if (d->ht[0].size == 0)
        return dictExpand(d, DICT_HT_INITIAL_SIZE);
    if (d->ht[0].used >= d->ht[0].size && dict_can_resize)
        return dictExpand(d, ((d->ht[0].size > d->ht[0].used) ?
                                    d->ht[0].size : d->ht[0].used)*2);
    return DICT_OK;
}
Example #4
File: t_set.c Project: jrun/redis
/* Convert the set to specified encoding. The resulting dict (when converting
 * to a hashtable) is presized to hold the number of elements in the original
 * set. */
void setTypeConvert(robj *setobj, int enc) {
    setTypeIterator *si;
    redisAssert(setobj->type == REDIS_SET &&
                setobj->encoding == REDIS_ENCODING_INTSET);

    if (enc == REDIS_ENCODING_HT) {
        int64_t intele;
        dict *d = dictCreate(&setDictType,NULL);
        robj *element;

        /* Presize the dict to avoid rehashing */
        dictExpand(d,intsetLen(setobj->ptr));

        /* To add the elements we extract integers and create redis objects */
        si = setTypeInitIterator(setobj);
        while (setTypeNext(si,NULL,&intele) != -1) {
            element = createStringObjectFromLongLong(intele);
            redisAssert(dictAdd(d,element,NULL) == DICT_OK);
        }
        setTypeReleaseIterator(si);

        setobj->encoding = REDIS_ENCODING_HT;
        zfree(setobj->ptr);
        setobj->ptr = d;
    } else {
        redisPanic("Unsupported set conversion");
    }
}
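The presize call in setTypeConvert is the general bulk-load pattern: a single dictExpand sized for everything about to be inserted means the inserts themselves never trigger incremental rehashing. A minimal sketch of the same pattern against the dict.h API, using a hypothetical pointer-keyed dictType (not part of any example above):

#include "dict.h"   /* Redis dict API used by the examples above */

/* Hypothetical dictType: keys hashed by pointer value; a NULL keyCompare
 * makes the dict fall back to pointer equality. */
static unsigned int ptrHash(const void *key) {
    return (unsigned int)(unsigned long)key;
}
static dictType ptrDictType = { ptrHash, NULL, NULL, NULL, NULL, NULL };

/* Presize-then-bulk-load: one dictExpand before the loop avoids
 * incremental rehashing while the n entries are added. */
dict *bulkLoad(void **keys, unsigned long n) {
    dict *d = dictCreate(&ptrDictType, NULL);
    dictExpand(d, n);
    for (unsigned long j = 0; j < n; j++)
        dictAdd(d, keys[j], NULL);
    return d;
}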
Example #5
/* Resize the table to the minimal size that contains all the elements,
 * but with the invariant of a USED/BUCKETS ratio near to <= 1 */
int dictResize(dict *d)
{
    int minimal;

    if (!dict_can_resize || dictIsRehashing(d)) return DICT_ERR;
    minimal = d->ht[0].used;
    if (minimal < DICT_HT_INITIAL_SIZE)
        minimal = DICT_HT_INITIAL_SIZE;
    return dictExpand(d, minimal);
}
Example #6
ccache *cacheCreate() {
    ccache *c = malloc(sizeof(*c));
    c->accesslist = listCreate();
    c->data = dictCreate(&ccacheType,NULL);
    /* Presize the cache dict so early inserts do not trigger rehashing */
    dictExpand(c->data,PRESERVED_CACHE_ENTRIES);
    c->outboxOld = safeQueueCreate();
    c->outboxNew = safeQueueCreate();
    c->inboxNew = safeQueueCreate();
    return c;
}
Example #7
/* Expand the hash table if needed */
static int _dictExpandIfNeeded(dict *d)
{
    /* Incremental rehashing already in progress. Return. */
    if (dictIsRehashing(d)) return DICT_OK;

    /* If the hash table is empty expand it to the initial size. */
    if (d->ht[0].size == 0) return dictExpand(d, DICT_HT_INITIAL_SIZE);

    /* If we reached the 1:1 ratio, and we are allowed to resize the hash
     * table (global setting) or we should avoid it but the ratio between
     * elements/buckets is over the "safe" threshold, we resize doubling
     * the number of buckets. */
    if (d->ht[0].used >= d->ht[0].size &&
        (dict_can_resize ||
         d->ht[0].used/d->ht[0].size > dict_force_resize_ratio))
    {
        return dictExpand(d, d->ht[0].used*2);
    }
    return DICT_OK;
}
Example #8
/* Resize the table to the minimal size that contains all the elements,
 * but with the invariant of a USED/BUCKETS ratio near to <= 1 */
int dictResize(dict *d)
{
    int minimal;

    // A dict cannot be resized while resizing is disabled or a rehash is in progress
    if (!dict_can_resize || dictIsRehashing(d)) return DICT_ERR;
    // After the resize the bucket count is at least the total number of entries in the
    // hash table, keeping the USED/BUCKETS ratio at or below 1 for close to O(1) lookups
    minimal = d->ht[0].used;
    if (minimal < DICT_HT_INITIAL_SIZE)
        minimal = DICT_HT_INITIAL_SIZE;
    return dictExpand(d, minimal);
}
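One detail worth noting: the "minimal" value handed to dictExpand is only a lower bound, since dictExpand rounds the request up to the next power of two (its _dictNextPower helper), so the final table is the smallest power of two that still holds every entry. A standalone sketch of that rounding, assuming the usual initial size of 4 and ignoring overflow:

#include <stdio.h>

#define DICT_HT_INITIAL_SIZE 4

/* Smallest power of two >= the requested size, starting from the
 * dict's initial size. */
static unsigned long next_power(unsigned long size)
{
    unsigned long i = DICT_HT_INITIAL_SIZE;
    while (i < size) i *= 2;
    return i;
}

int main(void) {
    printf("%lu\n", next_power(1));    /* 4   */
    printf("%lu\n", next_power(5));    /* 8   */
    printf("%lu\n", next_power(100));  /* 128 */
    return 0;
}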
Example #9
/*************************************************************
* Function    : ExpandIfNeeded
* Author      : bulldozer.ma
* Date        : 2015-11-01
* Input       : dict *pHeader
* Output      : N/A
* Return      : int
* Other       : N/A
* Description : ExpandIfNeeded
**************************************************************/
static int ExpandIfNeeded(dict *pHeader)
{
	if (NULL == pHeader || dictIsRehashing(pHeader))
	{
		return DICT_ERROR;
	}
	unsigned int uiUsed = 0, uiSize = 0;
	uiSize = pHeader->ht[0]->size;
	uiUsed = pHeader->ht[0]->used;
	/* Grow once the table holds more entries than buckets */
	if (uiSize > 0 && uiUsed > uiSize)
	{
		return dictExpand(pHeader, pHeader->ht[1]->size);
	}
	return DICT_ERROR;
}
Example #10
void cacheMasterInit() {
    pthread_attr_t attr;
    master_cache = dictCreate(&objSdsDictType,NULL);
    dictExpand(master_cache,PRESERVED_CACHE_ENTRIES);
    slave_caches = listCreate();
    master_total_mem = 0;
    /* Default Http  Not Found */
    HTTP_NOT_FOUND = objSdsFromSds(sdsnew("HTTP/1.1 404 OK\r\nContent-Length: 9\r\n\r\nNot Found"));
    objSdsAddRef(HTTP_NOT_FOUND);
    HTTP_NOT_FOUND->state = OBJSDS_OK;
    /* status */
    statusQuery = sdsnew("/status");
    objSds *status_value = objSdsCreate();
    status_value->ref = 2; /* ensure that '/status' entry will not be freed */
    next_master_refresh_time += time(NULL) + MASTER_STATUS_REFRESH_PERIOD;
    dictAdd(master_cache,statusQuery,status_value);
    status_value->ptr = _masterGetStatus();
    status_value->state = OBJSDS_OK;

    /* Initialize mutex and condition variable objects */
    /* For portability, explicitly create threads in a joinable state */
    pthread_attr_init(&attr);
    pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
    pthread_create(&master_thread, &attr, _masterWatch, NULL);
    bioInit();

    /* favicon.ico */
    faviconQuery = sdsnew("/favicon.ico");
    objSds *favicon_value = objSdsCreate();
    favicon_value->ref = 2; /* ensure that the entry will not be freed */
    favicon_value->state = OBJSDS_WAITING;
    dictAdd(master_cache,faviconQuery,favicon_value);
    sds staticFaviconQuery = sdsnew("/static/favicon.ico"); /* static file query */
    dictAdd(master_cache,staticFaviconQuery,favicon_value);
    bioPushGeneralJob(staticFaviconQuery);
}