Example #1
0
/* CALLED WITH slabs_lock HELD */
/* Pull a free chunk for the caller while avoiding the slab page that is
 * currently being reassigned.  Chunks that land inside the doomed page are
 * marked as already-freed (they are off the freelist at this point) and the
 * search continues; bounded by one full page's worth of attempts. */
static void *slab_rebalance_alloc(const size_t size, unsigned int id) {
    slabclass_t *s_cls = &slabclass[slab_rebal.s_clsid];
    item *candidate = NULL;
    int attempt;

    for (attempt = 0; attempt < s_cls->perslab; attempt++) {
        candidate = do_slabs_alloc(size, id, NULL, SLABS_ALLOC_NO_NEWPAGE);
        /* Out of free chunks entirely: give up and return NULL. */
        if (candidate == NULL) {
            break;
        }
        /* Chunk lies outside the page being cleared: safe to hand out. */
        if ((void *)candidate < slab_rebal.slab_start
            || (void *)candidate >= slab_rebal.slab_end) {
            break;
        }
        /* Pulled something we intend to free. Mark it as freed since
         * we've already done the work of unlinking it from the freelist. */
        s_cls->requested -= size;
        candidate->refcount = 0;
        candidate->it_flags = ITEM_SLABBED|ITEM_FETCHED;
        candidate = NULL;
        slab_rebal.inline_reclaim++;
    }
    return candidate;
}
Example #2
0
/*
 * Allocate `size` bytes from the slab allocator, reserving room for a
 * slabheader_t in front of the user data; returns a pointer just past
 * that header, or NULL on allocation failure.
 *
 * Bug fix: the original unconditionally offset the result, so a failed
 * do_slabs_alloc() (NULL) became NULL + sizeof(slabheader_t) — undefined
 * behavior (C11 6.5.6) and a non-NULL "success" value that defeats the
 * caller's NULL check.  Propagate NULL instead.
 */
void *slabs_alloc(slabs_t* pst, size_t size) {
    void *ret;

    size += sizeof(slabheader_t);
    /* NOTE(review): assumes slabs_clsid() yields an id that
     * do_slabs_alloc() rejects for oversized requests — confirm. */
    unsigned int id = slabs_clsid(pst, size);
    ret = do_slabs_alloc(pst, size, id);
    if (ret == NULL) {
        return NULL;
    }
    return (void*)((char*)ret + sizeof(slabheader_t));
}
Example #3
0
File: slabs.c Project: 4e/memcached
/* Thread-safe front end for do_slabs_alloc(): all slab allocations are
 * serialized through the global slabs_lock. */
void *slabs_alloc(size_t size, unsigned int id) {
    void *chunk;

    pthread_mutex_lock(&slabs_lock);
    chunk = do_slabs_alloc(size, id);
    pthread_mutex_unlock(&slabs_lock);

    return chunk;
}
Example #4
0
/* Locked allocation wrapper: takes the engine's slab mutex around the
 * unsynchronized do_slabs_alloc() worker. */
void *slabs_alloc(struct default_engine *engine, size_t size, unsigned int id) {
    void *chunk;

    cb_mutex_enter(&engine->slabs.lock);
    chunk = do_slabs_alloc(engine, size, id);
    cb_mutex_exit(&engine->slabs.lock);

    return chunk;
}
Example #5
0
// Wrapper function: serializes slab allocation under slabs_lock.
void *slabs_alloc(size_t size, unsigned int id) {
    void *chunk;

    // Item-level code uses cache_lock; slab internals use slabs_lock.
    pthread_mutex_lock(&slabs_lock);
    chunk = do_slabs_alloc(size, id);
    pthread_mutex_unlock(&slabs_lock);

    return chunk;
}
Example #6
0
// Thread-safe allocation entry point: guards do_slabs_alloc() with the
// instance's slabs_lock.
void* Slab::slabs_alloc(size_t size, unsigned int id) {
    slabs_lock.lock();
    void* chunk = do_slabs_alloc(size, id);
    slabs_lock.unlock();
    return chunk;
}
Example #7
0
/* Public slab allocation: holds the per-engine slabs mutex across the
 * call into the unsynchronized worker. */
void *slabs_alloc(struct default_engine *engine, size_t size, unsigned int id) {
    void *chunk;

    pthread_mutex_lock(&engine->slabs.lock);
    chunk = do_slabs_alloc(engine, size, id);
    pthread_mutex_unlock(&engine->slabs.lock);

    return chunk;
}
Example #8
0
/* Locked wrapper for do_slabs_alloc(); total_chunks is passed through so
 * the worker can report the class's chunk count to the caller. */
void *slabs_alloc(size_t size, unsigned int id, unsigned int *total_chunks) {
    void *chunk;

    pthread_mutex_lock(&slabs_lock);
    chunk = do_slabs_alloc(size, id, total_chunks);
    pthread_mutex_unlock(&slabs_lock);

    return chunk;
}
Example #9
0
/* Locked wrapper for do_slabs_alloc(); total_bytes and flags are passed
 * straight through to the worker unchanged. */
void *slabs_alloc(size_t size, unsigned int id, uint64_t *total_bytes,
        unsigned int flags) {
    void *chunk;

    pthread_mutex_lock(&slabs_lock);
    chunk = do_slabs_alloc(size, id, total_bytes, flags);
    pthread_mutex_unlock(&slabs_lock);

    return chunk;
}
Example #10
0
/* Locked allocation wrapper that rejects slab-class ids outside the
 * engine's valid range [POWER_SMALLEST, power_largest] by returning NULL. */
void *slabs_alloc(struct default_engine *engine, size_t size, unsigned int id)
{
    void *chunk = NULL;

    if (id >= POWER_SMALLEST && id <= engine->slabs.power_largest) {
        pthread_mutex_lock(&engine->slabs.lock);
        chunk = do_slabs_alloc(engine, size, id);
        pthread_mutex_unlock(&engine->slabs.lock);
    }
    return chunk;
}
Example #11
0
/* Grab one small-memory block from the slab allocator, update the free
 * space accounting, and link it into the used-block list.  Logs and
 * returns NULL when no chunk is available. */
static sm_blck_t *do_smmgr_blck_alloc(struct default_engine *engine)
{
    sm_blck_t *blck = (sm_blck_t *)do_slabs_alloc(engine, sm_anchor.blck_tsize, sm_anchor.blck_clsid);
    if (blck == NULL) {
        logger->log(EXTENSION_LOG_INFO, NULL, "no more small memory chunk\n");
        return NULL;
    }
    /* Only charge the free-chunk accounting while a limit is in force. */
    if (sm_anchor.free_limit_space > 0) {
        sm_anchor.free_chunk_space -= sm_anchor.blck_tsize;
    }
    do_smmgr_used_blck_link(blck);
    return blck;
}