Example #1
static bool do_slabs_adjust_mem_limit(size_t new_mem_limit) {
    /* Cannot adjust memory limit at runtime if prealloc'ed */
    if (mem_base != NULL)
        return false;
    settings.maxbytes = new_mem_limit;
    mem_limit = new_mem_limit;
    mem_limit_reached = false; /* Will reset on next alloc */
    memory_release(); /* free what might already be in the global pool */
    return true;
}
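In memcached this helper only runs with the slab state locked; a minimal sketch of the locking wrapper it is normally called through, assuming the slabs_lock mutex that guards slabclass data in this file:

/* Hedged sketch: locked wrapper around do_slabs_adjust_mem_limit(). */
bool slabs_adjust_mem_limit(size_t new_mem_limit) {
    bool ret;
    /* Serialize against allocation and rebalancing before touching limits */
    pthread_mutex_lock(&slabs_lock);
    ret = do_slabs_adjust_mem_limit(new_mem_limit);
    pthread_mutex_unlock(&slabs_lock);
    return ret;
}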
Example #2
void memory_finalize()
{
    if (!use_allocator) return;

    if (memory_table != NULL)
    {
        for (int memid = 0; memid < memory_table_size; memid++)
            memory_release(memid);
        FREE_ALIGN(memory_table);
        memory_table = NULL;
    }
    use_allocator = 0;
}
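memory_finalize() releases every slot still registered in memory_table and then frees the table itself, so it is the last allocator call made at shutdown. A minimal teardown sketch, assuming memid came from a successful memory_register() call (Example #4) and that memory_release() tolerates slots that are no longer in use, as the loop above implies:

/* Hedged usage sketch: release one region explicitly, then tear down the
 * whole allocator. */
void allocator_shutdown(int memid) {
    if (memid >= 0)
        memory_release(memid); /* unmap and close this region early */
    memory_finalize();         /* release remaining regions and the table */
}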
Example #3
static void slab_rebalance_finish(void) {
    slabclass_t *s_cls;
    slabclass_t *d_cls;
    int x;
    uint32_t rescues;
    uint32_t evictions_nomem;
    uint32_t inline_reclaim;

    pthread_mutex_lock(&slabs_lock);

    s_cls = &slabclass[slab_rebal.s_clsid];
    d_cls = &slabclass[slab_rebal.d_clsid];

#ifdef DEBUG_SLAB_MOVER
    /* If the algorithm is broken, live items can sneak in. */
    slab_rebal.slab_pos = slab_rebal.slab_start;
    while (1) {
        item *it = slab_rebal.slab_pos;
        assert(it->it_flags == (ITEM_SLABBED|ITEM_FETCHED));
        assert(memcmp(ITEM_key(it), "deadbeef", 8) == 0);
        it->it_flags = ITEM_SLABBED|ITEM_FETCHED;
        slab_rebal.slab_pos = (char *)slab_rebal.slab_pos + s_cls->size;
        if (slab_rebal.slab_pos >= slab_rebal.slab_end)
            break;
    }
#endif

    /* At this point the stolen slab is completely clear.
     * We always kill the "first"/"oldest" slab page in the slab_list, so
     * shuffle the page list backwards and decrement.
     */
    s_cls->slabs--;
    for (x = 0; x < s_cls->slabs; x++) {
        s_cls->slab_list[x] = s_cls->slab_list[x+1];
    }

    d_cls->slab_list[d_cls->slabs++] = slab_rebal.slab_start;
    /* Don't need to split the page into chunks if we're just storing it */
    if (slab_rebal.d_clsid > SLAB_GLOBAL_PAGE_POOL) {
        memset(slab_rebal.slab_start, 0, (size_t)settings.item_size_max);
        split_slab_page_into_freelist(slab_rebal.slab_start,
            slab_rebal.d_clsid);
    } else if (slab_rebal.d_clsid == SLAB_GLOBAL_PAGE_POOL) {
        /* mem_malloc'ed might be higher than mem_limit. */
        memory_release();
    }

    slab_rebal.done       = 0;
    slab_rebal.s_clsid    = 0;
    slab_rebal.d_clsid    = 0;
    slab_rebal.slab_start = NULL;
    slab_rebal.slab_end   = NULL;
    slab_rebal.slab_pos   = NULL;
    evictions_nomem = slab_rebal.evictions_nomem;
    inline_reclaim  = slab_rebal.inline_reclaim;
    rescues         = slab_rebal.rescues;
    slab_rebal.evictions_nomem = 0;
    slab_rebal.inline_reclaim  = 0;
    slab_rebal.rescues         = 0;

    slab_rebalance_signal = 0;

    pthread_mutex_unlock(&slabs_lock);

    STATS_LOCK();
    stats.slabs_moved++;
    stats.slab_reassign_rescues += rescues;
    stats.slab_reassign_evictions_nomem += evictions_nomem;
    stats.slab_reassign_inline_reclaim += inline_reclaim;
    stats_state.slab_reassign_running = false;
    STATS_UNLOCK();

    if (settings.verbose > 1) {
        fprintf(stderr, "finished a slab move\n");
    }
}
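The page-list shuffle in the middle of this function always discards slab_list[0], the oldest page of the source class, by shifting every later pointer down one slot before handing the page to the destination class. A standalone illustration of that step, with hypothetical names, to make the invariant explicit:

/* Hedged illustration of the shuffle: drop the oldest page (index 0) and
 * compact the array. Function and parameter names are hypothetical. */
static void drop_oldest_page(void **slab_list, unsigned int *slabs) {
    unsigned int x;
    (*slabs)--;                          /* one fewer page in this class */
    for (x = 0; x < *slabs; x++)
        slab_list[x] = slab_list[x + 1]; /* shift remaining pages down */
}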
Example #4
int memory_register(void* baseaddr, size_t mem_size, const char* uuid, int do_create)
{
    if (!use_allocator) return 0;

    int memid;
    size_t offset = 0;
    size_t max_quant_params_count = 1;

    for (memid = 0; memid < memory_table_size; memid++)
        if (!memory_table[memid].in_use) break;

    if (memid == memory_table_size)
        memory_table_grow(EP_MEMORY_TABLE_INC);

    memory_t* memptr = &memory_table[memid];
    memptr->in_use = 1;
    memptr->do_create = do_create;
    memptr->shm_base = NULL;
    memptr->client_shm_base = baseaddr;
    memptr->shm_mspace = NULL;
    memptr->shm_id = -1;
    memptr->mem_size = mem_size;

    if (baseaddr == NULL)
        offset = (((sizeof(intptr_t*) * (max_ep + 1)) / PAGE_SIZE) + 1) * PAGE_SIZE;

    if (do_create)
    {
        /* Client */
        /* Create a shared memory segment */
        snprintf(memptr->shm_filename, SHM_FILENAME_LEN, SHM_FILENAME_PREFIX"%s%d", uuid, memid);
        if ((memptr->shm_id = shm_open(memptr->shm_filename, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IROTH)) < 0)
        {
            PRINT("CLIENT: shm_open failed (%s)\n", strerror(errno));
            memory_release(memid);
            return -1;
        }

        /* Adjust the size of the shared memory segment */
        if (ftruncate(memptr->shm_id, mem_size) < 0)
        {
            PRINT("CLIENT: ftruncate failed (%s)\n", strerror(errno));
            memory_release(memid);
            return -1;
        }

        /* Create mmap region */
        if ((memptr->shm_base = (void*)mmap(baseaddr, mem_size, (PROT_WRITE|PROT_READ),
                                            (baseaddr) ? (MAP_FIXED|MAP_SHARED) : MAP_SHARED,
                                            memptr->shm_id, 0)) == MAP_FAILED)
        {
            PRINT("CLIENT: mmap failed (%s)\n", strerror(errno));
            memory_release(memid);
            return -1;
        }
    }
    else
    {
        /* Server */
        /* Open shared memory region */
        snprintf(memptr->shm_filename, SHM_FILENAME_LEN, SHM_FILENAME_PREFIX"%s%d", uuid, memid);
        if ((memptr->shm_id = shm_open(memptr->shm_filename, O_RDWR, S_IRWXU|S_IRWXG|S_IROTH)) < 0)
        {
            DEBUG_PRINT("SERVER: shm_open failed (%s)\n", strerror(errno));
            memory_release(memid);
            return -1;
        }

        /* Create mmap region */
        if ((memptr->shm_base = (void*)mmap(NULL, mem_size, PROT_WRITE|PROT_READ, MAP_SHARED, memptr->shm_id, 0)) == MAP_FAILED)
        {
            DEBUG_PRINT("SERVER: mmap failed (%s)\n", strerror(errno));
            memory_release(memid);
            return -1;
        }
    }

    ASSERT(memptr->shm_base != NULL);

    if (do_create)
    {
        memptr->client_shm_base = memptr->shm_base;
        if (baseaddr == NULL)
        {
            /* Track client shmem address and server cqueue address */
            for (int i = 0; i < max_ep + 1 + max_quant_params_count; i++)
            {
                void* cqueue_base = (intptr_t*)memptr->shm_base + i;
                if (i == 0)
                    /* Index 0: client shared memory region */
                    *(intptr_t*)cqueue_base = (intptr_t)memptr->shm_base;
                else
                    /* Indexes 1 onward (server cqueue slots and any extra
                     * reserved entries): not assigned yet */
                    *(intptr_t*)cqueue_base = (intptr_t)-1;
            }
            /* Create dlmalloc mspace only for allocated regions */
            intptr_t mspace_start = (intptr_t)memptr->shm_base + (intptr_t)offset;
            memptr->shm_mspace = create_mspace_with_base((void*)mspace_start, mem_size-offset, 1);
            if (memptr->shm_mspace == NULL)
            {
                memory_release(memid);
                return -1;
            }
        }
        DEBUG_PRINT("CLIENT: %p %ld %ld %p %ld %p\n",
                    memptr->shm_base, *(intptr_t*)memptr->shm_base,
                    offset, memptr->shm_base + (intptr_t)offset,
                    mem_size, memptr->shm_mspace);
    }
    else
        DEBUG_PRINT("SERVER: %p %ld %ld %p %ld\n",
                    memptr->shm_base, *(intptr_t*)memptr->shm_base,
                    offset, memptr->shm_base + (intptr_t)offset,
                    mem_size);

    return memid;
}
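memory_register() is symmetric: the creating side passes do_create=1 to shm_open/ftruncate/mmap a fresh segment (and, when baseaddr is NULL, to carve a dlmalloc mspace out of it), while the attaching side passes do_create=0 to open and map the segment that already exists under the same name. A minimal pairing sketch; SHM_SIZE and the uuid string are hypothetical application values, and it assumes use_allocator is enabled and both processes end up with the same memid so the generated shm filename matches:

/* Hedged usage sketch: one process creates the segment, the other attaches. */
#define SHM_SIZE (64UL * 1024 * 1024)

int client_setup(void) {
    /* baseaddr == NULL: let mmap choose the address and build an mspace */
    return memory_register(NULL, SHM_SIZE, "my-uuid", 1 /* do_create */);
}

int server_attach(void) {
    /* Same uuid, do_create == 0: open and map the existing segment */
    return memory_register(NULL, SHM_SIZE, "my-uuid", 0 /* do_create */);
}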