Example #1
void obj_cache_destroy(struct obj_cache *cache) 
{
    int stat;
    void *data;
    struct slab_meta *slab_to_free;

    if (!cache) {
        return;
    }

    if (cache->slabs) {
        /* cache->slabs points at the first slab's metadata; unmap every
         * slab after it first, then the head slab below */
        slab_to_free = cache->slabs->next;

        while (slab_to_free) {
            /* the slab metadata sits at the end of the slab, so step
             * back to the slab's base address before unmapping */
            data = (void *)((uintptr_t)slab_to_free -
                                       (cache->slab_size -
                                        sizeof(struct slab_meta)));
            ASSERT_ALIGNMENT(cache->slab_size, data);
            slab_to_free = slab_to_free->next;
            stat = munmap(data, cache->slab_size);
            assert(stat == 0);
        }

        /* finally unmap the head slab that cache->slabs points into */
        data = (void *)((uintptr_t)cache->slabs - (cache->slab_size -
                                                   sizeof(struct slab_meta)));
        ASSERT_ALIGNMENT(cache->slab_size, data);

        stat = munmap(data, cache->slab_size);
        assert(stat == 0);
    }

    /* the cache descriptor itself is a separate mapping */
    stat = munmap(cache, sizeof(struct obj_cache));
    assert(stat == 0);
}
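
Every example on this page relies on an ASSERT_ALIGNMENT macro whose definition is not shown. Below is a minimal sketch of what it could expand to for the allocator examples (Examples #1-#3, #5 and #6, which pass the alignment first and the pointer second); this is an assumption, not the project's actual macro, and Example #4 comes from a different codebase (cryptonite) that passes the pointer first, so its definition necessarily differs.

#include <assert.h>
#include <stdint.h>

/* Hypothetical sketch: assert that `ptr` is a multiple of `alignment`.
 * The projects' real macros may also compile away in release builds. */
#define ASSERT_ALIGNMENT(alignment, ptr) \
    assert(((uintptr_t)(ptr) % (uintptr_t)(alignment)) == 0)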
Example #2
inline static struct slab_meta *find_slab_meta(void *slab, size_t slab_size)
{
    /* The slab meta-data is at the end of the slab memory */
    struct slab_meta *meta = (void *)((uintptr_t)slab + slab_size - 
                                      sizeof(struct slab_meta));
    ASSERT_ALIGNMENT(MALLOC_ALIGN, meta);
    return meta;
}
Example #3
static void obj_cache_init_freelist(struct obj_cache *cache, void *slab) 
{
    unsigned int i;
    struct list *freelist = slab;

    /* thread a singly linked freelist through the slab itself: each free
     * object's first bytes store a pointer to the next free object */
    for (i = 0; i < cache->objects_per_slab - 1; i++) {
        freelist->next = (void *)((uintptr_t)freelist + 
                                            cache->object_size);
        ASSERT_ALIGNMENT(cache->alignment, freelist->next);
        freelist = freelist->next;
    }
    
    freelist->next = NULL;
    /* the cache's freelist starts at the second object; the first one is
     * presumably handed straight back to the caller by obj_cache_add_slab() */
    cache->freelist = ((struct list *)slab)->next;
}
Example #4
void cryptonite_skein512_update(struct skein512_ctx *ctx, const uint8_t *data, uint32_t len)
{
	uint32_t to_fill;

	if (!len)
		return;

	to_fill = 64 - ctx->bufindex;

	if (ctx->bufindex == 64) {
		skein512_do_chunk(ctx, (uint64_t *) ctx->buf, 64);
		ctx->bufindex = 0;
	}

	/* process the partial buffer if there is enough data to complete a
	 * block and more data will definitely follow it */
	if (ctx->bufindex && len > to_fill) {
		memcpy(ctx->buf + ctx->bufindex, data, to_fill);
		skein512_do_chunk(ctx, (uint64_t *) ctx->buf, 64);
		len -= to_fill;
		data += to_fill;
		ctx->bufindex = 0;
	}

	if (need_alignment(data, 8)) {
		/* data is not 8-byte aligned: bounce each block through an
		 * aligned trampoline buffer before hashing */
		uint64_t tramp[8];
		ASSERT_ALIGNMENT(tramp, 8);
		for (; len > 64; len -= 64, data += 64) {
			memcpy(tramp, data, 64);
			skein512_do_chunk(ctx, tramp, 64);
		}
	} else {
		/* process as many 64-byte blocks as possible, but keep the last
		 * one buffered in case we finalize */
		for (; len > 64; len -= 64, data += 64)
			skein512_do_chunk(ctx, (uint64_t *) data, 64);
	}

	/* append data into buf */
	if (len) {
		memcpy(ctx->buf + ctx->bufindex, data, len);
		ctx->bufindex += len;
	}
}
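
Example #4 also calls a need_alignment() helper that is not shown here. A plausible self-contained sketch, assuming it merely reports whether the pointer is already a multiple of the requested alignment, is below; the real cryptonite helper may be written differently (e.g. as a macro).

#include <stdint.h>

/* Hypothetical sketch: non-zero when `p` is NOT aligned to `n` bytes,
 * which is what sends Example #4 through the trampoline buffer. */
static inline int need_alignment(const void *p, uintptr_t n)
{
    return ((uintptr_t)p % n) != 0;
}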
Example #5
void *obj_cache_alloc(struct obj_cache *cache)
{
    void *ret = NULL;

    if (!cache) {
        return ret;
    }

    if (cache->freelist) {
        /* pop the first free object off the freelist */
        ret = cache->freelist;
        cache->freelist = cache->freelist->next;
    } else {
        /* no free objects left: grow the cache by one more slab */
        ret = obj_cache_add_slab(cache);
    }

    ASSERT_ALIGNMENT(cache->alignment, ret);
    
    if (ret) {
        obj_cache_increment_slab_refcount(cache, ret);
    }

    return ret;
} 
Example #6
inline static void *find_slab_head(size_t slab_alignment, void *obj)
{
    /* slabs are allocated on slab_alignment boundaries, so masking off the
     * low bits of any object address yields the slab's base address */
    void *slab = (void *)((uintptr_t)obj & ~(slab_alignment - 1));
    ASSERT_ALIGNMENT(slab_alignment, slab);
    return slab;
}
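
The helpers in Examples #2 and #6 only work because each slab is both slab_size bytes long and slab_size-aligned. The following stand-alone snippet (an illustration written for this page, not project code; the struct slab_meta stand-in and the use of aligned_alloc are assumptions) checks that arithmetic on a concrete 4 KiB slab.

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

/* stand-in for the project's metadata type, used only for its size */
struct slab_meta { struct slab_meta *next; unsigned int refcount; };

int main(void)
{
    const size_t slab_size = 4096;

    /* aligned_alloc stands in for the project's aligned slab mapping */
    void *slab = aligned_alloc(slab_size, slab_size);
    assert(slab != NULL);

    /* Example #2: the metadata sits in the last bytes of the slab */
    struct slab_meta *meta =
        (void *)((uintptr_t)slab + slab_size - sizeof(struct slab_meta));

    /* Example #6: any object address inside the slab masks back to the head */
    void *obj = (void *)((uintptr_t)slab + 128);
    void *head = (void *)((uintptr_t)obj & ~(uintptr_t)(slab_size - 1));

    assert(head == slab);
    assert((uintptr_t)meta == (uintptr_t)head + slab_size - sizeof(*meta));

    free(slab);
    return 0;
}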