erts_sspa_data_t *
erts_sspa_create(size_t blk_sz, int pa_size, int nthreads, const char* name)
{
    erts_sspa_data_t *data;
    size_t tot_size;
    size_t chunk_mem_size;
    char *p;
    char *chunk_start;
    int cix;
    int no_blocks = pa_size;
    int no_blocks_per_chunk;
    size_t aligned_blk_sz;

#if !defined(ERTS_STRUCTURE_ALIGNED_ALLOC)
    /* Force 64-bit alignment... */
    aligned_blk_sz = ((blk_sz - 1) / 8) * 8 + 8;
#else
    /* Alignment of structure is enough... */
    aligned_blk_sz = blk_sz;
#endif

    if (!name) { /* schedulers only variant */
        ASSERT(!nthreads);
        nthreads = erts_no_schedulers;
    }
    else {
        ASSERT(nthreads > 0);
    }

    /* Size each chunk: with a single thread the chunk holds all requested
     * blocks; with several threads each chunk gets its share of the
     * requested blocks plus roughly a quarter of the total as slack. */
    if (nthreads == 1)
	no_blocks_per_chunk = no_blocks;
    else {
	int extra = (no_blocks - 1)/4 + 1;
	if (extra == 0)
	    extra = 1;
	no_blocks_per_chunk = no_blocks;
	no_blocks_per_chunk += extra * nthreads;
	no_blocks_per_chunk /= nthreads;
    }
    no_blocks = no_blocks_per_chunk * nthreads;
    chunk_mem_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_sspa_chunk_header_t));
    chunk_mem_size += aligned_blk_sz * no_blocks_per_chunk;
    chunk_mem_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(chunk_mem_size);
    tot_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_sspa_data_t));
    tot_size += chunk_mem_size * nthreads;

    p = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_PRE_ALLOC_DATA, tot_size);
    data = (erts_sspa_data_t *) p;
    p += ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_sspa_data_t));
    chunk_start = p;

    data->chunks_mem_size = chunk_mem_size;
    data->start = chunk_start;
    data->end = chunk_start + chunk_mem_size * nthreads;
    data->nthreads = nthreads;

    if (name) { /* thread variant */
        erts_tsd_key_create(&data->tsd_key, (char*)name);
        erts_atomic_init_nob(&data->id_generator, 0);
    }

    /* Initialize all chunks */
    for (cix = 0; cix < nthreads; cix++) {
	erts_sspa_chunk_t *chnk = erts_sspa_cix2chunk(data, cix);
	erts_sspa_chunk_header_t *chdr = &chnk->aligned.header;
	erts_sspa_blk_t *blk;
	int i;

	erts_atomic_init_nob(&chdr->tail.data.last, (erts_aint_t) &chdr->tail.data.marker);
	erts_atomic_init_nob(&chdr->tail.data.marker.next_atmc, ERTS_AINT_NULL);
	erts_atomic_init_nob(&chdr->tail.data.um_refc[0], 0);
	erts_atomic_init_nob(&chdr->tail.data.um_refc[1], 0);
	erts_atomic32_init_nob(&chdr->tail.data.um_refc_ix, 0);

	chdr->head.no_thr_progress_check = 0;
	chdr->head.used_marker = 1;
	chdr->head.first = &chdr->tail.data.marker;
	chdr->head.unref_end = &chdr->tail.data.marker;
	chdr->head.next.thr_progress = erts_thr_progress_current();
	chdr->head.next.thr_progress_reached = 1;
	chdr->head.next.um_refc_ix = 1;
	chdr->head.next.unref_end = &chdr->tail.data.marker;

	p = &chnk->data[0];
	chdr->local.first = (erts_sspa_blk_t *) p;
	blk = (erts_sspa_blk_t *) p;
	/* Link the chunk's blocks into a singly linked free list; the
	 * last block's link is terminated just below. */
	for (i = 0; i < no_blocks_per_chunk; i++) {
	    blk = (erts_sspa_blk_t *) p;
	    p += aligned_blk_sz;
	    blk->next_ptr = (erts_sspa_blk_t *) p;
	}

	blk->next_ptr = NULL;
	chdr->local.last = blk;
	chdr->local.cnt = no_blocks_per_chunk;
	chdr->local.lim = no_blocks_per_chunk / 3;

	ERTS_SSPA_DBG_CHK_LCL(chdr);
    }

    return data;
}
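
A minimal, hypothetical usage sketch (not part of the original file): it assumes a caller-defined block type whose storage can double as the free-list link, mirroring how the pre-alloc wrappers in OTP use this function. The names my_blk_t, my_pool, and my_pool_init are invented for illustration.

/* Hypothetical illustration only.  Free blocks are threaded through the
 * start of each block, so the block type must be at least as large as
 * erts_sspa_blk_t; a union makes that explicit. */
typedef union {
    erts_sspa_blk_t sspa_blk;        /* free-list link while the block is free */
    struct { int payload[4]; } live; /* data stored while the block is in use */
} my_blk_t;

static erts_sspa_data_t *my_pool;

static void
my_pool_init(void)
{
    /* Schedulers-only variant: name == NULL and nthreads == 0, so one
     * chunk is created per scheduler (erts_no_schedulers). */
    my_pool = erts_sspa_create(sizeof(my_blk_t), 512, 0, NULL);
}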
Example #2
erts_sspa_data_t *
erts_sspa_create(size_t blk_sz, int pa_size)
{
    erts_sspa_data_t *data;
    size_t tot_size;
    size_t chunk_mem_size;
    char *p;
    char *chunk_start;
    int cix;
    int no_blocks = pa_size;
    int no_blocks_per_chunk;

    if (erts_no_schedulers == 1)
	no_blocks_per_chunk = no_blocks;
    else {
	int extra = (no_blocks - 1)/4 + 1;
	if (extra == 0)
	    extra = 1;
	no_blocks_per_chunk = no_blocks;
	no_blocks_per_chunk += extra*erts_no_schedulers;
	no_blocks_per_chunk /= erts_no_schedulers;
    }
    no_blocks = no_blocks_per_chunk * erts_no_schedulers;
    chunk_mem_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_sspa_chunk_header_t));
    chunk_mem_size += blk_sz * no_blocks_per_chunk;
    chunk_mem_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(chunk_mem_size);
    tot_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_sspa_data_t));
    tot_size += chunk_mem_size*erts_no_schedulers;

    p = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_PRE_ALLOC_DATA, tot_size);
    data = (erts_sspa_data_t *) p;
    p += ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_sspa_data_t));
    chunk_start = p;

    data->chunks_mem_size = chunk_mem_size;
    data->start = chunk_start;
    data->end = chunk_start + chunk_mem_size*erts_no_schedulers;

    /* Initialize all chunks */
    for (cix = 0; cix < erts_no_schedulers; cix++) {
	erts_sspa_chunk_t *chnk = erts_sspa_cix2chunk(data, cix);
	erts_sspa_chunk_header_t *chdr = &chnk->aligned.header;
	erts_sspa_blk_t *blk;
	int i;

	erts_atomic_init_nob(&chdr->tail.data.last, (erts_aint_t) &chdr->tail.data.marker);
	erts_atomic_init_nob(&chdr->tail.data.marker.next_atmc, ERTS_AINT_NULL);
	erts_atomic_init_nob(&chdr->tail.data.um_refc[0], 0);
	erts_atomic_init_nob(&chdr->tail.data.um_refc[1], 0);
	erts_atomic32_init_nob(&chdr->tail.data.um_refc_ix, 0);

	chdr->head.no_thr_progress_check = 0;
	chdr->head.used_marker = 1;
	chdr->head.first = &chdr->tail.data.marker;
	chdr->head.unref_end = &chdr->tail.data.marker;
	chdr->head.next.thr_progress = erts_thr_progress_current();
	chdr->head.next.thr_progress_reached = 1;
	chdr->head.next.um_refc_ix = 1;
	chdr->head.next.unref_end = &chdr->tail.data.marker;

	p = &chnk->data[0];
	chdr->local.first = (erts_sspa_blk_t *) p;
	blk = (erts_sspa_blk_t *) p;
	for (i = 0; i < no_blocks_per_chunk; i++) {
	    blk = (erts_sspa_blk_t *) p;
	    p += blk_sz;
	    blk->next_ptr = (erts_sspa_blk_t *) p;
	}

	blk->next_ptr = NULL;
	chdr->local.last = blk;
	chdr->local.cnt = no_blocks_per_chunk;
	chdr->local.lim = no_blocks_per_chunk / 3;

	ERTS_SSPA_DBG_CHK_LCL(chdr);
    }

    return data;
}
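
This two-argument variant takes only the block size and the requested number of pre-allocated blocks; the pool is always split into one chunk per scheduler (erts_no_schedulers), and no name or thread-specific-data key is involved. A hypothetical call, reusing the invented my_blk_t from the sketch above:

    my_pool = erts_sspa_create(sizeof(my_blk_t), 512);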