Example 1
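This and the following examples appear to be C code from the Erlang/OTP runtime (ERTS). This one, apparently from the memory-instrumentation module erl_instrument.c, builds an Erlang term with allocation statistics for one of four views (total, allocators, classes, or types): it selects the stat array and atom names for the requested view, then produces a list of {Name, [{sizes,...},{blocks,...}]} pairs on the process heap.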
Eterm
erts_instr_get_stat(Process *proc, Eterm what, int begin_max_period)
{
    int i, len, max, min, allctr;
    Eterm *names, *values, res;
    Uint arr_size, stat_size, hsz, *hszp, *hp, **hpp;
    Stat_t *stat_src, *stat;

    if (!erts_instr_stat)
	return am_false;

    if (!atoms_initialized)
	init_atoms();

    if (what == am.total) {
	min		= 0;
	max		= 0;
	allctr		= 0;
	stat_size	= sizeof(Stat_t);
	stat_src	= &stats->tot;
	if (!am_tot)
	    init_am_tot();
	names		= am_tot;
    }
    else if (what == am.allocators) {
	min		= ERTS_ALC_A_MIN;
	max		= ERTS_ALC_A_MAX;
	allctr		= 1;
	stat_size	= sizeof(Stat_t)*(ERTS_ALC_A_MAX+1);
	stat_src	= stats->a;
	if (!am_a)
	    init_am_a();
	names		= am_a;
    }
    else if (what == am.classes) {
	min		= ERTS_ALC_C_MIN;
	max		= ERTS_ALC_C_MAX;
	allctr		= 0;
	stat_size	= sizeof(Stat_t)*(ERTS_ALC_C_MAX+1);
	stat_src	= stats->c;
	if (!am_c)
	    init_am_c();
	names		= &am_c[ERTS_ALC_C_MIN];
    }
    else if (what == am.types) {
	min		= ERTS_ALC_N_MIN;
	max		= ERTS_ALC_N_MAX;
	allctr		= 0;
	stat_size	= sizeof(Stat_t)*(ERTS_ALC_N_MAX+1);
	stat_src	= stats->n;
	if (!am_n)
	    init_am_n();
	names		= &am_n[ERTS_ALC_N_MIN];
    }
    else {
	return THE_NON_VALUE;
    }

    stat = (Stat_t *) erts_alloc(ERTS_ALC_T_TMP, stat_size);

    arr_size = (max - min + 1)*sizeof(Eterm);

    if (allctr)
	names = (Eterm *) erts_alloc(ERTS_ALC_T_TMP, arr_size);

    values = (Eterm *) erts_alloc(ERTS_ALC_T_TMP, arr_size);

    erts_mtx_lock(&instr_mutex);

    update_max_ever_values(stat_src, min, max);

    sys_memcpy((void *) stat, (void *) stat_src, stat_size);

    if (begin_max_period)
	begin_new_max_period(stat_src, min, max);

    erts_mtx_unlock(&instr_mutex);

    hsz = 0;
    hszp = &hsz;
    hpp = NULL;
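    /* Two passes over the same builder code: with hpp == NULL the bld_*
     * calls only accumulate the required heap size in hsz; after the
     * HAlloc below, the goto restarts the loop to actually build. */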

 restart_bld:

    len = 0;
    for (i = min; i <= max; i++) {
	if (!allctr || erts_allctrs_info[i].enabled) {
	    Eterm s[2];

	    if (allctr)
		names[len] = am_a[i];
	    
	    s[0] = bld_tuple(hpp, hszp, 4,
			     am.sizes,
			     bld_uint(hpp, hszp, stat[i].size),
			     bld_uint(hpp, hszp, stat[i].max_size),
			     bld_uint(hpp, hszp, stat[i].max_size_ever));

	    s[1] = bld_tuple(hpp, hszp, 4,
			     am.blocks,
			     bld_uint(hpp, hszp, stat[i].blocks),
			     bld_uint(hpp, hszp, stat[i].max_blocks),
			     bld_uint(hpp, hszp, stat[i].max_blocks_ever));

	    values[len] = bld_list(hpp, hszp, 2, s);
	    
	    len++;
	}
    }

    res = bld_2tup_list(hpp, hszp, len, names, values);

    if (!hpp) {
	hp = HAlloc(proc, hsz);
	hszp = NULL;
	hpp = &hp;
	goto restart_bld;
    }

    erts_free(ERTS_ALC_T_TMP, (void *) stat);
    erts_free(ERTS_ALC_T_TMP, (void *) values);
    if (allctr)
	erts_free(ERTS_ALC_T_TMP, (void *) names);

    return res;
}
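The hsz/hszp/hpp juggling and the restart_bld label implement a common two-pass term-building idiom: run the same builder code once with a NULL heap pointer to learn the required size, allocate, then run it again for real. A stand-alone sketch of the idiom follows (hypothetical bld_pair helper and Term type, not the ERTS API; error handling elided):

#include <stdint.h>
#include <stdlib.h>

typedef uintptr_t Term;

static Term bld_pair(Term **hpp, unsigned *szp, Term a, Term b)
{
    if (szp)
        *szp += 3;                       /* one header word + two elements */
    if (hpp) {
        Term *hp = *hpp;
        hp[0] = 2;                       /* arity word, standing in for a real header */
        hp[1] = a;
        hp[2] = b;
        *hpp = hp + 3;
        return (Term) hp;                /* a real VM would tag this pointer */
    }
    return 0;
}

Term build_pair_term(Term a, Term b)
{
    unsigned sz = 0;
    Term *hp;

    (void) bld_pair(NULL, &sz, a, b);    /* pass 1: count heap words only */
    hp = malloc(sz * sizeof(Term));      /* stands in for HAlloc */
    return bld_pair(&hp, NULL, a, b);    /* pass 2: build for real */
}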
Example 2
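The stream-based counterpart of example 1: it writes the same instrumentation statistics to a stdio stream as a series of Erlang terms, covering the totals plus one entry per enabled allocator and per class and type.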
static void
dump_stat_to_stream(FILE *fp, int begin_max_period)
{
    ErtsAlcType_t i, a_max, a_min;

    erts_mtx_lock(&instr_mutex);

    fprintf(fp,
	    "{instr_vsn,%lu}.\n",
	    (unsigned long) ERTS_INSTR_VSN);
    
    update_max_ever_values(&stats->tot, 0, 0);

    fprintf(fp,
	    "{total,[{total,[{sizes,%lu,%lu,%lu},{blocks,%lu,%lu,%lu}]}]}.\n",
	    (unsigned long) stats->tot.size,
	    (unsigned long) stats->tot.max_size,
	    (unsigned long) stats->tot.max_size_ever,
	    (unsigned long) stats->tot.blocks,
	    (unsigned long) stats->tot.max_blocks,
	    (unsigned long) stats->tot.max_blocks_ever);

    a_max = 0;
    a_min = ~0;
    for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
	if (erts_allctrs_info[i].enabled) {
	    if (a_min > i)
		a_min = i;
	    if (a_max < i)
		a_max = i;
	}
    }

    ASSERT(ERTS_ALC_A_MIN <= a_min && a_min <= ERTS_ALC_A_MAX);
    ASSERT(ERTS_ALC_A_MIN <= a_max && a_max <= ERTS_ALC_A_MAX);
    ASSERT(a_min <= a_max);

    update_max_ever_values(stats->a, a_min, a_max);

    for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
	if (erts_allctrs_info[i].enabled) {
	    fprintf(fp,
		    "%s{%s,[{sizes,%lu,%lu,%lu},{blocks,%lu,%lu,%lu}]}%s",
		    i == a_min ? "{allocators,\n [" : "  ",
		    ERTS_ALC_A2AD(i),
		    (unsigned long) stats->a[i].size,
		    (unsigned long) stats->a[i].max_size,
		    (unsigned long) stats->a[i].max_size_ever,
		    (unsigned long) stats->a[i].blocks,
		    (unsigned long) stats->a[i].max_blocks,
		    (unsigned long) stats->a[i].max_blocks_ever,
		    i == a_max ? "]}.\n" : ",\n");
	}
    }

    update_max_ever_values(stats->c, ERTS_ALC_C_MIN, ERTS_ALC_C_MAX);

    for (i = ERTS_ALC_C_MIN; i <= ERTS_ALC_C_MAX; i++) {
	fprintf(fp,
		"%s{%s,[{sizes,%lu,%lu,%lu},{blocks,%lu,%lu,%lu}]}%s",
		i == ERTS_ALC_C_MIN ? "{classes,\n [" : "  ",
		ERTS_ALC_C2CD(i),
		(unsigned long) stats->c[i].size,
		(unsigned long) stats->c[i].max_size,
		(unsigned long) stats->c[i].max_size_ever,
		(unsigned long) stats->c[i].blocks,
		(unsigned long) stats->c[i].max_blocks,
		(unsigned long) stats->c[i].max_blocks_ever,
		i == ERTS_ALC_C_MAX ? "]}.\n" :  ",\n" );
    }

    update_max_ever_values(stats->n, ERTS_ALC_N_MIN, ERTS_ALC_N_MAX);

    for (i = ERTS_ALC_N_MIN; i <= ERTS_ALC_N_MAX; i++) {
	fprintf(fp,
		"%s{%s,[{sizes,%lu,%lu,%lu},{blocks,%lu,%lu,%lu}]}%s",
		i == ERTS_ALC_N_MIN ? "{types,\n [" : "  ",
		ERTS_ALC_N2TD(i),
		(unsigned long) stats->n[i].size,
		(unsigned long) stats->n[i].max_size,
		(unsigned long) stats->n[i].max_size_ever,
		(unsigned long) stats->n[i].blocks,
		(unsigned long) stats->n[i].max_blocks,
		(unsigned long) stats->n[i].max_blocks_ever,
		i == ERTS_ALC_N_MAX ? "]}.\n" :  ",\n" );
    }

    if (begin_max_period) {
	begin_new_max_period(&stats->tot, 0, 0);
	begin_new_max_period(stats->a, a_min, a_max);
	begin_new_max_period(stats->c, ERTS_ALC_C_MIN, ERTS_ALC_C_MAX);
	begin_new_max_period(stats->n, ERTS_ALC_N_MIN, ERTS_ALC_N_MAX);
    }

    erts_mtx_unlock(&instr_mutex);

}
Example 3
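This dumps the memory map: an instr_hdr term describing each type's allocator and class, followed by one term per live allocation block. Note that taking the instrumentation lock is skipped while a crash dump is in progress.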
static void dump_memory_map_to_stream(FILE *fp)
{
    ErtsAlcType_t n;
    MapStatBlock_t *bp;
    int lock = !ERTS_IS_CRASH_DUMPING;
    if (lock) {
	ASSERT(!erts_is_allctr_wrapper_prelocked());
	erts_mtx_lock(&instr_mutex);
    }

    /* Write header */

    fprintf(fp,
	    "{instr_hdr,\n"
	    " %lu,\n"
	    " %lu,\n"
	    " {",
	    (unsigned long) ERTS_INSTR_VSN,
	    (unsigned long) MAP_STAT_BLOCK_HEADER_SIZE);

#if ERTS_ALC_N_MIN != 1
#error ERTS_ALC_N_MIN is not 1
#endif

    for (n = ERTS_ALC_N_MIN; n <= ERTS_ALC_N_MAX; n++) {
	ErtsAlcType_t t = ERTS_ALC_N2T(n);
	ErtsAlcType_t a = ERTS_ALC_T2A(t);
	ErtsAlcType_t c = ERTS_ALC_T2C(t);
	const char *astr;

	if (erts_allctrs_info[a].enabled)
	    astr = ERTS_ALC_A2AD(a);
	else
	    astr = ERTS_ALC_A2AD(ERTS_ALC_A_SYSTEM);

	fprintf(fp,
		"%s{%s,%s,%s}%s",
		(n == ERTS_ALC_N_MIN) ? "" : "  ",
		ERTS_ALC_N2TD(n),
		astr,
		ERTS_ALC_C2CD(c),
		(n == ERTS_ALC_N_MAX) ? "" : ",\n");
    }

    fprintf(fp, "}}.\n");

    /* Write memory data */
    for (bp = mem_anchor; bp; bp = bp->next) {
	if (is_internal_pid(bp->pid))
	    fprintf(fp,
		    "{%lu, %lu, %lu, {%lu,%lu,%lu}}.\n",
		    (unsigned long) bp->type_no,
		    (unsigned long) bp->mem,
		    (unsigned long) bp->size,
		    (unsigned long) pid_channel_no(bp->pid),
		    (unsigned long) pid_number(bp->pid),
		    (unsigned long) pid_serial(bp->pid));
	else
	    fprintf(fp,
		    "{%lu, %lu, %lu, undefined}.\n",
		    (unsigned long) bp->type_no,
		    (unsigned long) bp->mem,
		    (unsigned long) bp->size);
    }

    if (lock)
	erts_mtx_unlock(&instr_mutex);
}
Example 4
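The heap-based counterpart of example 3: it builds the memory map as an Erlang term on the process heap. Note how mem_anchor is detached before HAlloc, which may itself allocate instrumented blocks, and how any blocks allocated in the meantime are spliced back onto the list afterwards.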
Eterm erts_instr_get_memory_map(Process *proc)
{
    MapStatBlock_t *org_mem_anchor;
    Eterm hdr_tuple, md_list, res;
    Eterm *hp;
    Uint hsz;
    MapStatBlock_t *bp;
#ifdef DEBUG
    Eterm *end_hp;
#endif
    
    if (!erts_instr_memory_map)
	return am_false;

    if (!atoms_initialized)
	init_atoms();
    if (!am_n)
	init_am_n();
    if (!am_c)
	init_am_c();
    if (!am_a)
	init_am_a();

    erts_mtx_lock(&instr_x_mutex);
    erts_mtx_lock(&instr_mutex);

    /* Header size */
    hsz = 5 + 1 + (ERTS_ALC_N_MAX+1-ERTS_ALC_N_MIN)*(1 + 4);

    /* Memory data list */
    for (bp = mem_anchor; bp; bp = bp->next) {
	if (is_internal_pid(bp->pid)) {
#if (_PID_NUM_SIZE - 1 > MAX_SMALL)
	    if (internal_pid_number(bp->pid) > MAX_SMALL)
		hsz += BIG_UINT_HEAP_SIZE;
#endif
#if (_PID_SER_SIZE - 1 > MAX_SMALL)
	    if (internal_pid_serial(bp->pid) > MAX_SMALL)
		hsz += BIG_UINT_HEAP_SIZE;
#endif
	    hsz += 4;
	}

	if ((UWord) bp->mem > MAX_SMALL)
	    hsz += BIG_UINT_HEAP_SIZE;
	if (bp->size > MAX_SMALL)
	    hsz += BIG_UINT_HEAP_SIZE;

	hsz += 5 + 2;
    }

    hsz += 3; /* Root tuple */
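    /* Detach the block list: HAlloc below may itself allocate and
     * thereby add new blocks, which must not end up in the snapshot. */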

    org_mem_anchor = mem_anchor;
    mem_anchor = NULL;

    erts_mtx_unlock(&instr_mutex);

    hp = HAlloc(proc, hsz); /* May end up calling map_stat_alloc() */

    erts_mtx_lock(&instr_mutex);

#ifdef DEBUG
    end_hp = hp + hsz;
#endif

    {	/* Build header */
	ErtsAlcType_t n;
	Eterm type_map;
	Uint *hp2 = hp;
#ifdef DEBUG
	Uint *hp2_end;
#endif

	hp += (ERTS_ALC_N_MAX + 1 - ERTS_ALC_N_MIN)*4;

#ifdef DEBUG
	hp2_end = hp;
#endif

	type_map = make_tuple(hp);
	*(hp++) = make_arityval(ERTS_ALC_N_MAX + 1 - ERTS_ALC_N_MIN);

	for (n = ERTS_ALC_N_MIN; n <= ERTS_ALC_N_MAX; n++) {
	    ErtsAlcType_t t = ERTS_ALC_N2T(n);
	    ErtsAlcType_t a = ERTS_ALC_T2A(t);
	    ErtsAlcType_t c = ERTS_ALC_T2C(t);

	    if (!erts_allctrs_info[a].enabled)
		a = ERTS_ALC_A_SYSTEM;

	    *(hp++) = TUPLE3(hp2, am_n[n], am_a[a], am_c[c]);
	    hp2 += 4;
	}

	ASSERT(hp2 == hp2_end);

	hdr_tuple = TUPLE4(hp,
			   am.instr_hdr,
			   make_small(ERTS_INSTR_VSN),
			   make_small(MAP_STAT_BLOCK_HEADER_SIZE),
			   type_map);

	hp += 5;
    }

    /* Build memory data list */

    for (md_list = NIL, bp = org_mem_anchor; bp; bp = bp->next) {
	Eterm tuple;
	Eterm type;
	Eterm ptr;
	Eterm size;
	Eterm pid;

	if (is_not_internal_pid(bp->pid))
	    pid = am_undefined;
	else {
	    Eterm c;
	    Eterm n;
	    Eterm s;

#if (ERST_INTERNAL_CHANNEL_NO > MAX_SMALL)
#error Oversized internal channel number
#endif
	    c = make_small(ERST_INTERNAL_CHANNEL_NO);

#if (_PID_NUM_SIZE - 1 > MAX_SMALL)
	    if (internal_pid_number(bp->pid) > MAX_SMALL) {
		n = uint_to_big(internal_pid_number(bp->pid), hp);
		hp += BIG_UINT_HEAP_SIZE;
	    }
	    else
#endif
		n = make_small(internal_pid_number(bp->pid));

#if (_PID_SER_SIZE - 1 > MAX_SMALL)
	    if (internal_pid_serial(bp->pid) > MAX_SMALL) {
		s = uint_to_big(internal_pid_serial(bp->pid), hp);
		hp += BIG_UINT_HEAP_SIZE;
	    }
	    else
#endif
		s = make_small(internal_pid_serial(bp->pid));
	    pid = TUPLE3(hp, c, n, s);
	    hp += 4;
	}


#if ERTS_ALC_N_MAX > MAX_SMALL
#error Oversized memory type number
#endif
	type = make_small(bp->type_no);

	if ((UWord) bp->mem > MAX_SMALL) {
	    ptr = uint_to_big((UWord) bp->mem, hp);
	    hp += BIG_UINT_HEAP_SIZE;
	}
	else
	    ptr = make_small((UWord) bp->mem);

	if (bp->size > MAX_SMALL) {
	    size = uint_to_big(bp->size, hp);
	    hp += BIG_UINT_HEAP_SIZE;
	}
	else
	    size = make_small(bp->size);

	tuple = TUPLE4(hp, type, ptr, size, pid);
	hp += 5;

	md_list = CONS(hp, tuple, md_list);
	hp += 2;
    }

    res = TUPLE2(hp, hdr_tuple, md_list);
    
    ASSERT(hp + 3 == end_hp);
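    /* Splice back: blocks allocated while the list was detached (e.g.
     * by HAlloc above) stay in front of the saved list. */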

    if (mem_anchor) {
	for (bp = mem_anchor; bp->next; bp = bp->next)
	    ;
	ASSERT(org_mem_anchor);
	org_mem_anchor->prev = bp; 
	bp->next = org_mem_anchor;
    }
    else {
	mem_anchor = org_mem_anchor;
    }

    erts_mtx_unlock(&instr_mutex);
    erts_mtx_unlock(&instr_x_mutex);

    return res;
}
Example 5
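A pre-lock hook that fixes the acquisition order of the two instrumentation locks: instr_x_mutex is always taken before instr_mutex.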
static void map_stat_pre_lock(void)
{
    erts_mtx_lock(&instr_x_mutex);
    erts_mtx_lock(&instr_mutex);
}
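Together with the matching unlock sequence in map_stat_realloc below, which releases in the reverse order, this fixes a single global acquisition order for the two locks, the standard way to rule out deadlock between threads that need both. A minimal pthreads sketch of the discipline (hypothetical lock names):

#include <pthread.h>

static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER;  /* plays instr_x_mutex */
static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER;  /* plays instr_mutex */

static void pre_lock(void)                /* take in the global order... */
{
    pthread_mutex_lock(&outer);
    pthread_mutex_lock(&inner);
}

static void post_unlock(void)             /* ...release in reverse */
{
    pthread_mutex_unlock(&inner);
    pthread_mutex_unlock(&outer);
}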
Example 6
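A realloc wrapper for the memory-map instrumentation: every block carries a MapStatBlock_t header recording its size, type, and allocating pid, and the blocks are kept on a doubly linked list anchored at mem_anchor.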
static void *
map_stat_realloc(ErtsAlcType_t n, void *extra, void *ptr, Uint size)
{
    ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
    Uint old_size;
    Uint msize;
    void *mptr;
    void *res;

    if (!erts_is_allctr_wrapper_prelocked()) {
	erts_mtx_lock(&instr_x_mutex);
	erts_mtx_lock(&instr_mutex);
    }

    if (ptr) {
	mptr = (void *) (((char *) ptr) - MAP_STAT_BLOCK_HEADER_SIZE);
	old_size = ((MapStatBlock_t *) mptr)->size;
    }
    else {
	mptr = NULL;
	old_size = 0;
    }

    msize = size + MAP_STAT_BLOCK_HEADER_SIZE;
    res = (*real_af->realloc)(n, real_af->extra, mptr, msize);
    if (res) {
	MapStatBlock_t *mb = (MapStatBlock_t *) res;

	mb->size = size;
	mb->type_no = n;
	mb->pid = erts_get_current_pid();

	stat_upd_realloc(n, size, old_size);

	if (mptr != res) {
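	    /* The block moved: existing neighbours (or the list anchor)
	       must be re-pointed at the new address; a brand new block
	       is instead linked in at the head of the list. */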

	    if (mptr) {
		if (mb->prev)
		    mb->prev->next = mb;
		else {
		    ASSERT(mem_anchor == (MapStatBlock_t *) mptr);
		    mem_anchor = mb;
		}
		if (mb->next)
		    mb->next->prev = mb;
	    }
	    else {
		mb->prev = NULL;
		mb->next = mem_anchor;
		if (mem_anchor)
		    mem_anchor->prev = mb;
		mem_anchor = mb;
	    }

	}

	res = (void *) mb->mem;
    }
    if (!erts_is_allctr_wrapper_prelocked()) {
	erts_mtx_unlock(&instr_mutex);
	erts_mtx_unlock(&instr_x_mutex);
    }

    return res;
}
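The wrapper works by prepending a header of MAP_STAT_BLOCK_HEADER_SIZE bytes to every block handed out, so the user pointer can always be mapped back to its bookkeeping data. A reduced sketch of the header-prefix technique over plain realloc (hypothetical Hdr type; the list linking and locking from the real code are omitted):

#include <stdlib.h>

typedef struct {
    size_t size;         /* user-visible size, like mb->size above */
    /* a real header would be padded to max alignment and would also
       carry the type, pid, and prev/next fields */
} Hdr;

void *tr_realloc(void *ptr, size_t size)
{
    Hdr *h = ptr ? (Hdr *) ((char *) ptr - sizeof(Hdr)) : NULL;
    Hdr *res = realloc(h, size + sizeof(Hdr));
    if (!res)
        return NULL;     /* the old block, if any, is still valid */
    res->size = size;
    return (void *) (res + 1);
}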
Example 7
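The worker loop of an async pool thread: it dequeues jobs, treats a NIL port id as the shutdown signal, and otherwise runs the job and hands the result back to the port (directly on SMP builds, via async_ready_list and sys_async_ready otherwise).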
static void* async_main(void* arg)
{
    AsyncQueue* q = (AsyncQueue*) arg;

#ifdef ERTS_ENABLE_LOCK_CHECK
    {
        char buf[27];
        erts_snprintf(&buf[0], 27, "async %d", q->no);
        erts_lc_set_thread_name(&buf[0]);
    }
#endif

    while(1) {
        ErlAsync* a = async_get(q);

        if (a->port == NIL) { /* TIME TO DIE SIGNAL */
            erts_free(ERTS_ALC_T_ASYNC, (void *) a);
            break;
        }
        else {
            (*a->async_invoke)(a->async_data);
            /* Major problem if the code for async_invoke
               or async_free is removed during a blocking operation */
#ifdef ERTS_SMP
            {
                Port *p;
                p = erts_id2port_sflgs(a->port,
                                       NULL,
                                       0,
                                       ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP);
                if (!p) {
                    if (a->async_free)
                        (*a->async_free)(a->async_data);
                }
                else {
                    if (async_ready(p, a->async_data)) {
                        if (a->async_free)
                            (*a->async_free)(a->async_data);
                    }
                    async_detach(a->hndl);
                    erts_port_release(p);
                }
                if (a->pdl) {
                    driver_pdl_dec_refc(a->pdl);
                }
                erts_free(ERTS_ALC_T_ASYNC, (void *) a);
            }
#else
            if (a->pdl) {
                driver_pdl_dec_refc(a->pdl);
            }
            erts_mtx_lock(&async_ready_mtx);
            a->next = async_ready_list;
            async_ready_list = a;
            erts_mtx_unlock(&async_ready_mtx);
            sys_async_ready(q->hndl);
#endif
        }
    }

    return NULL;
}
Example 8
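The plain-statistics pre-lock hook; unlike example 5, only instr_mutex is needed.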
static void stat_pre_lock(void)
{
    erts_mtx_lock(&instr_mutex);
}
Example 9
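The memory-trace pre-lock hook, serializing trace operations on mtrace_op_mutex.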
static void mtrace_pre_lock(void)
{
    erts_mtx_lock(&mtrace_op_mutex);
}
Example 10
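Appends a trace record for a free operation to the mtrace buffer: a tag byte, then a packed two-byte header giving the width of each variable-sized field, then the fields themselves (optional carrier type, block type, pointer, and time increment).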
static ERTS_INLINE void
write_free_entry(byte tag,
		 ErtsAlcType_t x,
		 ErtsAlcType_t y,
		 void *ptr)
{
    erts_mtx_lock(&mtrace_buf_mutex);
    if (erts_mtrace_enabled) {
	Uint32 ti = get_time_inc();

	if (ti != INVALID_TIME_INC
	    && MAKE_TBUF_SZ(UI8_SZ + 2*UI16_SZ + UI_SZ + UI32_SZ)) {
	    Uint16 hdr, t_no = (Uint16) x, ct_no = (Uint16) y;
	    byte *hdrp;
	    int t_no_n, ct_no_n = 0, ptr_n, ti_n;

	    *(tracep++) = tag;

	    hdrp = tracep;
	    tracep += 2;

	    if (tag == ERTS_MT_CRR_FREE_BDY_TAG) {
		PUT_VSZ_UI16(tracep, ct_no_n, ct_no);
	    }
	    PUT_VSZ_UI16(tracep, t_no_n, t_no);
	    PUT_VSZ_UI(  tracep, ptr_n,  ptr);
	    PUT_VSZ_UI32(tracep, ti_n,   ti);

	    hdr = ti_n;

	    hdr <<= UI_MSB_EHF_SZ;
	    hdr |= ptr_n;

	    hdr <<= UI16_MSB_EHF_SZ;
	    hdr |= t_no_n;

	    if (tag == ERTS_MT_CRR_FREE_BDY_TAG) {
		hdr <<= UI16_MSB_EHF_SZ;
		hdr |= ct_no_n;
	    }

	    WRITE_UI16(hdrp, hdr);

#if TRACE_PRINTOUTS
	    print_trace_entry(tag,
			      ct_no, ct_no_n,
			      t_no, t_no_n,
			      (Uint) 0, 0,
			      (Uint) ptr, ptr_n,
			      0, 0,
			      ti, ti_n);
#endif

#ifdef DEBUG
	    check_free_entry(hdrp-1, tracep,
			     tag,
			     ct_no, ct_no_n,
			     t_no, t_no_n,
			     (UWord) ptr, ptr_n,
			     ti, ti_n);
#endif
	}

    }
    erts_mtx_unlock(&mtrace_buf_mutex);
}
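The PUT_VSZ_* macros apparently emit each field in as few bytes as possible and report a small size code, which the two-byte header packs together so the reader can decode the entry. A sketch of one way such a field writer could look (the byte order and size-code layout here are assumptions, not the actual mtrace format):

#include <stdint.h>

/* Writes v using as few bytes as possible (assumed little-endian here)
   and returns (bytes - 1), the kind of per-field size code that the
   header above packs together. */
static unsigned put_vsz(unsigned char **pp, uint64_t v)
{
    unsigned n = 0;
    do {
        *(*pp)++ = (unsigned char) (v & 0xff);
        v >>= 8;
        n++;
    } while (v);
    return n - 1;
}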
Example 11
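Creates a driver thread-specific-data key: the key's name is recorded in used_tsd_keys, the table grows by ERL_DRV_TSD_KEYS_INC entries when it fills up, and next_tsd_key is advanced to the next free slot.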
int
erl_drv_tsd_key_create(char *name, ErlDrvTSDKey *key)
{
    char *name_copy;
    Uint old_used_tsd_keys_len;
    ErlDrvTSDKey res;

    if (!key)
	fatal_error(EINVAL, "erl_drv_tsd_key_create()");

    if (!name)
	name_copy = no_name;
    else {
	name_copy = erts_alloc_fnf(ERTS_ALC_T_DRV_TSD,
				   sizeof(char)*(strlen(name) + 1));
	if (!name_copy) {
	    *key = -1;
	    return ENOMEM;
	}
	sys_strcpy(name_copy, name);
    }

    erts_mtx_lock(&tsd_mtx);

    *key = next_tsd_key;

    if (next_tsd_key < 0)
	res = ENOMEM;
    else {
	res = 0;

	ASSERT(!used_tsd_keys[next_tsd_key]);
	used_tsd_keys[next_tsd_key] = name_copy;

	if (max_used_tsd_key < next_tsd_key)
	    max_used_tsd_key = next_tsd_key;

	if (max_used_tsd_key + 1 >= used_tsd_keys_len) {
	    int i;
	    old_used_tsd_keys_len = used_tsd_keys_len;
	    if (used_tsd_keys_len + ERL_DRV_TSD_KEYS_INC >= INT_MAX) 
		next_tsd_key = -1;
	    else {
		char **new_used_tsd_keys;
		used_tsd_keys_len += ERL_DRV_TSD_KEYS_INC;
		new_used_tsd_keys = erts_realloc_fnf(ERTS_ALC_T_DRV_TSD,
						     used_tsd_keys,
						     (sizeof(char *)
						      * used_tsd_keys_len));
		if (!new_used_tsd_keys)
		    next_tsd_key = -1;
		else {
		    used_tsd_keys = new_used_tsd_keys;
		    for (i = old_used_tsd_keys_len; i < used_tsd_keys_len; i++)
			used_tsd_keys[i] = NULL;
		}
	    }
	}
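	/* Advance next_tsd_key to the next unused slot; the table was
	   (re)grown above, so the scan terminates on a NULL entry.
	   On growth failure next_tsd_key is -1 and the scan is skipped. */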

	if (next_tsd_key >= 0) {
	    do {
		next_tsd_key++;
	    } while (used_tsd_keys[next_tsd_key]);
	}
	ASSERT(next_tsd_key < used_tsd_keys_len);
    }

    erts_mtx_unlock(&tsd_mtx);

    return res;
}
Example 12
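Initialization of the async thread pool: the ErtsAsyncData block, the per-thread queues, and (with ERTS_USE_ASYNC_READY_Q) the per-scheduler ready queues are carved out of a single cache-aligned allocation, the worker threads are spawned, and the caller blocks until all of them have signalled initialization.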
void
erts_init_async(void)
{
    async = NULL;
    if (erts_async_max_threads > 0) {
#if ERTS_USE_ASYNC_READY_Q
	ErtsThrQInit_t qinit = ERTS_THR_Q_INIT_DEFAULT;
#endif
	erts_thr_opts_t thr_opts = ERTS_THR_OPTS_DEFAULT_INITER;
	char *ptr, thr_name[16];
	size_t tot_size = 0;
	int i;

	tot_size += ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsAsyncData));
	tot_size += sizeof(ErtsAlgndAsyncQ)*erts_async_max_threads;
#if ERTS_USE_ASYNC_READY_Q
	tot_size += sizeof(ErtsAlgndAsyncReadyQ)*erts_no_schedulers;
#endif

	ptr = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_ASYNC_DATA,
						 tot_size);

	async = (ErtsAsyncData *) ptr;
	ptr += ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsAsyncData));

	async->init.data.no_initialized = 0;
        erts_mtx_init(&async->init.data.mtx, "async_init_mtx", NIL,
            ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER);
	erts_cnd_init(&async->init.data.cnd);
	erts_atomic_init_nob(&async->init.data.id, 0);

	async->queue = (ErtsAlgndAsyncQ *) ptr;
	ptr += sizeof(ErtsAlgndAsyncQ)*erts_async_max_threads;

#if ERTS_USE_ASYNC_READY_Q

	qinit.live.queue = ERTS_THR_Q_LIVE_LONG;
	qinit.live.objects = ERTS_THR_Q_LIVE_SHORT;
	qinit.notify = erts_notify_check_async_ready_queue;

	async->ready_queue = (ErtsAlgndAsyncReadyQ *) ptr;
	ptr += sizeof(ErtsAlgndAsyncReadyQ)*erts_no_schedulers;

	for (i = 1; i <= erts_no_schedulers; i++) {
	    ErtsAsyncReadyQ *arq = async_ready_q(i);
#if ERTS_USE_ASYNC_READY_ENQ_MTX
            erts_mtx_init(&arq->x.data.enq_mtx, "async_enq_mtx", make_small(i),
                ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER);
#endif
	    erts_thr_q_finalize_dequeue_state_init(&arq->fin_deq);
	    qinit.arg = (void *) (SWord) i;
	    erts_thr_q_initialize(&arq->thr_q, &qinit);
	}

#endif

	/* Create async threads... */

	thr_opts.detached = 0;
	thr_opts.suggested_stack_size
	    = erts_async_thread_suggested_stack_size;

	thr_opts.name = thr_name;

	for (i = 0; i < erts_async_max_threads; i++) {
	    ErtsAsyncQ *aq = async_q(i);

            erts_snprintf(thr_opts.name, 16, "async_%d", i+1);

	    erts_thr_create(&aq->thr_id, async_main, (void*) aq, &thr_opts);
	}

	/* Wait for async threads to initialize... */

	erts_mtx_lock(&async->init.data.mtx);
	while (async->init.data.no_initialized != erts_async_max_threads)
	    erts_cnd_wait(&async->init.data.cnd, &async->init.data.mtx);
	erts_mtx_unlock(&async->init.data.mtx);

	erts_mtx_destroy(&async->init.data.mtx);
	erts_cnd_destroy(&async->init.data.cnd);

    }
}
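The tail of the function is a classic startup barrier: each worker increments no_initialized and signals the condition variable, and the initializer loops in erts_cnd_wait until every thread has checked in. The same pattern reduced to plain pthreads (hypothetical names):

#include <pthread.h>

static pthread_mutex_t init_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  init_cnd = PTHREAD_COND_INITIALIZER;
static int no_initialized;

void worker_checked_in(void)          /* called once by each worker */
{
    pthread_mutex_lock(&init_mtx);
    no_initialized++;
    pthread_cond_signal(&init_cnd);
    pthread_mutex_unlock(&init_mtx);
}

void wait_for_workers(int nthreads)   /* called by the initializer */
{
    pthread_mutex_lock(&init_mtx);
    while (no_initialized != nthreads)
        pthread_cond_wait(&init_cnd, &init_mtx);
    pthread_mutex_unlock(&init_mtx);
}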
Example 13
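Control entry point for microstate accounting; the leading comment explains the split between managed threads (reached via misc_aux jobs) and unmanaged threads (protected by msacc_mutex).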
/*
 * This function is responsible for enabling, disabling, resetting and
 * gathering data related to microstate accounting.
 *
 * Managed threads and unmanaged threads are handled differently.
 *   - managed threads get a misc_aux job telling them to switch on msacc
 *   - unmanaged threads have some fields protected by a mutex that has
 *     to be taken before any values can be updated
 *
 * For performance reasons there is also a global value erts_msacc_enabled
 * that controls the state of all threads. Statistics gathering is only on
 * if erts_msacc_enabled && msacc is true.
 */
Eterm
erts_msacc_request(Process *c_p, int action, Eterm *threads)
{
#ifdef ERTS_ENABLE_MSACC
    ErtsMsAcc *msacc =  ERTS_MSACC_TSD_GET();
    ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
    Eterm ref;
    ErtsMSAccReq *msaccrp;
    Eterm *hp;


#ifdef ERTS_MSACC_ALWAYS_ON
    if (action == ERTS_MSACC_ENABLE || action == ERTS_MSACC_DISABLE)
        return THE_NON_VALUE;
#else
    /* take care of double enable, and double disable here */
    if (msacc && action == ERTS_MSACC_ENABLE) {
        return THE_NON_VALUE;
    } else if (!msacc && action == ERTS_MSACC_DISABLE) {
        return THE_NON_VALUE;
    }
#endif

    ref = erts_make_ref(c_p);

    msaccrp = erts_alloc(ERTS_ALC_T_MSACC, sizeof(ErtsMSAccReq));
    hp = &msaccrp->ref_heap[0];

    msaccrp->action = action;
    msaccrp->proc = c_p;
    msaccrp->ref = STORE_NC(&hp, NULL, ref);
    msaccrp->req_sched = esdp->no;

#ifdef ERTS_SMP
    *threads = erts_no_schedulers;
    *threads += 1; /* aux thread */
#else
    *threads = 1;
#endif

    erts_smp_atomic32_init_nob(&msaccrp->refc,(erts_aint32_t)*threads);

    erts_proc_add_refc(c_p, *threads);

    if (erts_no_schedulers > 1)
	erts_schedule_multi_misc_aux_work(1,
                                          erts_no_schedulers,
                                          reply_msacc,
                                          (void *) msaccrp);
#ifdef ERTS_SMP
    /* aux thread */
    erts_schedule_misc_aux_work(0, reply_msacc, (void *) msaccrp);
#endif

#ifdef USE_THREADS
    /* Manage unmanaged threads */
    switch (action) {
    case ERTS_MSACC_GATHER: {
        Uint unmanaged_count;
        ErtsMsAcc *msacc, **unmanaged;
        int i = 0;

        /* we copy a list of pointers here so that we do not have to
           hold the msacc_mutex while sending messages */
        erts_rwmtx_rlock(&msacc_mutex);
        unmanaged_count = msacc_unmanaged_count;
        unmanaged = erts_alloc(ERTS_ALC_T_MSACC,
                               sizeof(ErtsMsAcc*)*unmanaged_count);

        for (i = 0, msacc = msacc_unmanaged;
             i < unmanaged_count;
             i++, msacc = msacc->next) {
            unmanaged[i] = msacc;
        }
        erts_rwmtx_runlock(&msacc_mutex);

        for (i = 0; i < unmanaged_count; i++) {
            erts_mtx_lock(&unmanaged[i]->mtx);
            if (unmanaged[i]->perf_counter) {
                ErtsSysPerfCounter perf_counter;
                /* if enabled update stats */
                perf_counter = erts_sys_perf_counter();
                unmanaged[i]->perf_counters[unmanaged[i]->state] +=
                    perf_counter - unmanaged[i]->perf_counter;
                unmanaged[i]->perf_counter = perf_counter;
            }
            erts_mtx_unlock(&unmanaged[i]->mtx);
            send_reply(unmanaged[i],msaccrp);
        }
        erts_free(ERTS_ALC_T_MSACC,unmanaged);
        /* We have just sent unmanaged_count messages, so bump no of threads */
        *threads += unmanaged_count;
        break;
    }
    case ERTS_MSACC_RESET: {
        ErtsMsAcc *msacc;
        erts_rwmtx_rlock(&msacc_mutex);
        for (msacc = msacc_unmanaged; msacc != NULL; msacc = msacc->next)
            erts_msacc_reset(msacc);
        erts_rwmtx_runlock(&msacc_mutex);
        break;
    }
    case ERTS_MSACC_ENABLE: {
        erts_rwmtx_rlock(&msacc_mutex);
        for (msacc = msacc_unmanaged; msacc != NULL; msacc = msacc->next) {
            erts_mtx_lock(&msacc->mtx);
            msacc->perf_counter = erts_sys_perf_counter();
            /* we assume the unmanaged thread is sleeping */
            msacc->state = ERTS_MSACC_STATE_SLEEP;
            erts_mtx_unlock(&msacc->mtx);
        }
        erts_rwmtx_runlock(&msacc_mutex);
        break;
    }
    case ERTS_MSACC_DISABLE: {
        ErtsSysPerfCounter perf_counter;
        erts_rwmtx_rlock(&msacc_mutex);
        /* make sure to update stats with latest results */
        for (msacc = msacc_unmanaged; msacc != NULL; msacc = msacc->next) {
            erts_mtx_lock(&msacc->mtx);
            perf_counter = erts_sys_perf_counter();
            msacc->perf_counters[msacc->state] += perf_counter - msacc->perf_counter;
            msacc->perf_counter = 0;
            erts_mtx_unlock(&msacc->mtx);
        }
        erts_rwmtx_runlock(&msacc_mutex);
        break;
    }
    default: { ASSERT(0); }
    }

#endif

    *threads = make_small(*threads);

    reply_msacc((void *) msaccrp);

#ifndef ERTS_MSACC_ALWAYS_ON
    /* enable/disable the global value */
    if (action == ERTS_MSACC_ENABLE) {
        erts_msacc_enabled = 1;
    } else if (action == ERTS_MSACC_DISABLE) {
        erts_msacc_enabled = 0;
    }
#endif

    return ref;
#else
    return THE_NON_VALUE;
#endif
}