/*
 * Executed once by every scheduler (as a misc aux work job, or directly
 * by the requesting scheduler) and by the aux thread. Applies the
 * requested action to this thread's msacc data, sends a reply to the
 * requesting process, and frees the request when the last thread has
 * decremented the reference count.
 */
static void
reply_msacc(void *vmsaccrp)
{
    ErtsMsAcc *msacc = ERTS_MSACC_TSD_GET();
    ErtsMSAccReq *msaccrp = (ErtsMSAccReq *) vmsaccrp;

    ASSERT(!msacc || !msacc->unmanaged);

    if (msaccrp->action == ERTS_MSACC_ENABLE && !msacc) {
        msacc = get_msacc();
        msacc->perf_counter = erts_sys_perf_counter();
        msacc->state = ERTS_MSACC_STATE_OTHER;
        ERTS_MSACC_TSD_SET(msacc);
    } else if (msaccrp->action == ERTS_MSACC_DISABLE && msacc) {
        ERTS_MSACC_TSD_SET(NULL);
    } else if (msaccrp->action == ERTS_MSACC_RESET) {
        msacc = msacc ? msacc : get_msacc();
        erts_msacc_reset(msacc);
    } else if (msaccrp->action == ERTS_MSACC_GATHER && !msacc) {
        msacc = get_msacc();
    }

    ASSERT(!msacc || !msacc->unmanaged);

    send_reply(msacc, msaccrp);

    erts_proc_dec_refc(msaccrp->proc);

    if (erts_smp_atomic32_dec_read_nob(&msaccrp->refc) == 0)
        erts_free(ERTS_ALC_T_MSACC, vmsaccrp);
}
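/*
 * A minimal standalone sketch (not part of this file's build) of the
 * last-decrementer-frees pattern used by reply_msacc() above: a request
 * shared by N threads is freed by whichever thread performs the final
 * refc decrement. All names here (request_t, worker, N_THREADS) are
 * illustrative, not erts APIs; C11 atomics stand in for
 * erts_smp_atomic32_dec_read_nob().
 */
#if 0
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define N_THREADS 4

typedef struct {
    atomic_int refc;   /* initialized to the number of sharing threads */
    int payload;
} request_t;

static void *worker(void *arg)
{
    request_t *req = arg;
    printf("worker sees payload %d\n", req->payload);
    /* fetch_sub returns the old value; 1 means we were the last user */
    if (atomic_fetch_sub(&req->refc, 1) == 1)
        free(req);
    return NULL;
}

int main(void)
{
    pthread_t tids[N_THREADS];
    request_t *req = malloc(sizeof(*req));
    atomic_init(&req->refc, N_THREADS);
    req->payload = 42;
    for (int i = 0; i < N_THREADS; i++)
        pthread_create(&tids[i], NULL, worker, req);
    for (int i = 0; i < N_THREADS; i++)
        pthread_join(tids[i], NULL);
    return 0;
}
#endif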
/*
 * Called when a thread starts: allocates this thread's msacc block and
 * links it into the global managed or unmanaged list under msacc_mutex.
 */
void
erts_msacc_init_thread(char *type, int id, int managed)
{
    ErtsMsAcc *msacc;

    msacc = erts_alloc(ERTS_ALC_T_MSACC, sizeof(ErtsMsAcc) +
                       sizeof(ErtsMsAccPerfCntr) * ERTS_MSACC_STATE_COUNT);

    msacc->type = strdup(type);
    msacc->id = make_small(id);
    msacc->unmanaged = !managed;
    msacc->tid = erts_thr_self();
    msacc->perf_counter = 0;

#ifdef USE_THREADS
    erts_rwmtx_rwlock(&msacc_mutex);
    if (!managed) {
        erts_mtx_init(&msacc->mtx, "msacc_unmanaged_mutex");
        msacc->next = msacc_unmanaged;
        msacc_unmanaged = msacc;
        msacc_unmanaged_count++;
        ERTS_MSACC_TSD_SET(msacc);
    } else {
        msacc->next = msacc_managed;
        msacc_managed = msacc;
    }
    erts_rwmtx_rwunlock(&msacc_mutex);
#else
    msacc_managed = msacc;
#endif

    erts_msacc_reset(msacc);

#ifdef ERTS_MSACC_ALWAYS_ON
    ERTS_MSACC_TSD_SET(msacc);
    msacc->perf_counter = erts_sys_perf_counter();
    msacc->state = ERTS_MSACC_STATE_OTHER;
#endif
}
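/*
 * A standalone sketch (not compiled here) of the registration pattern in
 * erts_msacc_init_thread(): each new thread pushes its own node onto an
 * intrusive singly linked list under a global write lock. node_t,
 * registry and register_thread are illustrative names only; pthreads
 * stand in for the erts lock primitives.
 */
#if 0
#include <pthread.h>
#include <stdlib.h>

/* mirrors the msacc->next intrusive chaining */
typedef struct node { struct node *next; int id; } node_t;

static node_t *registry = NULL;
static pthread_rwlock_t registry_lock = PTHREAD_RWLOCK_INITIALIZER;

static node_t *register_thread(int id)
{
    node_t *n = malloc(sizeof(*n));
    n->id = id;
    pthread_rwlock_wrlock(&registry_lock);
    n->next = registry;       /* push onto the shared list */
    registry = n;
    pthread_rwlock_unlock(&registry_lock);
    return n;
}
#endif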
/*
 * This function is responsible for enabling, disabling, resetting and
 * gathering data related to microstate accounting.
 *
 * Managed threads and unmanaged threads are handled differently.
 *   - managed threads get a misc_aux job telling them to switch on msacc
 *   - unmanaged threads have some fields protected by a mutex that has
 *     to be taken before any values can be updated
 *
 * For performance reasons there is also a global value erts_msacc_enabled
 * that controls the state of all threads. Statistics gathering is only on
 * if erts_msacc_enabled && msacc is true.
 */
Eterm
erts_msacc_request(Process *c_p, int action, Eterm *threads)
{
#ifdef ERTS_ENABLE_MSACC
    ErtsMsAcc *msacc = ERTS_MSACC_TSD_GET();
    ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
    Eterm ref;
    ErtsMSAccReq *msaccrp;
    Eterm *hp;

#ifdef ERTS_MSACC_ALWAYS_ON
    if (action == ERTS_MSACC_ENABLE || action == ERTS_MSACC_DISABLE)
        return THE_NON_VALUE;
#else
    /* take care of double enable and double disable here */
    if (msacc && action == ERTS_MSACC_ENABLE) {
        return THE_NON_VALUE;
    } else if (!msacc && action == ERTS_MSACC_DISABLE) {
        return THE_NON_VALUE;
    }
#endif

    ref = erts_make_ref(c_p);

    msaccrp = erts_alloc(ERTS_ALC_T_MSACC, sizeof(ErtsMSAccReq));
    hp = &msaccrp->ref_heap[0];

    msaccrp->action = action;
    msaccrp->proc = c_p;
    msaccrp->ref = STORE_NC(&hp, NULL, ref);
    msaccrp->req_sched = esdp->no;

#ifdef ERTS_SMP
    *threads = erts_no_schedulers;
    *threads += 1; /* aux thread */
#else
    *threads = 1;
#endif

    erts_smp_atomic32_init_nob(&msaccrp->refc, (erts_aint32_t) *threads);

    erts_proc_add_refc(c_p, *threads);

    if (erts_no_schedulers > 1)
        erts_schedule_multi_misc_aux_work(1,
                                          erts_no_schedulers,
                                          reply_msacc,
                                          (void *) msaccrp);
#ifdef ERTS_SMP
    /* aux thread */
    erts_schedule_misc_aux_work(0, reply_msacc, (void *) msaccrp);
#endif

#ifdef USE_THREADS
    /* Manage unmanaged threads */
    switch (action) {
    case ERTS_MSACC_GATHER: {
        Uint unmanaged_count;
        ErtsMsAcc *msacc, **unmanaged;
        int i = 0;

        /* we copy a list of pointers here so that we do not have to
           hold the msacc_mutex when sending messages */
        erts_rwmtx_rlock(&msacc_mutex);
        unmanaged_count = msacc_unmanaged_count;
        unmanaged = erts_alloc(ERTS_ALC_T_MSACC,
                               sizeof(ErtsMsAcc *) * unmanaged_count);

        for (i = 0, msacc = msacc_unmanaged;
             i < unmanaged_count;
             i++, msacc = msacc->next) {
            unmanaged[i] = msacc;
        }
        erts_rwmtx_runlock(&msacc_mutex);

        for (i = 0; i < unmanaged_count; i++) {
            erts_mtx_lock(&unmanaged[i]->mtx);
            if (unmanaged[i]->perf_counter) {
                ErtsSysPerfCounter perf_counter;
                /* if enabled update stats */
                perf_counter = erts_sys_perf_counter();
                unmanaged[i]->perf_counters[unmanaged[i]->state] +=
                    perf_counter - unmanaged[i]->perf_counter;
                unmanaged[i]->perf_counter = perf_counter;
            }
            erts_mtx_unlock(&unmanaged[i]->mtx);
            send_reply(unmanaged[i], msaccrp);
        }
        erts_free(ERTS_ALC_T_MSACC, unmanaged);

        /* We have just sent unmanaged_count messages, so bump the
           number of threads */
        *threads += unmanaged_count;
        break;
    }
    case ERTS_MSACC_RESET: {
        ErtsMsAcc *msacc;
        erts_rwmtx_rlock(&msacc_mutex);
        for (msacc = msacc_unmanaged; msacc != NULL; msacc = msacc->next)
            erts_msacc_reset(msacc);
        erts_rwmtx_runlock(&msacc_mutex);
        break;
    }
    case ERTS_MSACC_ENABLE: {
        erts_rwmtx_rlock(&msacc_mutex);
        for (msacc = msacc_unmanaged; msacc != NULL; msacc = msacc->next) {
            erts_mtx_lock(&msacc->mtx);
            msacc->perf_counter = erts_sys_perf_counter();
            /* we assume the unmanaged thread is sleeping */
            msacc->state = ERTS_MSACC_STATE_SLEEP;
            erts_mtx_unlock(&msacc->mtx);
        }
        erts_rwmtx_runlock(&msacc_mutex);
        break;
    }
    case ERTS_MSACC_DISABLE: {
        ErtsSysPerfCounter perf_counter;
        erts_rwmtx_rlock(&msacc_mutex);
        /* make sure to update stats with latest results */
        for (msacc = msacc_unmanaged; msacc != NULL; msacc = msacc->next) {
            erts_mtx_lock(&msacc->mtx);
            perf_counter = erts_sys_perf_counter();
            msacc->perf_counters[msacc->state] +=
                perf_counter - msacc->perf_counter;
            msacc->perf_counter = 0;
            erts_mtx_unlock(&msacc->mtx);
        }
        erts_rwmtx_runlock(&msacc_mutex);
        break;
    }
    default: {
        ASSERT(0);
    }
    }
#endif

    *threads = make_small(*threads);

    reply_msacc((void *) msaccrp);

#ifndef ERTS_MSACC_ALWAYS_ON
    /* enable/disable the global value */
    if (action == ERTS_MSACC_ENABLE) {
        erts_msacc_enabled = 1;
    } else if (action == ERTS_MSACC_DISABLE) {
        erts_msacc_enabled = 0;
    }
#endif

    return ref;
#else
    return THE_NON_VALUE;
#endif
}
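/*
 * A standalone sketch (not compiled here) of the locking strategy the
 * ERTS_MSACC_GATHER case uses above: snapshot the list pointers while
 * holding only the read lock, release it, then work on each element
 * under its own mutex, so nothing is sent while the global lock is
 * held. item_t, gather() and send() are illustrative names; pthreads
 * stand in for erts_rwmtx/erts_mtx.
 */
#if 0
#include <pthread.h>
#include <stdlib.h>

typedef struct item {
    struct item *next;
    pthread_mutex_t mtx;
    long counter;
} item_t;

static item_t *item_list = NULL;   /* protected by list_lock */
static pthread_rwlock_t list_lock = PTHREAD_RWLOCK_INITIALIZER;
static size_t item_count = 0;      /* protected by list_lock */

static void gather(void (*send)(item_t *))
{
    size_t i, n;
    item_t *it, **snapshot;

    /* step 1: copy the pointers under the read lock only */
    pthread_rwlock_rdlock(&list_lock);
    n = item_count;
    snapshot = malloc(sizeof(item_t *) * n);
    for (i = 0, it = item_list; i < n; i++, it = it->next)
        snapshot[i] = it;
    pthread_rwlock_unlock(&list_lock);

    /* step 2: per-item work under each item's own mutex; the global
       lock is no longer held, so send() may block safely */
    for (i = 0; i < n; i++) {
        pthread_mutex_lock(&snapshot[i]->mtx);
        snapshot[i]->counter++;    /* stand-in for updating perf counters */
        pthread_mutex_unlock(&snapshot[i]->mtx);
        send(snapshot[i]);
    }
    free(snapshot);
}
#endif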