static erts_lc_locked_locks_t * create_locked_locks(char *thread_name) { erts_lc_locked_locks_t *l_lcks = malloc(sizeof(erts_lc_locked_locks_t)); if (!l_lcks) ERTS_INTERNAL_ERROR("Lock checker failed to allocate memory!"); l_lcks->thread_name = strdup(thread_name ? thread_name : "unknown"); if (!l_lcks->thread_name) ERTS_INTERNAL_ERROR("Lock checker failed to allocate memory!"); l_lcks->emu_thread = 0; l_lcks->tid = erts_thr_self(); l_lcks->required.first = NULL; l_lcks->required.last = NULL; l_lcks->locked.first = NULL; l_lcks->locked.last = NULL; l_lcks->prev = NULL; lc_lock(); l_lcks->next = erts_locked_locks; if (erts_locked_locks) erts_locked_locks->prev = l_lcks; erts_locked_locks = l_lcks; lc_unlock(); erts_tsd_set(locks_key, (void *) l_lcks); return l_lcks; }
/*
 * Allocate and initialize the lock checker's per-thread record and
 * link it in at the head of the global erts_locked_locks list
 * (guarded by lc_lock()/lc_unlock()).  The record is also stored in
 * thread-specific data under locks_key so later lookups are cheap.
 * Allocation failure is fatal: lc_abort() is called.
 */
static erts_lc_locked_locks_t *
create_locked_locks(char *thread_name)
{
    erts_lc_locked_locks_t *l_lcks = malloc(sizeof(erts_lc_locked_locks_t));
    if (!l_lcks)
        lc_abort();
    /* Fall back to "unknown" when no name was supplied. */
    l_lcks->thread_name = strdup(thread_name ? thread_name : "unknown");
    if (!l_lcks->thread_name)
        lc_abort();
    l_lcks->emu_thread = 0;
    l_lcks->tid = erts_thr_self();
    l_lcks->required.first = NULL;
    l_lcks->required.last = NULL;
    l_lcks->locked.first = NULL;
    l_lcks->locked.last = NULL;
    /* Head insertion into the doubly linked global list. */
    l_lcks->prev = NULL;
    lc_lock();
    l_lcks->next = erts_locked_locks;
    if (erts_locked_locks)
        erts_locked_locks->prev = l_lcks;
    erts_locked_locks = l_lcks;
    lc_unlock();
    erts_tsd_set(locks_key, (void *) l_lcks);
    return l_lcks;
}
/*
 * Try to seize exclusive permission to modify code.
 *
 * Returns non-zero on success.  Caller _must_ yield if we return 0:
 * the process has then been enqueued on code_write_queue and
 * suspended, and will be resumed by
 * erts_release_code_write_permission() when the permission is freed.
 */
int
erts_try_seize_code_write_permission(Process* c_p)
{
    int success;
#ifdef ERTS_SMP
    ASSERT(!erts_smp_thr_progress_is_blocking()); /* to avoid deadlock */
#endif
    ASSERT(c_p != NULL);
    erts_smp_mtx_lock(&code_write_permission_mtx);
    /* Permission is free iff no process currently holds it. */
    success = (code_writing_process == NULL);
    if (success) {
        code_writing_process = c_p;
#ifdef ERTS_ENABLE_LOCK_CHECK
        erts_tsd_set(has_code_write_permission, (void *) 1);
#endif
    }
    else { /* Already locked */
        struct code_write_queue_item* qitem;
        ASSERT(code_writing_process != c_p);
        /* Push onto the wait queue (holding a reference so the process
         * cannot be deallocated while queued) and suspend. */
        qitem = erts_alloc(ERTS_ALC_T_CODE_IX_LOCK_Q, sizeof(*qitem));
        qitem->p = c_p;
        erts_proc_inc_refc(c_p);
        qitem->next = code_write_queue;
        code_write_queue = qitem;
        erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
    }
    erts_smp_mtx_unlock(&code_write_permission_mtx);
    return success;
}
static lc_thread_t * create_thread_data(char *thread_name) { lc_thread_t *thr = malloc(sizeof(lc_thread_t)); if (!thr) ERTS_INTERNAL_ERROR("Lock checker failed to allocate memory!"); thr->thread_name = strdup(thread_name ? thread_name : "unknown"); if (!thr->thread_name) ERTS_INTERNAL_ERROR("Lock checker failed to allocate memory!"); thr->emu_thread = 0; thr->tid = erts_thr_self(); thr->required.first = NULL; thr->required.last = NULL; thr->locked.first = NULL; thr->locked.last = NULL; thr->prev = NULL; thr->free_blocks = NULL; thr->chunks = NULL; sys_memzero(&thr->matrix, sizeof(thr->matrix)); lc_lock_threads(); thr->next = lc_threads; if (lc_threads) lc_threads->prev = thr; lc_threads = thr; lc_unlock_threads(); erts_tsd_set(locks_key, (void *) thr); return thr; }
/*
 * Release a thread-progress data record, but only if it was handed out
 * as a temporary one (see init_tmp_thr_prgr_data): the thread's TSD
 * slot is cleared and the record freed.  Permanent records are left
 * untouched.
 */
static ERTS_INLINE void
return_tmp_thr_prgr_data(ErtsThrPrgrData *tpd)
{
    if (!tpd->is_temporary)
        return;
    erts_tsd_set(erts_thr_prgr_data_key__, NULL);
    erts_free(ERTS_ALC_T_T_THR_PRGR_DATA, tpd);
}
/*
 * Initialize *tpd as a temporary, unmanaged, non-blocking
 * thread-progress record (id -1 marks it as unregistered) and install
 * it as the calling thread's thread-specific data.
 */
static void
init_tmp_thr_prgr_data(ErtsThrPrgrData *tpd)
{
    tpd->id = -1;
    tpd->is_managed = 0;
    tpd->is_blocking = 0;
    tpd->is_temporary = 1;
#ifdef ERTS_ENABLE_LOCK_CHECK
    tpd->is_delaying = 0;
#endif
    erts_tsd_set(erts_thr_prgr_data_key__, (void *) tpd);
}
/*
 * Register the calling thread as an unmanaged thread with the
 * thread-progress subsystem.  An existing temporary record (if any)
 * has its blocking state carried over and is then released; a
 * permanent record means the thread registered twice, which is fatal.
 */
void
erts_thr_progress_register_unmanaged_thread(ErtsThrPrgrCallbacks *callbacks)
{
    ErtsThrPrgrData *tpd = perhaps_thr_prgr_data(NULL);
    int is_blocking = 0;
    if (tpd) {
        if (!tpd->is_temporary)
            erl_exit(ERTS_ABORT_EXIT,
                     "%s:%d:%s(): Double register of thread\n",
                     __FILE__, __LINE__, __func__);
        is_blocking = tpd->is_blocking;
        return_tmp_thr_prgr_data(tpd);
    }
    /*
     * We only allocate the part up to the leader field
     * which is the first field only used by managed threads
     */
    tpd = erts_alloc(ERTS_ALC_T_THR_PRGR_DATA,
                     offsetof(ErtsThrPrgrData, leader));
    /* Ids are handed out by an atomic counter shared by all unmanaged
     * registrations. */
    tpd->id = (int) erts_atomic32_inc_read_nob(&intrnl->misc.data.unmanaged_id);
    tpd->is_managed = 0;
    tpd->is_blocking = is_blocking;
    tpd->is_temporary = 0;
#ifdef ERTS_ENABLE_LOCK_CHECK
    tpd->is_delaying = 0;
#endif
    ASSERT(tpd->id >= 0);
    if (tpd->id >= intrnl->unmanaged.no)
        erl_exit(ERTS_ABORT_EXIT,
                 "%s:%d:%s(): Too many unmanaged registered threads\n",
                 __FILE__, __LINE__, __func__);
    init_wakeup_request_array(&tpd->wakeup_request[0]);
    erts_tsd_set(erts_thr_prgr_data_key__, (void *) tpd);
    /* Unmanaged threads only need the wakeup callback. */
    ASSERT(callbacks->wakeup);
    intrnl->unmanaged.callbacks[tpd->id] = *callbacks;
}
/*
 * Release the code write permission seized by
 * erts_try_seize_code_write_permission() and resume every process that
 * was suspended waiting for it.
 */
void
erts_release_code_write_permission(void)
{
    erts_smp_mtx_lock(&code_write_permission_mtx);
    ERTS_SMP_LC_ASSERT(erts_has_code_write_permission());
    while (code_write_queue != NULL) { /* unleash the entire herd */
        struct code_write_queue_item* qitem = code_write_queue;
        erts_smp_proc_lock(qitem->p, ERTS_PROC_LOCK_STATUS);
        /* Exiting processes are just dequeued; no point resuming them. */
        if (!ERTS_PROC_IS_EXITING(qitem->p)) {
            erts_resume(qitem->p, ERTS_PROC_LOCK_STATUS);
        }
        erts_smp_proc_unlock(qitem->p, ERTS_PROC_LOCK_STATUS);
        code_write_queue = qitem->next;
        /* Drop the reference taken when the item was queued. */
        erts_proc_dec_refc(qitem->p);
        erts_free(ERTS_ALC_T_CODE_IX_LOCK_Q, qitem);
    }
    code_writing_process = NULL;
#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_tsd_set(has_code_write_permission, (void *) 0);
#endif
    erts_smp_mtx_unlock(&code_write_permission_mtx);
}
/*
 * Register the calling thread as a managed thread with the
 * thread-progress subsystem.  Schedulers pass their ErtsSchedulerData
 * (which embeds the record); other managed threads get a freshly
 * allocated record.  The function ends with a rendezvous: the last
 * thread to register wakes all the others, and every thread waits in
 * its callbacks until all managed threads have registered.
 */
void
erts_thr_progress_register_managed_thread(ErtsSchedulerData *esdp,
                                          ErtsThrPrgrCallbacks *callbacks,
                                          int pref_wakeup)
{
    ErtsThrPrgrData *tpd = perhaps_thr_prgr_data(NULL);
    int is_blocking = 0, managed;
    if (tpd) {
        /* Carry blocking state over from a temporary record and release
         * it; a permanent record means a double registration. */
        if (!tpd->is_temporary)
            erl_exit(ERTS_ABORT_EXIT,
                     "%s:%d:%s(): Double register of thread\n",
                     __FILE__, __LINE__, __func__);
        is_blocking = tpd->is_blocking;
        return_tmp_thr_prgr_data(tpd);
    }
    if (esdp)
        tpd = &esdp->thr_progress_data;
    else
        tpd = erts_alloc(ERTS_ALC_T_THR_PRGR_DATA, sizeof(ErtsThrPrgrData));
    /* Id 0 goes to the first thread asking for preferred wakeup (the
     * xchg makes sure only one gets it); schedulers use their scheduler
     * number, everyone else draws from an atomic counter. */
    if (pref_wakeup
        && !erts_atomic32_xchg_nob(&intrnl->misc.data.pref_wakeup_used, 1))
        tpd->id = 0;
    else if (esdp)
        tpd->id = (int) esdp->no;
    else
        tpd->id = erts_atomic32_inc_read_nob(&intrnl->misc.data.managed_id);
    ASSERT(tpd->id >= 0);
    if (tpd->id >= intrnl->managed.no)
        erl_exit(ERTS_ABORT_EXIT,
                 "%s:%d:%s(): Too many managed registered threads\n",
                 __FILE__, __LINE__, __func__);
    tpd->is_managed = 1;
    tpd->is_blocking = is_blocking;
    tpd->is_temporary = 0;
#ifdef ERTS_ENABLE_LOCK_CHECK
    tpd->is_delaying = 1;
#endif
    init_wakeup_request_array(&tpd->wakeup_request[0]);
    ERTS_THR_PROGRESS_STATE_DEBUG_INIT(tpd->id);
    tpd->leader = 0;
    tpd->active = 1;
    tpd->confirmed = 0;
    tpd->leader_state.current = ERTS_THR_PRGR_VAL_WAITING;
    erts_tsd_set(erts_thr_prgr_data_key__, (void *) tpd);
    erts_atomic32_inc_nob(&intrnl->misc.data.lflgs);
    /* Managed threads must supply the full callback set. */
    ASSERT(callbacks->wakeup);
    ASSERT(callbacks->prepare_wait);
    ASSERT(callbacks->wait);
    ASSERT(callbacks->finalize_wait);
    intrnl->managed.callbacks[tpd->id] = *callbacks;
    /* Rendezvous: release-increment the registration count so earlier
     * initialization is visible to whoever reads it with acquire. */
    callbacks->prepare_wait(callbacks->arg);
    managed = erts_atomic32_inc_read_relb(&intrnl->misc.data.managed_count);
    if (managed != intrnl->managed.no) {
        /* Wait until all managed threads have registered... */
        do {
            callbacks->wait(callbacks->arg);
            callbacks->prepare_wait(callbacks->arg);
            managed = erts_atomic32_read_acqb(&intrnl->misc.data.managed_count);
        } while (managed != intrnl->managed.no);
    }
    else {
        int id;
        /* All managed threads have registered; let's go... */
        for (id = 0; id < managed; id++)
            if (id != tpd->id)
                wakeup_managed(id);
    }
    callbacks->finalize_wait(callbacks->arg);
}