void
erts_end_staging_ranges(int commit)
{
    if (commit) {
        Sint i;
        ErtsCodeIndex src = erts_active_code_ix();
        ErtsCodeIndex dst = erts_staging_code_ix();
        Range* mp;
        Sint num_inserted;

        mp = r[dst].modules;
        num_inserted = write_ptr - mp;
        for (i = 0; i < r[src].n; i++) {
            Range* rp = r[src].modules+i;
            if (rp->start < RANGE_END(rp)) {
                /* Only insert a module that has not been purged. */
                write_ptr->start = rp->start;
                erts_atomic_init_nob(&write_ptr->end,
                                     (erts_aint_t)(RANGE_END(rp)));
                write_ptr++;
            }
        }

        /*
         * There are num_inserted new range entries (unsorted) at the
         * beginning of the modules array, followed by the old entries
         * (sorted). We must now sort the entire array.
         */

        r[dst].n = write_ptr - mp;
        if (num_inserted > 1) {
            qsort(mp, r[dst].n, sizeof(Range),
                  (int (*)(const void *, const void *)) rangecompare);
        }
        else if (num_inserted == 1) {
            /* Sift the new range into place. This is faster than qsort(). */
            Range t = mp[0];
            for (i = 0; i < r[dst].n-1 && t.start > mp[i+1].start; i++) {
                mp[i] = mp[i+1];
            }
            mp[i] = t;
        }
        r[dst].modules = mp;
        CHECK(&r[dst]);
        erts_atomic_set_nob(&r[dst].mid,
                            (erts_aint_t) (r[dst].modules + r[dst].n / 2));

        if (r[dst].allocated * 2 > erts_dump_num_lit_areas) {
            erts_dump_num_lit_areas *= 2;
            erts_dump_lit_areas = (ErtsLiteralArea **)
                erts_realloc(ERTS_ALC_T_CRASH_DUMP,
                             (void *) erts_dump_lit_areas,
                             erts_dump_num_lit_areas * sizeof(ErtsLiteralArea*));
        }
    }
}
void
erts_thr_progress_unblock(void)
{
    erts_tse_t *event;
    int id, break_id, sz, wakeup;
    ErtsThrPrgrData *tpd = thr_prgr_data(NULL);

    ASSERT(tpd->is_blocking);
    /* Blocking nests; only the outermost unblock resumes the system. */
    if (--tpd->is_blocking)
        return;

    sz = intrnl->managed.no;

    wakeup = 1;
    if (!tpd->is_managed)
        id = break_id = tpd->id < 0 ? 0 : tpd->id % sz;
    else {
        /* Start the wakeup sweep just past our own slot and wrap around. */
        break_id = tpd->id;
        id = break_id + 1;
        if (id >= sz)
            id = 0;
        if (id == break_id)
            wakeup = 0;
        erts_atomic32_inc_nob(&intrnl->misc.data.block_count);
    }

    event = ((erts_tse_t *)
             erts_atomic_read_nob(&intrnl->misc.data.blocker_event));
    ASSERT(event);
    erts_atomic_set_nob(&intrnl->misc.data.blocker_event, ERTS_AINT_NULL);

    erts_atomic32_read_bor_relb(&intrnl->misc.data.block_count,
                                ERTS_THR_PRGR_BC_FLG_NOT_BLOCKING);
#if ERTS_THR_PRGR_PRINT_BLOCKERS
    erts_fprintf(stderr, "unblock(%d)\n", tpd->id);
#endif
    erts_atomic32_read_band_mb(&intrnl->misc.data.lflgs,
                               ~ERTS_THR_PRGR_LFLG_BLOCK);

    if (wakeup) {
        /* Resume the other managed threads, skipping those idling in
         * the waiting state. */
        do {
            ErtsThrPrgrVal tmp;
            tmp = read_nob(&intrnl->thr[id].data.current);
            if (tmp != ERTS_THR_PRGR_VAL_WAITING)
                wakeup_managed(id);
            if (++id >= sz)
                id = 0;
        } while (id != break_id);
    }

    return_tmp_thr_prgr_data(tpd);
    erts_tse_return(event);
}
static void
table_updater(void* data)
{
    HashTable* old_table;
    HashTable* new_table;

    old_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
    new_table = (HashTable *) data;
    ASSERT(new_table->num_to_delete == 0);

    /* Publish the new table; concurrent readers may still hold
     * pointers into the old one. */
    erts_atomic_set_nob(&the_hash_table, (erts_aint_t)new_table);

    /* Defer freeing the old table until thread progress guarantees
     * that no reader can still be using it. */
    append_to_delete_queue(old_table);
    erts_schedule_thr_prgr_later_op(table_deleter,
                                    old_table,
                                    &old_table->thr_prog_op);
    release_update_permission(1);
}
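/*
 * Illustrative sketch (not ERTS code): the publish-then-defer pattern
 * used by table_updater above, reduced to portable C11 atomics. All
 * names (sketch_table, sketch_publish, sketch_reclaim_later) are
 * hypothetical; in ERTS the deferred free is driven by the thread
 * progress machinery via erts_schedule_thr_prgr_later_op().
 */
#include <stdatomic.h>
#include <stdlib.h>

typedef struct sketch_table {
    size_t size;
    /* ... entries ... */
} sketch_table;

static _Atomic(sketch_table *) sketch_current;

static void sketch_free_table(void *p) { free(p); }

/* Naive stand-in for deferred reclamation: a real system would wait
 * for a grace period so that all readers of the old pointer are done
 * before calling fn(arg); freeing immediately is only safe here
 * because this sketch has no concurrent readers. */
static void sketch_reclaim_later(void (*fn)(void *), void *arg)
{
    fn(arg);
}

void sketch_publish(sketch_table *new_table)
{
    /* Atomically swap in the new table and take ownership of the old. */
    sketch_table *old_table =
        atomic_exchange_explicit(&sketch_current, new_table,
                                 memory_order_release);
    /* Readers may still hold old_table; defer the free. */
    if (old_table)
        sketch_reclaim_later(sketch_free_table, old_table);
}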
static erts_aint32_t
thr_progress_block(ErtsThrPrgrData *tpd, int wait)
{
    erts_tse_t *event = NULL; /* Remove erroneous warning... sigh... */
    erts_aint32_t lflgs, bc;

    /* Blocking nests; only the outermost call does the real work. */
    if (tpd->is_blocking++)
        return (erts_aint32_t) 0;

    while (1) {
        lflgs = erts_atomic32_read_bor_nob(&intrnl->misc.data.lflgs,
                                           ERTS_THR_PRGR_LFLG_BLOCK);
        if (lflgs & ERTS_THR_PRGR_LFLG_BLOCK)
            block_thread(tpd); /* Another thread is blocking; wait our turn. */
        else
            break;
    }

#if ERTS_THR_PRGR_PRINT_BLOCKERS
    erts_fprintf(stderr, "block(%d)\n", tpd->id);
#endif

    ASSERT(ERTS_AINT_NULL
           == erts_atomic_read_nob(&intrnl->misc.data.blocker_event));

    if (wait) {
        event = erts_tse_fetch();
        erts_tse_reset(event);
        erts_atomic_set_nob(&intrnl->misc.data.blocker_event,
                            (erts_aint_t) event);
    }
    if (tpd->is_managed)
        erts_atomic32_dec_nob(&intrnl->misc.data.block_count);
    bc = erts_atomic32_read_band_mb(&intrnl->misc.data.block_count,
                                    ~ERTS_THR_PRGR_BC_FLG_NOT_BLOCKING);
    bc &= ~ERTS_THR_PRGR_BC_FLG_NOT_BLOCKING;
    if (wait) {
        /* Sleep until all other managed threads have checked in. */
        while (bc != 0) {
            erts_tse_wait(event);
            erts_tse_reset(event);
            bc = erts_atomic32_read_acqb(&intrnl->misc.data.block_count);
        }
    }
    return bc;
}
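/*
 * Illustrative sketch (not ERTS code): how the block/unblock pair
 * above is typically used. erts_thr_progress_block() and
 * erts_thr_progress_unblock() are the public ERTS entry points; the
 * surrounding function is hypothetical. Calls nest, so only the
 * outermost pair actually parks and resumes the other managed threads.
 */
static void sketch_do_exclusive_update(void)
{
    erts_thr_progress_block();   /* all other managed threads parked */
    /* ... mutate shared state that would otherwise require locking ... */
    erts_thr_progress_unblock(); /* wake the parked threads */
}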
void
erts_flxctr_init(ErtsFlxCtr* c,
                 int is_decentralized,
                 Uint nr_of_counters,
                 ErtsAlcType_t alloc_type)
{
    ASSERT(nr_of_counters <= ERTS_FLXCTR_ATOMICS_PER_CACHE_LINE);
    c->is_decentralized = is_decentralized;
    c->nr_of_counters = nr_of_counters;
    if (c->is_decentralized) {
        /* Decentralized: cache-line-aligned counter arrays so that
         * updates from different schedulers do not contend. */
        ErtsFlxCtrDecentralizedCtrArray* array =
            create_decentralized_ctr_array(alloc_type, nr_of_counters);
        erts_atomic_set_nob(&array->snapshot_status,
                            ERTS_FLXCTR_SNAPSHOT_NOT_ONGOING);
        erts_atomic_init_nob(&c->u.counters_ptr, (Sint)array);
        ASSERT(((Uint)array->array) % ERTS_CACHE_LINE_SIZE == 0);
    } else {
        /* Centralized: a single set of atomics updated by all threads. */
        int i;
        for (i = 0; i < nr_of_counters; i++) {
            erts_atomic_init_nob(&c->u.counters[i], 0);
        }
    }
}
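/*
 * Illustrative sketch (not ERTS code): the idea behind a decentralized
 * counter, in portable C11. Each thread updates its own cache-line-
 * padded slot, so increments never contend; a read must sum all slots.
 * All names here are hypothetical, and the real ErtsFlxCtr additionally
 * supports several counters per array and snapshot-based reads.
 */
#include <stdatomic.h>

#define SKETCH_CACHE_LINE 64
#define SKETCH_SLOTS      8   /* e.g. one per scheduler thread */

typedef struct {
    _Alignas(SKETCH_CACHE_LINE) atomic_long value;
} sketch_slot;

static sketch_slot sketch_ctr[SKETCH_SLOTS];

void sketch_inc(unsigned thread_id)
{
    /* Uncontended in the common case: each thread owns a slot. */
    atomic_fetch_add_explicit(&sketch_ctr[thread_id % SKETCH_SLOTS].value,
                              1, memory_order_relaxed);
}

long sketch_read(void)
{
    /* Reads visit every slot; cheap updates are bought at the price
     * of more expensive reads, which suits write-heavy counters such
     * as table sizes. */
    long sum = 0;
    for (int i = 0; i < SKETCH_SLOTS; i++)
        sum += atomic_load_explicit(&sketch_ctr[i].value,
                                    memory_order_relaxed);
    return sum;
}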
static Range*
find_range(BeamInstr* pc)
{
    ErtsCodeIndex active = erts_active_code_ix();
    Range* low = r[active].modules;
    Range* high = low + r[active].n;
    Range* mid = (Range *) erts_atomic_read_nob(&r[active].mid);

    CHECK(&r[active]);
    while (low < high) {
        if (pc < mid->start) {
            high = mid;
        } else if (pc >= RANGE_END(mid)) {
            low = mid + 1;
        } else {
            /* Remember the hit; the next lookup is likely to be for
             * an address in the same module. */
            erts_atomic_set_nob(&r[active].mid, (erts_aint_t) mid);
            return mid;
        }
        mid = low + (high-low) / 2;
    }
    return 0;
}
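/*
 * Illustrative sketch (not ERTS code): the cached-midpoint search used
 * by find_range above, in self-contained form. The trick is to start
 * the binary search at the previous hit instead of the middle of the
 * array, since consecutive lookups tend to land in the same module.
 * All names (sketch_range, sketch_find) are hypothetical, and the
 * cache is a plain pointer here rather than an atomic.
 */
#include <stddef.h>

typedef struct { const char *start; const char *end; } sketch_range;

static sketch_range *sketch_find(sketch_range *tab, size_t n,
                                 sketch_range **cached_mid,
                                 const char *pc)
{
    sketch_range *low = tab;
    sketch_range *high = tab + n;
    /* Start at the last hit if there is one, else at the true middle. */
    sketch_range *mid = *cached_mid ? *cached_mid : tab + n / 2;

    while (low < high) {
        if (pc < mid->start)
            high = mid;
        else if (pc >= mid->end)
            low = mid + 1;
        else {
            *cached_mid = mid; /* remember the hit for the next lookup */
            return mid;
        }
        mid = low + (high - low) / 2;
    }
    return NULL;
}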
void
erts_remove_from_ranges(BeamInstr* code)
{
    Range* rp = find_range(code);
    /* Mark the range as purged by making it empty (end == start). */
    erts_atomic_set_nob(&rp->end, (erts_aint_t)rp->start);
}