Example #1
static HashTable*
next_to_delete(void)
{
    HashTable* table;

    erts_mtx_lock(&delete_queue_mtx);
    table = delete_queue_head;
    if (table) {
        if (erts_atomic_read_nob(&table->refc)) {
            /*
             * This hash table is still referenced. Hash tables
             * must be deleted in order, so we return a NULL
             * pointer.
             */
            table = NULL;
        } else {
            /*
             * Remove the first hash table from the queue.
             */
            delete_queue_head = table->delete_next;
            if (delete_queue_head == NULL) {
                delete_queue_tail = &delete_queue_head;
            }
        }
    }
    erts_mtx_unlock(&delete_queue_mtx);
    return table;
}
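
next_to_delete() pops from an intrusive, mutex-protected queue whose tail is tracked as a pointer-to-pointer, which is why draining the queue resets delete_queue_tail to &delete_queue_head. Below is a minimal self-contained sketch of the same pattern, using C11 atomics and a pthread mutex in place of the ERTS primitives; every name in it is illustrative, not part of ERTS.

#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

typedef struct table {
    atomic_int refc;             /* readers still using this table */
    struct table *delete_next;   /* intrusive queue link */
} table_t;

static pthread_mutex_t delete_queue_mtx = PTHREAD_MUTEX_INITIALIZER;
static table_t *delete_queue_head = NULL;
static table_t **delete_queue_tail = &delete_queue_head;

/* Enqueue at the tail; the tail always points at the pointer
 * holding the current NULL link, so append is O(1). */
static void append_to_delete_queue(table_t *t)
{
    pthread_mutex_lock(&delete_queue_mtx);
    t->delete_next = NULL;
    *delete_queue_tail = t;
    delete_queue_tail = &t->delete_next;
    pthread_mutex_unlock(&delete_queue_mtx);
}

/* Dequeue the head only when it is unreferenced; tables must be
 * deleted in order, so a referenced head blocks the whole queue. */
static table_t *next_to_delete(void)
{
    table_t *t;

    pthread_mutex_lock(&delete_queue_mtx);
    t = delete_queue_head;
    if (t) {
        if (atomic_load(&t->refc)) {
            t = NULL;
        } else {
            delete_queue_head = t->delete_next;
            if (delete_queue_head == NULL)
                delete_queue_tail = &delete_queue_head;
        }
    }
    pthread_mutex_unlock(&delete_queue_mtx);
    return t;
}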
Example #2
BIF_RETTYPE persistent_term_info_0(BIF_ALIST_0)
{
    HashTable* hash_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
    TrapData* trap_data;
    Eterm res = NIL;
    Eterm magic_ref;
    Binary* mbp;

    magic_ref = alloc_trap_data(BIF_P);
    mbp = erts_magic_ref2bin(magic_ref);
    trap_data = ERTS_MAGIC_BIN_DATA(mbp);
    trap_data->table = hash_table;
    trap_data->idx = 0;
    trap_data->remaining = hash_table->num_entries;
    trap_data->memory = 0;
    res = do_info(BIF_P, trap_data);
    if (trap_data->remaining == 0) {
        BUMP_REDS(BIF_P, hash_table->num_entries);
        trap_data->table = NULL; /* Prevent refc decrement */
        BIF_RET(res);
    } else {
        /*
         * Increment the ref counter to prevent an update operation (by put/2
         * or erase/1) from deleting this hash table.
         */
        erts_atomic_inc_nob(&hash_table->refc);
        BUMP_ALL_REDS(BIF_P);
        BIF_TRAP2(&persistent_term_info_export, BIF_P, magic_ref, res);
    }
}
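
This BIF shows the ERTS trapping idiom: traversal state lives in a magic binary, and when the work cannot finish within the reduction budget, the table's refc is bumped and the BIF traps, re-entering later with the same state. A stripped-down sketch of the budget-and-cursor shape; cursor_t and the helper are illustrative, not the ERTS API.

#include <stdbool.h>
#include <stddef.h>

/* Illustrative cursor, standing in for TrapData. */
typedef struct {
    size_t idx;        /* next entry to visit */
    size_t remaining;  /* entries left to visit */
} cursor_t;

/* Do at most `budget` units of work; return true when done.
 * A false return corresponds to BIF_TRAP2() above: the caller
 * re-invokes later with the same cursor. */
static bool do_info_step(cursor_t *c, size_t budget,
                         const long *entries, long *acc)
{
    while (c->remaining > 0 && budget-- > 0) {
        *acc += entries[c->idx++];   /* one unit of work */
        c->remaining--;
    }
    return c->remaining == 0;
}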
Example #3
void
erts_thr_progress_unblock(void)
{
    erts_tse_t *event;
    int id, break_id, sz, wakeup;
    ErtsThrPrgrData *tpd = thr_prgr_data(NULL);

    ASSERT(tpd->is_blocking);
    if (--tpd->is_blocking)
	return;

    sz = intrnl->managed.no;

    wakeup = 1;
    if (!tpd->is_managed)
	id = break_id = tpd->id < 0 ? 0 : tpd->id % sz;
    else {
	break_id = tpd->id;
	id = break_id + 1;
	if (id >= sz)
	    id = 0;
	if (id == break_id)
	    wakeup = 0;
	erts_atomic32_inc_nob(&intrnl->misc.data.block_count);
    }

    event = ((erts_tse_t *)
	     erts_atomic_read_nob(&intrnl->misc.data.blocker_event));
    ASSERT(event);
    erts_atomic_set_nob(&intrnl->misc.data.blocker_event, ERTS_AINT_NULL);

    erts_atomic32_read_bor_relb(&intrnl->misc.data.block_count,
				ERTS_THR_PRGR_BC_FLG_NOT_BLOCKING);
#if ERTS_THR_PRGR_PRINT_BLOCKERS
    erts_fprintf(stderr, "unblock(%d)\n", tpd->id);
#endif
    erts_atomic32_read_band_mb(&intrnl->misc.data.lflgs,
			       ~ERTS_THR_PRGR_LFLG_BLOCK);

    if (wakeup) {
	do {
	    ErtsThrPrgrVal tmp;
	    tmp = read_nob(&intrnl->thr[id].data.current);
	    if (tmp != ERTS_THR_PRGR_VAL_WAITING)
		wakeup_managed(id);
	    if (++id >= sz)
		id = 0;
	} while (id != break_id);
    }

    return_tmp_thr_prgr_data(tpd);
    erts_tse_return(event);
}
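
Note the wakeup sweep at the end: it walks all managed threads exactly once, starting just after the unblocker's own slot and wrapping around at sz. Reduced to its core it looks like the sketch below; wakeup_managed is assumed, and the original's extra filter that skips threads parked at ERTS_THR_PRGR_VAL_WAITING is omitted.

/* Hypothetical declaration; in ERTS this wakes managed thread `id`. */
extern void wakeup_managed(int id);

/* Visit every slot except break_id exactly once, wrapping at sz.
 * With sz == 1 the loop body never runs, matching the wakeup = 0
 * case in erts_thr_progress_unblock(). */
static void wakeup_all_except(int break_id, int sz)
{
    int id = (break_id + 1) % sz;
    while (id != break_id) {
        wakeup_managed(id);
        id = (id + 1) % sz;
    }
}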
Example #4
BIF_RETTYPE persistent_term_get_1(BIF_ALIST_1)
{
    Eterm key = BIF_ARG_1;
    HashTable* hash_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
    Uint entry_index;
    Eterm term;

    entry_index = lookup(hash_table, key);
    term = hash_table->term[entry_index];
    if (is_boxed(term)) {
        ASSERT(is_tuple_arity(term, 2));
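        /* The entry is a 2-tuple {Key, Value}; tuple_val() points at
         * the arity header, so [1] is the key and [2] the value. */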
        BIF_RET(tuple_val(term)[2]);
    }
    BIF_ERROR(BIF_P, BADARG);
}
Example #5
static ERTS_INLINE int
block_count_dec(void)
{
    erts_aint32_t block_count;
    block_count = erts_atomic32_dec_read_mb(&intrnl->misc.data.block_count);
    if (block_count == 0) {
	erts_tse_t *event;
	event = ((erts_tse_t*)
		 erts_atomic_read_nob(&intrnl->misc.data.blocker_event));
	if (event)
	    erts_tse_set(event);
	return 1;
    }

    return (block_count & ERTS_THR_PRGR_BC_FLG_NOT_BLOCKING) == 0;
}
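
This is a decrement-and-test handshake: the thread whose decrement brings the count to zero signals the blocker's event. A self-contained C11 sketch of the same handshake; the NOT_BLOCKING flag bit handled by the original is left out, and event_set/load_blocker_event are hypothetical stand-ins.

#include <stdatomic.h>

typedef struct { atomic_int count; } block_counter_t;

extern void *load_blocker_event(void);   /* hypothetical lookup */
extern void event_set(void *event);      /* hypothetical wakeup */

/* Returns 1 when the caller was the last thread the blocker was
 * waiting for, mirroring block_count_dec() above. */
static int block_count_dec(block_counter_t *bc)
{
    /* fetch_sub returns the previous value; subtract one to get the
     * post-decrement count, like erts_atomic32_dec_read_mb(). */
    int count = atomic_fetch_sub_explicit(&bc->count, 1,
                                          memory_order_seq_cst) - 1;
    if (count == 0) {
        void *event = load_blocker_event();
        if (event)
            event_set(event);   /* wake the thread blocked in wait */
        return 1;
    }
    return 0;
}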
Example #6
BIF_RETTYPE persistent_term_get_2(BIF_ALIST_2)
{
    Eterm key = BIF_ARG_1;
    Eterm result = BIF_ARG_2;
    HashTable* hash_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
    Uint entry_index;
    Eterm term;

    entry_index = lookup(hash_table, key);
    term = hash_table->term[entry_index];
    if (is_boxed(term)) {
        ASSERT(is_tuple_arity(term, 2));
        result = tuple_val(term)[2];
    }
    BIF_RET(result);
}
Example #7
static void
table_updater(void* data)
{
    HashTable* old_table;
    HashTable* new_table;

    old_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
    new_table = (HashTable *) data;
    ASSERT(new_table->num_to_delete == 0);
    erts_atomic_set_nob(&the_hash_table, (erts_aint_t)new_table);
    append_to_delete_queue(old_table);
    erts_schedule_thr_prgr_later_op(table_deleter,
                                    old_table,
                                    &old_table->thr_prog_op);
    release_update_permission(1);
}
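
table_updater is the publish half of a copy-on-update scheme: readers load the_hash_table lock-free, the writer installs the new table with one atomic store, and the old table is reclaimed only after a thread-progress grace period guarantees no reader can still hold it. A sketch of the publish-then-defer shape; schedule_after_grace_period stands in for erts_schedule_thr_prgr_later_op and is hypothetical.

#include <stdatomic.h>

typedef struct table table_t;

static _Atomic(table_t *) the_table;

/* Hypothetical: run fn(arg) once every thread has passed a progress
 * point, i.e. once no reader can still hold the old pointer. */
extern void schedule_after_grace_period(void (*fn)(void *), void *arg);
extern void free_table(void *table);

static void install_new_table(table_t *new_table)
{
    /* Only one writer runs at a time (the update permission), so a
     * plain exchange is equivalent to the read-then-set above. */
    table_t *old_table = atomic_exchange(&the_table, new_table);

    /* Concurrent readers may still be using old_table; defer the
     * free instead of calling it here. */
    schedule_after_grace_period(free_table, old_table);
}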
Example #8
BIF_RETTYPE erts_internal_erase_persistent_terms_0(BIF_ALIST_0)
{
    HashTable* old_table;
    HashTable* new_table;

    if (!try_seize_update_permission(BIF_P)) {
	ERTS_BIF_YIELD0(bif_export[BIF_erts_internal_erase_persistent_terms_0],
                        BIF_P);
    }
    old_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
    old_table->first_to_delete = 0;
    old_table->num_to_delete = old_table->allocated;
    new_table = create_initial_table();
    erts_schedule_thr_prgr_later_op(table_updater, new_table, &thr_prog_op);
    suspend_updater(BIF_P);
    ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
}
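
Every updating BIF in this file starts the same way: try to become the single writer, and if that fails, yield the process and retry the call later rather than blocking a scheduler thread. The gate itself reduces to one CAS; this sketch omits the queueing of suspended processes that the real try_seize_update_permission does so that they can be resumed on release.

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool update_in_progress;

/* At most one update may be in flight; a failed caller backs off
 * (ERTS yields the whole Erlang process instead of spinning). */
static bool try_seize_update_permission(void)
{
    bool expected = false;
    return atomic_compare_exchange_strong(&update_in_progress,
                                          &expected, true);
}

static void release_update_permission(void)
{
    atomic_store(&update_in_progress, false);
}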
Example #9
static erts_aint32_t
thr_progress_block(ErtsThrPrgrData *tpd, int wait)
{
    erts_tse_t *event = NULL; /* Remove erroneous warning... sigh... */
    erts_aint32_t lflgs, bc;

    if (tpd->is_blocking++)
	return (erts_aint32_t) 0;

    while (1) {
	lflgs = erts_atomic32_read_bor_nob(&intrnl->misc.data.lflgs,
					   ERTS_THR_PRGR_LFLG_BLOCK);
	if (lflgs & ERTS_THR_PRGR_LFLG_BLOCK)
	    block_thread(tpd);
	else
	    break;
    }

#if ERTS_THR_PRGR_PRINT_BLOCKERS
    erts_fprintf(stderr, "block(%d)\n", tpd->id);
#endif

    ASSERT(ERTS_AINT_NULL
	   == erts_atomic_read_nob(&intrnl->misc.data.blocker_event));

    if (wait) {
	event = erts_tse_fetch();
	erts_tse_reset(event);
	erts_atomic_set_nob(&intrnl->misc.data.blocker_event,
			    (erts_aint_t) event);
    }
    if (tpd->is_managed)
	erts_atomic32_dec_nob(&intrnl->misc.data.block_count);
    bc = erts_atomic32_read_band_mb(&intrnl->misc.data.block_count,
				    ~ERTS_THR_PRGR_BC_FLG_NOT_BLOCKING);
    bc &= ~ERTS_THR_PRGR_BC_FLG_NOT_BLOCKING;
    if (wait) {
	while (bc != 0) {
	    erts_tse_wait(event);
	    erts_tse_reset(event);
	    bc = erts_atomic32_read_acqb(&intrnl->misc.data.block_count);
	}
    }
    return bc;

}
Example #10
static void
thr_prg_wake_up_and_count(void* bin_p)
{
    Binary* bin = bin_p;
    DecentralizedReadSnapshotInfo* info = ERTS_MAGIC_BIN_DATA(bin);
    Process* p = info->process;
    ErtsFlxCtrDecentralizedCtrArray* array = info->array;
    ErtsFlxCtrDecentralizedCtrArray* next = info->next_array;
    int i, sched;
    /* Reset result array */
    for (i = 0; i < info->nr_of_counters; i++) {
        info->result[i] = 0;
    }
    /* Read result from snapshot */
    for (sched = 0; sched < ERTS_FLXCTR_DECENTRALIZED_NO_SLOTS; sched++) {
        for (i = 0; i < info->nr_of_counters; i++) {
            info->result[i] = info->result[i] +
                erts_atomic_read_nob(&array->array[sched].counters[i]);
        }
    }
    /* Update the next decentralized counter array */
    for (i = 0; i < info->nr_of_counters; i++) {
        erts_atomic_add_nob(&next->array[0].counters[i], info->result[i]);
    }
    /* Announce that the snapshot is done */
    {
    Sint expected = ERTS_FLXCTR_SNAPSHOT_ONGOING;
    if (expected != erts_atomic_cmpxchg_mb(&next->snapshot_status,
                                           ERTS_FLXCTR_SNAPSHOT_NOT_ONGOING,
                                           expected)) {
        /* The CAS failed, which means that this thread needs to free the next array. */
        erts_free(info->alloc_type, next->block_start);
    }
    }
    /* Resume the process that requested the snapshot */
    erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
    if (!ERTS_PROC_IS_EXITING(p)) {
        erts_resume(p, ERTS_PROC_LOCK_STATUS);
    }
    /* Free the memory that is no longer needed */
    erts_free(info->alloc_type, array->block_start);
    erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
    erts_proc_dec_refc(p);
    erts_bin_release(bin);
}
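
The counters summed here are decentralized: each scheduler increments its own slot, so writes never contend, and only a snapshot pays the cost of summing every slot (which is why the sum runs in a thread-progress callback, after all schedulers are guaranteed to have switched arrays). The read side reduces to the sketch below; slot and counter counts are illustrative.

#include <stdatomic.h>

#define NO_SLOTS 64       /* one per scheduler, illustrative */
#define NO_COUNTERS 2

typedef struct {
    atomic_long counters[NO_COUNTERS];
} slot_t;

typedef struct {
    slot_t array[NO_SLOTS];
} ctr_array_t;

/* Sum every per-scheduler slot into result[]: cheap updates,
 * expensive snapshots. */
static void snapshot_read(ctr_array_t *a, long result[NO_COUNTERS])
{
    int s, i;

    for (i = 0; i < NO_COUNTERS; i++)
        result[i] = 0;
    for (s = 0; s < NO_SLOTS; s++)
        for (i = 0; i < NO_COUNTERS; i++)
            result[i] += atomic_load(&a->array[s].counters[i]);
}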
Example #11
Uint erts_process_memory(Process *p, int incl_msg_inq) {
  ErtsMessage *mp;
  Uint size = 0;
  struct saved_calls *scb;
  size += sizeof(Process);

  if (incl_msg_inq)
      ERTS_MSGQ_MV_INQ2PRIVQ(p);

  erts_doforall_links(ERTS_P_LINKS(p), &erts_one_link_size, &size);
  erts_doforall_monitors(ERTS_P_MONITORS(p), &erts_one_mon_size, &size);
  size += (p->heap_sz + p->mbuf_sz) * sizeof(Eterm);
  if (p->abandoned_heap)
      size += (p->hend - p->heap) * sizeof(Eterm);
  if (p->old_hend && p->old_heap)
    size += (p->old_hend - p->old_heap) * sizeof(Eterm);


  size += p->msg.len * sizeof(ErtsMessage);

  for (mp = p->msg.first; mp; mp = mp->next)
    if (mp->data.attached)
      size += erts_msg_attached_data_size(mp)*sizeof(Eterm);

  if (p->arg_reg != p->def_arg_reg) {
    size += p->arity * sizeof(p->arg_reg[0]);
  }

  if (erts_atomic_read_nob(&p->psd) != (erts_aint_t) NULL)
    size += sizeof(ErtsPSD);

  scb = ERTS_PROC_GET_SAVED_CALLS_BUF(p);
  if (scb) {
    size += (sizeof(struct saved_calls)
	     + (scb->len-1) * sizeof(scb->ct[0]));
  }

  size += erts_dicts_mem_size(p);
  return size;
}
Example #12
static Range*
find_range(BeamInstr* pc)
{
    ErtsCodeIndex active = erts_active_code_ix();
    Range* low = r[active].modules;
    Range* high = low + r[active].n;
    Range* mid = (Range *) erts_atomic_read_nob(&r[active].mid);

    CHECK(&r[active]);
    while (low < high) {
	if (pc < mid->start) {
	    high = mid;
	} else if (pc >= RANGE_END(mid)) {
	    low = mid + 1;
	} else {
	    erts_atomic_set_nob(&r[active].mid, (erts_aint_t) mid);
	    return mid;
	}
	mid = low + (high-low) / 2;
    }
    return 0;
}
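
find_range() is an ordinary binary search over sorted, non-overlapping code ranges, plus one trick: the midpoint of the last successful lookup is cached in an atomic and used as the first probe, because consecutive lookups tend to hit the same module. A standalone sketch of the cached-probe variant; the explicit staleness check is added here, whereas the original can rely on mid always being valid for the active range array.

#include <stdatomic.h>
#include <stddef.h>

typedef struct {
    const char *start;
    const char *end;
} range_t;

static _Atomic(range_t *) cached_mid;   /* last hit, may be NULL */

static range_t *find_range(range_t *ranges, size_t n, const char *pc)
{
    range_t *low = ranges;
    range_t *high = low + n;
    range_t *mid = atomic_load(&cached_mid);

    if (mid == NULL || mid < low || mid >= high)
        mid = low + n / 2;               /* no usable cached probe */
    while (low < high) {
        if (pc < mid->start)
            high = mid;
        else if (pc >= mid->end)
            low = mid + 1;
        else {
            atomic_store(&cached_mid, mid);  /* remember the hit */
            return mid;
        }
        mid = low + (high - low) / 2;
    }
    return NULL;
}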
Example #13
void
erts_init_persistent_dumping(void)
{
    HashTable* hash_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
    ErtsLiteralArea** area_p;
    Uint i;

    /*
     * Overwrite the array of Eterms in the current hash table
     * with pointers to literal areas.
     */

    erts_persistent_areas = (ErtsLiteralArea **) hash_table->term;
    erts_num_persistent_areas = hash_table->num_entries;
    area_p = erts_persistent_areas;
    for (i = 0; i < hash_table->allocated; i++) {
        Eterm term = hash_table->term[i];

        if (is_boxed(term)) {
            *area_p++ = term_to_area(term);
        }
    }
}
Example #14
static ERTS_INLINE void
enqueue_remote_managed_thread(erts_sspa_chunk_header_t *chdr,
			      erts_sspa_blk_t *this,
			      int cinit)
{
    erts_aint_t itmp;
    erts_sspa_blk_t *enq;

    erts_atomic_init_nob(&this->next_atmc, ERTS_AINT_NULL);
    /* Enqueue at end of list... */

    enq = (erts_sspa_blk_t *) erts_atomic_read_nob(&chdr->tail.data.last);
    itmp = erts_atomic_cmpxchg_relb(&enq->next_atmc,
				    (erts_aint_t) this,
				    ERTS_AINT_NULL);
    if (itmp == ERTS_AINT_NULL) {
	/* We are required to move last pointer */
#ifdef DEBUG
	ASSERT(ERTS_AINT_NULL == erts_atomic_read_nob(&this->next_atmc));
	ASSERT(((erts_aint_t) enq)
	       == erts_atomic_xchg_relb(&chdr->tail.data.last,
				      (erts_aint_t) this));
#else
	erts_atomic_set_relb(&chdr->tail.data.last, (erts_aint_t) this);
#endif
    }
    else {
	/* ... lost the race to a concurrent enqueuer; the retry
	 * path is not shown in this listing ... */
    }
}
Example #15
static ERTS_INLINE erts_aint_t
enqueue_remote_managed_thread(erts_sspa_chunk_header_t *chdr,
			      erts_sspa_blk_t *this,
			      int want_last)
{
    erts_aint_t ilast, itmp;

    erts_atomic_init_nob(&this->next_atmc, ERTS_AINT_NULL);

    /* Enqueue at end of list... */

    ilast = erts_atomic_read_nob(&chdr->tail.data.last);
    while (1) {
	erts_sspa_blk_t *last = (erts_sspa_blk_t *) ilast;
	itmp = erts_atomic_cmpxchg_mb(&last->next_atmc,
				       (erts_aint_t) this,
				       ERTS_AINT_NULL);
	if (itmp == ERTS_AINT_NULL)
	    break;
	ilast = itmp;
    }

    /* Move last pointer forward... */
    while (1) {
	erts_aint_t itmp;
	if (want_last) {
	    if (erts_atomic_read_rb(&this->next_atmc) != ERTS_AINT_NULL) {
		/* ... rest of the function not shown in this listing ... */
	    }
	}
    }
}
Example #16
Uint
erts_persistent_term_count(void)
{
    HashTable* hash_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
    return hash_table->num_entries;
}
Example #17
UWord
erts_ranges_sz(void)
{
    return erts_atomic_read_nob(&mem_used) * sizeof(Range);
}
Example #18
BIF_RETTYPE persistent_term_erase_1(BIF_ALIST_1)
{
    static const Uint ITERATIONS_PER_RED = 32;
    ErtsPersistentTermErase1Context* ctx;
    Eterm state_mref = THE_NON_VALUE;
    long iterations_until_trap;
    long max_iterations;
#ifdef DEBUG
        (void)ITERATIONS_PER_RED;
        iterations_until_trap = max_iterations =
            GET_SMALL_RANDOM_INT(ERTS_BIF_REDS_LEFT(BIF_P) + (Uint)&ctx);
#else
        iterations_until_trap = max_iterations =
            ITERATIONS_PER_RED * ERTS_BIF_REDS_LEFT(BIF_P);
#endif
#define ERASE_TRAP_CODE                                                 \
        BIF_TRAP1(bif_export[BIF_persistent_term_erase_1], BIF_P, state_mref);
#define TRAPPING_COPY_TABLE_ERASE(TABLE_DEST, OLD_TABLE, NEW_SIZE, REHASH, LOC_NAME) \
        TRAPPING_COPY_TABLE(TABLE_DEST, OLD_TABLE, NEW_SIZE, REHASH, LOC_NAME, ERASE_TRAP_CODE)
    if (is_internal_magic_ref(BIF_ARG_1) &&
        (ERTS_MAGIC_BIN_DESTRUCTOR(erts_magic_ref2bin(BIF_ARG_1)) ==
         persistent_term_erase_1_ctx_bin_dtor)) {
        /* Restore the state after a trap */
        Binary* state_bin;
        state_mref = BIF_ARG_1;
        state_bin = erts_magic_ref2bin(state_mref);
        ctx = ERTS_MAGIC_BIN_DATA(state_bin);
        ASSERT(BIF_P->flags & F_DISABLE_GC);
        erts_set_gc_state(BIF_P, 1);
        switch (ctx->trap_location) {
        case ERASE1_TRAP_LOCATION_TMP_COPY:
            goto L_ERASE1_TRAP_LOCATION_TMP_COPY;
        case ERASE1_TRAP_LOCATION_FINAL_COPY:
            goto L_ERASE1_TRAP_LOCATION_FINAL_COPY;
        }
    } else {
        /* Save state in magic bin in case trapping is necessary */
        Eterm* hp;
        Binary* state_bin = erts_create_magic_binary(sizeof(ErtsPersistentTermErase1Context),
                                                     persistent_term_erase_1_ctx_bin_dtor);
        hp = HAlloc(BIF_P, ERTS_MAGIC_REF_THING_SIZE);
        state_mref = erts_mk_magic_ref(&hp, &MSO(BIF_P), state_bin);
        ctx = ERTS_MAGIC_BIN_DATA(state_bin);
        /*
         * IMPORTANT: The following two fields are used to detect if
         * persistent_term_erase_1_ctx_bin_dtor needs to free memory
         */
        ctx->cpy_ctx.new_table = NULL;
        ctx->tmp_table = NULL;
    }
    if (!try_seize_update_permission(BIF_P)) {
	ERTS_BIF_YIELD1(bif_export[BIF_persistent_term_erase_1],
                        BIF_P, BIF_ARG_1);
    }

    ctx->key = BIF_ARG_1;
    ctx->old_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
    ctx->entry_index = lookup(ctx->old_table, ctx->key);
    ctx->old_term = ctx->old_table->term[ctx->entry_index];
    if (is_boxed(ctx->old_term)) {
        Uint new_size;
        /*
         * Since we don't use any delete markers, we must rehash
         * the table when deleting terms to ensure that all terms
         * can still be reached if there are hash collisions.
         * We can't rehash in place and it would not be safe to modify
         * the old table yet, so we will first need a new
         * temporary table copy of the same size as the old one.
         */

        ASSERT(is_tuple_arity(ctx->old_term, 2));
        TRAPPING_COPY_TABLE_ERASE(ctx->tmp_table,
                                  ctx->old_table,
                                  ctx->old_table->allocated,
                                  ERTS_PERSISTENT_TERM_CPY_TEMP,
                                  ERASE1_TRAP_LOCATION_TMP_COPY);

        /*
         * Delete the term from the temporary table. Then copy the
         * temporary table to a new table, rehashing the entries
         * while copying.
         */

        ctx->tmp_table->term[ctx->entry_index] = NIL;
        ctx->tmp_table->num_entries--;
        new_size = ctx->tmp_table->allocated;
        if (MUST_SHRINK(ctx->tmp_table)) {
            new_size /= 2;
        }
        TRAPPING_COPY_TABLE_ERASE(ctx->new_table,
                                  ctx->tmp_table,
                                  new_size,
                                  ERTS_PERSISTENT_TERM_CPY_REHASH,
                                  ERASE1_TRAP_LOCATION_FINAL_COPY);
        erts_free(ERTS_ALC_T_PERSISTENT_TERM_TMP, ctx->tmp_table);
        /*
         * IMPORTANT: Memory management depends on ctx->tmp_table
         * being set to NULL on the line below.
         */
        ctx->tmp_table = NULL;

        mark_for_deletion(ctx->old_table, ctx->entry_index);
        erts_schedule_thr_prgr_later_op(table_updater, ctx->new_table, &thr_prog_op);
        suspend_updater(BIF_P);
        BUMP_REDS(BIF_P, (max_iterations - iterations_until_trap) / ITERATIONS_PER_RED);
        ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
    }

    /*
     * Key is not present. Nothing to do.
     */

    ASSERT(is_nil(ctx->old_term));
    release_update_permission(0);
    BIF_RET(am_false);
}
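
The comment in the middle of erase/1 is the key invariant: the table uses no delete markers (tombstones), so simply clearing a slot could break the probe chain of any key that collided past it, and the table is rebuilt instead. A toy linear-probing illustration of why; SZ and the integer keys are illustrative, and the real table's hashing differs.

#include <string.h>

#define SZ 8u   /* toy power-of-two table; 0 means empty */

static unsigned tab[SZ];

/* Linear probing: returns the slot holding key, or the first empty
 * slot where it would be inserted. */
static unsigned lookup_slot(unsigned key)
{
    unsigned i = key & (SZ - 1);
    while (tab[i] != 0 && tab[i] != key)
        i = (i + 1) & (SZ - 1);
    return i;
}

/* Suppose keys 1 and 9 both hash to slot 1, so 9 lives in slot 2.
 * If erase simply cleared slot 1, lookup_slot(9) would stop at the
 * now-empty slot 1 and miss it. Rebuilding the table without the
 * victim, as erase/1 does via TRAPPING_COPY_TABLE_ERASE, keeps
 * every surviving probe chain intact. */
static void rebuild_without(unsigned victim)
{
    unsigned old[SZ];
    unsigned i;

    memcpy(old, tab, sizeof old);
    memset(tab, 0, sizeof tab);
    for (i = 0; i < SZ; i++)
        if (old[i] != 0 && old[i] != victim)
            tab[lookup_slot(old[i])] = old[i];
}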
Example #19
BIF_RETTYPE persistent_term_put_2(BIF_ALIST_2)
{
    static const Uint ITERATIONS_PER_RED = 32;
    ErtsPersistentTermPut2Context* ctx;
    Eterm state_mref = THE_NON_VALUE;
    long iterations_until_trap;
    long max_iterations;
#define PUT_TRAP_CODE                                                   \
    BIF_TRAP2(bif_export[BIF_persistent_term_put_2], BIF_P, state_mref, BIF_ARG_2)
#define TRAPPING_COPY_TABLE_PUT(TABLE_DEST, OLD_TABLE, NEW_SIZE, COPY_TYPE, LOC_NAME) \
    TRAPPING_COPY_TABLE(TABLE_DEST, OLD_TABLE, NEW_SIZE, COPY_TYPE, LOC_NAME, PUT_TRAP_CODE)

#ifdef DEBUG
        (void)ITERATIONS_PER_RED;
        iterations_until_trap = max_iterations =
            GET_SMALL_RANDOM_INT(ERTS_BIF_REDS_LEFT(BIF_P) + (Uint)&ctx);
#else
        iterations_until_trap = max_iterations =
            ITERATIONS_PER_RED * ERTS_BIF_REDS_LEFT(BIF_P);
#endif
    if (is_internal_magic_ref(BIF_ARG_1) &&
        (ERTS_MAGIC_BIN_DESTRUCTOR(erts_magic_ref2bin(BIF_ARG_1)) ==
         persistent_term_put_2_ctx_bin_dtor)) {
        /* Restore state after a trap */
        Binary* state_bin;
        state_mref = BIF_ARG_1;
        state_bin = erts_magic_ref2bin(state_mref);
        ctx = ERTS_MAGIC_BIN_DATA(state_bin);
        ASSERT(BIF_P->flags & F_DISABLE_GC);
        erts_set_gc_state(BIF_P, 1);
        switch (ctx->trap_location) {
        case PUT2_TRAP_LOCATION_NEW_KEY:
            goto L_PUT2_TRAP_LOCATION_NEW_KEY;
        case PUT2_TRAP_LOCATION_REPLACE_VALUE:
            goto L_PUT2_TRAP_LOCATION_REPLACE_VALUE;
        }
    } else {
        /* Save state in magic bin in case trapping is necessary */
        Eterm* hp;
        Binary* state_bin = erts_create_magic_binary(sizeof(ErtsPersistentTermPut2Context),
                                                     persistent_term_put_2_ctx_bin_dtor);
        hp = HAlloc(BIF_P, ERTS_MAGIC_REF_THING_SIZE);
        state_mref = erts_mk_magic_ref(&hp, &MSO(BIF_P), state_bin);
        ctx = ERTS_MAGIC_BIN_DATA(state_bin);
        /*
         * IMPORTANT: The following field is used to detect if
         * persistent_term_put_2_ctx_bin_dtor needs to free memory
         */
        ctx->cpy_ctx.new_table = NULL;
    }


    if (!try_seize_update_permission(BIF_P)) {
	ERTS_BIF_YIELD2(bif_export[BIF_persistent_term_put_2],
                        BIF_P, BIF_ARG_1, BIF_ARG_2);
    }
    ctx->hash_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);

    ctx->key = BIF_ARG_1;
    ctx->term = BIF_ARG_2;

    ctx->entry_index = lookup(ctx->hash_table, ctx->key);

    ctx->heap[0] = make_arityval(2);
    ctx->heap[1] = ctx->key;
    ctx->heap[2] = ctx->term;
    ctx->tuple = make_tuple(ctx->heap);

    if (is_nil(ctx->hash_table->term[ctx->entry_index])) {
        Uint new_size = ctx->hash_table->allocated;
        if (MUST_GROW(ctx->hash_table)) {
            new_size *= 2;
        }
        TRAPPING_COPY_TABLE_PUT(ctx->hash_table,
                                ctx->hash_table,
                                new_size,
                                ERTS_PERSISTENT_TERM_CPY_NO_REHASH,
                                PUT2_TRAP_LOCATION_NEW_KEY);
        ctx->entry_index = lookup(ctx->hash_table, ctx->key);
        ctx->hash_table->num_entries++;
    } else {
        Eterm tuple = ctx->hash_table->term[ctx->entry_index];
        Eterm old_term;

        ASSERT(is_tuple_arity(tuple, 2));
        old_term = boxed_val(tuple)[2];
        if (EQ(ctx->term, old_term)) {
            /* Same value. No need to update anything. */
            release_update_permission(0);
            BIF_RET(am_ok);
        } else {
            /* Mark the old term for deletion. */
            mark_for_deletion(ctx->hash_table, ctx->entry_index);
            TRAPPING_COPY_TABLE_PUT(ctx->hash_table,
                                    ctx->hash_table,
                                    ctx->hash_table->allocated,
                                    ERTS_PERSISTENT_TERM_CPY_NO_REHASH,
                                    PUT2_TRAP_LOCATION_REPLACE_VALUE);
        }
    }

    {
        Uint term_size;
        Uint lit_area_size;
        ErlOffHeap code_off_heap;
        ErtsLiteralArea* literal_area;
        erts_shcopy_t info;
        Eterm* ptr;
        /*
         * Preserve internal sharing in the term by using the
         * sharing-preserving functions. However, literals must
         * be copied in case the module holding them is unloaded.
         */
        INITIALIZE_SHCOPY(info);
        info.copy_literals = 1;
        term_size = copy_shared_calculate(ctx->tuple, &info);
        ERTS_INIT_OFF_HEAP(&code_off_heap);
        lit_area_size = ERTS_LITERAL_AREA_ALLOC_SIZE(term_size);
        literal_area = erts_alloc(ERTS_ALC_T_LITERAL, lit_area_size);
        ptr = &literal_area->start[0];
        literal_area->end = ptr + term_size;
        ctx->tuple = copy_shared_perform(ctx->tuple, term_size, &info, &ptr, &code_off_heap);
        ASSERT(tuple_val(ctx->tuple) == literal_area->start);
        literal_area->off_heap = code_off_heap.first;
        DESTROY_SHCOPY(info);
        erts_set_literal_tag(&ctx->tuple, literal_area->start, term_size);
        ctx->hash_table->term[ctx->entry_index] = ctx->tuple;

        erts_schedule_thr_prgr_later_op(table_updater, ctx->hash_table, &thr_prog_op);
        suspend_updater(BIF_P);
    }
    BUMP_REDS(BIF_P, (max_iterations - iterations_until_trap) / ITERATIONS_PER_RED);
    ERTS_BIF_YIELD_RETURN(BIF_P, am_ok);
}
Example #20
static ERTS_INLINE int
leader_update(ErtsThrPrgrData *tpd)
{
#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_lc_check_exact(NULL, 0);
#endif
    if (!tpd->leader) {
	/* Probably need to block... */
	block_thread(tpd);
    }
    else {
	ErtsThrPrgrVal current;
	int ix, chk_next_ix, umrefc_ix, my_ix, no_managed, waiting_unmanaged;
	erts_aint32_t lflgs;
	ErtsThrPrgrVal next;
	erts_aint_t refc;

	my_ix = tpd->id;

	if (tpd->leader_state.current == ERTS_THR_PRGR_VAL_WAITING) {
	    /* Took over as leader from another thread */
	    tpd->leader_state.current = read_nob(&erts_thr_prgr__.current);
	    tpd->leader_state.next = tpd->leader_state.current;
	    tpd->leader_state.next++;
	    if (tpd->leader_state.next == ERTS_THR_PRGR_VAL_WAITING)
		tpd->leader_state.next = 0;
	    tpd->leader_state.chk_next_ix = intrnl->misc.data.chk_next_ix;
	    tpd->leader_state.umrefc_ix.waiting = intrnl->misc.data.umrefc_ix.waiting;
	    tpd->leader_state.umrefc_ix.current =
		(int) erts_atomic32_read_nob(&intrnl->misc.data.umrefc_ix.current);

	    if (tpd->confirmed == tpd->leader_state.current) {
		ErtsThrPrgrVal val = tpd->leader_state.current + 1;
		if (val == ERTS_THR_PRGR_VAL_WAITING)
		    val = 0;
		tpd->confirmed = val;
		set_mb(&intrnl->thr[my_ix].data.current, val);
	    }
	}


	next = tpd->leader_state.next;

	waiting_unmanaged = 0;
	umrefc_ix = -1; /* Shut up annoying warning */

	chk_next_ix = tpd->leader_state.chk_next_ix;
	no_managed = intrnl->managed.no;
	ASSERT(0 <= chk_next_ix && chk_next_ix <= no_managed);
	/* Check managed threads */
	if (chk_next_ix < no_managed) {
	    for (ix = chk_next_ix; ix < no_managed; ix++) {
		ErtsThrPrgrVal tmp;
		if (ix == my_ix)
		    continue;
		tmp = read_nob(&intrnl->thr[ix].data.current);
		if (tmp != next && tmp != ERTS_THR_PRGR_VAL_WAITING) {
		    tpd->leader_state.chk_next_ix = ix;
		    ASSERT(erts_thr_progress_has_passed__(next, tmp));
		    goto done;
		}
	    }
	}

	/* Check unmanaged threads */
	waiting_unmanaged = tpd->leader_state.umrefc_ix.waiting != -1;
	umrefc_ix = (waiting_unmanaged
		     ? tpd->leader_state.umrefc_ix.waiting
		     : tpd->leader_state.umrefc_ix.current);
	refc = erts_atomic_read_nob(&intrnl->umrefc[umrefc_ix].refc);
	ASSERT(refc >= 0);
	if (refc != 0) {
	    int new_umrefc_ix;

	    if (waiting_unmanaged)
		goto done;

	    new_umrefc_ix = (umrefc_ix + 1) & 0x1;
	    tpd->leader_state.umrefc_ix.waiting = umrefc_ix;
	    tpd->leader_state.chk_next_ix = no_managed;
	    erts_atomic32_set_nob(&intrnl->misc.data.umrefc_ix.current,
				  (erts_aint32_t) new_umrefc_ix);
	    ETHR_MEMBAR(ETHR_StoreLoad);
	    refc = erts_atomic_read_nob(&intrnl->umrefc[umrefc_ix].refc);
	    ASSERT(refc >= 0);
	    waiting_unmanaged = 1;
	    if (refc != 0)
		goto done;
	}

	/* Make progress */
	current = next;

	next++;
	if (next == ERTS_THR_PRGR_VAL_WAITING)
	    next = 0;

	set_nob(&intrnl->thr[my_ix].data.current, next);
	set_mb(&erts_thr_prgr__.current, current);
	tpd->confirmed = next;
	tpd->leader_state.next = next;
	tpd->leader_state.current = current;

#if ERTS_THR_PRGR_PRINT_VAL
	if (current % 1000 == 0)
	    erts_fprintf(stderr, "%b64u\n", current);
#endif
	handle_wakeup_requests(current);

	if (waiting_unmanaged) {
	    waiting_unmanaged = 0;
	    tpd->leader_state.umrefc_ix.waiting = -1;
	    erts_atomic32_read_band_nob(&intrnl->misc.data.lflgs,
					~ERTS_THR_PRGR_LFLG_WAITING_UM);
	}
	tpd->leader_state.chk_next_ix = 0;

    done:

	if (tpd->active) {
	    lflgs = erts_atomic32_read_nob(&intrnl->misc.data.lflgs);
	    if (lflgs & ERTS_THR_PRGR_LFLG_BLOCK)
		(void) block_thread(tpd);
	}
	else {
	    int force_wakeup_check = 0;
	    erts_aint32_t set_flags = ERTS_THR_PRGR_LFLG_NO_LEADER;
	    tpd->leader = 0;
	    tpd->leader_state.current = ERTS_THR_PRGR_VAL_WAITING;
#if ERTS_THR_PRGR_PRINT_LEADER
	    erts_fprintf(stderr, "L <- %d\n", tpd->id);
#endif

	    ERTS_THR_PROGRESS_STATE_DEBUG_SET_LEADER(tpd->id, 0);

	    intrnl->misc.data.umrefc_ix.waiting
		= tpd->leader_state.umrefc_ix.waiting;
	    if (waiting_unmanaged)
		set_flags |= ERTS_THR_PRGR_LFLG_WAITING_UM;

	    lflgs = erts_atomic32_read_bor_relb(&intrnl->misc.data.lflgs,
						set_flags);
	    lflgs |= set_flags;
	    if (lflgs & ERTS_THR_PRGR_LFLG_BLOCK)
		lflgs = block_thread(tpd);

	    if (waiting_unmanaged) {
		/* Need to check umrefc again */
		ETHR_MEMBAR(ETHR_StoreLoad);
		refc = erts_atomic_read_nob(&intrnl->umrefc[umrefc_ix].refc);
		if (refc == 0) {
		    /* Need to force wakeup check */
		    force_wakeup_check = 1;
		}
	    }

	    if ((force_wakeup_check
		 || ((lflgs & (ERTS_THR_PRGR_LFLG_NO_LEADER
			       | ERTS_THR_PRGR_LFLG_WAITING_UM
			       | ERTS_THR_PRGR_LFLG_ACTIVE_MASK))
		     == ERTS_THR_PRGR_LFLG_NO_LEADER))
		&& got_sched_wakeups()) {
		/* Someone needs to make progress */
		wakeup_managed(0);
	    }
	}
    }

    return tpd->leader;
}
Example #21
int module_table_sz(void)
{
    return erts_atomic_read_nob(&tot_module_bytes);
}