/*
 * Garbage collect a process right after a BIF call, keeping the BIF
 * result (or, when the BIF did not return a value, the argument
 * registers) alive as extra roots. Returns the possibly-moved result
 * term and charges the collection cost as reductions.
 */
Eterm
erts_gc_after_bif_call(Process* p, Eterm result, Eterm* regs, Uint arity)
{
    int reds_used;

    if (!is_non_value(result)) {
        /* A valid result term must be rooted so the collector can move
         * it; read the (possibly relocated) term back afterwards. */
        Eterm root[1];

        root[0] = result;
        reds_used = erts_garbage_collect(p, 0, root, 1);
        result = root[0];
    } else if (p->freason == TRAP) {
#if HIPE
        if (regs == NULL) {
            regs = ERTS_PROC_GET_SCHDATA(p)->x_reg_array;
        }
#endif
        /* Trapping: the number of live registers is given by p->arity. */
        reds_used = erts_garbage_collect(p, 0, regs, p->arity);
    } else {
        /* BIF failed with an exception: the BIF arguments are the roots. */
        reds_used = erts_garbage_collect(p, 0, regs, arity);
    }

    BUMP_REDS(p, reds_used);
    return result;
}
static BIF_RETTYPE persistent_term_info_trap(BIF_ALIST_1) { TrapData* trap_data = (TrapData *) BIF_ARG_1; Eterm res; Uint bump_reds; Binary* mbp; mbp = erts_magic_ref2bin(BIF_ARG_1); trap_data = ERTS_MAGIC_BIN_DATA(mbp); bump_reds = trap_data->remaining; res = do_info(BIF_P, trap_data); if (trap_data->remaining > 0) { ASSERT(res == am_ok); BUMP_ALL_REDS(BIF_P); BIF_TRAP1(&persistent_term_info_export, BIF_P, BIF_ARG_1); } else { /* * Decrement ref count (and possibly delete the hash table * and associated literal area). */ dec_table_refc(BIF_P, trap_data->table); trap_data->table = NULL; /* Prevent refc decrement */ BUMP_REDS(BIF_P, bump_reds); ASSERT(is_map(res)); BIF_RET(res); } }
/*
 * Walks the list at *iterator_p within the current reduction budget,
 * adding the number of cells traversed to *count_p and advancing
 * *iterator_p to where the walk stopped.
 *
 * Returns 1 when the end of a proper list was reached, 0 when the
 * budget ran out first (caller should trap), and -1 on improper lists.
 */
static int subtract_get_length(Process *p, Eterm *iterator_p, Uint *count_p) {
    static const Sint ELEMENTS_PER_RED = 32;
    Sint max_elements, walked;
    Eterm cursor;

    max_elements = ELEMENTS_PER_RED * ERTS_BIF_REDS_LEFT(p);
    cursor = *iterator_p;

#ifdef DEBUG
    /* Shrink the budget in debug builds to exercise the trapping path. */
    max_elements = max_elements / 10 + 1;
#endif

    walked = 0;
    while (walked < max_elements && is_list(cursor)) {
        cursor = CDR(list_val(cursor));
        walked++;
    }

    if (!is_list(cursor) && !is_nil(cursor)) {
        return -1;
    }

    BUMP_REDS(p, walked / ELEMENTS_PER_RED);

    *iterator_p = cursor;
    *count_p += walked;

    return is_nil(cursor) ? 1 : 0;
}
/*
 * persistent_term:info/0. Collects information about the persistent
 * term table into a map, trapping with a magic-ref continuation when
 * the table cannot be traversed within the reduction budget.
 */
BIF_RETTYPE persistent_term_info_0(BIF_ALIST_0)
{
    HashTable* hash_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
    TrapData* trap_data;
    Eterm res = NIL;
    Eterm magic_ref;
    Binary* mbp;

    /* Traversal state is kept in a magic binary so it survives traps. */
    magic_ref = alloc_trap_data(BIF_P);
    mbp = erts_magic_ref2bin(magic_ref);
    trap_data = ERTS_MAGIC_BIN_DATA(mbp);
    trap_data->table = hash_table;
    trap_data->idx = 0;
    trap_data->remaining = hash_table->num_entries;
    trap_data->memory = 0;
    res = do_info(BIF_P, trap_data);
    if (trap_data->remaining == 0) {
        /* Finished in one pass; no continuation needed. */
        BUMP_REDS(BIF_P, hash_table->num_entries);
        trap_data->table = NULL; /* Prevent refc decrement */
        BIF_RET(res);
    } else {
        /*
         * Increment the ref counter to prevent an update operation (by put/2
         * or erase/1) to delete this hash table.
         */
        erts_atomic_inc_nob(&hash_table->refc);
        BUMP_ALL_REDS(BIF_P);
        BIF_TRAP2(&persistent_term_info_export, BIF_P, magic_ref, res);
    }
}
/*
 * lists:reverse/2 helper that builds the reversed cells directly on the
 * unused part of the process heap (cheaper than HAlloc). Falls back to
 * lists_reverse_alloc() when the heap runs out before the reduction
 * budget, and traps when the budget runs out.
 */
static BIF_RETTYPE lists_reverse_onheap(Process *c_p,
                                        Eterm list_in,
                                        Eterm tail_in)
{
    static const Uint CELLS_PER_RED = 60;

    Eterm *alloc_start, *alloc_top, *alloc_end;
    Uint cells_left, max_cells;
    Eterm list, tail;

    list = list_in;
    tail = tail_in;

    cells_left = max_cells = CELLS_PER_RED * (1 + ERTS_BIF_REDS_LEFT(c_p));

    ASSERT(HEAP_LIMIT(c_p) >= HEAP_TOP(c_p) + 2);
    alloc_start = HEAP_TOP(c_p);
    alloc_end = HEAP_LIMIT(c_p) - 2;
    alloc_top = alloc_start;

    /* Don't process more cells than we have reductions for. */
    alloc_end = MIN(alloc_top + (cells_left * 2), alloc_end);

    while (alloc_top < alloc_end && is_list(list)) {
        Eterm *pair = list_val(list);

        tail = CONS(alloc_top, CAR(pair), tail);
        list = CDR(pair);

        alloc_top += 2;
    }

    cells_left -= (alloc_top - alloc_start) / 2;
    HEAP_TOP(c_p) = alloc_top;

    /* cells_left is unsigned, so a ">= 0" check would be a tautology;
     * an underflow would wrap around and is caught by the upper bound. */
    ASSERT(cells_left <= max_cells);
    BUMP_REDS(c_p, (max_cells - cells_left) / CELLS_PER_RED);

    if (is_nil(list)) {
        BIF_RET(tail);
    } else if (is_list(list)) {
        ASSERT(is_list(tail));

        if (cells_left > CELLS_PER_RED) {
            /* Heap ran out before the reduction budget; continue on a
             * freshly allocated area. */
            return lists_reverse_alloc(c_p, list, tail);
        }

        BUMP_ALL_REDS(c_p);
        BIF_TRAP2(bif_export[BIF_lists_reverse_2], c_p, list, tail);
    }

    /* Improper list. */
    BIF_ERROR(c_p, BADARG);
}
/*
 * lists:reverse/2 helper that allocates the reversed cells with HAlloc.
 * First pre-scans the input within the reduction budget to learn how
 * many cells to build (and to detect improper lists before allocating),
 * then builds exactly that many, trapping if input remains.
 */
static BIF_RETTYPE lists_reverse_alloc(Process *c_p,
                                       Eterm list_in,
                                       Eterm tail_in)
{
    static const Uint CELLS_PER_RED = 40;

    Eterm *alloc_top, *alloc_end;
    Uint cells_left, max_cells;
    Eterm list, tail;
    Eterm lookahead;

    list = list_in;
    tail = tail_in;

    /* Budget: number of cons cells we may build before trapping. */
    cells_left = max_cells = CELLS_PER_RED * (1 + ERTS_BIF_REDS_LEFT(c_p));
    lookahead = list;

    while (cells_left != 0 && is_list(lookahead)) {
        lookahead = CDR(list_val(lookahead));
        cells_left--;
    }

    BUMP_REDS(c_p, (max_cells - cells_left) / CELLS_PER_RED);

    if (is_not_list(lookahead) && is_not_nil(lookahead)) {
        /* Improper list detected during the pre-scan. */
        BIF_ERROR(c_p, BADARG);
    }

    alloc_top = HAlloc(c_p, 2 * (max_cells - cells_left));
    alloc_end = alloc_top + 2 * (max_cells - cells_left);

    /* The pre-scan guarantees this many cells exist in the input. */
    while (alloc_top < alloc_end) {
        Eterm *pair = list_val(list);

        tail = CONS(alloc_top, CAR(pair), tail);
        list = CDR(pair);

        ASSERT(is_list(list) || is_nil(list));

        alloc_top += 2;
    }

    if (is_nil(list)) {
        BIF_RET(tail);
    }

    /* Budget exhausted with input remaining: trap, resuming with the
     * partial result as the new tail. */
    ASSERT(is_list(tail) && cells_left == 0);
    BIF_TRAP2(bif_export[BIF_lists_reverse_2], c_p, list, tail);
}
/*
 * Builds a red-black tree (a multiset keyed by term, with counts) from
 * the right-hand-side list of a lists:subtract/2 operation, consuming
 * the reduction budget as it goes.
 *
 * Returns non-zero when the whole RHS has been inserted, zero when the
 * budget ran out and the caller should trap and resume later.
 */
static int subtract_set_build(Process *p, ErtsSubtractContext *context)
{
    /* "static const" is the conventional qualifier order ("const
     * static" is legal but old-style). */
    static const Sint INSERTIONS_PER_RED = 16;
    Sint budget, insertions;

    budget = INSERTIONS_PER_RED * ERTS_BIF_REDS_LEFT(p);
    insertions = 0;

#ifdef DEBUG
    /* Shrink the budget in debug builds to exercise the trapping path. */
    budget = budget / 10 + 1;
#endif

    while (insertions < budget && is_list(context->iterator)) {
        subtract_tree_t *existing_node, *new_node;
        const Eterm *cell;
        Eterm value, next;

        cell = list_val(context->iterator);

        value = CAR(cell);
        next = CDR(cell);

        /* Nodes come from a preallocated array; the slot is only
         * "committed" (alloc pointer advanced) when the key was new. */
        new_node = context->u.rhs_set.alloc;
        new_node->key = value;
        new_node->count = 1;

        existing_node = subtract_rbt_lookup_insert(&context->u.rhs_set.tree,
                                                   new_node);

        if (existing_node != NULL) {
            /* Duplicate key: bump its multiplicity instead. */
            existing_node->count++;
        } else {
            context->u.rhs_set.alloc++;
        }

        context->iterator = next;
        insertions++;
    }

    BUMP_REDS(p, insertions / INSERTIONS_PER_RED);

    ASSERT(is_list(context->iterator) || is_nil(context->iterator));
    ASSERT(context->u.rhs_set.tree != NULL);

    return is_nil(context->iterator);
}
/*
 * put/2 for the process dictionary: stores {id, value} in p's hash
 * table, returning the previous value for the key or am_undefined.
 *
 * Heap words are counted up front and a GC is forced if they don't
 * fit, so all later allocations can use HeapOnlyAlloc (no GC between
 * counting and allocating — the order of operations here is
 * load-bearing).
 */
static Eterm pd_hash_put(Process *p, Eterm id, Eterm value)
{
    unsigned int hval;
    Eterm *hp;
    Eterm tpl;
    Eterm old;
    Eterm tmp;
    int needed;
    int i = 0;
#ifdef DEBUG
    Eterm *hp_limit;
#endif

    if (p->dictionary == NULL) {
        /* Create it */
        array_put(&(p->dictionary), INITIAL_SIZE - 1, NIL);
        p->dictionary->homeSize = INITIAL_SIZE;
    }
    hval = pd_hash_value(p->dictionary, id);
    old = ARRAY_GET(p->dictionary, hval);

    /*
     * Calculate the number of heap words needed and garbage
     * collect if necessary. (Might be a slight overestimation.)
     */
    needed = 3; /* {Key,Value} tuple */
    if (is_boxed(old)) {
        /*
         * We don't want to compare keys twice, so we'll always
         * reserve the space for two CONS cells.
         */
        needed += 2+2;
    } else if (is_list(old)) {
        /* Bucket is a collision list; find the key's position (i) or
         * note its absence (i == -1). */
        i = 0;
        for (tmp = old; tmp != NIL && !EQ(tuple_val(TCAR(tmp))[1], id); tmp = TCDR(tmp)) {
            ++i;
        }
        if (is_nil(tmp)) {
            i = -1;
            needed += 2;
        } else {
            /* Rebuilding the list up to the changed element needs one
             * cons per skipped element plus one for the new head. */
            needed += 2*(i+1);
        }
    }
    if (HeapWordsLeft(p) < needed) {
        /* GC may move terms; id/value/old must be passed as roots and
         * reloaded afterwards. */
        Eterm root[3];
        root[0] = id;
        root[1] = value;
        root[2] = old;
        BUMP_REDS(p, erts_garbage_collect(p, needed, root, 3));
        id = root[0];
        value = root[1];
        old = root[2];
    }
#ifdef DEBUG
    hp_limit = p->htop + needed;
#endif

    /*
     * Create the {Key,Value} tuple.
     */
    hp = HeapOnlyAlloc(p, 3);
    tpl = TUPLE2(hp, id, value);

    /*
     * Update the dictionary.
     */
    if (is_nil(old)) {
        /* Empty bucket: store the tuple directly. */
        array_put(&(p->dictionary), hval, tpl);
        ++(p->dictionary->numElements);
    } else if (is_boxed(old)) {
        ASSERT(is_tuple(old));
        if (EQ(tuple_val(old)[1],id)) {
            /* Same key: replace in place and return the old value. */
            array_put(&(p->dictionary), hval, tpl);
            return tuple_val(old)[2];
        } else {
            /* Hash collision: turn the single tuple into a 2-element
             * collision list [tpl | [old]]. */
            hp = HeapOnlyAlloc(p, 4);
            tmp = CONS(hp, old, NIL);
            hp += 2;
            ++(p->dictionary->numElements);
            array_put(&(p->dictionary), hval, CONS(hp, tpl, tmp));
            hp += 2;
            ASSERT(hp <= hp_limit);
        }
    } else if (is_list(old)) {
        if (i == -1) {
            /*
             * New key. Simply prepend the tuple to the beginning of the list.
             */
            hp = HeapOnlyAlloc(p, 2);
            array_put(&(p->dictionary), hval, CONS(hp, tpl, old));
            hp += 2;
            ASSERT(hp <= hp_limit);
            ++(p->dictionary->numElements);
        } else {
            /*
             * i = Number of CDRs to skip to reach the changed element in the list.
             *
             * Replace old value in list. To avoid pointers from the old generation
             * to the new, we must rebuild the list from the beginning up to and
             * including the changed element.
             */
            Eterm nlist;
            int j;

            hp = HeapOnlyAlloc(p, (i+1)*2);

            /* Find the list element to change. */
            for (j = 0, nlist = old; j < i; j++, nlist = TCDR(nlist)) {
                ;
            }
            ASSERT(EQ(tuple_val(TCAR(nlist))[1], id));
            nlist = TCDR(nlist); /* Unchanged part of list. */

            /* Rebuild list before the updated element. */
            for (tmp = old; i-- > 0; tmp = TCDR(tmp)) {
                nlist = CONS(hp, TCAR(tmp), nlist);
                hp += 2;
            }
            ASSERT(EQ(tuple_val(TCAR(tmp))[1], id));

            /* Put the updated element first in the new list. */
            nlist = CONS(hp, tpl, nlist);
            hp += 2;
            ASSERT(hp <= hp_limit);
            array_put(&(p->dictionary), hval, nlist);
            return tuple_val(TCAR(tmp))[2];
        }
    } else {
        /* Bucket contains neither NIL, a tuple nor a list: corruption. */
#ifdef DEBUG
        erts_fprintf(stderr,
                     "Process dictionary for process %T is broken, trying to "
                     "display term found in line %d:\n"
                     "%T\n", p->common.id, __LINE__, old);
#endif
        erl_exit(1, "Damaged process dictionary found during put/2.");
    }
    if (HASH_RANGE(p->dictionary) <= p->dictionary->numElements) {
        grow(p);
    }
    return am_undefined;
}
/*
 * Work loop for iolist_to_iovec: walks the input iolist, appending
 * binaries and byte sequences to the state's accumulator. Nesting is
 * handled with an explicit ESTACK of pending tails; the "byte budget"
 * (reductions * IOL2V_SMALL_BIN_LIMIT) bounds the work per schedule,
 * and the ESTACK plus iterator are saved in `state` when yielding.
 */
static BIF_RETTYPE iol2v_continue(iol2v_state_t *state) {
    Eterm iterator;

    DECLARE_ESTACK(s);
    ESTACK_CHANGE_ALLOCATOR(s, ERTS_ALC_T_SAVED_ESTACK);

    state->bytereds_available =
        ERTS_BIF_REDS_LEFT(state->process) * IOL2V_SMALL_BIN_LIMIT;
    state->bytereds_spent = 0;

    if (state->estack.start) {
        /* Resuming after a yield: restore the saved traversal stack. */
        ESTACK_RESTORE(s, &state->estack);
    }

    iterator = state->input_list;

    for(;;) {
        if (state->bytereds_spent >= state->bytereds_available) {
            /* Budget exhausted: save state and yield. */
            ESTACK_SAVE(s, &state->estack);
            state->input_list = iterator;

            return iol2v_yield(state);
        }

        while (is_list(iterator)) {
            Eterm *cell;
            Eterm head;

            cell = list_val(iterator);
            head = CAR(cell);

            if (is_binary(head)) {
                if (!iol2v_append_binary(state, head)) {
                    goto l_badarg;
                }

                iterator = CDR(cell);
            } else if (is_small(head)) {
                /* A run of byte-valued smalls is consumed in one go;
                 * seq_end is the first non-byte cell. */
                Eterm seq_end;

                if (!iol2v_append_byte_seq(state, iterator, &seq_end)) {
                    goto l_badarg;
                }

                iterator = seq_end;
            } else if (is_list(head) || is_nil(head)) {
                /* Nested list: remember our tail and descend. */
                Eterm tail = CDR(cell);

                if (!is_nil(tail)) {
                    ESTACK_PUSH(s, tail);
                }

                state->bytereds_spent += 1;
                iterator = head;
            } else {
                goto l_badarg;
            }

            if (state->bytereds_spent >= state->bytereds_available) {
                ESTACK_SAVE(s, &state->estack);
                state->input_list = iterator;

                return iol2v_yield(state);
            }
        }

        /* An iolist tail may be a binary (improper list) or NIL. */
        if (is_binary(iterator)) {
            if (!iol2v_append_binary(state, iterator)) {
                goto l_badarg;
            }
        } else if (!is_nil(iterator)) {
            goto l_badarg;
        }

        if(ESTACK_ISEMPTY(s)) {
            break;
        }

        /* Pop back up to the pending tail of an enclosing list. */
        iterator = ESTACK_POP(s);
    }

    if (state->acc_size != 0) {
        /* Flush the partially filled accumulator binary. */
        iol2v_enqueue_result(state, iol2v_promote_acc(state));
    }

    BUMP_REDS(state->process, state->bytereds_spent / IOL2V_SMALL_BIN_LIMIT);

    CLEAR_SAVED_ESTACK(&state->estack);
    DESTROY_ESTACK(s);

    BIF_RET(state->result_head);

l_badarg:
    /* Error path: release the stack and the accumulator binary. */
    CLEAR_SAVED_ESTACK(&state->estack);
    DESTROY_ESTACK(s);

    if (state->acc != NULL) {
        erts_bin_free(state->acc);
        state->acc = NULL;
    }

    BIF_ERROR(state->process, BADARG);
}
/*
 * persistent_term:erase/1. Removing a key requires rebuilding the hash
 * table (no delete markers are used): first a same-size temporary copy,
 * then a rehashing copy with the entry removed. Both copies can trap;
 * the continuation state lives in a magic binary whose destructor
 * frees any half-built tables. The goto labels targeted below are
 * emitted by the TRAPPING_COPY_TABLE machinery.
 */
BIF_RETTYPE persistent_term_erase_1(BIF_ALIST_1)
{
    static const Uint ITERATIONS_PER_RED = 32;
    ErtsPersistentTermErase1Context* ctx;
    Eterm state_mref = THE_NON_VALUE;
    long iterations_until_trap;
    long max_iterations;
#ifdef DEBUG
    /* Randomized (small) budget in debug builds to exercise trapping. */
    (void)ITERATIONS_PER_RED;
    iterations_until_trap = max_iterations =
        GET_SMALL_RANDOM_INT(ERTS_BIF_REDS_LEFT(BIF_P) + (Uint)&ctx);
#else
    iterations_until_trap = max_iterations =
        ITERATIONS_PER_RED * ERTS_BIF_REDS_LEFT(BIF_P);
#endif
#define ERASE_TRAP_CODE                                                 \
    BIF_TRAP1(bif_export[BIF_persistent_term_erase_1], BIF_P, state_mref);
#define TRAPPING_COPY_TABLE_ERASE(TABLE_DEST, OLD_TABLE, NEW_SIZE, REHASH, LOC_NAME) \
    TRAPPING_COPY_TABLE(TABLE_DEST, OLD_TABLE, NEW_SIZE, REHASH, LOC_NAME, ERASE_TRAP_CODE)
    if (is_internal_magic_ref(BIF_ARG_1) &&
        (ERTS_MAGIC_BIN_DESTRUCTOR(erts_magic_ref2bin(BIF_ARG_1)) ==
         persistent_term_erase_1_ctx_bin_dtor)) {
        /* Restore the state after a trap */
        Binary* state_bin;
        state_mref = BIF_ARG_1;
        state_bin = erts_magic_ref2bin(state_mref);
        ctx = ERTS_MAGIC_BIN_DATA(state_bin);
        ASSERT(BIF_P->flags & F_DISABLE_GC);
        erts_set_gc_state(BIF_P, 1);
        switch (ctx->trap_location) {
        case ERASE1_TRAP_LOCATION_TMP_COPY:
            goto L_ERASE1_TRAP_LOCATION_TMP_COPY;
        case ERASE1_TRAP_LOCATION_FINAL_COPY:
            goto L_ERASE1_TRAP_LOCATION_FINAL_COPY;
        }
    } else {
        /* Save state in magic bin in case trapping is necessary */
        Eterm* hp;
        Binary* state_bin =
            erts_create_magic_binary(sizeof(ErtsPersistentTermErase1Context),
                                     persistent_term_erase_1_ctx_bin_dtor);
        hp = HAlloc(BIF_P, ERTS_MAGIC_REF_THING_SIZE);
        state_mref = erts_mk_magic_ref(&hp, &MSO(BIF_P), state_bin);
        ctx = ERTS_MAGIC_BIN_DATA(state_bin);
        /*
         * IMPORTANT: The following two fields are used to detect if
         * persistent_term_erase_1_ctx_bin_dtor needs to free memory
         */
        ctx->cpy_ctx.new_table = NULL;
        ctx->tmp_table = NULL;
    }
    if (!try_seize_update_permission(BIF_P)) {
        /* Another update is in progress; yield and retry. */
        ERTS_BIF_YIELD1(bif_export[BIF_persistent_term_erase_1],
                        BIF_P, BIF_ARG_1);
    }
    ctx->key = BIF_ARG_1;
    ctx->old_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
    ctx->entry_index = lookup(ctx->old_table, ctx->key);
    ctx->old_term = ctx->old_table->term[ctx->entry_index];
    if (is_boxed(ctx->old_term)) {
        Uint new_size;
        /*
         * Since we don't use any delete markers, we must rehash
         * the table when deleting terms to ensure that all terms
         * can still be reached if there are hash collisions.
         * We can't rehash in place and it would not be safe to modify
         * the old table yet, so we will first need a new
         * temporary table copy of the same size as the old one.
         */
        ASSERT(is_tuple_arity(ctx->old_term, 2));
        TRAPPING_COPY_TABLE_ERASE(ctx->tmp_table,
                                  ctx->old_table,
                                  ctx->old_table->allocated,
                                  ERTS_PERSISTENT_TERM_CPY_TEMP,
                                  ERASE1_TRAP_LOCATION_TMP_COPY);
        /*
         * Delete the term from the temporary table. Then copy the
         * temporary table to a new table, rehashing the entries
         * while copying.
         */
        ctx->tmp_table->term[ctx->entry_index] = NIL;
        ctx->tmp_table->num_entries--;
        new_size = ctx->tmp_table->allocated;
        if (MUST_SHRINK(ctx->tmp_table)) {
            new_size /= 2;
        }
        TRAPPING_COPY_TABLE_ERASE(ctx->new_table,
                                  ctx->tmp_table,
                                  new_size,
                                  ERTS_PERSISTENT_TERM_CPY_REHASH,
                                  ERASE1_TRAP_LOCATION_FINAL_COPY);
        erts_free(ERTS_ALC_T_PERSISTENT_TERM_TMP, ctx->tmp_table);
        /*
         * IMPORTANT: Memory management depends on that ctx->tmp_table
         * is set to NULL on the line below
         */
        ctx->tmp_table = NULL;
        mark_for_deletion(ctx->old_table, ctx->entry_index);
        /* Publish the new table after thread progress; readers may
         * still be using the old one. */
        erts_schedule_thr_prgr_later_op(table_updater, ctx->new_table, &thr_prog_op);
        suspend_updater(BIF_P);
        BUMP_REDS(BIF_P, (max_iterations - iterations_until_trap) / ITERATIONS_PER_RED);
        ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
    }

    /*
     * Key is not present. Nothing to do.
     */
    ASSERT(is_nil(ctx->old_term));
    release_update_permission(0);
    BIF_RET(am_false);
}
/*
 * lists:subtract/2 strategy for a small left-hand side held in the
 * context's lhs_elements array: walk RHS, and for each element remove
 * its first occurrence from the LHS array (preserving order via
 * memmove). The surviving LHS elements become the result.
 *
 * Returns 1 when done, 0 to trap and resume, -1 if RHS is improper.
 */
static int subtract_naive_lhs(Process *p, ErtsSubtractContext *context)
{
    const Sint CHECKS_PER_RED = 16;
    Sint checks, budget;

    budget = CHECKS_PER_RED * ERTS_BIF_REDS_LEFT(p);
    checks = 0;

    while (checks < budget && is_list(context->iterator)) {
        const Eterm *cell;
        Eterm value, next;
        int found_at;

        cell = list_val(context->iterator);
        value = CAR(cell);
        next = CDR(cell);

        for (found_at = 0; found_at < context->lhs_remaining; found_at++) {
            if (EQ(value, context->u.lhs_elements[found_at])) {
                /* We shift the array one step down as we have to preserve
                 * order.
                 *
                 * Note that we can't exit early as that would suppress errors
                 * in the right-hand side (this runs prior to determining the
                 * length of RHS). */
                context->lhs_remaining--;
                sys_memmove(&context->u.lhs_elements[found_at],
                            &context->u.lhs_elements[found_at + 1],
                            (context->lhs_remaining - found_at) * sizeof(Eterm));
                break;
            }
        }

        /* Charge for the inner scan (at least one check per element). */
        checks += MAX(1, context->lhs_remaining);
        context->iterator = next;
    }

    BUMP_REDS(p, MIN(checks, budget) / CHECKS_PER_RED);

    if (is_list(context->iterator)) {
        /* Budget exhausted; caller traps and resumes here. */
        return 0;
    } else if (!is_nil(context->iterator)) {
        /* Improper RHS list. */
        return -1;
    }

    if (context->lhs_remaining > 0) {
        /* Build the result list from the surviving LHS elements,
         * consing back-to-front to keep their original order. */
        Eterm *hp;
        int i;

        hp = HAlloc(p, context->lhs_remaining * 2);

        for (i = context->lhs_remaining - 1; i >= 0; i--) {
            Eterm value = context->u.lhs_elements[i];

            context->result = CONS(hp, value, context->result);
            hp += 2;
        }
    }

    ASSERT(context->lhs_remaining > 0 || context->result == NIL);

    return 1;
}
/*
 * Copies `message` to the receiver's heap (or a heap fragment) and
 * enqueues it, handling seq-trace token propagation, dtrace probes and
 * self-sends. Charges the sender reductions proportional to the number
 * of words copied.
 *
 * NOTE(review): behavior differs with SHCOPY_SEND (sharing-preserving
 * copy) vs. plain copy-with-literal-area; both paths are kept in sync
 * below.
 */
void
erts_send_message(Process* sender,
                  Process* receiver,
                  ErtsProcLocks *receiver_locks,
                  Eterm message)
{
    Uint msize;
    ErtsMessage* mp;
    ErlOffHeap *ohp;
    Eterm token = NIL;
#ifdef USE_VM_PROBES
    DTRACE_CHARBUF(sender_name, 64);
    DTRACE_CHARBUF(receiver_name, 64);
    Sint tok_label = 0;
    Sint tok_lastcnt = 0;
    Sint tok_serial = 0;
    Eterm utag = NIL;
#endif
    erts_aint32_t receiver_state;
#ifdef SHCOPY_SEND
    erts_shcopy_t info;
#else
    erts_literal_area_t litarea;
    INITIALIZE_LITERAL_PURGE_AREA(litarea);
#endif

#ifdef USE_VM_PROBES
    *sender_name = *receiver_name = '\0';
    if (DTRACE_ENABLED(message_send)) {
        erts_snprintf(sender_name, sizeof(DTRACE_CHARBUF_NAME(sender_name)),
                      "%T", sender->common.id);
        erts_snprintf(receiver_name, sizeof(DTRACE_CHARBUF_NAME(receiver_name)),
                      "%T", receiver->common.id);
    }
#endif

    receiver_state = erts_atomic32_read_nob(&receiver->state);

    if (SEQ_TRACE_TOKEN(sender) != NIL) {
        /* Sequential tracing is active: the token (and possibly a
         * dtrace user tag) must be copied along with the message. */
        Eterm* hp;
        Eterm stoken = SEQ_TRACE_TOKEN(sender);
        Uint seq_trace_size = 0;
#ifdef USE_VM_PROBES
        Uint dt_utag_size = 0;
#endif
        /* SHCOPY corrupts the heap between
         * copy_shared_calculate, and
         * copy_shared_perform. (it inserts move_markers like the gc).
         * Make sure we don't use the heap between those instances.
         */
        if (have_seqtrace(stoken)) {
            seq_trace_update_serial(sender);
            seq_trace_output(stoken, message, SEQ_TRACE_SEND,
                             receiver->common.id, sender);
            seq_trace_size = size_object(stoken);
        }
#ifdef USE_VM_PROBES
        if (DT_UTAG_FLAGS(sender) & DT_UTAG_SPREADING) {
            dt_utag_size = size_object(DT_UTAG(sender));
        } else if (stoken == am_have_dt_utag ) {
            stoken = NIL;
        }
#endif

#ifdef SHCOPY_SEND
        INITIALIZE_SHCOPY(info);
        msize = copy_shared_calculate(message, &info);
#else
        msize = size_object_litopt(message, &litarea);
#endif
        /* One allocation covers message + token (+ user tag). */
        mp = erts_alloc_message_heap_state(receiver,
                                           &receiver_state,
                                           receiver_locks,
                                           (msize
#ifdef USE_VM_PROBES
                                            + dt_utag_size
#endif
                                            + seq_trace_size),
                                           &hp,
                                           &ohp);

#ifdef SHCOPY_SEND
        if (is_not_immed(message))
            message = copy_shared_perform(message, msize, &info, &hp, ohp);
        DESTROY_SHCOPY(info);
#else
        if (is_not_immed(message))
            message = copy_struct_litopt(message, msize, &hp, ohp, &litarea);
#endif
        if (is_immed(stoken))
            token = stoken;
        else
            token = copy_struct(stoken, seq_trace_size, &hp, ohp);

#ifdef USE_VM_PROBES
        if (DT_UTAG_FLAGS(sender) & DT_UTAG_SPREADING) {
            if (is_immed(DT_UTAG(sender)))
                utag = DT_UTAG(sender);
            else
                utag = copy_struct(DT_UTAG(sender), dt_utag_size, &hp, ohp);
        }
        if (DTRACE_ENABLED(message_send)) {
            if (have_seqtrace(stoken)) {
                tok_label = SEQ_TRACE_T_DTRACE_LABEL(stoken);
                tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(stoken));
                tok_serial = signed_val(SEQ_TRACE_T_SERIAL(stoken));
            }
            DTRACE6(message_send, sender_name, receiver_name,
                    msize, tok_label, tok_lastcnt, tok_serial);
        }
#endif
    } else {
        Eterm *hp;

        if (receiver == sender && !(receiver_state & ERTS_PSFLG_OFF_HEAP_MSGQ)) {
            /* Self-send with on-heap message queue: the message is
             * already on the receiver's heap, no copy needed. */
            mp = erts_alloc_message(0, NULL);
            msize = 0;
        } else {
#ifdef SHCOPY_SEND
            INITIALIZE_SHCOPY(info);
            msize = copy_shared_calculate(message, &info);
#else
            msize = size_object_litopt(message, &litarea);
#endif
            mp = erts_alloc_message_heap_state(receiver,
                                               &receiver_state,
                                               receiver_locks,
                                               msize,
                                               &hp,
                                               &ohp);
#ifdef SHCOPY_SEND
            if (is_not_immed(message))
                message = copy_shared_perform(message, msize, &info, &hp, ohp);
            DESTROY_SHCOPY(info);
#else
            if (is_not_immed(message))
                message = copy_struct_litopt(message, msize, &hp, ohp, &litarea);
#endif
        }
#ifdef USE_VM_PROBES
        DTRACE6(message_send, sender_name, receiver_name,
                msize, tok_label, tok_lastcnt, tok_serial);
#endif
    }

    ERL_MESSAGE_TOKEN(mp) = token;
#ifdef USE_VM_PROBES
    ERL_MESSAGE_DT_UTAG(mp) = utag;
#endif

    erts_queue_proc_message(sender, receiver, *receiver_locks, mp, message);

    /* Charge the sender for the copy work, capped at a full slice. */
    if (msize > ERTS_MSG_COPY_WORDS_PER_REDUCTION) {
        Uint reds = msize / ERTS_MSG_COPY_WORDS_PER_REDUCTION;
        if (reds > CONTEXT_REDS)
            reds = CONTEXT_REDS;
        BUMP_REDS(sender, (int) reds);
    }
}
/*
 * Final phase of set-based lists:subtract/2: walks LHS and emits every
 * element not found in the RHS multiset, decrementing counts (and
 * deleting exhausted nodes) as matches are consumed. The result list is
 * built destructively through context->result_cdr, which is safe since
 * GC is disabled while the unfinished term exists.
 *
 * Returns 1 when done, 0 to trap and resume.
 */
static int subtract_set_finish(Process *p, ErtsSubtractContext *context)
{
    const Sint CHECKS_PER_RED = 8;
    Sint checks, budget;

    budget = CHECKS_PER_RED * ERTS_BIF_REDS_LEFT(p);
    checks = 0;

#ifdef DEBUG
    /* Shrink the budget in debug builds to exercise the trapping path. */
    budget = budget / 10 + 1;
#endif

    while (checks < budget && is_list(context->iterator)) {
        subtract_tree_t *node;
        const Eterm *cell;
        Eterm value, next;

        cell = list_val(context->iterator);

        value = CAR(cell);
        next = CDR(cell);

        ASSERT(context->rhs_remaining > 0);

        node = subtract_rbt_lookup(context->u.rhs_set.tree, value);
        if (node == NULL) {
            /* Not in RHS: append to the result. */
            Eterm *hp = HAllocX(p, 2, context->lhs_remaining * 2);

            *context->result_cdr = make_list(hp);
            context->result_cdr = &CDR(hp);

            CAR(hp) = value;
        } else {
            if (context->rhs_remaining-- == 1) {
                /* Last removable element consumed: the rest of LHS
                 * passes through unchanged. */
                *context->result_cdr = next;

                BUMP_REDS(p, checks / CHECKS_PER_RED);
                return 1;
            }

            if (node->count-- == 1) {
                subtract_rbt_delete(&context->u.rhs_set.tree, node);
            }
        }

        context->iterator = next;
        context->lhs_remaining--;
        checks++;
    }

    /* Terminate the partial result (also done when trapping, to keep
     * the term well-formed for debugging). */
    *context->result_cdr = NIL;

    BUMP_REDS(p, checks / CHECKS_PER_RED);

    if (is_list(context->iterator)) {
        ASSERT(context->lhs_remaining > 0 && context->rhs_remaining > 0);
        return 0;
    }

    return 1;
}
/*
 * lists:subtract/2 strategy for a small right-hand side held in the
 * context's rhs_elements array: walk LHS, emitting elements without a
 * match in RHS and removing matched RHS entries by swapping in the last
 * array element (order of RHS does not matter here).
 *
 * Returns 1 when done, 0 to trap and resume.
 */
static int subtract_naive_rhs(Process *p, ErtsSubtractContext *context)
{
    const Sint CHECKS_PER_RED = 16;
    Sint checks, budget;

    budget = CHECKS_PER_RED * ERTS_BIF_REDS_LEFT(p);
    checks = 0;

#ifdef DEBUG
    /* Shrink the budget in debug builds to exercise the trapping path. */
    budget = budget / 10 + 1;
#endif

    while (checks < budget && is_list(context->iterator)) {
        const Eterm *cell;
        Eterm value, next;
        int found_at;

        cell = list_val(context->iterator);
        value = CAR(cell);
        next = CDR(cell);

        for (found_at = context->rhs_remaining - 1; found_at >= 0; found_at--) {
            if (EQ(value, context->u.rhs_elements[found_at])) {
                break;
            }
        }

        if (found_at < 0) {
            /* Destructively add the value to the result. This is safe
             * since the GC is disabled and the unfinished term is never
             * leaked to the outside world. */
            Eterm *hp = HAllocX(p, 2, context->lhs_remaining * 2);

            *context->result_cdr = make_list(hp);
            context->result_cdr = &CDR(hp);

            CAR(hp) = value;
        } else {
            /* found_at >= 0 is implied here; the old "else if" test was
             * redundant. */
            Eterm swap;

            if (context->rhs_remaining-- == 1) {
                /* We've run out of items to remove, so the rest of the
                 * result will be equal to the remainder of the input. We know
                 * that LHS is well-formed as any errors would've been reported
                 * during length determination. */
                *context->result_cdr = next;

                BUMP_REDS(p, MIN(budget, checks) / CHECKS_PER_RED);
                return 1;
            }

            swap = context->u.rhs_elements[context->rhs_remaining];
            context->u.rhs_elements[found_at] = swap;
        }

        checks += context->rhs_remaining;
        context->iterator = next;
        context->lhs_remaining--;
    }

    /* The result only has to be terminated when returning it to the user, but
     * we're doing it when trapping as well to prevent headaches when
     * debugging. */
    *context->result_cdr = NIL;

    BUMP_REDS(p, MIN(budget, checks) / CHECKS_PER_RED);

    if (is_list(context->iterator)) {
        ASSERT(context->lhs_remaining > 0 && context->rhs_remaining > 0);
        return 0;
    }

    return 1;
}
/*
 * Shrinks the process dictionary hash table by merging the top buckets
 * back into their "home" buckets (linear hashing in reverse). `ret` is
 * a single-element GC root array that must stay live across any forced
 * collection.
 */
static void shrink(Process *p, Eterm* ret)
{
    unsigned int range = HASH_RANGE(p->dictionary);
    unsigned int steps = (range*3) / 10;
    Eterm hi, lo, tmp;
    unsigned int i;
    Eterm *hp;
#ifdef DEBUG
    Eterm *hp_limit;
#endif

    /* Never shrink below the initial table size. */
    if (range - steps < INITIAL_SIZE) {
        steps = range - INITIAL_SIZE;
    }

    for (i = 0; i < steps; ++i) {
        ProcDict *pd = p->dictionary;
        if (pd->splitPosition == 0) {
            pd->homeSize /= 2;
            pd->splitPosition = pd->homeSize;
        }
        --(pd->splitPosition);
        /* Merge bucket "hi" (being retired) into its home bucket "lo". */
        hi = ARRAY_GET(pd, (pd->splitPosition + pd->homeSize));
        lo = ARRAY_GET(pd, pd->splitPosition);
        if (hi != NIL) {
            if (lo == NIL) {
                array_put(&(p->dictionary), pd->splitPosition, hi);
            } else {
                int needed = 4;
                if (is_list(hi) && is_list(lo)) {
                    needed = 2*erts_list_length(hi);
                }
                if (HeapWordsLeft(p) < needed) {
                    /* GC may move terms: re-read hi/lo afterwards. */
                    BUMP_REDS(p, erts_garbage_collect(p, needed, ret, 1));
                    hi = pd->data[(pd->splitPosition + pd->homeSize)];
                    lo = pd->data[pd->splitPosition];
                }
#ifdef DEBUG
                hp_limit = p->htop + needed;
#endif
                /* A bucket is either a single {K,V} tuple or a
                 * collision list of tuples; handle all four shapes. */
                if (is_tuple(lo)) {
                    if (is_tuple(hi)) {
                        hp = HeapOnlyAlloc(p, 4);
                        tmp = CONS(hp, hi, NIL);
                        hp += 2;
                        array_put(&(p->dictionary), pd->splitPosition,
                                  CONS(hp,lo,tmp));
                        hp += 2;
                        ASSERT(hp <= hp_limit);
                    } else { /* hi is a list */
                        hp = HeapOnlyAlloc(p, 2);
                        array_put(&(p->dictionary), pd->splitPosition,
                                  CONS(hp, lo, hi));
                        hp += 2;
                        ASSERT(hp <= hp_limit);
                    }
                } else { /* lo is a list */
                    if (is_tuple(hi)) {
                        hp = HeapOnlyAlloc(p, 2);
                        array_put(&(p->dictionary), pd->splitPosition,
                                  CONS(hp, hi, lo));
                        hp += 2;
                        ASSERT(hp <= hp_limit);
                    } else { /* Two lists */
                        hp = HeapOnlyAlloc(p, needed);
                        for (tmp = hi; tmp != NIL; tmp = TCDR(tmp)) {
                            lo = CONS(hp, TCAR(tmp), lo);
                            hp += 2;
                        }
                        ASSERT(hp <= hp_limit);
                        array_put(&(p->dictionary), pd->splitPosition, lo);
                    }
                }
            }
        }
        array_put(&(p->dictionary), (pd->splitPosition + pd->homeSize), NIL);
    }
    /* Release backing storage when the table is mostly empty. */
    if (HASH_RANGE(p->dictionary) <= (p->dictionary->size / 4)) {
        array_shrink(&(p->dictionary), (HASH_RANGE(p->dictionary) * 3) / 2);
    }
}
/*
 * Grows the process dictionary hash table by splitting buckets (linear
 * hashing). Heap words for rebuilt collision lists are counted in a
 * first pass and a GC is forced if they don't fit, so the second pass
 * can use HeapOnlyAlloc safely.
 */
static void grow(Process *p)
{
    unsigned int i,j;
    unsigned int steps = p->dictionary->homeSize / 5;
    Eterm l1,l2;
    Eterm l;
    Eterm *hp;
    unsigned int pos;
    unsigned int homeSize;
    int needed = 0;
    ProcDict *pd;
#ifdef DEBUG
    Eterm *hp_limit;
#endif

    HDEBUGF(("grow: steps = %d", steps));
    if (steps == 0)
        steps = 1;
    /* Dont grow over MAX_HASH */
    if ((MAX_HASH - steps) <= HASH_RANGE(p->dictionary)) {
        return;
    }

    /*
     * Calculate total number of heap words needed, and garbage collect
     * if necessary.
     */

    pd = p->dictionary;
    pos = pd->splitPosition;
    homeSize = pd->homeSize;
    for (i = 0; i < steps; ++i) {
        if (pos == homeSize) {
            homeSize *= 2;
            pos = 0;
        }
        l = ARRAY_GET(pd, pos);
        pos++;
        if (is_not_tuple(l)) {
            /* Collision list: splitting it rebuilds every cell. */
            while (l != NIL) {
                needed += 2;
                l = TCDR(l);
            }
        }
    }
    if (HeapWordsLeft(p) < needed) {
        /* No roots needed: the dictionary itself is a GC root. */
        BUMP_REDS(p, erts_garbage_collect(p, needed, 0, 0));
    }
#ifdef DEBUG
    hp_limit = p->htop + needed;
#endif

    /*
     * Now grow.
     */

    for (i = 0; i < steps; ++i) {
        ProcDict *pd = p->dictionary;
        if (pd->splitPosition == pd->homeSize) {
            pd->homeSize *= 2;
            pd->splitPosition = 0;
        }
        pos = pd->splitPosition;
        ++pd->splitPosition; /* For the hashes */
        l = ARRAY_GET(pd, pos);
        if (is_tuple(l)) {
            /* Single entry: move it if it now hashes to the new bucket. */
            if (pd_hash_value(pd, tuple_val(l)[1]) != pos) {
                array_put(&(p->dictionary), pos + p->dictionary->homeSize, l);
                array_put(&(p->dictionary), pos, NIL);
            }
        } else {
            /* Collision list: partition into l1 (stays) and l2 (moves). */
            l2 = NIL;
            l1 = l;
            for (j = 0; l1 != NIL; l1 = TCDR(l1))
                j += 2;
            hp = HeapOnlyAlloc(p, j);

            while (l != NIL) {
                if (pd_hash_value(pd, tuple_val(TCAR(l))[1]) == pos)
                    l1 = CONS(hp, TCAR(l), l1);
                else
                    l2 = CONS(hp, TCAR(l), l2);
                hp += 2;
                l = TCDR(l);
            }
            /* A one-element list is stored as the bare tuple. */
            if (l1 != NIL && TCDR(l1) == NIL)
                l1 = TCAR(l1);
            if (l2 != NIL && TCDR(l2) == NIL)
                l2 = TCAR(l2);
            ASSERT(hp <= hp_limit);
            /* After array_put pd is no longer valid */
            array_put(&(p->dictionary), pos, l1);
            array_put(&(p->dictionary), pos + p->dictionary->homeSize, l2);
        }
    }
#ifdef HARDDEBUG
    dictionary_dump(p->dictionary,CERR);
#endif
}
/*
 * persistent_term:put/2. Copies the {Key, Term} tuple into a freshly
 * allocated literal area and installs it in (a copy of) the hash table.
 * Table copies can trap; the continuation state lives in a magic binary
 * whose destructor frees any half-built table. The goto labels targeted
 * below are emitted by the TRAPPING_COPY_TABLE machinery.
 */
BIF_RETTYPE persistent_term_put_2(BIF_ALIST_2)
{
    static const Uint ITERATIONS_PER_RED = 32;
    ErtsPersistentTermPut2Context* ctx;
    Eterm state_mref = THE_NON_VALUE;
    long iterations_until_trap;
    long max_iterations;
#define PUT_TRAP_CODE                                                  \
    BIF_TRAP2(bif_export[BIF_persistent_term_put_2], BIF_P, state_mref, BIF_ARG_2)
#define TRAPPING_COPY_TABLE_PUT(TABLE_DEST, OLD_TABLE, NEW_SIZE, COPY_TYPE, LOC_NAME) \
    TRAPPING_COPY_TABLE(TABLE_DEST, OLD_TABLE, NEW_SIZE, COPY_TYPE, LOC_NAME, PUT_TRAP_CODE)

#ifdef DEBUG
    /* Randomized (small) budget in debug builds to exercise trapping. */
    (void)ITERATIONS_PER_RED;
    iterations_until_trap = max_iterations =
        GET_SMALL_RANDOM_INT(ERTS_BIF_REDS_LEFT(BIF_P) + (Uint)&ctx);
#else
    iterations_until_trap = max_iterations =
        ITERATIONS_PER_RED * ERTS_BIF_REDS_LEFT(BIF_P);
#endif
    if (is_internal_magic_ref(BIF_ARG_1) &&
        (ERTS_MAGIC_BIN_DESTRUCTOR(erts_magic_ref2bin(BIF_ARG_1)) ==
         persistent_term_put_2_ctx_bin_dtor)) {
        /* Restore state after a trap */
        Binary* state_bin;
        state_mref = BIF_ARG_1;
        state_bin = erts_magic_ref2bin(state_mref);
        ctx = ERTS_MAGIC_BIN_DATA(state_bin);
        ASSERT(BIF_P->flags & F_DISABLE_GC);
        erts_set_gc_state(BIF_P, 1);
        switch (ctx->trap_location) {
        case PUT2_TRAP_LOCATION_NEW_KEY:
            goto L_PUT2_TRAP_LOCATION_NEW_KEY;
        case PUT2_TRAP_LOCATION_REPLACE_VALUE:
            goto L_PUT2_TRAP_LOCATION_REPLACE_VALUE;
        }
    } else {
        /* Save state in magic bin in case trapping is necessary */
        Eterm* hp;
        Binary* state_bin =
            erts_create_magic_binary(sizeof(ErtsPersistentTermPut2Context),
                                     persistent_term_put_2_ctx_bin_dtor);
        hp = HAlloc(BIF_P, ERTS_MAGIC_REF_THING_SIZE);
        state_mref = erts_mk_magic_ref(&hp, &MSO(BIF_P), state_bin);
        ctx = ERTS_MAGIC_BIN_DATA(state_bin);
        /*
         * IMPORTANT: The following field is used to detect if
         * persistent_term_put_2_ctx_bin_dtor needs to free memory
         */
        ctx->cpy_ctx.new_table = NULL;
    }

    if (!try_seize_update_permission(BIF_P)) {
        /* Another update is in progress; yield and retry. */
        ERTS_BIF_YIELD2(bif_export[BIF_persistent_term_put_2],
                        BIF_P, BIF_ARG_1, BIF_ARG_2);
    }
    ctx->hash_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
    ctx->key = BIF_ARG_1;
    ctx->term = BIF_ARG_2;

    ctx->entry_index = lookup(ctx->hash_table, ctx->key);

    /* Build the {Key, Term} tuple in the context's scratch heap. */
    ctx->heap[0] = make_arityval(2);
    ctx->heap[1] = ctx->key;
    ctx->heap[2] = ctx->term;
    ctx->tuple = make_tuple(ctx->heap);

    if (is_nil(ctx->hash_table->term[ctx->entry_index])) {
        /* New key: copy (and possibly grow) the table. */
        Uint new_size = ctx->hash_table->allocated;
        if (MUST_GROW(ctx->hash_table)) {
            new_size *= 2;
        }
        TRAPPING_COPY_TABLE_PUT(ctx->hash_table,
                                ctx->hash_table,
                                new_size,
                                ERTS_PERSISTENT_TERM_CPY_NO_REHASH,
                                PUT2_TRAP_LOCATION_NEW_KEY);
        /* The index may have changed if the table grew. */
        ctx->entry_index = lookup(ctx->hash_table, ctx->key);
        ctx->hash_table->num_entries++;
    } else {
        Eterm tuple = ctx->hash_table->term[ctx->entry_index];
        Eterm old_term;

        ASSERT(is_tuple_arity(tuple, 2));
        old_term = boxed_val(tuple)[2];
        if (EQ(ctx->term, old_term)) {
            /* Same value. No need to update anything. */
            release_update_permission(0);
            BIF_RET(am_ok);
        } else {
            /* Mark the old term for deletion. */
            mark_for_deletion(ctx->hash_table, ctx->entry_index);
            TRAPPING_COPY_TABLE_PUT(ctx->hash_table,
                                    ctx->hash_table,
                                    ctx->hash_table->allocated,
                                    ERTS_PERSISTENT_TERM_CPY_NO_REHASH,
                                    PUT2_TRAP_LOCATION_REPLACE_VALUE);
        }
    }

    {
        Uint term_size;
        Uint lit_area_size;
        ErlOffHeap code_off_heap;
        ErtsLiteralArea* literal_area;
        erts_shcopy_t info;
        Eterm* ptr;
        /*
         * Preserve internal sharing in the term by using the
         * sharing-preserving functions. However, literals must
         * be copied in case the module holding them are unloaded.
         */
        INITIALIZE_SHCOPY(info);
        info.copy_literals = 1;
        term_size = copy_shared_calculate(ctx->tuple, &info);
        ERTS_INIT_OFF_HEAP(&code_off_heap);
        lit_area_size = ERTS_LITERAL_AREA_ALLOC_SIZE(term_size);
        literal_area = erts_alloc(ERTS_ALC_T_LITERAL, lit_area_size);
        ptr = &literal_area->start[0];
        literal_area->end = ptr + term_size;
        ctx->tuple = copy_shared_perform(ctx->tuple, term_size, &info,
                                         &ptr, &code_off_heap);
        ASSERT(tuple_val(ctx->tuple) == literal_area->start);
        literal_area->off_heap = code_off_heap.first;
        DESTROY_SHCOPY(info);
        erts_set_literal_tag(&ctx->tuple, literal_area->start, term_size);
        ctx->hash_table->term[ctx->entry_index] = ctx->tuple;

        /* Publish the new table after thread progress; readers may
         * still be using the old one. */
        erts_schedule_thr_prgr_later_op(table_updater, ctx->hash_table,
                                        &thr_prog_op);
        suspend_updater(BIF_P);
    }
    BUMP_REDS(BIF_P, (max_iterations - iterations_until_trap) / ITERATIONS_PER_RED);
    ERTS_BIF_YIELD_RETURN(BIF_P, am_ok);
}