void erts_flxctr_destroy(ErtsFlxCtr* c, ErtsAlcType_t type)
{
    if (c->is_decentralized) {
        if (erts_flxctr_is_snapshot_ongoing(c)) {
            ErtsFlxCtrDecentralizedCtrArray* array =
                ERTS_FLXCTR_GET_CTR_ARRAY_PTR(c);
            /* A snapshot is using the counter array, so it cannot be
             * freed here. Try to delegate the responsibility of
             * freeing to thr_prg_wake_up_and_count. */
            Sint expected = ERTS_FLXCTR_SNAPSHOT_ONGOING;
            if (expected !=
                erts_atomic_cmpxchg_mb(&array->snapshot_status,
                                       ERTS_FLXCTR_SNAPSHOT_ONGOING_TP_THREAD_DO_FREE,
                                       expected)) {
                /* The delegation was unsuccessful, which means that no
                 * snapshot is ongoing anymore and the freeing needs to
                 * be done here. */
                ERTS_ASSERT(!erts_flxctr_is_snapshot_ongoing(c));
                erts_free(type, array->block_start);
            }
        } else {
            erts_free(type, ERTS_FLXCTR_GET_CTR_ARRAY_PTR(c)->block_start);
        }
    }
}
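/*
 * Illustrative sketch (assumption: not part of the original source).
 * The delegation above hinges on a single compare-and-swap: whichever
 * side fails to move the status away from ONGOING knows the other side
 * has already finished and must free the block itself. A minimal
 * standalone analogue of that hand-off, using C11 atomics and
 * hypothetical names:
 */
#include <stdatomic.h>
#include <stdlib.h>

enum { SNAPSHOT_ONGOING, SNAPSHOT_ONGOING_DO_FREE, SNAPSHOT_DONE };

typedef struct {
    _Atomic int snapshot_status;
    void *block_start;
} ctr_array_t;

/* Destroyer side: try to hand the free() over to the snapshot thread. */
static void destroy_ctr_array(ctr_array_t *array)
{
    int expected = SNAPSHOT_ONGOING;
    if (!atomic_compare_exchange_strong(&array->snapshot_status,
                                        &expected,
                                        SNAPSHOT_ONGOING_DO_FREE)) {
        /* CAS failed: the snapshot already completed, so free here. */
        free(array->block_start);
    }
}

int main(void)
{
    ctr_array_t a;
    atomic_init(&a.snapshot_status, SNAPSHOT_DONE);
    a.block_start = malloc(64);
    destroy_ctr_array(&a);   /* no snapshot ongoing: frees directly */
    return 0;
}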
ErtsMessage *
erts_factory_message_create(ErtsHeapFactory* factory,
                            Process *proc,
                            ErtsProcLocks *proc_locksp,
                            Uint sz)
{
    Eterm *hp;
    ErlOffHeap *ohp;
    ErtsMessage *msgp;
    int on_heap;
    erts_aint32_t state;

    state = proc ? erts_atomic32_read_nob(&proc->state) : 0;

    if (state & ERTS_PSFLG_OFF_HEAP_MSGQ) {
        /* Receiver uses off-heap message queue data: allocate the
         * message and its heap data outside the receiver's heap. */
        msgp = erts_alloc_message(sz, &hp);
        ohp = sz == 0 ? NULL : &msgp->hfrag.off_heap;
        on_heap = 0;
    }
    else {
        msgp = erts_try_alloc_message_on_heap(proc, &state,
                                              proc_locksp,
                                              sz, &hp, &ohp, &on_heap);
    }

    if (on_heap) {
        /* The reservation landed directly on the receiver's heap; save
         * the current heap-fragment state so the build can be undone. */
        ERTS_ASSERT(*proc_locksp & ERTS_PROC_LOCK_MAIN);
        ASSERT(ohp == &proc->off_heap);
        factory->mode = FACTORY_HALLOC;
        factory->p = proc;
        factory->heap_frags_saved = proc->mbuf;
        factory->heap_frags_saved_used = proc->mbuf ? proc->mbuf->used_size : 0;
    }
    else {
        factory->mode = FACTORY_MESSAGE;
        factory->p = NULL;
        factory->heap_frags_saved = NULL;
        factory->heap_frags_saved_used = 0;

        if (msgp->data.attached == ERTS_MSG_COMBINED_HFRAG) {
            ASSERT(!msgp->hfrag.next);
            factory->heap_frags = NULL;
        }
        else {
            ASSERT(!msgp->data.heap_frag || !msgp->data.heap_frag->next);
            factory->heap_frags = msgp->data.heap_frag;
        }
    }
    factory->hp_start = hp;
    factory->hp = hp;
    factory->hp_end = hp + sz;
    factory->message = msgp;
    factory->off_heap = ohp;
    factory->alloc_type = ERTS_ALC_T_HEAP_FRAG;
    if (ohp) {
        factory->off_heap_saved.first = ohp->first;
        factory->off_heap_saved.overhead = ohp->overhead;
    }
    else {
        factory->off_heap_saved.first = NULL;
        factory->off_heap_saved.overhead = 0;
    }

    ASSERT(factory->hp >= factory->hp_start && factory->hp <= factory->hp_end);

    return msgp;
}
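/*
 * Illustrative usage sketch (assumption: not part of the original
 * source). It shows the typical life cycle of the factory created
 * above: reserve sz words, copy a term into the reserved area, then
 * trim the reservation to what was actually used before queueing the
 * message. The variables receiver, locks, term and from are
 * hypothetical, and the helpers size_object, copy_struct,
 * erts_factory_trim_and_close and erts_queue_message are assumed to be
 * available as elsewhere in this code base.
 */
static void queue_copied_term(Process *receiver,
                              ErtsProcLocks *locks,
                              Eterm term,
                              Eterm from)
{
    ErtsHeapFactory factory;
    ErtsMessage *mp;
    Eterm copy;
    Uint sz = size_object(term);

    /* Reserve room on the receiver's heap or in a heap fragment. */
    mp = erts_factory_message_create(&factory, receiver, locks, sz);
    copy = copy_struct(term, sz, &factory.hp, factory.off_heap);
    erts_factory_trim_and_close(&factory, &copy, 1);

    erts_queue_message(receiver, *locks, mp, copy, from);
}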
static int subtract_continue(Process *p, ErtsSubtractContext *context)
{
    switch (context->stage) {
    case SUBTRACT_STAGE_START: {
        return subtract_enter_len_lhs(p, context);
    }

    case SUBTRACT_STAGE_LEN_LHS: {
        int res = subtract_get_length(p, &context->iterator,
                                      &context->lhs_remaining);
        if (res != 1) {
            return res;
        }

        /* Short left-hand sides are handled with the naive algorithm. */
        if (context->lhs_remaining <= SUBTRACT_LHS_THRESHOLD) {
            return subtract_enter_naive_lhs(p, context);
        }

        return subtract_enter_len_rhs(p, context);
    }

    case SUBTRACT_STAGE_NAIVE_LHS: {
        return subtract_naive_lhs(p, context);
    }

    case SUBTRACT_STAGE_LEN_RHS: {
        int res = subtract_get_length(p, &context->iterator,
                                      &context->rhs_remaining);
        if (res != 1) {
            return res;
        }

        /* We've walked through both lists fully now, so we no longer
         * need to check for errors past this point. */
        if (context->rhs_remaining <= SUBTRACT_RHS_THRESHOLD) {
            return subtract_enter_naive_rhs(p, context);
        }

        return subtract_enter_set_build(p, context);
    }

    case SUBTRACT_STAGE_NAIVE_RHS: {
        return subtract_naive_rhs(p, context);
    }

    case SUBTRACT_STAGE_SET_BUILD: {
        int res = subtract_set_build(p, context);
        if (res != 1) {
            return res;
        }
        return subtract_enter_set_finish(p, context);
    }

    case SUBTRACT_STAGE_SET_FINISH: {
        return subtract_set_finish(p, context);
    }

    default:
        ERTS_ASSERT(!"unreachable");
    }
}
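/*
 * Illustrative sketch (assumption: not part of the original source).
 * The function above is the re-entry point of a staged, resumable
 * computation: each case either finishes, returns so the caller can
 * reschedule and call again later, or advances the context to the next
 * stage. A minimal standalone analogue of that trampoline pattern, with
 * hypothetical names:
 */
#include <stdio.h>

enum stage { STAGE_START, STAGE_WORK, STAGE_DONE };

typedef struct {
    enum stage stage;
    int progress;
} context_t;

/* Returns 1 when finished, 0 when the caller should call again. */
static int continue_work(context_t *ctx)
{
    switch (ctx->stage) {
    case STAGE_START:
        ctx->progress = 0;
        ctx->stage = STAGE_WORK;
        return 0;                      /* yield; resume later */
    case STAGE_WORK:
        if (++ctx->progress < 3)
            return 0;                  /* still more to do */
        ctx->stage = STAGE_DONE;
        /* fall through */
    case STAGE_DONE:
        return 1;                      /* computation done */
    }
    return 1;
}

int main(void)
{
    context_t ctx = { STAGE_START, 0 };
    while (!continue_work(&ctx))
        ;                              /* trampoline loop */
    puts("done");
    return 0;
}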