/*
 * port_set_data/2 BIF: associate an arbitrary Erlang term with a port
 * (retrievable later via port_get_data/1).
 *
 * BIF_ARG_1: port identifier (or registered name); badarg if no such
 *            live port exists.
 * BIF_ARG_2: term to store.
 *
 * Returns 'true' on success.
 */
BIF_RETTYPE port_set_data_2(BIF_ALIST_2)
{
    Port* prt;
    Eterm portid = BIF_ARG_1;
    Eterm data = BIF_ARG_2;

    /* Resolves and locks the port; NULL means unknown or dead port. */
    prt = id_or_name2port(BIF_P, portid);
    if (!prt) {
        BIF_ERROR(BIF_P, BADARG);
    }
    /* Free the heap fragment holding any previously stored data. */
    if (prt->bp != NULL) {
        free_message_buffer(prt->bp);
        prt->bp = NULL;
    }
    if (IS_CONST(data)) {
        /* Immediates/constants need no copy; store the word directly. */
        prt->data = data;
    } else {
        /*
         * Non-constant terms live on the caller's heap, which may be
         * GC:ed at any time; copy into a fragment owned by the port.
         */
        Uint size;
        ErlHeapFragment* bp;
        Eterm* hp;
        size = size_object(data);
        prt->bp = bp = new_message_buffer(size);
        hp = bp->mem;
        prt->data = copy_struct(data, size, &hp, &bp->off_heap);
    }
    /* id_or_name2port() acquired the port lock; release it. */
    erts_smp_port_unlock(prt);
    BIF_RET(am_true);
}
/*
 * Decode an external (distribution) term into heap memory, returning
 * the decoded message term.
 *
 * pp/plcksp - receiving process and the locks currently held on it;
 *             if pp is NULL the result goes into a standalone heap
 *             fragment instead.
 * bpp       - out: heap fragment used for the decode, or NULL when
 *             the term ended up on the process heap.
 * tokenp    - in/out: seq-trace token; a non-nil token is stored in
 *             the dist-ext trailer fragment and is copied into the
 *             same destination heap as the message.
 * dist_extp - the external term copy; always freed before return.
 *
 * Returns THE_NON_VALUE on decode failure (with *bpp set to NULL and
 * the trailer's off-heap data cleaned up).
 */
Eterm
erts_msg_distext2heap(Process *pp,
                      ErtsProcLocks *plcksp,
                      ErlHeapFragment **bpp,
                      Eterm *tokenp,
                      ErtsDistExternal *dist_extp)
{
    Eterm msg;
    Uint tok_sz = 0;
    Eterm *hp = NULL;
    ErtsHeapFactory factory;
    Sint sz;

    *bpp = NULL;
    /* Negative size means the external term is malformed. */
    sz = erts_decode_dist_ext_size(dist_extp);
    if (sz < 0)
        goto decode_error;
    if (is_not_nil(*tokenp)) {
        /* Token lives in a trailer fragment appended to the dist-ext. */
        ErlHeapFragment *heap_frag = erts_dist_ext_trailer(dist_extp);
        tok_sz = heap_frag->used_size;
        sz += tok_sz;
    }
    if (pp) {
        ErlOffHeap *ohp;
        hp = erts_alloc_message_heap(sz, bpp, &ohp, pp, plcksp);
    }
    else {
        *bpp = new_message_buffer(sz);
        hp = (*bpp)->mem;
    }
    erts_factory_message_init(&factory, pp, hp, *bpp);
    msg = erts_decode_dist_ext(&factory, dist_extp);
    if (is_non_value(msg))
        goto decode_error;
    if (is_not_nil(*tokenp)) {
        /* Copy the token next to the message, then drop the trailer's
         * off-heap references (the copy now owns them). */
        ErlHeapFragment *heap_frag = erts_dist_ext_trailer(dist_extp);
        hp = erts_produce_heap(&factory, tok_sz, 0);
        *tokenp = copy_struct(*tokenp, tok_sz, &hp, factory.off_heap);
        erts_cleanup_offheap(&heap_frag->off_heap);
    }
    erts_free_dist_ext_copy(dist_extp);
    erts_factory_close(&factory);
    return msg;

 decode_error:
    if (is_not_nil(*tokenp)) {
        ErlHeapFragment *heap_frag = erts_dist_ext_trailer(dist_extp);
        erts_cleanup_offheap(&heap_frag->off_heap);
    }
    erts_free_dist_ext_copy(dist_extp);
    *bpp = NULL;
    return THE_NON_VALUE;
}
/*
 * Deliver an {'EXIT', From, Reason} message to process 'to'.
 *
 * from      - exiting entity (pid/port); copied if not a constant.
 * to        - receiving process; to_locksp names the locks held on it.
 * reason    - exit reason term; always copied.
 * token     - seq-trace token; when set (and not the dtrace-only
 *             am_have_dt_utag marker) the message is seq-traced and
 *             the token is copied along with the tuple.
 *
 * The message tuple, copies and (optional) token are built in a heap
 * fragment or directly on the receiver's heap, then queued.
 */
void
erts_deliver_exit_message(Eterm from, Process *to, ErtsProcLocks *to_locksp,
                          Eterm reason, Eterm token)
{
    Eterm mess;
    Eterm save;
    Eterm from_copy;
    Uint sz_reason;
    Uint sz_token;
    Uint sz_from;
    Eterm* hp;
    Eterm temptoken;
    ErlHeapFragment* bp = NULL;

    if (token != NIL
#ifdef USE_VM_PROBES
        && token != am_have_dt_utag
#endif
        ) {
        /* Seq-traced delivery: everything goes into one fragment:
         * reason + from + token + 4 words for the TUPLE3 header. */
        ASSERT(is_tuple(token));
        sz_reason = size_object(reason);
        sz_token = size_object(token);
        sz_from = size_object(from);
        bp = new_message_buffer(sz_reason + sz_from + sz_token + 4);
        hp = bp->mem;
        mess = copy_struct(reason, sz_reason, &hp, &bp->off_heap);
        from_copy = copy_struct(from, sz_from, &hp, &bp->off_heap);
        save = TUPLE3(hp, am_EXIT, from_copy, mess);
        hp += 4;
        /* the trace token must in this case be updated by the caller */
        seq_trace_output(token, save, SEQ_TRACE_SEND, to->common.id, NULL);
        temptoken = copy_struct(token, sz_token, &hp, &bp->off_heap);
        erts_queue_message(to, to_locksp, bp, save, temptoken);
    } else {
        /* Untraced delivery: allocate on the receiver's heap when
         * possible (bp stays NULL), else in a fragment. */
        ErlOffHeap *ohp;
        sz_reason = size_object(reason);
        sz_from = IS_CONST(from) ? 0 : size_object(from);
        hp = erts_alloc_message_heap(sz_reason+sz_from+4,
                                     &bp, &ohp, to, to_locksp);
        mess = copy_struct(reason, sz_reason, &hp, ohp);
        from_copy = (IS_CONST(from)
                     ? from
                     : copy_struct(from, sz_from, &hp, ohp));
        save = TUPLE3(hp, am_EXIT, from_copy, mess);
        erts_queue_message(to, to_locksp, bp, save, NIL);
    }
}
/*
 * Move all message content that still resides on the process heap into
 * heap fragments attached to the messages themselves. This *only*
 * occurs when the process had off-heap message queue data disabled and
 * just enabled it (F_OFF_HEAP_MSGQ_CHNG in progress).
 *
 * Returns the (approximate) number of reductions consumed.
 */
Sint
erts_move_messages_off_heap(Process *c_p)
{
    int reds = 1;
    ErtsMessage *mp;

    reds += c_p->msg.len / 10;

    ASSERT(erts_atomic32_read_nob(&c_p->state) & ERTS_PSFLG_OFF_HEAP_MSGQ);
    ASSERT(c_p->flags & F_OFF_HEAP_MSGQ_CHNG);

    for (mp = c_p->msg.first; mp; mp = mp->next) {
        Uint msg_sz, token_sz;
#ifdef USE_VM_PROBES
        Uint utag_sz;
#endif
        Eterm *hp;
        ErlHeapFragment *hfrag;

        /* Already in a fragment or in external format: no heap refs. */
        if (mp->data.attached)
            continue;

        /*
         * Skip only when term, token (and utag) are all immediates,
         * i.e. the message holds no references into the process heap.
         *
         * BUGFIX: this condition previously read
         * is_not_immed(ERL_MESSAGE_TOKEN(mp)), which skipped exactly
         * those messages whose seq-trace token *did* refer into the
         * heap, leaving dangling heap references after the move (and
         * conversely failed to skip all-immediate messages). The copy
         * guards below (is_not_immed => copy) show the intended
         * invariant: anything not immediate must be moved.
         */
        if (is_immed(ERL_MESSAGE_TERM(mp))
#ifdef USE_VM_PROBES
            && is_immed(ERL_MESSAGE_DT_UTAG(mp))
#endif
            && is_immed(ERL_MESSAGE_TOKEN(mp)))
            continue;

        /*
         * The message refers into the heap. Copy the message
         * from the heap into a heap fragment and attach
         * it to the message...
         */
        msg_sz = size_object(ERL_MESSAGE_TERM(mp));
#ifdef USE_VM_PROBES
        utag_sz = size_object(ERL_MESSAGE_DT_UTAG(mp));
#endif
        token_sz = size_object(ERL_MESSAGE_TOKEN(mp));

        hfrag = new_message_buffer(msg_sz
#ifdef USE_VM_PROBES
                                   + utag_sz
#endif
                                   + token_sz);
        hp = hfrag->mem;
        if (is_not_immed(ERL_MESSAGE_TERM(mp)))
            ERL_MESSAGE_TERM(mp) = copy_struct(ERL_MESSAGE_TERM(mp),
                                               msg_sz, &hp,
                                               &hfrag->off_heap);
        if (is_not_immed(ERL_MESSAGE_TOKEN(mp)))
            ERL_MESSAGE_TOKEN(mp) = copy_struct(ERL_MESSAGE_TOKEN(mp),
                                                token_sz, &hp,
                                                &hfrag->off_heap);
#ifdef USE_VM_PROBES
        if (is_not_immed(ERL_MESSAGE_DT_UTAG(mp)))
            ERL_MESSAGE_DT_UTAG(mp) = copy_struct(ERL_MESSAGE_DT_UTAG(mp),
                                                  utag_sz, &hp,
                                                  &hfrag->off_heap);
#endif
        mp->data.heap_frag = hfrag;
        reds += 1;
    }

    return reds;
}
/*
 * Shrink a heap fragment to 'size' words, updating the root terms in
 * brefs[0..brefs_size-1] (which must point into the fragment) if the
 * underlying memory moves.
 *
 * If the saving would be less than 1/16 of the fragment, only the
 * book-kept used_size is adjusted and the same fragment is returned.
 * Otherwise the fragment is realloc:ed and, when the allocator moved
 * it, all internal pointers, off-heap chains and the brefs roots are
 * offset to the new address.
 *
 * Returns the (possibly moved) fragment; callers must use the return
 * value, the old pointer may be stale.
 */
ErlHeapFragment*
erts_resize_message_buffer(ErlHeapFragment *bp, Uint size,
                           Eterm *brefs, Uint brefs_size)
{
#ifdef DEBUG
    int i;
#endif
#ifdef HARD_DEBUG
    ErlHeapFragment *dbg_bp;
    Eterm *dbg_brefs;
    Uint dbg_size;
    Uint dbg_tot_size;
    Eterm *dbg_hp;
#endif
    ErlHeapFragment* nbp;

    /* Sanity: every non-immediate root must point inside the part of
     * the fragment that survives the resize. */
#ifdef DEBUG
    {
        Uint off_sz = size < bp->used_size ? size : bp->used_size;
        for (i = 0; i < brefs_size; i++) {
            Eterm *ptr;
            if (is_immed(brefs[i]))
                continue;
            ptr = ptr_val(brefs[i]);
            ASSERT(&bp->mem[0] <= ptr && ptr < &bp->mem[0] + off_sz);
        }
    }
#endif

    /* Not worth a realloc if we save less than 1/16 of the buffer. */
    if (size >= (bp->used_size - bp->used_size / 16)) {
        bp->used_size = size;
        return bp;
    }

    /* Keep an independent deep copy of all roots so the result can be
     * compared term-by-term after the move. */
#ifdef HARD_DEBUG
    dbg_brefs = erts_alloc(ERTS_ALC_T_UNDEF, sizeof(Eterm *)*brefs_size);
    dbg_bp = new_message_buffer(bp->used_size);
    dbg_hp = dbg_bp->mem;
    dbg_tot_size = 0;
    for (i = 0; i < brefs_size; i++) {
        dbg_size = size_object(brefs[i]);
        dbg_tot_size += dbg_size;
        dbg_brefs[i] = copy_struct(brefs[i], dbg_size, &dbg_hp,
                                   &dbg_bp->off_heap);
    }
    ASSERT(dbg_tot_size == (size < bp->used_size ? size : bp->used_size));
#endif

    nbp = (ErlHeapFragment*) ERTS_HEAP_REALLOC(ERTS_ALC_T_HEAP_FRAG,
                                               (void *) bp,
                                               ERTS_HEAP_FRAG_SIZE(bp->alloc_size),
                                               ERTS_HEAP_FRAG_SIZE(size));
    if (bp != nbp) {
        /* The allocator moved the block: offset every pointer that
         * referred into the old memory range [sp, ep). */
        Uint off_sz = size < nbp->used_size ? size : nbp->used_size;
        Eterm *sp = &bp->mem[0];
        Eterm *ep = sp + off_sz;
        Sint offs = &nbp->mem[0] - sp;
        erts_offset_off_heap(&nbp->off_heap, offs, sp, ep);
        erts_offset_heap(&nbp->mem[0], off_sz, offs, sp, ep);
        if (brefs && brefs_size)
            erts_offset_heap_ptr(brefs, brefs_size, offs, sp, ep);
#ifdef DEBUG
        for (i = 0; i < brefs_size; i++) {
            Eterm *ptr;
            if (is_immed(brefs[i]))
                continue;
            ptr = ptr_val(brefs[i]);
            ASSERT(&nbp->mem[0] <= ptr && ptr < &nbp->mem[0] + off_sz);
        }
#endif
    }
    nbp->alloc_size = size;
    nbp->used_size = size;

    /* Verify the relocated roots are still equal to the saved copies. */
#ifdef HARD_DEBUG
    for (i = 0; i < brefs_size; i++)
        ASSERT(eq(dbg_brefs[i], brefs[i]));
    free_message_buffer(dbg_bp);
    erts_free(ERTS_ALC_T_UNDEF, dbg_brefs);
#endif
    return nbp;
}
/*
 * Allocate an ErtsMessage plus 'sz' words of heap for its payload,
 * preferring the receiver's own heap when that is safe.
 *
 * pp        - receiving process.
 * psp       - in/out: cached copy of the process state word; refreshed
 *             if the main lock is acquired here.
 * plp       - in/out: locks held on pp; ERTS_PROC_LOCK_MAIN may be
 *             added (via trylock) or dropped again on the fallback path.
 * sz        - payload size in words (0 for immediate-only messages).
 * hpp/ohpp  - out: heap pointer and off-heap list to build the term in.
 * on_heap_p - out: nonzero iff the payload is on the process heap.
 *
 * Caller must have verified that off-heap message queue data is
 * disabled (see the ASSERT).
 */
ErtsMessage *
erts_try_alloc_message_on_heap(Process *pp,
                               erts_aint32_t *psp,
                               ErtsProcLocks *plp,
                               Uint sz,
                               Eterm **hpp,
                               ErlOffHeap **ohpp,
                               int *on_heap_p)
{
    int locked_main = 0;
    ErtsMessage *mp;

    ASSERT(!(*psp & ERTS_PSFLG_OFF_HEAP_MSGQ));

    if ((*psp) & ERTS_PSFLGS_VOLATILE_HEAP)
        goto in_message_fragment;
    else if (
        *plp & ERTS_PROC_LOCK_MAIN
        ) {
    try_on_heap:
        if (((*psp) & ERTS_PSFLGS_VOLATILE_HEAP)
            || (pp->flags & F_DISABLE_GC)
            || HEAP_LIMIT(pp) - HEAP_TOP(pp) <= sz) {
            /*
             * The heap is either potentially in an inconsistent
             * state, or not large enough.
             */
            if (locked_main) {
                /* We took the main lock below; give it back before
                 * falling through to the fragment path. */
                *plp &= ~ERTS_PROC_LOCK_MAIN;
                erts_proc_unlock(pp, ERTS_PROC_LOCK_MAIN);
            }
            goto in_message_fragment;
        }

        /* Bump-allocate directly on the receiver's heap. */
        *hpp = HEAP_TOP(pp);
        HEAP_TOP(pp) = *hpp + sz;
        *ohpp = &MSO(pp);
        mp = erts_alloc_message(0, NULL);
        mp->data.attached = NULL;
        *on_heap_p = !0;
    }
    else if (pp && erts_proc_trylock(pp, ERTS_PROC_LOCK_MAIN) == 0) {
        /* Got the main lock opportunistically; re-read the state word
         * (it may have changed) and retry the on-heap path. */
        locked_main = 1;
        *psp = erts_atomic32_read_nob(&pp->state);
        *plp |= ERTS_PROC_LOCK_MAIN;
        goto try_on_heap;
    }
    else {
    in_message_fragment:
        if (!((*psp) & ERTS_PSFLG_ON_HEAP_MSGQ)) {
            /* Combined message + fragment allocation. */
            mp = erts_alloc_message(sz, hpp);
            *ohpp = sz == 0 ? NULL : &mp->hfrag.off_heap;
        }
        else {
            /* Separate fragment attached to the message. */
            mp = erts_alloc_message(0, NULL);
            if (!sz) {
                *hpp = NULL;
                *ohpp = NULL;
            }
            else {
                ErlHeapFragment *bp;
                bp = new_message_buffer(sz);
                *hpp = &bp->mem[0];
                mp->data.heap_frag = bp;
                *ohpp = &bp->off_heap;
            }
        }
        *on_heap_p = 0;
    }

    return mp;
}
/*
 * Decode the external-format payload of a queued message in place.
 *
 * proc/proc_locks - receiving process and locks held by the caller.
 * msgp            - message whose data.dist_ext holds the encoded term.
 * force_off_heap  - when nonzero, always decode into a heap fragment
 *                   (also forced when the main lock is not held or the
 *                   process uses off-heap message queue data).
 *
 * Returns 1 on success (ERL_MESSAGE_TERM set, fragment attached when
 * applicable) and 0 on a bad message (dist_ext freed, message term
 * left as-is for the caller to remove).
 */
int
erts_decode_dist_message(Process *proc, ErtsProcLocks proc_locks,
                         ErtsMessage *msgp, int force_off_heap)
{
    ErtsHeapFactory factory;
    Eterm msg;
    ErlHeapFragment *bp;
    Sint need;
    int decode_in_heap_frag;

    decode_in_heap_frag = (force_off_heap
                           || !(proc_locks & ERTS_PROC_LOCK_MAIN)
                           || (proc->flags & F_OFF_HEAP_MSGQ));

    /* heap_size < 0 means the decoded size has not been computed yet;
     * compute and cache it on the dist_ext. */
    if (msgp->data.dist_ext->heap_size >= 0)
        need = msgp->data.dist_ext->heap_size;
    else {
        need = erts_decode_dist_ext_size(msgp->data.dist_ext);
        if (need < 0) {
            /* bad msg; remove it... */
            if (is_not_immed(ERL_MESSAGE_TOKEN(msgp))) {
                bp = erts_dist_ext_trailer(msgp->data.dist_ext);
                erts_cleanup_offheap(&bp->off_heap);
            }
            erts_free_dist_ext_copy(msgp->data.dist_ext);
            msgp->data.dist_ext = NULL;
            return 0;
        }
        msgp->data.dist_ext->heap_size = need;
    }

    /* A non-immediate seq-trace token lives in the trailer fragment
     * and needs room in the destination heap too. */
    if (is_not_immed(ERL_MESSAGE_TOKEN(msgp))) {
        bp = erts_dist_ext_trailer(msgp->data.dist_ext);
        need += bp->used_size;
    }

    if (decode_in_heap_frag)
        erts_factory_heap_frag_init(&factory, new_message_buffer(need));
    else
        erts_factory_proc_prealloc_init(&factory, proc, need);

    ASSERT(msgp->data.dist_ext->heap_size >= 0);
    if (is_not_immed(ERL_MESSAGE_TOKEN(msgp))) {
        /* Copy the token out of the trailer, then release the
         * trailer's off-heap references (the copy owns them now). */
        ErlHeapFragment *heap_frag;
        heap_frag = erts_dist_ext_trailer(msgp->data.dist_ext);
        ERL_MESSAGE_TOKEN(msgp) = copy_struct(ERL_MESSAGE_TOKEN(msgp),
                                              heap_frag->used_size,
                                              &factory.hp,
                                              factory.off_heap);
        erts_cleanup_offheap(&heap_frag->off_heap);
    }

    msg = erts_decode_dist_ext(&factory, msgp->data.dist_ext);
    ERL_MESSAGE_TERM(msgp) = msg;
    erts_free_dist_ext_copy(msgp->data.dist_ext);
    msgp->data.attached = NULL;

    if (is_non_value(msg)) {
        /* Decode failed: roll back everything the factory produced. */
        erts_factory_undo(&factory);
        return 0;
    }

    /* Shrink the allocation to what was actually used; the message's
     * ref array is passed so its roots are updated on a move. */
    erts_factory_trim_and_close(&factory, msgp->m, ERL_MESSAGE_REF_ARRAY_SZ);

    ASSERT(!msgp->data.heap_frag);

    if (decode_in_heap_frag)
        msgp->data.heap_frag = factory.heap_frags;

    return 1;
}
/*
 * Remove all references from process c_p into the literal area that is
 * about to be released (ERTS_COPY_LITERAL_AREA()).
 *
 * Literals referenced from queued messages are deep-copied into a new
 * fragment linked last onto the message's fragment chain; literals
 * reachable from the heap, stack, dictionary, etc. are handled by a
 * literal GC.
 *
 * redsp      - in/out: reductions consumed are accumulated here.
 * fcalls     - reduction budget forwarded to the collector.
 * gc_allowed - when zero, only scan: returns am_need_gc if a literal
 *              GC is required; when nonzero, go straight to GC.
 *
 * Returns am_ok when done, am_need_gc when a GC pass is still needed,
 * or THE_NON_VALUE when GC is disabled on the process.
 */
Eterm
erts_proc_copy_literal_area(Process *c_p, int *redsp, int fcalls,
                            int gc_allowed)
{
    ErtsLiteralArea *la;
    ErtsMessage *msgp;
    struct erl_off_heap_header* oh;
    char *literals;
    Uint lit_bsize;
    ErlHeapFragment *hfrag;

    la = ERTS_COPY_LITERAL_AREA();
    if (!la)
        return am_ok;

    oh = la->off_heap;
    literals = (char *) &la->start[0];
    lit_bsize = (char *) la->end - literals;

    /*
     * If a literal is in the message queue we make an explicit copy of
     * it and attach it to the heap fragment. Each message needs to be
     * self contained, we cannot save the literal in the old_heap or
     * any other heap than the message it self.
     */

    erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ);
    ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p);
    erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ);

    for (msgp = c_p->msg.first; msgp; msgp = msgp->next) {
        ErlHeapFragment *hf;
        Uint lit_sz = 0;

        *redsp += 1;

        if (msgp->data.attached == ERTS_MSG_COMBINED_HFRAG)
            hfrag = &msgp->hfrag;
        else if (is_value(ERL_MESSAGE_TERM(msgp)) && msgp->data.heap_frag)
            hfrag = msgp->data.heap_frag;
        else
            continue; /* Content on heap or in external term format... */

        /* First pass: measure literal content in the fragment chain. */
        for (hf = hfrag; hf; hf = hf->next) {
            lit_sz += hfrag_literal_size(&hf->mem[0],
                                         &hf->mem[hf->used_size],
                                         literals, lit_bsize);
            *redsp += 1;
        }

        *redsp += lit_sz / 16; /* Better value needed... */
        if (lit_sz > 0) {
            /* Second pass: copy the literals into a fresh fragment
             * and rewrite the references in place. */
            ErlHeapFragment *bp = new_message_buffer(lit_sz);
            Eterm *hp = bp->mem;

            for (hf = hfrag; hf; hf = hf->next) {
                hfrag_literal_copy(&hp, &bp->off_heap,
                                   &hf->mem[0],
                                   &hf->mem[hf->used_size],
                                   literals, lit_bsize);
                hfrag = hf; /* track last fragment in the chain */
            }

            /* link new hfrag last */
            ASSERT(hfrag->next == NULL);
            hfrag->next = bp;
            bp->next = NULL;
        }
    }

    if (gc_allowed) {
        /*
         * Current implementation first tests without allowing GC, and
         * then restarts the operation allowing GC if it is needed. It
         * is therefore very likely that we will need the GC (although
         * this is not completely certain). We go for the GC directly
         * instead of scanning everything one more time...
         */
        goto literal_gc;
    }

    *redsp += 2;
    if (any_heap_ref_ptrs(&c_p->fvalue, &c_p->fvalue+1,
                          literals, lit_bsize)) {
        /* Exception value refers to a literal: clear the exception
         * state instead of copying it. */
        c_p->freason = EXC_NULL;
        c_p->fvalue = NIL;
        c_p->ftrace = NIL;
    }

    if (any_heap_ref_ptrs(c_p->stop, c_p->hend, literals, lit_bsize))
        goto literal_gc;
    *redsp += 1;
#ifdef HIPE
    if (nstack_any_heap_ref_ptrs(c_p, literals, lit_bsize))
        goto literal_gc;
    *redsp += 1;
#endif
    if (any_heap_refs(c_p->heap, c_p->htop, literals, lit_bsize))
        goto literal_gc;
    *redsp += 1;
    if (any_heap_refs(c_p->old_heap, c_p->old_htop,
                      literals, lit_bsize))
        goto literal_gc;

    /* Check dictionary */
    *redsp += 1;
    if (c_p->dictionary) {
        Eterm* start = ERTS_PD_START(c_p->dictionary);
        Eterm* end = start + ERTS_PD_SIZE(c_p->dictionary);

        if (any_heap_ref_ptrs(start, end, literals, lit_bsize))
            goto literal_gc;
    }

    /* Check heap fragments */
    for (hfrag = c_p->mbuf; hfrag; hfrag = hfrag->next) {
        Eterm *hp, *hp_end;

        *redsp += 1;

        hp = &hfrag->mem[0];
        hp_end = &hfrag->mem[hfrag->used_size];
        if (any_heap_refs(hp, hp_end, literals, lit_bsize))
            goto literal_gc;
    }

    /*
     * Message buffer fragments (matched messages)
     *  - off heap lists should already have been moved into
     *    process off heap structure.
     *  - Check for literals
     */
    for (msgp = c_p->msg_frag; msgp; msgp = msgp->next) {
        hfrag = erts_message_to_heap_frag(msgp);
        for (; hfrag; hfrag = hfrag->next) {
            Eterm *hp, *hp_end;

            *redsp += 1;

            hp = &hfrag->mem[0];
            hp_end = &hfrag->mem[hfrag->used_size];

            if (any_heap_refs(hp, hp_end, literals, lit_bsize))
                goto literal_gc;
        }
    }

    return am_ok;

literal_gc:

    if (!gc_allowed)
        return am_need_gc;

    if (c_p->flags & F_DISABLE_GC)
        return THE_NON_VALUE;

    FLAGS(c_p) |= F_NEED_FULLSWEEP;

    *redsp += erts_garbage_collect_nobump(c_p, 0, c_p->arg_reg,
                                          c_p->arity, fcalls);

    erts_garbage_collect_literals(c_p, (Eterm *) literals, lit_bsize, oh);

    *redsp += lit_bsize / 64; /* Need, better value... */

    return am_ok;
}
/*
 * Check whether process 'rp' still references the *old* code of module
 * 'modp' (instruction pointers, continuation pointers, stack traces,
 * funs, and - when ERTS_CPC_COPY_LITERALS is set - literals).
 *
 * Returns:
 *   am_true       - old code is (still) referenced.
 *   am_false      - no references; module may be purged.
 *   am_aborted    - a GC was needed but ERTS_CPC_ALLOW_GC not set.
 *   THE_NON_VALUE - process has GC disabled; caller must retry later.
 *
 * redsp/fcalls: reduction accounting forwarded to the collector.
 */
static Eterm
check_process_code(Process* rp, Module* modp, Uint flags, int *redsp,
                   int fcalls)
{
    BeamInstr* start;
    char* literals;
    Uint lit_bsize;
    char* mod_start;
    Uint mod_size;
    Eterm* sp;
    int done_gc = 0;
    int need_gc = 0;
    ErtsMessage *msgp;
    ErlHeapFragment *hfrag;

#define ERTS_ORDINARY_GC__ (1 << 0)
#define ERTS_LITERAL_GC__  (1 << 1)

    /*
     * Pick up limits for the module.
     */
    start = (BeamInstr*) modp->old.code_hdr;
    mod_start = (char *) start;
    mod_size = modp->old.code_length;

    /*
     * Check if current instruction or continuation pointer points into module.
     */
    if (ErtsInArea(rp->i, mod_start, mod_size)
        || ErtsInArea(rp->cp, mod_start, mod_size)) {
        return am_true;
    }

    /*
     * Check all continuation pointers stored on the stack.
     */
    for (sp = rp->stop; sp < STACK_START(rp); sp++) {
        if (is_CP(*sp) && ErtsInArea(cp_val(*sp), mod_start, mod_size)) {
            return am_true;
        }
    }

    /*
     * Check all continuation pointers stored in stackdump
     * and clear exception stackdump if there is a pointer
     * to the module.
     */
    if (rp->ftrace != NIL) {
        struct StackTrace *s;
        ASSERT(is_list(rp->ftrace));
        s = (struct StackTrace *) big_val(CDR(list_val(rp->ftrace)));
        if ((s->pc && ErtsInArea(s->pc, mod_start, mod_size)) ||
            (s->current && ErtsInArea(s->current, mod_start, mod_size))) {
            rp->freason = EXC_NULL;
            rp->fvalue = NIL;
            rp->ftrace = NIL;
        } else {
            int i;
            for (i = 0;  i < s->depth;  i++) {
                if (ErtsInArea(s->trace[i], mod_start, mod_size)) {
                    rp->freason = EXC_NULL;
                    rp->fvalue = NIL;
                    rp->ftrace = NIL;
                    break;
                }
            }
        }
    }

    if (rp->flags & F_DISABLE_GC) {
        /*
         * Cannot proceed. Process has disabled gc in order to
         * safely leave inconsistent data on the heap and/or
         * off heap lists. Need to wait for gc to be enabled
         * again.
         */
        return THE_NON_VALUE;
    }

    /*
     * Message queue can contains funs, and may contain
     * literals. If we got references to this module from the message
     * queue.
     *
     * If a literal is in the message queue we make an explicit copy of
     * it and attach it to the heap fragment. Each message needs to be
     * self contained, we cannot save the literal in the old_heap or
     * any other heap than the message it self.
     */

    erts_smp_proc_lock(rp, ERTS_PROC_LOCK_MSGQ);
    ERTS_SMP_MSGQ_MV_INQ2PRIVQ(rp);
    erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MSGQ);

    literals = (char*) modp->old.code_hdr->literals_start;
    lit_bsize = (char*) modp->old.code_hdr->literals_end - literals;

    for (msgp = rp->msg.first; msgp; msgp = msgp->next) {
        if (msgp->data.attached == ERTS_MSG_COMBINED_HFRAG)
            hfrag = &msgp->hfrag;
        else if (is_value(ERL_MESSAGE_TERM(msgp)) && msgp->data.heap_frag)
            hfrag = msgp->data.heap_frag;
        else
            continue;
        {
            ErlHeapFragment *hf;
            Uint lit_sz = 0;
            for (hf=hfrag; hf; hf = hf->next) {
                /* NOTE(review): this checks &hfrag->off_heap on every
                 * iteration while the loop variable is hf — looks like
                 * it was meant to be &hf->off_heap; confirm against
                 * upstream before changing. */
                if (check_mod_funs(rp, &hfrag->off_heap, mod_start, mod_size))
                    return am_true;
                lit_sz += hfrag_literal_size(&hf->mem[0],
                                             &hf->mem[hf->used_size],
                                             literals, lit_bsize);
            }
            if (lit_sz > 0) {
                /* Copy referenced literals into a new fragment and
                 * link it last so the message stays self-contained. */
                ErlHeapFragment *bp = new_message_buffer(lit_sz);
                Eterm *hp = bp->mem;
                for (hf=hfrag; hf; hf = hf->next) {
                    hfrag_literal_copy(&hp, &bp->off_heap,
                                       &hf->mem[0],
                                       &hf->mem[hf->used_size],
                                       literals, lit_bsize);
                    hfrag=hf;
                }
                /* link new hfrag last */
                ASSERT(hfrag->next == NULL);
                hfrag->next = bp;
                bp->next = NULL;
            }
        }
    }

    /* Scan roots; retry after (at most one ordinary and one literal)
     * GC pass when references are found. */
    while (1) {

        /* Check heap, stack etc... */
        if (check_mod_funs(rp, &rp->off_heap, mod_start, mod_size))
            goto try_gc;
        if (!(flags & ERTS_CPC_COPY_LITERALS)) {
            /* Process ok. May contain old literals but we will be
             * called again before module is purged.
             */
            return am_false;
        }
        if (any_heap_ref_ptrs(&rp->fvalue, &rp->fvalue+1,
                              literals, lit_bsize)) {
            rp->freason = EXC_NULL;
            rp->fvalue = NIL;
            rp->ftrace = NIL;
        }
        if (any_heap_ref_ptrs(rp->stop, rp->hend, literals, lit_bsize))
            goto try_literal_gc;
#ifdef HIPE
        if (nstack_any_heap_ref_ptrs(rp, literals, lit_bsize))
            goto try_literal_gc;
#endif
        if (any_heap_refs(rp->heap, rp->htop, literals, lit_bsize))
            goto try_literal_gc;
        if (any_heap_refs(rp->old_heap, rp->old_htop,
                          literals, lit_bsize))
            goto try_literal_gc;

        /* Check dictionary */
        if (rp->dictionary) {
            Eterm* start = ERTS_PD_START(rp->dictionary);
            Eterm* end = start + ERTS_PD_SIZE(rp->dictionary);

            if (any_heap_ref_ptrs(start, end, literals, lit_bsize))
                goto try_literal_gc;
        }

        /* Check heap fragments */
        for (hfrag = rp->mbuf; hfrag; hfrag = hfrag->next) {
            Eterm *hp, *hp_end;
            /* Off heap lists should already have been moved into
             * process */
            ASSERT(!check_mod_funs(rp, &hfrag->off_heap,
                                   mod_start, mod_size));

            hp = &hfrag->mem[0];
            hp_end = &hfrag->mem[hfrag->used_size];
            if (any_heap_refs(hp, hp_end, literals, lit_bsize))
                goto try_literal_gc;
        }

        /*
         * Message buffer fragments (matched messages)
         *  - off heap lists should already have been moved into
         *    process off heap structure.
         *  - Check for literals
         */
        for (msgp = rp->msg_frag; msgp; msgp = msgp->next) {
            hfrag = erts_message_to_heap_frag(msgp);
            for (; hfrag; hfrag = hfrag->next) {
                Eterm *hp, *hp_end;
                ASSERT(!check_mod_funs(rp, &hfrag->off_heap,
                                       mod_start, mod_size));
                hp = &hfrag->mem[0];
                hp_end = &hfrag->mem[hfrag->used_size];
                if (any_heap_refs(hp, hp_end, literals, lit_bsize))
                    goto try_literal_gc;
            }
        }

        return am_false;

    try_literal_gc:
        need_gc |= ERTS_LITERAL_GC__;

    try_gc:
        need_gc |= ERTS_ORDINARY_GC__;

        if ((done_gc & need_gc) == need_gc)
            return am_true;

        if (!(flags & ERTS_CPC_ALLOW_GC))
            return am_aborted;

        need_gc &= ~done_gc;

        /*
         * Try to get rid of literals by by garbage collecting.
         * Clear both fvalue and ftrace.
         */

        rp->freason = EXC_NULL;
        rp->fvalue = NIL;
        rp->ftrace = NIL;

        if (need_gc & ERTS_ORDINARY_GC__) {
            FLAGS(rp) |= F_NEED_FULLSWEEP;
            *redsp += erts_garbage_collect_nobump(rp, 0, rp->arg_reg,
                                                  rp->arity, fcalls);
            done_gc |= ERTS_ORDINARY_GC__;
        }
        if (need_gc & ERTS_LITERAL_GC__) {
            struct erl_off_heap_header* oh;
            oh = modp->old.code_hdr->literals_off_heap;
            *redsp += lit_bsize / 64; /* Need, better value... */
            erts_garbage_collect_literals(rp, (Eterm*)literals,
                                          lit_bsize, oh);
            done_gc |= ERTS_LITERAL_GC__;
        }
        need_gc = 0;
    }

#undef ERTS_ORDINARY_GC__
#undef ERTS_LITERAL_GC__
}
Sint erts_send_message(Process* sender, Process* receiver, ErtsProcLocks *receiver_locks, Eterm message, unsigned flags) { Uint msize; ErlHeapFragment* bp = NULL; Eterm token = NIL; Sint res = 0; #ifdef USE_VM_PROBES DTRACE_CHARBUF(sender_name, 64); DTRACE_CHARBUF(receiver_name, 64); Sint tok_label = 0; Sint tok_lastcnt = 0; Sint tok_serial = 0; #endif BM_STOP_TIMER(system); BM_MESSAGE(message,sender,receiver); BM_START_TIMER(send); #ifdef USE_VM_PROBES *sender_name = *receiver_name = '\0'; if (DTRACE_ENABLED(message_send)) { erts_snprintf(sender_name, sizeof(DTRACE_CHARBUF_NAME(sender_name)), "%T", sender->common.id); erts_snprintf(receiver_name, sizeof(DTRACE_CHARBUF_NAME(receiver_name)), "%T", receiver->common.id); } #endif if (SEQ_TRACE_TOKEN(sender) != NIL && !(flags & ERTS_SND_FLG_NO_SEQ_TRACE)) { Eterm* hp; Eterm stoken = SEQ_TRACE_TOKEN(sender); Uint seq_trace_size = 0; #ifdef USE_VM_PROBES Uint dt_utag_size = 0; Eterm utag = NIL; #endif BM_SWAP_TIMER(send,size); msize = size_object(message); BM_SWAP_TIMER(size,send); #ifdef USE_VM_PROBES if (stoken != am_have_dt_utag) { #endif seq_trace_update_send(sender); seq_trace_output(stoken, message, SEQ_TRACE_SEND, receiver->common.id, sender); seq_trace_size = 6; /* TUPLE5 */ #ifdef USE_VM_PROBES } if (DT_UTAG_FLAGS(sender) & DT_UTAG_SPREADING) { dt_utag_size = size_object(DT_UTAG(sender)); } else if (stoken == am_have_dt_utag ) { stoken = NIL; } #endif bp = new_message_buffer(msize + seq_trace_size #ifdef USE_VM_PROBES + dt_utag_size #endif ); hp = bp->mem; BM_SWAP_TIMER(send,copy); token = copy_struct(stoken, seq_trace_size, &hp, &bp->off_heap); message = copy_struct(message, msize, &hp, &bp->off_heap); #ifdef USE_VM_PROBES if (DT_UTAG_FLAGS(sender) & DT_UTAG_SPREADING) { utag = copy_struct(DT_UTAG(sender), dt_utag_size, &hp, &bp->off_heap); #ifdef DTRACE_TAG_HARDDEBUG erts_fprintf(stderr, "Dtrace -> (%T) Spreading tag (%T) with " "message %T!\r\n",sender->common.id, utag, message); #endif } #endif 
BM_MESSAGE_COPIED(msize); BM_SWAP_TIMER(copy,send); #ifdef USE_VM_PROBES if (DTRACE_ENABLED(message_send)) { if (stoken != NIL && stoken != am_have_dt_utag) { tok_label = signed_val(SEQ_TRACE_T_LABEL(stoken)); tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(stoken)); tok_serial = signed_val(SEQ_TRACE_T_SERIAL(stoken)); } DTRACE6(message_send, sender_name, receiver_name, msize, tok_label, tok_lastcnt, tok_serial); } #endif res = queue_message(NULL, receiver, receiver_locks, NULL, bp, message, token #ifdef USE_VM_PROBES , utag #endif ); BM_SWAP_TIMER(send,system); } else if (sender == receiver) { /* Drop message if receiver has a pending exit ... */ #ifdef ERTS_SMP ErtsProcLocks need_locks = (~(*receiver_locks) & (ERTS_PROC_LOCK_MSGQ | ERTS_PROC_LOCK_STATUS)); if (need_locks) { *receiver_locks |= need_locks; if (erts_smp_proc_trylock(receiver, need_locks) == EBUSY) { if (need_locks == ERTS_PROC_LOCK_MSGQ) { erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_STATUS); need_locks = ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS; } erts_smp_proc_lock(receiver, need_locks); } } if (!ERTS_PROC_PENDING_EXIT(receiver)) #endif { ErlMessage* mp = message_alloc(); DTRACE6(message_send, sender_name, receiver_name, size_object(message), tok_label, tok_lastcnt, tok_serial); mp->data.attached = NULL; ERL_MESSAGE_TERM(mp) = message; ERL_MESSAGE_TOKEN(mp) = NIL; #ifdef USE_VM_PROBES ERL_MESSAGE_DT_UTAG(mp) = NIL; #endif mp->next = NULL; /* * We move 'in queue' to 'private queue' and place * message at the end of 'private queue' in order * to ensure that the 'in queue' doesn't contain * references into the heap. By ensuring this, * we don't need to include the 'in queue' in * the root set when garbage collecting. 
*/ ERTS_SMP_MSGQ_MV_INQ2PRIVQ(receiver); LINK_MESSAGE_PRIVQ(receiver, mp); res = receiver->msg.len; if (IS_TRACED_FL(receiver, F_TRACE_RECEIVE)) { trace_receive(receiver, message); } } BM_SWAP_TIMER(send,system); } else { ErlOffHeap *ohp; Eterm *hp; erts_aint32_t state; BM_SWAP_TIMER(send,size); msize = size_object(message); BM_SWAP_TIMER(size,send); hp = erts_alloc_message_heap_state(msize, &bp, &ohp, receiver, receiver_locks, &state); BM_SWAP_TIMER(send,copy); message = copy_struct(message, msize, &hp, ohp); BM_MESSAGE_COPIED(msz); BM_SWAP_TIMER(copy,send); DTRACE6(message_send, sender_name, receiver_name, msize, tok_label, tok_lastcnt, tok_serial); res = queue_message(sender, receiver, receiver_locks, &state, bp, message, token #ifdef USE_VM_PROBES , NIL #endif ); BM_SWAP_TIMER(send,system); } return res; }
/*
 * Moves content of message buffer attached to a message into a heap.
 * The message buffer is deallocated.
 *
 * hpp      - in/out: destination heap pointer; caller must have
 *            reserved at least the fragment's used_size words.
 * off_heap - destination off-heap list; off-heap objects in the
 *            fragment are relinked onto it.
 * msg      - message whose data.heap_frag is consumed (set to NULL).
 *
 * Single-fragment messages are copied word-by-word with pointer
 * offsetting; multi-fragment chains are delegated to
 * move_multi_frags().
 */
void
erts_move_msg_mbuf_to_heap(Eterm** hpp, ErlOffHeap* off_heap, ErlMessage *msg)
{
    struct erl_off_heap_header* oh;
    Eterm term, token, *fhp, *hp;
    Sint offs;
    Uint sz;
    ErlHeapFragment *bp;
#ifdef USE_VM_PROBES
    Eterm utag;
#endif

#ifdef HARD_DEBUG
    struct erl_off_heap_header* dbg_oh_start = off_heap->first;
    Eterm dbg_term, dbg_token;
    ErlHeapFragment *dbg_bp;
    Uint *dbg_hp, *dbg_thp_start;
    Uint dbg_term_sz, dbg_token_sz;
#ifdef USE_VM_PROBES
    Eterm dbg_utag;
    Uint dbg_utag_sz;
#endif
#endif

    bp = msg->data.heap_frag;
    term = ERL_MESSAGE_TERM(msg);
    token = ERL_MESSAGE_TOKEN(msg);
#ifdef USE_VM_PROBES
    utag = ERL_MESSAGE_DT_UTAG(msg);
#endif
    if (!bp) {
        /* No fragment: everything must already be immediate. */
#ifdef USE_VM_PROBES
        ASSERT(is_immed(term) && is_immed(token) && is_immed(utag));
#else
        ASSERT(is_immed(term) && is_immed(token));
#endif
        return;
    }

    /* Reference copy of the terms for post-move equality checks.
     * NOTE(review): with USE_VM_PROBES the first new_message_buffer()
     * result is overwritten (leaked) by the second — debug-only, but
     * worth confirming upstream. */
#ifdef HARD_DEBUG
    dbg_term_sz = size_object(term);
    dbg_token_sz = size_object(token);
    dbg_bp = new_message_buffer(dbg_term_sz + dbg_token_sz);
#ifdef USE_VM_PROBES
    dbg_utag_sz = size_object(utag);
    dbg_bp = new_message_buffer(dbg_term_sz + dbg_token_sz + dbg_utag_sz );
#endif
    /*ASSERT(dbg_term_sz + dbg_token_sz == erts_msg_used_frag_sz(msg));
      Copied size may be smaller due to removed SubBins's or garbage.
      Copied size may be larger due to duplicated shared terms. */
    dbg_hp = dbg_bp->mem;
    dbg_term = copy_struct(term, dbg_term_sz, &dbg_hp, &dbg_bp->off_heap);
    dbg_token = copy_struct(token, dbg_token_sz, &dbg_hp, &dbg_bp->off_heap);
#ifdef USE_VM_PROBES
    dbg_utag = copy_struct(utag, dbg_utag_sz, &dbg_hp, &dbg_bp->off_heap);
#endif
    dbg_thp_start = *hpp;
#endif

    if (bp->next != NULL) {
        /* Fragment chain: the helper updates the msg->m ref array
         * (term, token and - with probes - utag) itself. */
        move_multi_frags(hpp, off_heap, bp, msg->m,
#ifdef USE_VM_PROBES
                         3
#else
                         2
#endif
                         );
        goto copy_done;
    }

    OH_OVERHEAD(off_heap, bp->off_heap.overhead);
    sz = bp->used_size;

    ASSERT(is_immed(term) || in_heapfrag(ptr_val(term),bp));
    ASSERT(is_immed(token) || in_heapfrag(ptr_val(token),bp));

    fhp = bp->mem;
    hp = *hpp;
    offs = hp - fhp; /* relocation delta applied to all in-frag pointers */

    oh = NULL;
    while (sz--) {
        Uint cpy_sz;
        Eterm val = *fhp++;

        switch (primary_tag(val)) {
        case TAG_PRIMARY_IMMED1:
            *hp++ = val;
            break;
        case TAG_PRIMARY_LIST:
        case TAG_PRIMARY_BOXED:
            /* Pointer into the fragment: relocate it. */
            ASSERT(in_heapfrag(ptr_val(val), bp));
            *hp++ = offset_ptr(val, offs);
            break;
        case TAG_PRIMARY_HEADER:
            *hp++ = val;
            switch (val & _HEADER_SUBTAG_MASK) {
            case ARITYVAL_SUBTAG:
                break;
            case REFC_BINARY_SUBTAG:
            case FUN_SUBTAG:
            case EXTERNAL_PID_SUBTAG:
            case EXTERNAL_PORT_SUBTAG:
            case EXTERNAL_REF_SUBTAG:
                /* Off-heap object: remember its new location so it can
                 * be relinked onto off_heap after its words are copied. */
                oh = (struct erl_off_heap_header*) (hp-1);
                cpy_sz = thing_arityval(val);
                goto cpy_words;
            default:
                cpy_sz = header_arity(val);

            cpy_words:
                /* Copy the thing's payload words verbatim, unrolled
                 * by eight with a jump-table tail. */
                ASSERT(sz >= cpy_sz);
                sz -= cpy_sz;
                while (cpy_sz >= 8) {
                    cpy_sz -= 8;
                    *hp++ = *fhp++;
                    *hp++ = *fhp++;
                    *hp++ = *fhp++;
                    *hp++ = *fhp++;
                    *hp++ = *fhp++;
                    *hp++ = *fhp++;
                    *hp++ = *fhp++;
                    *hp++ = *fhp++;
                }
                switch (cpy_sz) {
                case 7: *hp++ = *fhp++;
                case 6: *hp++ = *fhp++;
                case 5: *hp++ = *fhp++;
                case 4: *hp++ = *fhp++;
                case 3: *hp++ = *fhp++;
                case 2: *hp++ = *fhp++;
                case 1: *hp++ = *fhp++;
                default: break;
                }
                if (oh) {
                    /* Add to offheap list */
                    oh->next = off_heap->first;
                    off_heap->first = oh;
                    ASSERT(*hpp <= (Eterm*)oh);
                    ASSERT(hp > (Eterm*)oh);
                    oh = NULL;
                }
                break;
            }
            break;
        }
    }

    ASSERT(bp->used_size == hp - *hpp);
    *hpp = hp;

    /* Patch the message's root terms to their relocated addresses. */
    if (is_not_immed(token)) {
        ASSERT(in_heapfrag(ptr_val(token), bp));
        ERL_MESSAGE_TOKEN(msg) = offset_ptr(token, offs);
#ifdef HARD_DEBUG
        ASSERT(dbg_thp_start <= ptr_val(ERL_MESSAGE_TOKEN(msg)));
        ASSERT(hp > ptr_val(ERL_MESSAGE_TOKEN(msg)));
#endif
    }
    if (is_not_immed(term)) {
        ASSERT(in_heapfrag(ptr_val(term),bp));
        ERL_MESSAGE_TERM(msg) = offset_ptr(term, offs);
#ifdef HARD_DEBUG
        ASSERT(dbg_thp_start <= ptr_val(ERL_MESSAGE_TERM(msg)));
        ASSERT(hp > ptr_val(ERL_MESSAGE_TERM(msg)));
#endif
    }
#ifdef USE_VM_PROBES
    if (is_not_immed(utag)) {
        ASSERT(in_heapfrag(ptr_val(utag), bp));
        ERL_MESSAGE_DT_UTAG(msg) = offset_ptr(utag, offs);
#ifdef HARD_DEBUG
        ASSERT(dbg_thp_start <= ptr_val(ERL_MESSAGE_DT_UTAG(msg)));
        ASSERT(hp > ptr_val(ERL_MESSAGE_DT_UTAG(msg)));
#endif
    }
#endif

copy_done:

    /* Every off-heap object of every fragment must now appear on the
     * destination off-heap list: count both sides and compare. */
#ifdef HARD_DEBUG
    {
        int i, j;
        ErlHeapFragment* frag;
        {
            struct erl_off_heap_header* dbg_oh = off_heap->first;
            i = j = 0;
            while (dbg_oh != dbg_oh_start) {
                dbg_oh = dbg_oh->next;
                i++;
            }
            for (frag=bp; frag; frag=frag->next) {
                dbg_oh = frag->off_heap.first;
                while (dbg_oh) {
                    dbg_oh = dbg_oh->next;
                    j++;
                }
            }
            ASSERT(i == j);
        }
    }
#endif

    /* The off-heap objects now belong to off_heap; detach them before
     * freeing the fragment so they are not destroyed with it. */
    bp->off_heap.first = NULL;
    free_message_buffer(bp);
    msg->data.heap_frag = NULL;

#ifdef HARD_DEBUG
    ASSERT(eq(ERL_MESSAGE_TERM(msg), dbg_term));
    ASSERT(eq(ERL_MESSAGE_TOKEN(msg), dbg_token));
#ifdef USE_VM_PROBES
    ASSERT(eq(ERL_MESSAGE_DT_UTAG(msg), dbg_utag));
#endif
    free_message_buffer(dbg_bp);
#endif
}
/*
 * Decode an external (distribution) term into heap memory (pre-factory
 * variant): allocates an upper-bound sized heap area, decodes into it,
 * and gives back / shrinks the unused tail.
 *
 * pp/plcksp - receiving process and held locks; NULL pp means decode
 *             into a standalone heap fragment.
 * bpp       - out: heap fragment used, or NULL when the term went onto
 *             the process heap.
 * tokenp    - in/out: non-nil seq-trace token from the dist-ext
 *             trailer is copied next to the message.
 * dist_extp - external term; always freed before returning.
 *
 * Returns THE_NON_VALUE on decode failure.
 */
Eterm
erts_msg_distext2heap(Process *pp,
                      ErtsProcLocks *plcksp,
                      ErlHeapFragment **bpp,
                      Eterm *tokenp,
                      ErtsDistExternal *dist_extp)
{
    Eterm msg;
    Uint tok_sz = 0;
    Eterm *hp = NULL;
    Eterm *hp_end = NULL;
    ErlOffHeap *ohp;
    Sint sz;

    *bpp = NULL;
    /* Negative size means the external term is malformed. */
    sz = erts_decode_dist_ext_size(dist_extp);
    if (sz < 0)
        goto decode_error;
    if (is_not_nil(*tokenp)) {
        ErlHeapFragment *heap_frag = erts_dist_ext_trailer(dist_extp);
        tok_sz = heap_frag->used_size;
        sz += tok_sz;
    }
    if (pp)
        hp = erts_alloc_message_heap(sz, bpp, &ohp, pp, plcksp);
    else {
        *bpp = new_message_buffer(sz);
        hp = (*bpp)->mem;
        ohp = &(*bpp)->off_heap;
    }
    hp_end = hp + sz;
    msg = erts_decode_dist_ext(&hp, ohp, dist_extp);
    if (is_non_value(msg))
        goto decode_error;
    if (is_not_nil(*tokenp)) {
        /* Copy the token and release the trailer's off-heap refs. */
        ErlHeapFragment *heap_frag = erts_dist_ext_trailer(dist_extp);
        *tokenp = copy_struct(*tokenp, tok_sz, &hp, ohp);
        erts_cleanup_offheap(&heap_frag->off_heap);
    }
    erts_free_dist_ext_copy(dist_extp);
    if (hp_end != hp) {
        /* 'sz' was an upper bound: return or trim the unused tail. */
        if (!(*bpp)) {
            HRelease(pp, hp_end, hp);
        }
        else {
            Uint final_size = hp - &(*bpp)->mem[0];
            Eterm brefs[2] = {msg, *tokenp};
            ASSERT(sz - (hp_end - hp) == final_size);
            /* Resizing may move the fragment; msg and token are passed
             * as roots so they get rewritten on a move. */
            *bpp = erts_resize_message_buffer(*bpp, final_size,
                                              &brefs[0], 2);
            msg = brefs[0];
            *tokenp = brefs[1];
        }
    }
    return msg;

 decode_error:
    if (is_not_nil(*tokenp)) {
        ErlHeapFragment *heap_frag = erts_dist_ext_trailer(dist_extp);
        erts_cleanup_offheap(&heap_frag->off_heap);
    }
    erts_free_dist_ext_copy(dist_extp);
    if (*bpp) {
        free_message_buffer(*bpp);
        *bpp = NULL;
    }
    else if (hp) {
        HRelease(pp, hp_end, hp);
    }
    return THE_NON_VALUE;
}
void erts_send_message(Process* sender, Process* receiver, ErtsProcLocks *receiver_locks, Eterm message, unsigned flags) { Uint msize; ErlHeapFragment* bp = NULL; Eterm token = NIL; BM_STOP_TIMER(system); BM_MESSAGE(message,sender,receiver); BM_START_TIMER(send); if (SEQ_TRACE_TOKEN(sender) != NIL && !(flags & ERTS_SND_FLG_NO_SEQ_TRACE)) { Eterm* hp; BM_SWAP_TIMER(send,size); msize = size_object(message); BM_SWAP_TIMER(size,send); seq_trace_update_send(sender); seq_trace_output(SEQ_TRACE_TOKEN(sender), message, SEQ_TRACE_SEND, receiver->id, sender); bp = new_message_buffer(msize + 6 /* TUPLE5 */); hp = bp->mem; BM_SWAP_TIMER(send,copy); token = copy_struct(SEQ_TRACE_TOKEN(sender), 6 /* TUPLE5 */, &hp, &bp->off_heap); message = copy_struct(message, msize, &hp, &bp->off_heap); BM_MESSAGE_COPIED(msize); BM_SWAP_TIMER(copy,send); erts_queue_message(receiver, receiver_locks, bp, message, token); BM_SWAP_TIMER(send,system); #ifdef HYBRID } else { ErlMessage* mp = message_alloc(); BM_SWAP_TIMER(send,copy); #ifdef INCREMENTAL /* TODO: During GC activate processes if the message relies in * the fromspace and the sender is active. During major * collections add the message to the gray stack if it relies * in the old generation and the sender is active and the * receiver is inactive. 
if (!IS_CONST(message) && (ma_gc_flags & GC_CYCLE) && (ptr_val(message) >= inc_fromspc && ptr_val(message) < inc_fromend) && INC_IS_ACTIVE(sender)) INC_ACTIVATE(receiver); else if (!IS_CONST(message) && (ma_gc_flags & GC_CYCLE) && (ptr_val(message) >= global_old_heap && ptr_val(message) < global_old_hend) && INC_IS_ACTIVE(sender) && !INC_IS_ACTIVE(receiver)) Mark message in blackmap and add it to the gray stack */ if (!IS_CONST(message)) INC_ACTIVATE(receiver); #endif LAZY_COPY(sender,message); BM_SWAP_TIMER(copy,send); ERL_MESSAGE_TERM(mp) = message; ERL_MESSAGE_TOKEN(mp) = NIL; mp->next = NULL; LINK_MESSAGE(receiver, mp); ACTIVATE(receiver); if (receiver->status == P_WAITING) { erts_add_to_runq(receiver); } else if (receiver->status == P_SUSPENDED) { receiver->rstatus = P_RUNABLE; } if (IS_TRACED_FL(receiver, F_TRACE_RECEIVE)) { trace_receive(receiver, message); } BM_SWAP_TIMER(send,system); return; #else } else if (sender == receiver) {
/*
 * Moves content of message buffer attached to a message into a heap.
 * The message buffer is deallocated.
 *
 * The buffer's words are copied verbatim to *hpp while every list/boxed
 * pointer is relocated by the constant distance between the two areas
 * (offs). Off-heap things encountered during the copy (refc binaries,
 * funs, externals) are relinked from the buffer's off-heap lists into
 * off_heap's lists. On return *hpp has been advanced past the copied
 * data and msg's term/token fields point into the destination heap.
 *
 * NOTE(review): the caller is assumed to have reserved at least bp->size
 * words at *hpp — the code asserts this relationship but never checks a
 * heap limit.
 */
void
erts_move_msg_mbuf_to_heap(Eterm** hpp, ErlOffHeap* off_heap, ErlMessage *msg)
{
    /* Unions for typecasts avoids warnings about type-punned pointers
       and aliasing */
    union {
	Uint** upp;
	ProcBin **pbpp;
	ErlFunThing **efpp;
	ExternalThing **etpp;
    } oh_list_pp,      /* head of the off-heap list to link into; non-NULL
			  only while an off-heap thing is being copied */
      oh_el_next_pp;   /* address of the 'next' field of the new element */
    union {
	Uint *up;
	ProcBin *pbp;
	ErlFunThing *efp;
	ExternalThing *etp;
    } oh_el_p;         /* the off-heap thing at its new (destination) address */
    Eterm term, token, *fhp, *hp;
    Sint offs;
    Uint sz;
    ErlHeapFragment *bp;

#ifdef HARD_DEBUG
    ProcBin *dbg_mso_start = off_heap->mso;
    ErlFunThing *dbg_fun_start = off_heap->funs;
    ExternalThing *dbg_external_start = off_heap->externals;
    Eterm dbg_term, dbg_token;
    ErlHeapFragment *dbg_bp;
    Uint *dbg_hp, *dbg_thp_start;
    Uint dbg_term_sz, dbg_token_sz;
#endif

    bp = msg->data.heap_frag;
    term = ERL_MESSAGE_TERM(msg);
    token = ERL_MESSAGE_TOKEN(msg);
    if (!bp) {
	/* No buffer attached: both term and token must already be
	   immediates; nothing to move. */
	ASSERT(is_immed(term) && is_immed(token));
	return;
    }

#ifdef HARD_DEBUG
    /* Make an independent deep copy up front so the result of the word-level
       move below can be compared against copy_struct() at the end. */
    dbg_term_sz = size_object(term);
    dbg_token_sz = size_object(token);
    ASSERT(bp->size == dbg_term_sz + dbg_token_sz);
    dbg_bp = new_message_buffer(bp->size);
    dbg_hp = dbg_bp->mem;
    dbg_term = copy_struct(term, dbg_term_sz, &dbg_hp, &dbg_bp->off_heap);
    dbg_token = copy_struct(token, dbg_token_sz, &dbg_hp, &dbg_bp->off_heap);
    dbg_thp_start = *hpp;
#endif

    ASSERT(bp);
    msg->data.attached = NULL;

    /* Destination heap takes over the buffer's off-heap accounting. */
    off_heap->overhead += bp->off_heap.overhead;
    sz = bp->size;

#ifdef DEBUG
    if (is_not_immed(term)) {
	ASSERT(bp->mem <= ptr_val(term));
	ASSERT(bp->mem + bp->size > ptr_val(term));
    }

    if (is_not_immed(token)) {
	ASSERT(bp->mem <= ptr_val(token));
	ASSERT(bp->mem + bp->size > ptr_val(token));
    }
#endif

    fhp = bp->mem;   /* "from" pointer: walks the buffer */
    hp = *hpp;       /* "to" pointer: walks the destination heap */
    offs = hp - fhp; /* constant relocation distance for all interior pointers */

    oh_list_pp.upp = NULL;
    oh_el_next_pp.upp = NULL; /* Shut up compiler warning */
    oh_el_p.up = NULL; /* Shut up compiler warning */
    while (sz--) {
	Uint cpy_sz;
	Eterm val = *fhp++;

	switch (primary_tag(val)) {
	case TAG_PRIMARY_IMMED1:
	    /* Immediates need no relocation. */
	    *hp++ = val;
	    break;
	case TAG_PRIMARY_LIST:
	case TAG_PRIMARY_BOXED:
	    /* Pointer into the buffer: relocate by offs. */
	    ASSERT(bp->mem <= ptr_val(val));
	    ASSERT(bp->mem + bp->size > ptr_val(val));
	    *hp++ = offset_ptr(val, offs);
	    break;
	case TAG_PRIMARY_HEADER:
	    *hp++ = val;
	    switch (val & _HEADER_SUBTAG_MASK) {
	    case ARITYVAL_SUBTAG:
		/* Tuple header: elements follow as ordinary words. */
		break;
	    case REFC_BINARY_SUBTAG:
		/* Off-heap thing: remember where to relink it after its
		   words have been copied (hp-1 is its new address). */
		oh_list_pp.pbpp = &off_heap->mso;
		oh_el_p.up = (hp-1);
		oh_el_next_pp.pbpp = &(oh_el_p.pbp)->next;
		cpy_sz = thing_arityval(val);
		goto cpy_words;
	    case FUN_SUBTAG:
#ifndef HYBRID
		oh_list_pp.efpp = &off_heap->funs;
		oh_el_p.up = (hp-1);
		oh_el_next_pp.efpp = &(oh_el_p.efp)->next;
#endif
		cpy_sz = thing_arityval(val);
		goto cpy_words;
	    case EXTERNAL_PID_SUBTAG:
	    case EXTERNAL_PORT_SUBTAG:
	    case EXTERNAL_REF_SUBTAG:
		oh_list_pp.etpp = &off_heap->externals;
		oh_el_p.up = (hp-1);
		oh_el_next_pp.etpp = &(oh_el_p.etp)->next;
		cpy_sz = thing_arityval(val);
		goto cpy_words;
	    default:
		/* Any other thing: raw payload words, no relinking. */
		cpy_sz = header_arity(val);

	    cpy_words:
		/* Copy cpy_sz raw payload words, unrolled eight at a time
		   with a Duff-style remainder switch (deliberate
		   fallthrough below). */
		sz -= cpy_sz;
		while (cpy_sz >= 8) {
		    cpy_sz -= 8;
		    *hp++ = *fhp++;
		    *hp++ = *fhp++;
		    *hp++ = *fhp++;
		    *hp++ = *fhp++;
		    *hp++ = *fhp++;
		    *hp++ = *fhp++;
		    *hp++ = *fhp++;
		    *hp++ = *fhp++;
		}
		switch (cpy_sz) {
		case 7: *hp++ = *fhp++;
		case 6: *hp++ = *fhp++;
		case 5: *hp++ = *fhp++;
		case 4: *hp++ = *fhp++;
		case 3: *hp++ = *fhp++;
		case 2: *hp++ = *fhp++;
		case 1: *hp++ = *fhp++;
		default: break;
		}
		if (oh_list_pp.upp) {
#ifdef HARD_DEBUG
		    Uint *dbg_old_oh_list_p = *oh_list_pp.upp;
#endif
		    /* Add to offheap list */
		    *oh_el_next_pp.upp = *oh_list_pp.upp;
		    *oh_list_pp.upp = oh_el_p.up;

		    /* The new element must live inside the copied area. */
		    ASSERT(*hpp <= oh_el_p.up);
		    ASSERT(hp > oh_el_p.up);
#ifdef HARD_DEBUG
		    switch (val & _HEADER_SUBTAG_MASK) {
		    case REFC_BINARY_SUBTAG:
			ASSERT(off_heap->mso == *oh_list_pp.pbpp);
			ASSERT(off_heap->mso->next
			       == (ProcBin *) dbg_old_oh_list_p);
			break;
#ifndef HYBRID
		    case FUN_SUBTAG:
			ASSERT(off_heap->funs == *oh_list_pp.efpp);
			ASSERT(off_heap->funs->next
			       == (ErlFunThing *) dbg_old_oh_list_p);
			break;
#endif
		    case EXTERNAL_PID_SUBTAG:
		    case EXTERNAL_PORT_SUBTAG:
		    case EXTERNAL_REF_SUBTAG:
			ASSERT(off_heap->externals == *oh_list_pp.etpp);
			ASSERT(off_heap->externals->next
			       == (ExternalThing *) dbg_old_oh_list_p);
			break;
		    default:
			ASSERT(0);
		    }
#endif
		    oh_list_pp.upp = NULL; /* done with this element */
		}
		break;
	    }
	    break;
	}
    }

    ASSERT(bp->size == hp - *hpp);
    *hpp = hp;

    /* Relocate the message's root terms to their new addresses. */
    if (is_not_immed(token)) {
	ASSERT(bp->mem <= ptr_val(token));
	ASSERT(bp->mem + bp->size > ptr_val(token));
	ERL_MESSAGE_TOKEN(msg) = offset_ptr(token, offs);
#ifdef HARD_DEBUG
	ASSERT(dbg_thp_start <= ptr_val(ERL_MESSAGE_TOKEN(msg)));
	ASSERT(hp > ptr_val(ERL_MESSAGE_TOKEN(msg)));
#endif
    }

    if (is_not_immed(term)) {
	ASSERT(bp->mem <= ptr_val(term));
	ASSERT(bp->mem + bp->size > ptr_val(term));
	ERL_MESSAGE_TERM(msg) = offset_ptr(term, offs);
#ifdef HARD_DEBUG
	ASSERT(dbg_thp_start <= ptr_val(ERL_MESSAGE_TERM(msg)));
	ASSERT(hp > ptr_val(ERL_MESSAGE_TERM(msg)));
#endif
    }

#ifdef HARD_DEBUG
    /* Verify that exactly as many elements were prepended to each of
       off_heap's lists as the buffer's corresponding list contains. */
    {
	int i, j;
	{
	    ProcBin *mso = off_heap->mso;
	    i = j = 0;
	    while (mso != dbg_mso_start) {
		mso = mso->next;
		i++;
	    }
	    mso = bp->off_heap.mso;
	    while (mso) {
		mso = mso->next;
		j++;
	    }
	    ASSERT(i == j);
	}
	{
	    ErlFunThing *fun = off_heap->funs;
	    i = j = 0;
	    while (fun != dbg_fun_start) {
		fun = fun->next;
		i++;
	    }
	    fun = bp->off_heap.funs;
	    while (fun) {
		fun = fun->next;
		j++;
	    }
	    ASSERT(i == j);
	}
	{
	    ExternalThing *external = off_heap->externals;
	    i = j = 0;
	    while (external != dbg_external_start) {
		external = external->next;
		i++;
	    }
	    external = bp->off_heap.externals;
	    while (external) {
		external = external->next;
		j++;
	    }
	    ASSERT(i == j);
	}
    }
#endif

    /* Ownership of the off-heap things has moved to off_heap; clear the
       buffer's lists so free_message_buffer() does not release them. */
    bp->off_heap.mso = NULL;
#ifndef HYBRID
    bp->off_heap.funs = NULL;
#endif
    bp->off_heap.externals = NULL;
    free_message_buffer(bp);

#ifdef HARD_DEBUG
    ASSERT(eq(ERL_MESSAGE_TERM(msg), dbg_term));
    ASSERT(eq(ERL_MESSAGE_TOKEN(msg), dbg_token));
    free_message_buffer(dbg_bp);
#endif
}