/*
 * Calculate the number of heap words needed to copy all terms referenced
 * from the range [start, end) that live in the literal area
 * [lit_start, lit_start+lit_size). Companion sizing pass for
 * hfrag_literal_copy().
 */
static Uint
hfrag_literal_size(Eterm* start, Eterm* end, char* lit_start, Uint lit_size)
{
    Eterm* p;
    Eterm val;
    Uint sz = 0;

    for (p = start; p < end; p++) {
        val = *p;
        switch (primary_tag(val)) {
        case TAG_PRIMARY_BOXED:
        case TAG_PRIMARY_LIST:
            /* Pointer into the literal area: a full deep copy is needed. */
            if (ErtsInArea(val, lit_start, lit_size)) {
                sz += size_object(val);
            }
            break;
        case TAG_PRIMARY_HEADER:
            if (!header_is_transparent(val)) {
                Eterm* new_p;
                /* Binary match states carry a reference to the original
                 * binary, which may itself be a literal. */
                if (header_is_bin_matchstate(val)) {
                    ErlBinMatchState *ms = (ErlBinMatchState*) p;
                    ErlBinMatchBuffer *mb = &(ms->mb);
                    if (ErtsInArea(mb->orig, lit_start, lit_size)) {
                        sz += size_object(mb->orig);
                    }
                }
                /* Skip the raw data words of this thing; they are not
                 * tagged terms and must not be scanned. */
                new_p = p + thing_arityval(val);
                ASSERT(start <= new_p && new_p < end);
                p = new_p;
            }
        }
    }
    return sz;
}
/*
 * Deliver an {'EXIT', From, Reason} message to process 'to'.
 *
 * With an active seq-trace token the reason, from and token are all
 * copied into one fresh heap fragment and the send is seq-trace
 * reported; otherwise the message heap is allocated via
 * erts_alloc_message_heap() (fragment or receiver heap as appropriate).
 * 'to_locksp' tracks the locks currently held on 'to' and may be
 * extended by erts_queue_message().
 */
void
erts_deliver_exit_message(Eterm from, Process *to, ErtsProcLocks *to_locksp,
                          Eterm reason, Eterm token)
{
    Eterm mess;
    Eterm save;
    Eterm from_copy;
    Uint sz_reason;
    Uint sz_token;
    Uint sz_from;
    Eterm* hp;
    Eterm temptoken;
    ErlHeapFragment* bp = NULL;

    if (token != NIL
#ifdef USE_VM_PROBES
        && token != am_have_dt_utag
#endif
        ) {
        ASSERT(is_tuple(token));
        sz_reason = size_object(reason);
        sz_token = size_object(token);
        sz_from = size_object(from);
        /* +4 words for the TUPLE3 {'EXIT', From, Reason}. */
        bp = new_message_buffer(sz_reason + sz_from + sz_token + 4);
        hp = bp->mem;
        mess = copy_struct(reason, sz_reason, &hp, &bp->off_heap);
        from_copy = copy_struct(from, sz_from, &hp, &bp->off_heap);
        save = TUPLE3(hp, am_EXIT, from_copy, mess);
        hp += 4;
        /* the trace token must in this case be updated by the caller */
        seq_trace_output(token, save, SEQ_TRACE_SEND, to->common.id, NULL);
        temptoken = copy_struct(token, sz_token, &hp, &bp->off_heap);
        erts_queue_message(to, to_locksp, bp, save, temptoken);
    } else {
        ErlOffHeap *ohp;
        sz_reason = size_object(reason);
        /* Immediates need no heap space and no copying. */
        sz_from = IS_CONST(from) ? 0 : size_object(from);
        hp = erts_alloc_message_heap(sz_reason + sz_from + 4,
                                     &bp, &ohp, to, to_locksp);
        mess = copy_struct(reason, sz_reason, &hp, ohp);
        from_copy = (IS_CONST(from)
                     ? from
                     : copy_struct(from, sz_from, &hp, ohp));
        save = TUPLE3(hp, am_EXIT, from_copy, mess);
        erts_queue_message(to, to_locksp, bp, save, NIL);
    }
}
/*
 * erlang:port_set_data/2 (port-lock based variant): attach an arbitrary
 * term to a port. Any previously attached data is freed first. The port
 * is locked by id_or_name2port() and unlocked before returning.
 */
BIF_RETTYPE port_set_data_2(BIF_ALIST_2)
{
    Port* prt;
    Eterm portid = BIF_ARG_1;
    Eterm data = BIF_ARG_2;

    prt = id_or_name2port(BIF_P, portid);
    if (!prt) {
        BIF_ERROR(BIF_P, BADARG);
    }
    /* Release any heap fragment holding previously set data. */
    if (prt->bp != NULL) {
        free_message_buffer(prt->bp);
        prt->bp = NULL;
    }
    if (IS_CONST(data)) {
        /* Immediate term: store directly, no copy needed. */
        prt->data = data;
    } else {
        Uint size;
        ErlHeapFragment* bp;
        Eterm* hp;

        /* Deep-copy the term into a fragment owned by the port. */
        size = size_object(data);
        prt->bp = bp = new_message_buffer(size);
        hp = bp->mem;
        prt->data = copy_struct(data, size, &hp, &bp->off_heap);
    }
    erts_smp_port_unlock(prt);
    BIF_RET(am_true);
}
/*
 * Deep-copy src_term into the environment dst_env and return the copy.
 * The copy is placed on dst_env's heap; off-heap parts are linked into
 * the owning process' off-heap list.
 */
ERL_NIF_TERM enif_make_copy(ErlNifEnv* dst_env, ERL_NIF_TERM src_term)
{
    Uint needed = size_object(src_term);
    Eterm* top = alloc_heap(dst_env, needed);

    return copy_struct(src_term, needed, &top, &MSO(dst_env->proc));
}
/*
 * Copy every literal-area term referenced from the range [start, end)
 * to *hpp (updating the references in place), so that the range no
 * longer points into [lit_start, lit_start+lit_size). The caller must
 * have reserved hfrag_literal_size() words at *hpp.
 */
static void
hfrag_literal_copy(Eterm **hpp, ErlOffHeap *ohp,
                   Eterm *start, Eterm *end,
                   char *lit_start, Uint lit_size)
{
    Eterm* p;
    Eterm val;
    Uint sz;

    for (p = start; p < end; p++) {
        val = *p;
        switch (primary_tag(val)) {
        case TAG_PRIMARY_BOXED:
        case TAG_PRIMARY_LIST:
            /* Reference into the literal area: replace with a copy. */
            if (ErtsInArea(val, lit_start, lit_size)) {
                sz = size_object(val);
                val = copy_struct(val, sz, hpp, ohp);
                *p = val;
            }
            break;
        case TAG_PRIMARY_HEADER:
            if (!header_is_transparent(val)) {
                Eterm* new_p;
                /* matchstate in message, not possible. */
                if (header_is_bin_matchstate(val)) {
                    ErlBinMatchState *ms = (ErlBinMatchState*) p;
                    ErlBinMatchBuffer *mb = &(ms->mb);
                    if (ErtsInArea(mb->orig, lit_start, lit_size)) {
                        sz = size_object(mb->orig);
                        mb->orig = copy_struct(mb->orig, sz, hpp, ohp);
                    }
                }
                /* Skip the raw (untagged) words of this thing. */
                new_p = p + thing_arityval(val);
                ASSERT(start <= new_p && new_p < end);
                p = new_p;
            }
        }
    }
}
/*
 * erts_debug:flat_size/1 — return the flat (fully copied) size of the
 * argument, in heap words, as a small or a bignum.
 */
BIF_RETTYPE erts_debug_flat_size_1(BIF_ALIST_1)
{
    Uint words = size_object(BIF_ARG_1);

    if (!IS_USMALL(0, words)) {
        /* Too large for an immediate small: build a bignum. */
        Eterm* heap = HAlloc(BIF_P, BIG_UINT_HEAP_SIZE);
        BIF_RET(uint_to_big(words, heap));
    }
    BIF_RET(make_small(words));
}
/*
 * Deep-copy the term 'obj' onto the heap of process 'to' and return
 * the copied term.
 */
Eterm
copy_object(Eterm obj, Process* to)
{
    Uint obj_sz = size_object(obj);
    Eterm* alloc_top = HAlloc(to, obj_sz);
    Eterm copy = copy_struct(obj, obj_sz, &alloc_top, &to->off_heap);

#ifdef DEBUG
    /* The copy must be structurally equal to the source. */
    if (eq(obj, copy) == 0) {
        erl_exit(ERTS_ABORT_EXIT, "copy not equal to source\n");
    }
#endif
    return copy;
}
BIF_RETTYPE port_set_data_2(BIF_ALIST_2)
{
    /*
     * This is not a signal. See comment above.
     *
     * The port data word is an atomic: it holds either an immediate
     * Eterm (low tag bits nonzero) or a pointer to an
     * ErtsPortDataHeap (aligned, low bits zero).
     */
    erts_aint_t data;
    Port* prt;

    prt = data_lookup_port(BIF_P, BIF_ARG_1);
    if (!prt)
        BIF_ERROR(BIF_P, BADARG);

    if (is_immed(BIF_ARG_2)) {
        /* Immediates are stored directly; their tag bits distinguish
         * them from aligned pointers. */
        data = (erts_aint_t) BIF_ARG_2;
        ASSERT((data & 0x3) != 0);
    }
    else {
        ErtsPortDataHeap *pdhp;
        Uint hsize;
        Eterm *hp;

        /* Deep-copy the term into a dedicated allocation; heap[1]
         * already accounts for one word, hence (hsize-1). */
        hsize = size_object(BIF_ARG_2);
        pdhp = erts_alloc(ERTS_ALC_T_PORT_DATA_HEAP,
                          sizeof(ErtsPortDataHeap) + (hsize-1)*sizeof(Eterm));
        hp = &pdhp->heap[0];
        pdhp->off_heap.first = NULL;
        pdhp->off_heap.overhead = 0;
        pdhp->hsize = hsize;
        pdhp->data = copy_struct(BIF_ARG_2, hsize, &hp, &pdhp->off_heap);
        data = (erts_aint_t) pdhp;
        ASSERT((data & 0x3) == 0);
    }

    /* Publish the new data, retrieving the old value. */
    data = erts_smp_atomic_xchg_wb(&prt->data, data);

    if (data == (erts_aint_t)NULL) {
        /* Port terminated by racing thread: swap the NULL marker back
         * in, reclaim the data we just stored, and fail. */
        data = erts_smp_atomic_xchg_wb(&prt->data, data);
        ASSERT(data != (erts_aint_t)NULL);
        cleanup_old_port_data(data);
        BIF_ERROR(BIF_P, BADARG);
    }
    /* Free the previously stored data (if any). */
    cleanup_old_port_data(data);
    BIF_RET(am_true);
}
/*
 * Record one reference of kind 'type' from entity 'id' (pid, port,
 * match-spec id, ...) to 'referred_node'. A NodeReferrer entry is
 * created the first time 'id' is seen; later calls only bump the
 * matching per-type counter.
 */
static void
insert_node_referrer(ReferredNode *referred_node, int type, Eterm id)
{
    NodeReferrer *nrp;

    /* Linear scan for an existing entry for this id. */
    for(nrp = referred_node->referrers; nrp; nrp = nrp->next)
        if(EQ(id, nrp->id))
            break;

    if(!nrp) {
        nrp = (NodeReferrer *) erts_alloc(ERTS_ALC_T_NC_TMP,
                                          sizeof(NodeReferrer));
        nrp->next = referred_node->referrers;
        referred_node->referrers = nrp;
        if(IS_CONST(id))
            nrp->id = id;
        else {
            /* Non-immediate ids are copied into the entry's own
             * inline heap (id_heap) so they stay valid. */
            Uint *hp = &nrp->id_heap[0];
            ASSERT(is_big(id) || is_tuple(id));
            nrp->id = copy_struct(id, size_object(id), &hp, NULL);
        }
        nrp->heap_ref = 0;
        nrp->link_ref = 0;
        nrp->monitor_ref = 0;
        nrp->ets_ref = 0;
        nrp->bin_ref = 0;
        nrp->timer_ref = 0;
        nrp->system_ref = 0;
    }

    switch (type) {
    case HEAP_REF:    nrp->heap_ref++;    break;
    case LINK_REF:    nrp->link_ref++;    break;
    case ETS_REF:     nrp->ets_ref++;     break;
    case BIN_REF:     nrp->bin_ref++;     break;
    case MONITOR_REF: nrp->monitor_ref++; break;
    case TIMER_REF:   nrp->timer_ref++;   break;
    case SYSTEM_REF:  nrp->system_ref++;  break;
    default:          ASSERT(0);
    }
}
/*
 * Record one reference of kind 'type' from entity 'id' to
 * 'referred_dist'. Entries are keyed on (id, creation), except that
 * CTRL_REF entries match on id alone.
 */
static void
insert_dist_referrer(ReferredDist *referred_dist,
                     int type,
                     Eterm id,
                     Uint creation)
{
    DistReferrer *drp;

    for(drp = referred_dist->referrers; drp; drp = drp->next)
        if(id == drp->id && (type == CTRL_REF
                             || creation == drp->creation))
            break;

    if(!drp) {
        drp = (DistReferrer *) erts_alloc(ERTS_ALC_T_NC_TMP,
                                          sizeof(DistReferrer));
        drp->next = referred_dist->referrers;
        referred_dist->referrers = drp;
        if(IS_CONST(id))
            drp->id = id;
        else {
            /* Copy non-immediate ids into the entry's inline heap. */
            Uint *hp = &drp->id_heap[0];
            ASSERT(is_tuple(id));
            drp->id = copy_struct(id, size_object(id), &hp, NULL);
        }
        drp->creation = creation;
        drp->heap_ref = 0;
        drp->node_ref = 0;
        drp->ctrl_ref = 0;
        drp->system_ref = 0;
    }

    switch (type) {
    case NODE_REF:   drp->node_ref++;   break;
    case CTRL_REF:   drp->ctrl_ref++;   break;
    case HEAP_REF:   drp->heap_ref++;   break;
    case SYSTEM_REF: drp->system_ref++; break;
    default:         ASSERT(0);
    }
}
/*
 * Copy object "obj" to process p.
 * Deep-copies 'obj' onto the heap of 'to' and returns the copy;
 * off-heap parts are linked into to->off_heap.
 */
Eterm
copy_object(Eterm obj, Process* to)
{
    Uint size = size_object(obj);
    Eterm* hp = HAlloc(to, size);
    Eterm res;

#ifdef USE_VM_PROBES
    if (DTRACE_ENABLED(copy_object)) {
        DTRACE_CHARBUF(proc_name, 64);

        erts_snprintf(proc_name, sizeof(proc_name), "%T", to->common.id);
        DTRACE2(copy_object, proc_name, size);
    }
#endif
    res = copy_struct(obj, size, &hp, &to->off_heap);
#ifdef DEBUG
    /* Sanity check: the copy must compare equal to the source. */
    if (eq(obj, res) == 0) {
        erl_exit(ERTS_ABORT_EXIT, "copy not equal to source\n");
    }
#endif
    return res;
}
/*
 * Queue a message that is still on external (distribution) format on
 * process 'rcvr'. Normally the message is enqueued undecoded; when the
 * receiver is receive-traced it is decoded up front so the trace can
 * see the term. 'rcvr_locks' are the locks the caller already holds.
 */
void
erts_queue_dist_message(Process *rcvr,
                        ErtsProcLocks rcvr_locks,
                        ErtsDistExternal *dist_ext,
                        Eterm token,
                        Eterm from)
{
    ErtsMessage* mp;
#ifdef USE_VM_PROBES
    Sint tok_label = 0;
    Sint tok_lastcnt = 0;
    Sint tok_serial = 0;
#endif
    erts_aint_t state;

    ERTS_LC_ASSERT(rcvr_locks == erts_proc_lc_my_proc_locks(rcvr));

    mp = erts_alloc_message(0, NULL);
    mp->data.dist_ext = dist_ext;

    /* THE_NON_VALUE marks the term as not-yet-decoded. */
    ERL_MESSAGE_TERM(mp) = THE_NON_VALUE;
#ifdef USE_VM_PROBES
    ERL_MESSAGE_DT_UTAG(mp) = NIL;
    if (token == am_have_dt_utag)
        ERL_MESSAGE_TOKEN(mp) = NIL;
    else
#endif
        ERL_MESSAGE_TOKEN(mp) = token;

    if (!(rcvr_locks & ERTS_PROC_LOCK_MSGQ)) {
        if (erts_proc_trylock(rcvr, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
            ErtsProcLocks need_locks = ERTS_PROC_LOCK_MSGQ;
            ErtsProcLocks unlocks =
                rcvr_locks & ERTS_PROC_LOCKS_HIGHER_THAN(ERTS_PROC_LOCK_MSGQ);
            if (unlocks) {
                /* Keep lock order: drop higher locks, retake together. */
                erts_proc_unlock(rcvr, unlocks);
                need_locks |= unlocks;
            }
            erts_proc_lock(rcvr, need_locks);
        }
    }

    state = erts_atomic32_read_acqb(&rcvr->state);
    if (state & (ERTS_PSFLG_PENDING_EXIT|ERTS_PSFLG_EXITING)) {
        if (!(rcvr_locks & ERTS_PROC_LOCK_MSGQ))
            erts_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);
        /* Drop message if receiver is exiting or has a pending exit ... */
        erts_cleanup_messages(mp);
    }
    else if (IS_TRACED_FL(rcvr, F_TRACE_RECEIVE)) {
        if (from == am_Empty)
            from = dist_ext->dep->sysname;

        /* Ahh... need to decode it in order to trace it... */
        if (!(rcvr_locks & ERTS_PROC_LOCK_MSGQ))
            erts_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);
        if (!erts_decode_dist_message(rcvr, rcvr_locks, mp, 0))
            erts_free_message(mp);
        else {
            Eterm msg = ERL_MESSAGE_TERM(mp);
            token = ERL_MESSAGE_TOKEN(mp);
#ifdef USE_VM_PROBES
            if (DTRACE_ENABLED(message_queued)) {
                DTRACE_CHARBUF(receiver_name, DTRACE_TERM_BUF_SIZE);

                dtrace_proc_str(rcvr, receiver_name);
                if (have_seqtrace(token)) {
                    tok_label = signed_val(SEQ_TRACE_T_LABEL(token));
                    tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(token));
                    tok_serial = signed_val(SEQ_TRACE_T_SERIAL(token));
                }
                DTRACE6(message_queued,
                        receiver_name, size_object(msg), rcvr->msg.len,
                        tok_label, tok_lastcnt, tok_serial);
            }
#endif
            erts_queue_message(rcvr, rcvr_locks, mp, msg, from);
        }
    }
    else {
        /* Enqueue message on external format */
#ifdef USE_VM_PROBES
        if (DTRACE_ENABLED(message_queued)) {
            DTRACE_CHARBUF(receiver_name, DTRACE_TERM_BUF_SIZE);

            dtrace_proc_str(rcvr, receiver_name);
            if (have_seqtrace(token)) {
                tok_label = signed_val(SEQ_TRACE_T_LABEL(token));
                tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(token));
                tok_serial = signed_val(SEQ_TRACE_T_SERIAL(token));
            }
            /*
             * TODO: We don't know the real size of the external message here.
             *       -1 will appear to a D script as 4294967295.
             */
            DTRACE6(message_queued, receiver_name, -1, rcvr->msg.len + 1,
                    tok_label, tok_lastcnt, tok_serial);
        }
#endif

        LINK_MESSAGE(rcvr, mp, &mp->next, 1);

        if (!(rcvr_locks & ERTS_PROC_LOCK_MSGQ))
            erts_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);

        erts_proc_notify_new_message(rcvr, rcvr_locks);
    }
}
/* Add messages last in message queue */
/*
 * Link the chain first..*last (len messages) onto the end of
 * 'receiver's message queue, acquiring ERTS_PROC_LOCK_MSGQ if the
 * caller does not already hold it. Messages are dropped (and cleaned
 * up) when the receiver is exiting. Returns the resulting queue
 * length, or 0 when the chain was dropped.
 */
static Sint
queue_messages(Process* receiver,
               erts_aint32_t *receiver_state,
               ErtsProcLocks receiver_locks,
               ErtsMessage* first,
               ErtsMessage** last,
               Uint len,
               Eterm from)
{
    ErtsTracingEvent* te;
    Sint res;
    int locked_msgq = 0;
    erts_aint32_t state;

    ASSERT(is_value(ERL_MESSAGE_TERM(first)));
    ASSERT(ERL_MESSAGE_TOKEN(first) == am_undefined ||
           ERL_MESSAGE_TOKEN(first) == NIL ||
           is_tuple(ERL_MESSAGE_TOKEN(first)));

#ifdef ERTS_ENABLE_LOCK_CHECK
    ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(receiver) < ERTS_PROC_LOCK_MSGQ ||
                   receiver_locks == erts_proc_lc_my_proc_locks(receiver));
#endif

    if (!(receiver_locks & ERTS_PROC_LOCK_MSGQ)) {
        if (erts_proc_trylock(receiver, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
            ErtsProcLocks need_locks;

            /* Trylock failed: check for an exiting receiver before
             * blocking on the lock. */
            if (receiver_state)
                state = *receiver_state;
            else
                state = erts_atomic32_read_nob(&receiver->state);
            if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT))
                goto exiting;

            /* Keep lock order: release higher locks, retake together. */
            need_locks = receiver_locks
                & ERTS_PROC_LOCKS_HIGHER_THAN(ERTS_PROC_LOCK_MSGQ);
            if (need_locks) {
                erts_proc_unlock(receiver, need_locks);
            }
            need_locks |= ERTS_PROC_LOCK_MSGQ;
            erts_proc_lock(receiver, need_locks);
        }
        locked_msgq = 1;
    }

    state = erts_atomic32_read_nob(&receiver->state);

    if (state & (ERTS_PSFLG_PENDING_EXIT|ERTS_PSFLG_EXITING)) {
    exiting:
        /* Drop message if receiver is exiting or has a pending exit... */
        if (locked_msgq)
            erts_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);
        erts_cleanup_messages(first);
        return 0;
    }

    res = receiver->msg.len;
    if (receiver_locks & ERTS_PROC_LOCK_MAIN) {
        /*
         * We move 'in queue' to 'private queue' and place
         * message at the end of 'private queue' in order
         * to ensure that the 'in queue' doesn't contain
         * references into the heap. By ensuring this,
         * we don't need to include the 'in queue' in
         * the root set when garbage collecting.
         */
        res += receiver->msg_inq.len;
        ERTS_MSGQ_MV_INQ2PRIVQ(receiver);
        LINK_MESSAGE_PRIVQ(receiver, first, last, len);
    }
    else {
        LINK_MESSAGE(receiver, first, last, len);
    }

    if (IS_TRACED_FL(receiver, F_TRACE_RECEIVE)
        && (te = &erts_receive_tracing[erts_active_bp_ix()],
            te->on)) {

        ErtsMessage *msg = first;

#ifdef USE_VM_PROBES
        if (DTRACE_ENABLED(message_queued)) {
            DTRACE_CHARBUF(receiver_name, DTRACE_TERM_BUF_SIZE);
            Sint tok_label = 0;
            Sint tok_lastcnt = 0;
            Sint tok_serial = 0;
            Eterm seq_trace_token = ERL_MESSAGE_TOKEN(msg);

            dtrace_proc_str(receiver, receiver_name);
            if (seq_trace_token != NIL && is_tuple(seq_trace_token)) {
                tok_label = signed_val(SEQ_TRACE_T_LABEL(seq_trace_token));
                tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(seq_trace_token));
                tok_serial = signed_val(SEQ_TRACE_T_SERIAL(seq_trace_token));
            }
            DTRACE6(message_queued,
                    receiver_name, size_object(ERL_MESSAGE_TERM(msg)),
                    receiver->msg.len,
                    tok_label, tok_lastcnt, tok_serial);
        }
#endif
        /* Trace each message in the chain individually. */
        while (msg) {
            trace_receive(receiver, from, ERL_MESSAGE_TERM(msg), te);
            msg = msg->next;
        }
    }

    if (locked_msgq) {
        erts_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);
    }

    erts_proc_notify_new_message(receiver, receiver_locks);
    return res;
}
/* Add a message last in message queue */
/*
 * Append 'message' (with optional heap fragment 'bp' holding its data)
 * to 'receiver's message queue. On SMP, acquires any missing
 * MSGQ/STATUS locks and records them in *receiver_locks so the caller
 * can release them; an exiting receiver causes the message to be
 * dropped and freed.
 */
void
erts_queue_message(Process* receiver,
                   ErtsProcLocks *receiver_locks,
                   ErlHeapFragment* bp,
                   Eterm message,
                   Eterm seq_trace_token
#ifdef USE_VM_PROBES
                   , Eterm dt_utag
#endif
    )
{
    ErlMessage* mp;
#ifdef ERTS_SMP
    ErtsProcLocks need_locks;
#else
    ASSERT(bp != NULL || receiver->mbuf == NULL);
#endif

    ERTS_SMP_LC_ASSERT(*receiver_locks == erts_proc_lc_my_proc_locks(receiver));

    mp = message_alloc();

#ifdef ERTS_SMP
    /* Acquire whichever of MSGQ/STATUS we do not already hold. */
    need_locks = ~(*receiver_locks) & (ERTS_PROC_LOCK_MSGQ
                                       | ERTS_PROC_LOCK_STATUS);
    if (need_locks) {
        *receiver_locks |= need_locks;
        if (erts_smp_proc_trylock(receiver, need_locks) == EBUSY) {
            if (need_locks == ERTS_PROC_LOCK_MSGQ) {
                /* Lock order: drop STATUS, then take both together. */
                erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_STATUS);
                need_locks = (ERTS_PROC_LOCK_MSGQ
                              | ERTS_PROC_LOCK_STATUS);
            }
            erts_smp_proc_lock(receiver, need_locks);
        }
    }

    if (receiver->is_exiting || ERTS_PROC_PENDING_EXIT(receiver)) {
        /* Drop message if receiver is exiting or has a pending
         * exit ... */
        if (bp)
            free_message_buffer(bp);
        message_free(mp);
        return;
    }
#endif

    ERL_MESSAGE_TERM(mp) = message;
    ERL_MESSAGE_TOKEN(mp) = seq_trace_token;
#ifdef USE_VM_PROBES
    ERL_MESSAGE_DT_UTAG(mp) = dt_utag;
#endif
    mp->next = NULL;
    mp->data.heap_frag = bp;

#ifdef ERTS_SMP
    if (*receiver_locks & ERTS_PROC_LOCK_MAIN) {
        /*
         * We move 'in queue' to 'private queue' and place
         * message at the end of 'private queue' in order
         * to ensure that the 'in queue' doesn't contain
         * references into the heap. By ensuring this,
         * we don't need to include the 'in queue' in
         * the root set when garbage collecting.
         */
        ERTS_SMP_MSGQ_MV_INQ2PRIVQ(receiver);
        LINK_MESSAGE_PRIVQ(receiver, mp);
    }
    else {
        LINK_MESSAGE(receiver, mp);
    }
#else
    LINK_MESSAGE(receiver, mp);
#endif

#ifdef USE_VM_PROBES
    if (DTRACE_ENABLED(message_queued)) {
        DTRACE_CHARBUF(receiver_name, DTRACE_TERM_BUF_SIZE);
        Sint tok_label = 0;
        Sint tok_lastcnt = 0;
        Sint tok_serial = 0;

        dtrace_proc_str(receiver, receiver_name);
        if (seq_trace_token != NIL && is_tuple(seq_trace_token)) {
            tok_label = signed_val(SEQ_TRACE_T_LABEL(seq_trace_token));
            tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(seq_trace_token));
            tok_serial = signed_val(SEQ_TRACE_T_SERIAL(seq_trace_token));
        }
        DTRACE6(message_queued,
                receiver_name, size_object(message), receiver->msg.len,
                tok_label, tok_lastcnt, tok_serial);
    }
#endif
    notify_new_message(receiver);

    if (IS_TRACED_FL(receiver, F_TRACE_RECEIVE)) {
        trace_receive(receiver, message);
    }

#ifndef ERTS_SMP
    ERTS_HOLE_CHECK(receiver);
#endif
}
Uint
erts_prep_msgq_for_inspection(Process *c_p, Process *rp,
                              ErtsProcLocks rp_locks, ErtsMessageInfo *mip)
{
    Uint tot_heap_size;
    ErtsMessage* mp;
    Sint i;
    int self_on_heap;

    /*
     * Prepare the message queue for inspection
     * by process_info().
     *
     * - Decode all messages on external format
     * - Remove all corrupt dist messages from queue
     * - Save pointer to, and heap size need of each
     *   message in the mip array.
     * - Return total heap size need for all messages
     *   that needs to be copied.
     *
     * If ERTS_INSPECT_MSGQ_KEEP_OH_MSGS == 0:
     * - In case off heap messages is disabled and
     *   we are inspecting our own queue, move all
     *   off heap data into the heap.
     */

    self_on_heap = c_p == rp && !(c_p->flags & F_OFF_HEAP_MSGQ);

    tot_heap_size = 0;
    i = 0;
    mp = rp->msg.first;
    while (mp) {
        Eterm msg = ERL_MESSAGE_TERM(mp);

        mip[i].size = 0;

        if (is_non_value(msg)) {
            /* Dist message on external format; decode it... */
            if (mp->data.attached)
                erts_decode_dist_message(rp, rp_locks, mp,
                                         ERTS_INSPECT_MSGQ_KEEP_OH_MSGS);

            msg = ERL_MESSAGE_TERM(mp);

            if (is_non_value(msg)) {
                ErtsMessage **mpp;
                ErtsMessage *bad_mp = mp;
                /*
                 * Bad distribution message; remove
                 * it from the queue...
                 */
                ASSERT(!mp->data.attached);

                /* mpp points at the 'next' slot that links to bad_mp. */
                mpp = i == 0 ? &rp->msg.first : &mip[i-1].msgp->next;

                ASSERT(*mpp == bad_mp);

                erts_msgq_update_internal_pointers(&rp->msg, mpp,
                                                   &bad_mp->next);

                mp = mp->next;
                *mpp = mp;
                rp->msg.len--;
                bad_mp->next = NULL;
                erts_cleanup_messages(bad_mp);
                continue;
            }
        }

        ASSERT(is_value(msg));

#if ERTS_INSPECT_MSGQ_KEEP_OH_MSGS
        /* Count copy size for every non-immediate message, except
         * on-heap terms when inspecting our own on-heap queue. */
        if (is_not_immed(msg) && (!self_on_heap || mp->data.attached)) {
            Uint sz = size_object(msg);
            mip[i].size = sz;
            tot_heap_size += sz;
        }
#else
        if (self_on_heap) {
            if (mp->data.attached) {
                ErtsMessage *tmp = NULL;
                if (mp->data.attached != ERTS_MSG_COMBINED_HFRAG) {
                    erts_link_mbuf_to_proc(rp, mp->data.heap_frag);
                    mp->data.attached = NULL;
                }
                else {
                    /*
                     * Need to replace the message reference since
                     * we will get references to the message data
                     * from the heap...
                     */
                    ErtsMessage **mpp;
                    tmp = erts_alloc_message(0, NULL);
                    sys_memcpy((void *) tmp->m, (void *) mp->m,
                               sizeof(Eterm)*ERL_MESSAGE_REF_ARRAY_SZ);
                    mpp = i == 0 ? &rp->msg.first : &mip[i-1].msgp->next;
                    erts_msgq_replace_msg_ref(&rp->msg, tmp, mpp);
                    erts_save_message_in_proc(rp, mp);
                    mp = tmp;
                }
            }
        }
        else if (is_not_immed(msg)) {
            Uint sz = size_object(msg);
            mip[i].size = sz;
            tot_heap_size += sz;
        }
#endif

        mip[i].msgp = mp;
        i++;
        mp = mp->next;
    }

    return tot_heap_size;
}
/*
 * Queue a message that is still on external (distribution) format on
 * 'rcvr'. For a fragmented message the pre-built heap fragment chain
 * 'hfrag' carries the data; otherwise the dist_ext and the seq-trace
 * token are copied into the message's combined fragment.
 */
void
erts_queue_dist_message(Process *rcvr,
                        ErtsProcLocks rcvr_locks,
                        ErtsDistExternal *dist_ext,
                        ErlHeapFragment *hfrag,
                        Eterm token,
                        Eterm from)
{
    ErtsMessage* mp;
    erts_aint_t state;

    ERTS_LC_ASSERT(rcvr_locks == erts_proc_lc_my_proc_locks(rcvr));

    if (hfrag) {
        /* Fragmented message, allocate a message reference */
        mp = erts_alloc_message(0, NULL);
        mp->data.heap_frag = hfrag;
    } else {
        /* Un-fragmented message, allocate space for token and dist_ext
           in message. */
        Uint dist_ext_sz = erts_dist_ext_size(dist_ext) / sizeof(Eterm);
        Uint token_sz = size_object(token);
        Uint sz = token_sz + dist_ext_sz;
        Eterm *hp;

        mp = erts_alloc_message(sz, &hp);
        mp->data.heap_frag = &mp->hfrag;
        /* Only the token occupies term heap; dist_ext is raw data. */
        mp->hfrag.used_size = token_sz;

        erts_make_dist_ext_copy(dist_ext,
                                erts_get_dist_ext(mp->data.heap_frag));

        token = copy_struct(token, token_sz, &hp,
                            &mp->data.heap_frag->off_heap);
    }

    ERL_MESSAGE_FROM(mp) = dist_ext->dep->sysname;
    /* THE_NON_VALUE marks the term as not-yet-decoded. */
    ERL_MESSAGE_TERM(mp) = THE_NON_VALUE;
#ifdef USE_VM_PROBES
    ERL_MESSAGE_DT_UTAG(mp) = NIL;
    if (token == am_have_dt_utag)
        ERL_MESSAGE_TOKEN(mp) = NIL;
    else
#endif
        ERL_MESSAGE_TOKEN(mp) = token;

    if (!(rcvr_locks & ERTS_PROC_LOCK_MSGQ)) {
        if (erts_proc_trylock(rcvr, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
            ErtsProcLocks need_locks = ERTS_PROC_LOCK_MSGQ;
            ErtsProcLocks unlocks =
                rcvr_locks & ERTS_PROC_LOCKS_HIGHER_THAN(ERTS_PROC_LOCK_MSGQ);
            if (unlocks) {
                /* Keep lock order: drop higher locks, retake together. */
                erts_proc_unlock(rcvr, unlocks);
                need_locks |= unlocks;
            }
            erts_proc_lock(rcvr, need_locks);
        }
    }

    state = erts_atomic32_read_acqb(&rcvr->state);
    if (state & ERTS_PSFLG_EXITING) {
        if (!(rcvr_locks & ERTS_PROC_LOCK_MSGQ))
            erts_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);
        /* Drop message if receiver is exiting or has a pending exit ... */
        erts_cleanup_messages(mp);
    }
    else {
        LINK_MESSAGE(rcvr, mp);

        if (rcvr_locks & ERTS_PROC_LOCK_MAIN)
            erts_proc_sig_fetch(rcvr);

        if (!(rcvr_locks & ERTS_PROC_LOCK_MSGQ))
            erts_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);

        erts_proc_notify_new_message(rcvr, rcvr_locks);
    }
}
/*
 * Build the result term for node/dist reference inspection:
 *   {{node_references, NodeList}, {dist_references, DistList}}
 *
 * Uses the standard two-pass erts_bld_* scheme: called once with szp
 * set (hpp NULL) to compute the needed heap size, then once with hpp
 * set to actually build the term.
 */
static Eterm
reference_table_term(Uint **hpp, Uint *szp)
{
#undef  MK_2TUP
#undef  MK_3TUP
#undef  MK_CONS
#undef  MK_UINT
#define MK_2TUP(E1, E2)         erts_bld_tuple(hpp, szp, 2, (E1), (E2))
#define MK_3TUP(E1, E2, E3)     erts_bld_tuple(hpp, szp, 3, (E1), (E2), (E3))
#define MK_CONS(CAR, CDR)       erts_bld_cons(hpp, szp, (CAR), (CDR))
#define MK_UINT(UI)             erts_bld_uint(hpp, szp, (UI))
    int i;
    Eterm tup;
    Eterm tup2;
    Eterm nl = NIL;
    Eterm dl = NIL;
    Eterm nrid;

    for(i = 0; i < no_referred_nodes; i++) {
        NodeReferrer *nrp;
        Eterm nril = NIL;

        for(nrp = referred_nodes[i].referrers; nrp; nrp = nrp->next) {
            Eterm nrl = NIL;
            /* NodeReferenceList = [{ReferenceType,References}] */
            if(nrp->heap_ref) {
                tup = MK_2TUP(AM_heap, MK_UINT(nrp->heap_ref));
                nrl = MK_CONS(tup, nrl);
            }
            if(nrp->link_ref) {
                tup = MK_2TUP(AM_link, MK_UINT(nrp->link_ref));
                nrl = MK_CONS(tup, nrl);
            }
            if(nrp->monitor_ref) {
                tup = MK_2TUP(AM_monitor, MK_UINT(nrp->monitor_ref));
                nrl = MK_CONS(tup, nrl);
            }
            if(nrp->ets_ref) {
                tup = MK_2TUP(AM_ets, MK_UINT(nrp->ets_ref));
                nrl = MK_CONS(tup, nrl);
            }
            if(nrp->bin_ref) {
                tup = MK_2TUP(AM_binary, MK_UINT(nrp->bin_ref));
                nrl = MK_CONS(tup, nrl);
            }
            if(nrp->timer_ref) {
                tup = MK_2TUP(AM_timer, MK_UINT(nrp->timer_ref));
                nrl = MK_CONS(tup, nrl);
            }
            if(nrp->system_ref) {
                tup = MK_2TUP(AM_system, MK_UINT(nrp->system_ref));
                nrl = MK_CONS(tup, nrl);
            }

            nrid = nrp->id;
            if (!IS_CONST(nrp->id)) {
                /* Non-immediate id: copy onto the result heap (or just
                 * add its size during the sizing pass). */
                Uint nrid_sz = size_object(nrp->id);
                if (szp)
                    *szp += nrid_sz;
                if (hpp)
                    nrid = copy_struct(nrp->id, nrid_sz, hpp, NULL);
            }

            if (is_internal_pid(nrid) || nrid == am_error_logger) {
                ASSERT(!nrp->ets_ref && !nrp->bin_ref && !nrp->system_ref);
                tup = MK_2TUP(AM_process, nrid);
            }
            else if (is_tuple(nrid)) {
                Eterm *t;
                ASSERT(!nrp->ets_ref && !nrp->bin_ref);
                t = tuple_val(nrid);
                ASSERT(2 == arityval(t[0]));
                tup = MK_2TUP(t[1], t[2]);
            }
            else if(is_internal_port(nrid)) {
                ASSERT(!nrp->heap_ref && !nrp->ets_ref && !nrp->bin_ref
                       && !nrp->timer_ref && !nrp->system_ref);
                tup = MK_2TUP(AM_port, nrid);
            }
            else if(nrp->ets_ref) {
                ASSERT(!nrp->heap_ref && !nrp->link_ref &&
                       !nrp->monitor_ref && !nrp->bin_ref
                       && !nrp->timer_ref && !nrp->system_ref);
                tup = MK_2TUP(AM_ets, nrid);
            }
            else if(nrp->bin_ref) {
                ASSERT(is_small(nrid) || is_big(nrid));
                ASSERT(!nrp->heap_ref && !nrp->ets_ref && !nrp->link_ref &&
                       !nrp->monitor_ref && !nrp->timer_ref
                       && !nrp->system_ref);
                tup = MK_2TUP(AM_match_spec, nrid);
            }
            else {
                ASSERT(!nrp->heap_ref && !nrp->ets_ref && !nrp->bin_ref);
                ASSERT(is_atom(nrid));
                tup = MK_2TUP(AM_dist, nrid);
            }
            tup = MK_2TUP(tup, nrl);
            /* NodeReferenceIdList =
               [{{ReferrerType, ID}, NodeReferenceList}] */
            nril = MK_CONS(tup, nril);
        }

        /* NodeList = [{{Node, Creation}, Refc, NodeReferenceIdList}] */
        tup = MK_2TUP(referred_nodes[i].node->sysname,
                      MK_UINT(referred_nodes[i].node->creation));
        tup = MK_3TUP(tup,
                      MK_UINT(erts_refc_read(&referred_nodes[i].node->refc, 1)),
                      nril);
        nl = MK_CONS(tup, nl);
    }

    for(i = 0; i < no_referred_dists; i++) {
        DistReferrer *drp;
        Eterm dril = NIL;

        for(drp = referred_dists[i].referrers; drp; drp = drp->next) {
            Eterm drl = NIL;
            /* DistReferenceList = [{ReferenceType,References}] */
            if(drp->node_ref) {
                tup = MK_2TUP(AM_node, MK_UINT(drp->node_ref));
                drl = MK_CONS(tup, drl);
            }
            if(drp->ctrl_ref) {
                tup = MK_2TUP(AM_control, MK_UINT(drp->ctrl_ref));
                drl = MK_CONS(tup, drl);
            }

            if (is_internal_pid(drp->id)) {
                ASSERT(drp->ctrl_ref && !drp->node_ref);
                tup = MK_2TUP(AM_process, drp->id);
            }
            else if(is_internal_port(drp->id)) {
                ASSERT(drp->ctrl_ref && !drp->node_ref);
                tup = MK_2TUP(AM_port, drp->id);
            }
            else {
                ASSERT(!drp->ctrl_ref && drp->node_ref);
                ASSERT(is_atom(drp->id));
                tup = MK_2TUP(drp->id, MK_UINT(drp->creation));
                tup = MK_2TUP(AM_node, tup);
            }
            tup = MK_2TUP(tup, drl);
            /* DistReferenceIdList =
               [{{ReferrerType, ID}, DistReferenceList}] */
            dril = MK_CONS(tup, dril);
        }

        /* DistList = [{Dist, Refc, ReferenceIdList}] */
        tup = MK_3TUP(referred_dists[i].dist->sysname,
                      MK_UINT(erts_refc_read(&referred_dists[i].dist->refc, 1)),
                      dril);
        dl = MK_CONS(tup, dl);
    }

    /* {{node_references, NodeList}, {dist_references, DistList}} */

    tup = MK_2TUP(AM_node_references, nl);
    tup2 = MK_2TUP(AM_dist_references, dl);
    tup = MK_2TUP(tup, tup2);

    return tup;
#undef MK_2TUP
#undef MK_3TUP
#undef MK_CONS
#undef MK_UINT
}
/*
 * Shared implementation of the erts_debug dirty-scheduler test BIFs.
 * 'type' is the scheduler-type atom the BIF was scheduled on, 'arg1'
 * selects the sub-test and 'arg2' is its argument. 'I' is the
 * instruction pointer, needed for rescheduling via erts_schedule_bif().
 */
static BIF_RETTYPE
dirty_test(Process *c_p, Eterm type, Eterm arg1, Eterm arg2, UWord *I)
{
    BIF_RETTYPE ret;
    if (am_scheduler == arg1) {
        /* Report the type of scheduler we are currently running on. */
        ErtsSchedulerData *esdp;
        if (arg2 != am_type)
            goto badarg;
        esdp = erts_proc_sched_data(c_p);
        if (!esdp)
            goto scheduler_type_error;

        switch (esdp->type) {
        case ERTS_SCHED_NORMAL:
            ERTS_BIF_PREP_RET(ret, am_normal);
            break;
        case ERTS_SCHED_DIRTY_CPU:
            ERTS_BIF_PREP_RET(ret, am_dirty_cpu);
            break;
        case ERTS_SCHED_DIRTY_IO:
            ERTS_BIF_PREP_RET(ret, am_dirty_io);
            break;
        default:
        scheduler_type_error:
            ERTS_BIF_PREP_RET(ret, am_error);
            break;
        }
    }
    else if (am_error == arg1) {
        /* Raise the requested exception class. */
        switch (arg2) {
        case am_notsup:
            ERTS_BIF_PREP_ERROR(ret, c_p, EXC_NOTSUP);
            break;
        case am_undef:
            ERTS_BIF_PREP_ERROR(ret, c_p, EXC_UNDEF);
            break;
        case am_badarith:
            ERTS_BIF_PREP_ERROR(ret, c_p, EXC_BADARITH);
            break;
        case am_noproc:
            ERTS_BIF_PREP_ERROR(ret, c_p, EXC_NOPROC);
            break;
        case am_system_limit:
            ERTS_BIF_PREP_ERROR(ret, c_p, SYSTEM_LIMIT);
            break;
        case am_badarg:
        default:
            goto badarg;
        }
    }
    else if (am_copy == arg1) {
        /* Stress-test term copying: build a 1000-element list of
         * copies of arg2. */
        int i;
        Eterm res;

        for (res = NIL, i = 0; i < 1000; i++) {
            /* NOTE: sz is declared as Eterm here (same word size as
             * Uint in this code base — verify against config). */
            Eterm *hp, sz;
            Eterm cpy;
            /* We do not want this to be optimized,
               but rather the opposite... */
            sz = size_object(arg2);
            hp = HAlloc(c_p, sz);
            cpy = copy_struct(arg2, sz, &hp, &c_p->off_heap);
            hp = HAlloc(c_p, 2);
            res = CONS(hp, cpy, res);
        }

        ERTS_BIF_PREP_RET(ret, res);
    }
    else if (am_send == arg1) {
        dirty_send_message(c_p, arg2, am_ok);
        ERTS_BIF_PREP_RET(ret, am_ok);
    }
    else if (ERTS_IS_ATOM_STR("wait", arg1)) {
        if (!ms_wait(c_p, arg2, type == am_dirty_cpu))
            goto badarg;
        ERTS_BIF_PREP_RET(ret, am_ok);
    }
    else if (ERTS_IS_ATOM_STR("reschedule", arg1)) {
        /*
         * Reschedule operation after decrement of two until we reach
         * zero. Switch between dirty scheduler types when 'n' is
         * evenly divided by 4. If the initial value wasn't evenly
         * dividable by 2, throw badarg exception.
         */
        Eterm next_type;
        Sint n;
        if (!term_to_Sint(arg2, &n) || n < 0)
            goto badarg;
        if (n == 0)
            ERTS_BIF_PREP_RET(ret, am_ok);
        else {
            Eterm argv[3];
            Eterm eint = erts_make_integer((Uint) (n - 2), c_p);
            if (n % 4 != 0)
                next_type = type;
            else {
                switch (type) {
                case am_dirty_cpu: next_type = am_dirty_io; break;
                case am_dirty_io: next_type = am_normal; break;
                case am_normal: next_type = am_dirty_cpu; break;
                default: goto badarg;
                }
            }
            switch (next_type) {
            case am_dirty_io:
                argv[0] = arg1;
                argv[1] = eint;
                ret = erts_schedule_bif(c_p,
                                        argv,
                                        I,
                                        erts_debug_dirty_io_2,
                                        ERTS_SCHED_DIRTY_IO,
                                        am_erts_debug,
                                        am_dirty_io,
                                        2);
                break;
            case am_dirty_cpu:
                argv[0] = arg1;
                argv[1] = eint;
                ret = erts_schedule_bif(c_p,
                                        argv,
                                        I,
                                        erts_debug_dirty_cpu_2,
                                        ERTS_SCHED_DIRTY_CPU,
                                        am_erts_debug,
                                        am_dirty_cpu,
                                        2);
                break;
            case am_normal:
                argv[0] = am_normal;
                argv[1] = arg1;
                argv[2] = eint;
                ret = erts_schedule_bif(c_p,
                                        argv,
                                        I,
                                        erts_debug_dirty_3,
                                        ERTS_SCHED_NORMAL,
                                        am_erts_debug,
                                        am_dirty,
                                        3);
                break;
            default:
                goto badarg;
            }
        }
    }
    else if (ERTS_IS_ATOM_STR("ready_wait6_done", arg1)) {
        /* Signal 'ready' to arg2, sleep ~6 s, then signal 'done'. */
        ERTS_DECL_AM(ready);
        ERTS_DECL_AM(done);

        dirty_send_message(c_p, arg2, AM_ready);
        ms_wait(c_p, make_small(6000), 0);
        dirty_send_message(c_p, arg2, AM_done);
        ERTS_BIF_PREP_RET(ret, am_ok);
    }
    else if (ERTS_IS_ATOM_STR("alive_waitexiting", arg1)) {
        /* Stay on the dirty scheduler until the (real) process has
         * been killed, then verify allocation still works. */
        Process *real_c_p = erts_proc_shadow2real(c_p);
        Eterm *hp, *hp2;
        Uint sz;
        int i;
        ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
        int dirty_io = esdp->type == ERTS_SCHED_DIRTY_IO;

        if (ERTS_PROC_IS_EXITING(real_c_p))
            goto badarg;
        dirty_send_message(c_p, arg2, am_alive);

        /* Wait until dead */
        while (!ERTS_PROC_IS_EXITING(real_c_p)) {
            if (dirty_io)
                ms_wait(c_p, make_small(100), 0);
            else
                erts_thr_yield();
        }

        ms_wait(c_p, make_small(1000), 0);

        /* Should still be able to allocate memory */
        hp = HAlloc(c_p, 3); /* Likely on heap */
        sz = 10000;
        hp2 = HAlloc(c_p, sz); /* Likely in heap fragment */
        /* Fill with a bignum-shaped blob; only used as allocation
         * exercise for the returned term. */
        *hp2 = make_pos_bignum_header(sz);
        for (i = 1; i < sz; i++)
            hp2[i] = (Eterm) 4711;
        ERTS_BIF_PREP_RET(ret, TUPLE2(hp, am_ok, make_big(hp2)));
    }
    else {
    badarg:
        ERTS_BIF_PREP_ERROR(ret, c_p, BADARG);
    }
    return ret;
}
/* Add a message last in message queue */
/*
 * Attach 'message' (and token/utag) to the pre-allocated ErtsMessage
 * 'mp' and link it last on 'receiver's queue. Acquires the MSGQ lock
 * if the caller does not hold it; drops and cleans up the message when
 * the receiver is exiting. Returns the resulting queue length, or 0
 * when the message was dropped.
 */
static Sint
queue_message(Process *c_p,
              Process* receiver,
              erts_aint32_t *receiver_state,
              ErtsProcLocks *receiver_locks,
              ErtsMessage* mp,
              Eterm message,
              Eterm seq_trace_token
#ifdef USE_VM_PROBES
              , Eterm dt_utag
#endif
    )
{
    Sint res;
    int locked_msgq = 0;
    erts_aint32_t state;

    ERTS_SMP_LC_ASSERT(*receiver_locks == erts_proc_lc_my_proc_locks(receiver));

#ifdef ERTS_SMP
    if (!(*receiver_locks & ERTS_PROC_LOCK_MSGQ)) {
        if (erts_smp_proc_trylock(receiver, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
            ErtsProcLocks need_locks = ERTS_PROC_LOCK_MSGQ;

            /* Trylock failed: bail out early if the receiver is
             * already exiting rather than blocking on the lock. */
            if (receiver_state)
                state = *receiver_state;
            else
                state = erts_smp_atomic32_read_nob(&receiver->state);
            if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT))
                goto exiting;

            /* Lock order: release STATUS, retake together with MSGQ. */
            if (*receiver_locks & ERTS_PROC_LOCK_STATUS) {
                erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_STATUS);
                need_locks |= ERTS_PROC_LOCK_STATUS;
            }
            erts_smp_proc_lock(receiver, need_locks);
        }
        locked_msgq = 1;
    }
#endif

    state = erts_smp_atomic32_read_nob(&receiver->state);

    if (state & (ERTS_PSFLG_PENDING_EXIT|ERTS_PSFLG_EXITING)) {
#ifdef ERTS_SMP
    exiting:
#endif
        /* Drop message if receiver is exiting or has a pending exit... */
        if (locked_msgq)
            erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);
        erts_cleanup_messages(mp);
        return 0;
    }

    ERL_MESSAGE_TERM(mp) = message;
    ERL_MESSAGE_TOKEN(mp) = seq_trace_token;
#ifdef USE_VM_PROBES
    ERL_MESSAGE_DT_UTAG(mp) = dt_utag;
#endif

    res = receiver->msg.len;
#ifdef ERTS_SMP
    if (*receiver_locks & ERTS_PROC_LOCK_MAIN) {
        /*
         * We move 'in queue' to 'private queue' and place
         * message at the end of 'private queue' in order
         * to ensure that the 'in queue' doesn't contain
         * references into the heap. By ensuring this,
         * we don't need to include the 'in queue' in
         * the root set when garbage collecting.
         */
        res += receiver->msg_inq.len;
        ERTS_SMP_MSGQ_MV_INQ2PRIVQ(receiver);
        LINK_MESSAGE_PRIVQ(receiver, mp);
    }
    else
#endif
    {
        LINK_MESSAGE(receiver, mp);
    }

#ifdef USE_VM_PROBES
    if (DTRACE_ENABLED(message_queued)) {
        DTRACE_CHARBUF(receiver_name, DTRACE_TERM_BUF_SIZE);
        Sint tok_label = 0;
        Sint tok_lastcnt = 0;
        Sint tok_serial = 0;

        dtrace_proc_str(receiver, receiver_name);
        if (seq_trace_token != NIL && is_tuple(seq_trace_token)) {
            tok_label = signed_val(SEQ_TRACE_T_LABEL(seq_trace_token));
            tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(seq_trace_token));
            tok_serial = signed_val(SEQ_TRACE_T_SERIAL(seq_trace_token));
        }
        DTRACE6(message_queued,
                receiver_name, size_object(message), receiver->msg.len,
                tok_label, tok_lastcnt, tok_serial);
    }
#endif

    if (IS_TRACED_FL(receiver, F_TRACE_RECEIVE))
        trace_receive(receiver, message);

    if (locked_msgq)
        erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);

    erts_proc_notify_new_message(receiver,
#ifdef ERTS_SMP
                                 *receiver_locks
#else
                                 0
#endif
        );

#ifndef ERTS_SMP
    ERTS_HOLE_CHECK(receiver);
#endif
    return res;
}
/*
 * Send 'message' from 'sender' to 'receiver' (SMP-era implementation).
 *
 * Three paths:
 *   1. Sequential tracing active: the message and trace token are copied
 *      into a fresh heap fragment and enqueued via queue_message().
 *   2. Self-send (sender == receiver): the term already lives on the
 *      sender's own heap, so only an ErlMessage cell is linked in — no copy.
 *   3. Ordinary send: the message is copied onto the receiver's heap or a
 *      fragment (erts_alloc_message_heap_state decides) and enqueued.
 *
 * 'receiver_locks' is in/out: locks may be acquired and the set updated.
 * Returns the receiver's message-queue length (used by the caller to
 * charge reductions).
 */
Sint
erts_send_message(Process* sender,
                  Process* receiver,
                  ErtsProcLocks *receiver_locks,
                  Eterm message,
                  unsigned flags)
{
    Uint msize;
    ErlHeapFragment* bp = NULL;
    Eterm token = NIL;
    Sint res = 0;
#ifdef USE_VM_PROBES
    DTRACE_CHARBUF(sender_name, 64);
    DTRACE_CHARBUF(receiver_name, 64);
    Sint tok_label = 0;
    Sint tok_lastcnt = 0;
    Sint tok_serial = 0;
#endif
    BM_STOP_TIMER(system);
    BM_MESSAGE(message,sender,receiver);
    BM_START_TIMER(send);
#ifdef USE_VM_PROBES
    *sender_name = *receiver_name = '\0';
    if (DTRACE_ENABLED(message_send)) {
        erts_snprintf(sender_name, sizeof(DTRACE_CHARBUF_NAME(sender_name)),
                      "%T", sender->common.id);
        erts_snprintf(receiver_name, sizeof(DTRACE_CHARBUF_NAME(receiver_name)),
                      "%T", receiver->common.id);
    }
#endif
    if (SEQ_TRACE_TOKEN(sender) != NIL && !(flags & ERTS_SND_FLG_NO_SEQ_TRACE)) {
        /* Seq-trace path: always copy into a heap fragment so the trace
         * token travels with the message. */
        Eterm* hp;
        Eterm stoken = SEQ_TRACE_TOKEN(sender);
        Uint seq_trace_size = 0;
#ifdef USE_VM_PROBES
        Uint dt_utag_size = 0;
        Eterm utag = NIL;
#endif
        BM_SWAP_TIMER(send,size);
        msize = size_object(message);
        BM_SWAP_TIMER(size,send);
#ifdef USE_VM_PROBES
        if (stoken != am_have_dt_utag) {
#endif
            seq_trace_update_send(sender);
            seq_trace_output(stoken, message, SEQ_TRACE_SEND,
                             receiver->common.id, sender);
            seq_trace_size = 6; /* TUPLE5 */
#ifdef USE_VM_PROBES
        }
        if (DT_UTAG_FLAGS(sender) & DT_UTAG_SPREADING) {
            dt_utag_size = size_object(DT_UTAG(sender));
        } else if (stoken == am_have_dt_utag ) {
            /* Dynamic-trace tag present but not spreading: no real token. */
            stoken = NIL;
        }
#endif
        /* One fragment holds message + token (+ dtrace tag). */
        bp = new_message_buffer(msize + seq_trace_size
#ifdef USE_VM_PROBES
                                + dt_utag_size
#endif
                                );
        hp = bp->mem;
        BM_SWAP_TIMER(send,copy);
        token = copy_struct(stoken, seq_trace_size, &hp, &bp->off_heap);
        message = copy_struct(message, msize, &hp, &bp->off_heap);
#ifdef USE_VM_PROBES
        if (DT_UTAG_FLAGS(sender) & DT_UTAG_SPREADING) {
            utag = copy_struct(DT_UTAG(sender), dt_utag_size, &hp, &bp->off_heap);
#ifdef DTRACE_TAG_HARDDEBUG
            erts_fprintf(stderr,
                         "Dtrace -> (%T) Spreading tag (%T) with "
                         "message %T!\r\n",sender->common.id, utag, message);
#endif
        }
#endif
        BM_MESSAGE_COPIED(msize);
        BM_SWAP_TIMER(copy,send);
#ifdef USE_VM_PROBES
        if (DTRACE_ENABLED(message_send)) {
            if (stoken != NIL && stoken != am_have_dt_utag) {
                tok_label = signed_val(SEQ_TRACE_T_LABEL(stoken));
                tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(stoken));
                tok_serial = signed_val(SEQ_TRACE_T_SERIAL(stoken));
            }
            DTRACE6(message_send, sender_name, receiver_name,
                    msize, tok_label, tok_lastcnt, tok_serial);
        }
#endif
        res = queue_message(NULL, receiver, receiver_locks, NULL, bp, message, token
#ifdef USE_VM_PROBES
                            , utag
#endif
                            );
        BM_SWAP_TIMER(send,system);
    } else if (sender == receiver) {
        /* Drop message if receiver has a pending exit ... */
#ifdef ERTS_SMP
        /* Take whatever of MSGQ/STATUS we do not already hold; on trylock
         * contention fall back to a blocking lock in correct lock order. */
        ErtsProcLocks need_locks = (~(*receiver_locks)
                                    & (ERTS_PROC_LOCK_MSGQ | ERTS_PROC_LOCK_STATUS));
        if (need_locks) {
            *receiver_locks |= need_locks;
            if (erts_smp_proc_trylock(receiver, need_locks) == EBUSY) {
                if (need_locks == ERTS_PROC_LOCK_MSGQ) {
                    erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_STATUS);
                    need_locks = ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS;
                }
                erts_smp_proc_lock(receiver, need_locks);
            }
        }
        if (!ERTS_PROC_PENDING_EXIT(receiver))
#endif
        {
            /* Self-send: no copy — link the term straight into the
             * private queue (it already lives on this process's heap). */
            ErlMessage* mp = message_alloc();
            DTRACE6(message_send, sender_name, receiver_name,
                    size_object(message), tok_label, tok_lastcnt, tok_serial);
            mp->data.attached = NULL;
            ERL_MESSAGE_TERM(mp) = message;
            ERL_MESSAGE_TOKEN(mp) = NIL;
#ifdef USE_VM_PROBES
            ERL_MESSAGE_DT_UTAG(mp) = NIL;
#endif
            mp->next = NULL;
            /*
             * We move 'in queue' to 'private queue' and place
             * message at the end of 'private queue' in order
             * to ensure that the 'in queue' doesn't contain
             * references into the heap. By ensuring this,
             * we don't need to include the 'in queue' in
             * the root set when garbage collecting.
             */
            ERTS_SMP_MSGQ_MV_INQ2PRIVQ(receiver);
            LINK_MESSAGE_PRIVQ(receiver, mp);
            res = receiver->msg.len;
            if (IS_TRACED_FL(receiver, F_TRACE_RECEIVE)) {
                trace_receive(receiver, message);
            }
        }
        BM_SWAP_TIMER(send,system);
    } else {
        /* Ordinary send: copy onto receiver's heap (or a fragment). */
        ErlOffHeap *ohp;
        Eterm *hp;
        erts_aint32_t state;
        BM_SWAP_TIMER(send,size);
        msize = size_object(message);
        BM_SWAP_TIMER(size,send);
        hp = erts_alloc_message_heap_state(msize, &bp, &ohp, receiver,
                                           receiver_locks, &state);
        BM_SWAP_TIMER(send,copy);
        message = copy_struct(message, msize, &hp, ohp);
        /* NOTE(review): 'msz' looks like a typo for 'msize'; harmless since
         * BM_* macros expand to nothing unless BENCH is enabled — confirm. */
        BM_MESSAGE_COPIED(msz);
        BM_SWAP_TIMER(copy,send);
        DTRACE6(message_send, sender_name, receiver_name,
                msize, tok_label, tok_lastcnt, tok_serial);
        res = queue_message(sender, receiver, receiver_locks, &state, bp, message, token
#ifdef USE_VM_PROBES
                            , NIL
#endif
                            );
        BM_SWAP_TIMER(send,system);
    }
    return res;
}
/*
 * Moves content of message buffer attached to a message into a heap.
 * The message buffer is deallocated.
 *
 * '*hpp' points into a heap with enough free space for the fragment's
 * used_size; it is advanced past the copied words. Off-heap objects
 * (refc binaries, funs, external pids/ports/refs) are re-linked into
 * 'off_heap'. The message's term/token(/utag) fields are rewritten to
 * point at the new location.
 */
void
erts_move_msg_mbuf_to_heap(Eterm** hpp, ErlOffHeap* off_heap, ErlMessage *msg)
{
    struct erl_off_heap_header* oh;
    Eterm term, token, *fhp, *hp;
    Sint offs;
    Uint sz;
    ErlHeapFragment *bp;
#ifdef USE_VM_PROBES
    Eterm utag;
#endif

#ifdef HARD_DEBUG
    struct erl_off_heap_header* dbg_oh_start = off_heap->first;
    Eterm dbg_term, dbg_token;
    ErlHeapFragment *dbg_bp;
    Uint *dbg_hp, *dbg_thp_start;
    Uint dbg_term_sz, dbg_token_sz;
#ifdef USE_VM_PROBES
    Eterm dbg_utag;
    Uint dbg_utag_sz;
#endif
#endif

    bp = msg->data.heap_frag;
    term = ERL_MESSAGE_TERM(msg);
    token = ERL_MESSAGE_TOKEN(msg);
#ifdef USE_VM_PROBES
    utag = ERL_MESSAGE_DT_UTAG(msg);
#endif
    if (!bp) {
        /* No fragment attached: everything must already be immediate. */
#ifdef USE_VM_PROBES
        ASSERT(is_immed(term) && is_immed(token) && is_immed(utag));
#else
        ASSERT(is_immed(term) && is_immed(token));
#endif
        return;
    }

#ifdef HARD_DEBUG
    /* Build an independent reference copy so the result can be compared
     * with eq() at the end. */
    dbg_term_sz = size_object(term);
    dbg_token_sz = size_object(token);
    dbg_bp = new_message_buffer(dbg_term_sz + dbg_token_sz);
#ifdef USE_VM_PROBES
    /* NOTE(review): this second allocation leaks the buffer allocated just
     * above when both HARD_DEBUG and USE_VM_PROBES are defined (debug-only
     * leak) — confirm. */
    dbg_utag_sz = size_object(utag);
    dbg_bp = new_message_buffer(dbg_term_sz + dbg_token_sz + dbg_utag_sz );
#endif
    /*ASSERT(dbg_term_sz + dbg_token_sz == erts_msg_used_frag_sz(msg));
      Copied size may be smaller due to removed SubBins's or garbage.
      Copied size may be larger due to duplicated shared terms. */
    dbg_hp = dbg_bp->mem;
    dbg_term = copy_struct(term, dbg_term_sz, &dbg_hp, &dbg_bp->off_heap);
    dbg_token = copy_struct(token, dbg_token_sz, &dbg_hp, &dbg_bp->off_heap);
#ifdef USE_VM_PROBES
    dbg_utag = copy_struct(utag, dbg_utag_sz, &dbg_hp, &dbg_bp->off_heap);
#endif
    dbg_thp_start = *hpp;
#endif

    if (bp->next != NULL) {
        /* Multiple chained fragments: use the generic multi-fragment mover
         * (2 or 3 message fields to relocate, depending on probes). */
        move_multi_frags(hpp, off_heap, bp, msg->m,
#ifdef USE_VM_PROBES
                         3
#else
                         2
#endif
                         );
        goto copy_done;
    }

    OH_OVERHEAD(off_heap, bp->off_heap.overhead);
    sz = bp->used_size;

    ASSERT(is_immed(term) || in_heapfrag(ptr_val(term),bp));
    ASSERT(is_immed(token) || in_heapfrag(ptr_val(token),bp));

    fhp = bp->mem;
    hp = *hpp;
    offs = hp - fhp;  /* constant word distance: fragment -> heap */

    oh = NULL;
    while (sz--) {
        Uint cpy_sz;
        Eterm val = *fhp++;

        switch (primary_tag(val)) {
        case TAG_PRIMARY_IMMED1:
            *hp++ = val;
            break;
        case TAG_PRIMARY_LIST:
        case TAG_PRIMARY_BOXED:
            /* Pointer into the fragment: rebase by 'offs'. */
            ASSERT(in_heapfrag(ptr_val(val), bp));
            *hp++ = offset_ptr(val, offs);
            break;
        case TAG_PRIMARY_HEADER:
            *hp++ = val;
            switch (val & _HEADER_SUBTAG_MASK) {
            case ARITYVAL_SUBTAG:
                break;
            case REFC_BINARY_SUBTAG:
            case FUN_SUBTAG:
            case EXTERNAL_PID_SUBTAG:
            case EXTERNAL_PORT_SUBTAG:
            case EXTERNAL_REF_SUBTAG:
                /* Off-heap object: remember its new address so it can be
                 * linked into 'off_heap' after its words are copied. */
                oh = (struct erl_off_heap_header*) (hp-1);
                cpy_sz = thing_arityval(val);
                goto cpy_words;
            default:
                cpy_sz = header_arity(val);
            cpy_words:
                /* Copy the thing's payload words verbatim (they contain no
                 * fragment-relative pointers). Unrolled 8 at a time. */
                ASSERT(sz >= cpy_sz);
                sz -= cpy_sz;
                while (cpy_sz >= 8) {
                    cpy_sz -= 8;
                    *hp++ = *fhp++;
                    *hp++ = *fhp++;
                    *hp++ = *fhp++;
                    *hp++ = *fhp++;
                    *hp++ = *fhp++;
                    *hp++ = *fhp++;
                    *hp++ = *fhp++;
                    *hp++ = *fhp++;
                }
                switch (cpy_sz) {  /* remaining 0..7 words; falls through */
                case 7: *hp++ = *fhp++;
                case 6: *hp++ = *fhp++;
                case 5: *hp++ = *fhp++;
                case 4: *hp++ = *fhp++;
                case 3: *hp++ = *fhp++;
                case 2: *hp++ = *fhp++;
                case 1: *hp++ = *fhp++;
                default: break;
                }
                if (oh) {
                    /* Add to offheap list */
                    oh->next = off_heap->first;
                    off_heap->first = oh;
                    ASSERT(*hpp <= (Eterm*)oh);
                    ASSERT(hp > (Eterm*)oh);
                    oh = NULL;
                }
                break;
            }
            break;
        }
    }

    ASSERT(bp->used_size == hp - *hpp);
    *hpp = hp;

    /* Rebase the message's own term/token(/utag) references. */
    if (is_not_immed(token)) {
        ASSERT(in_heapfrag(ptr_val(token), bp));
        ERL_MESSAGE_TOKEN(msg) = offset_ptr(token, offs);
#ifdef HARD_DEBUG
        ASSERT(dbg_thp_start <= ptr_val(ERL_MESSAGE_TOKEN(msg)));
        ASSERT(hp > ptr_val(ERL_MESSAGE_TOKEN(msg)));
#endif
    }
    if (is_not_immed(term)) {
        ASSERT(in_heapfrag(ptr_val(term),bp));
        ERL_MESSAGE_TERM(msg) = offset_ptr(term, offs);
#ifdef HARD_DEBUG
        ASSERT(dbg_thp_start <= ptr_val(ERL_MESSAGE_TERM(msg)));
        ASSERT(hp > ptr_val(ERL_MESSAGE_TERM(msg)));
#endif
    }
#ifdef USE_VM_PROBES
    if (is_not_immed(utag)) {
        ASSERT(in_heapfrag(ptr_val(utag), bp));
        ERL_MESSAGE_DT_UTAG(msg) = offset_ptr(utag, offs);
#ifdef HARD_DEBUG
        ASSERT(dbg_thp_start <= ptr_val(ERL_MESSAGE_DT_UTAG(msg)));
        ASSERT(hp > ptr_val(ERL_MESSAGE_DT_UTAG(msg)));
#endif
    }
#endif

copy_done:

#ifdef HARD_DEBUG
    {
        /* Verify that exactly as many off-heap headers were added to
         * 'off_heap' as existed in the fragment chain. */
        int i, j;
        ErlHeapFragment* frag;
        {
            struct erl_off_heap_header* dbg_oh = off_heap->first;
            i = j = 0;
            while (dbg_oh != dbg_oh_start) {
                dbg_oh = dbg_oh->next;
                i++;
            }
            for (frag=bp; frag; frag=frag->next) {
                dbg_oh = frag->off_heap.first;
                while (dbg_oh) {
                    dbg_oh = dbg_oh->next;
                    j++;
                }
            }
            ASSERT(i == j);
        }
    }
#endif

    /* The off-heap objects now belong to 'off_heap'; detach them before
     * freeing the fragment so they are not destroyed with it. */
    bp->off_heap.first = NULL;
    free_message_buffer(bp);
    msg->data.heap_frag = NULL;

#ifdef HARD_DEBUG
    ASSERT(eq(ERL_MESSAGE_TERM(msg), dbg_term));
    ASSERT(eq(ERL_MESSAGE_TOKEN(msg), dbg_token));
#ifdef USE_VM_PROBES
    ASSERT(eq(ERL_MESSAGE_DT_UTAG(msg), dbg_utag));
#endif
    free_message_buffer(dbg_bp);
#endif
}
/*
 * Enqueue 'message' on the receiving process's message queue.
 *
 * 'bp' is the heap fragment holding the (already copied) message, or NULL
 * if the term is immediate / lives on the receiver's heap. On success the
 * message cell takes ownership of 'bp'. If the receiver is exiting the
 * message and fragment are dropped and freed.
 *
 * 'receiver_state' (optional in/out) caches the receiver's state word so
 * callers can avoid re-reading it.
 *
 * Returns the receiver's queue length before insertion (0 if dropped).
 */
static Sint
queue_message(Process *c_p,
              Process* receiver,
              ErtsProcLocks *receiver_locks,
              erts_aint32_t *receiver_state,
              ErlHeapFragment* bp,
              Eterm message,
              Eterm seq_trace_token
#ifdef USE_VM_PROBES
              , Eterm dt_utag
#endif
    )
{
    Sint res;
    ErlMessage* mp;
    int locked_msgq = 0;
    erts_aint_t state;

#ifndef ERTS_SMP
    ASSERT(bp != NULL || receiver->mbuf == NULL);
#endif

    ERTS_SMP_LC_ASSERT(*receiver_locks == erts_proc_lc_my_proc_locks(receiver));

    mp = message_alloc();

    if (receiver_state){
        state = *receiver_state;
    }else{
        state = erts_smp_atomic32_read_acqb(&receiver->state);
    }

#ifdef ERTS_SMP
    /* Receiver already exiting: jump straight to the drop/cleanup path. */
    if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT)){
        goto exiting;
    }

    /* If we do not already hold the receiver's MSGQ lock, try to take it.
     * On trylock contention, temporarily release STATUS (if held) so both
     * locks can be acquired in the correct order without deadlocking. */
    if (!(*receiver_locks & ERTS_PROC_LOCK_MSGQ)) {
        if (erts_smp_proc_trylock(receiver, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
            ErtsProcLocks need_locks = ERTS_PROC_LOCK_MSGQ;
            if (*receiver_locks & ERTS_PROC_LOCK_STATUS) {
                erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_STATUS);
                need_locks |= ERTS_PROC_LOCK_STATUS;
            }
            erts_smp_proc_lock(receiver, need_locks);
        }
        locked_msgq = 1;
        /* Re-read the receiver's state now that MSGQ is held. */
        state = erts_smp_atomic32_read_nob(&receiver->state);
        if (receiver_state){
            *receiver_state = state;
        }
    }
#endif

    /* Receiver is exiting: release our lock and drop the message. */
    if (state & (ERTS_PSFLG_PENDING_EXIT|ERTS_PSFLG_EXITING)) {
#ifdef ERTS_SMP
    exiting:
#endif
        /* Drop message if receiver is exiting or has a pending exit... */
        if (locked_msgq){
            erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);
        }
        if (bp){
            free_message_buffer(bp);
        }
        message_free(mp);
        return 0;
    }

    /* Only a reference to 'message' is stored in the cell; the term itself
     * was already copied (or is shared/ref-counted) by the caller. */
    ERL_MESSAGE_TERM(mp) = message;
    ERL_MESSAGE_TOKEN(mp) = seq_trace_token;
#ifdef USE_VM_PROBES
    ERL_MESSAGE_DT_UTAG(mp) = dt_utag;
#endif
    mp->next = NULL;
    mp->data.heap_frag = bp;

#ifndef ERTS_SMP
    res = receiver->msg.len;
#else
    res = receiver->msg_inq.len;
    /* Link the message onto either the private queue or the in-queue,
     * depending on which locks we hold. */
    if (*receiver_locks & ERTS_PROC_LOCK_MAIN) {
        /*
         * We move 'in queue' to 'private queue' and place
         * message at the end of 'private queue' in order
         * to ensure that the 'in queue' doesn't contain
         * references into the heap. By ensuring this,
         * we don't need to include the 'in queue' in
         * the root set when garbage collecting.
         */
        /* Holding MAIN: both msg_inq and msg may be manipulated here. */
        res += receiver->msg.len;
        ERTS_SMP_MSGQ_MV_INQ2PRIVQ(receiver);
        LINK_MESSAGE_PRIVQ(receiver, mp);
    }
    else
#endif
    {
        /* Without MAIN: only the in-queue (msg_inq) may be touched. */
        LINK_MESSAGE(receiver, mp);
    }

#ifdef USE_VM_PROBES
    if (DTRACE_ENABLED(message_queued)) {
        DTRACE_CHARBUF(receiver_name, DTRACE_TERM_BUF_SIZE);
        Sint tok_label = 0;
        Sint tok_lastcnt = 0;
        Sint tok_serial = 0;
        dtrace_proc_str(receiver, receiver_name);
        if (seq_trace_token != NIL && is_tuple(seq_trace_token)) {
            tok_label = signed_val(SEQ_TRACE_T_LABEL(seq_trace_token));
            tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(seq_trace_token));
            tok_serial = signed_val(SEQ_TRACE_T_SERIAL(seq_trace_token));
        }
        DTRACE6(message_queued,
                receiver_name, size_object(message), receiver->msg.len,
                tok_label, tok_lastcnt, tok_serial);
    }
#endif

    if (IS_TRACED_FL(receiver, F_TRACE_RECEIVE)){
        trace_receive(receiver, message);
    }

    if (locked_msgq){
        erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);
    }

    /* Wake the receiver (schedule it) now that a message is available. */
    erts_proc_notify_new_message(receiver,
#ifdef ERTS_SMP
                                 *receiver_locks
#else
                                 0
#endif
        );

#ifndef ERTS_SMP
    ERTS_HOLE_CHECK(receiver);
#endif
    return res;
}
void erts_deliver_exit_message(Eterm from, Process *to, ErtsProcLocks *to_locksp, Eterm reason, Eterm token) { Eterm mess; Eterm save; Eterm from_copy; Uint sz_reason; Uint sz_token; Uint sz_from; Eterm* hp; Eterm temptoken; ErtsMessage* mp; ErlOffHeap *ohp; #ifdef SHCOPY_SEND erts_shcopy_t info; #endif if (have_seqtrace(token)) { ASSERT(is_tuple(token)); sz_token = size_object(token); sz_from = size_object(from); #ifdef SHCOPY_SEND INITIALIZE_SHCOPY(info); sz_reason = copy_shared_calculate(reason, &info); #else sz_reason = size_object(reason); #endif mp = erts_alloc_message_heap(to, to_locksp, sz_reason + sz_from + sz_token + 4, &hp, &ohp); #ifdef SHCOPY_SEND mess = copy_shared_perform(reason, sz_reason, &info, &hp, ohp); DESTROY_SHCOPY(info); #else mess = copy_struct(reason, sz_reason, &hp, ohp); #endif from_copy = copy_struct(from, sz_from, &hp, ohp); save = TUPLE3(hp, am_EXIT, from_copy, mess); hp += 4; /* the trace token must in this case be updated by the caller */ seq_trace_output(token, save, SEQ_TRACE_SEND, to->common.id, NULL); temptoken = copy_struct(token, sz_token, &hp, ohp); ERL_MESSAGE_TOKEN(mp) = temptoken; erts_queue_message(to, *to_locksp, mp, save, am_system); } else { sz_from = IS_CONST(from) ? 0 : size_object(from); #ifdef SHCOPY_SEND INITIALIZE_SHCOPY(info); sz_reason = copy_shared_calculate(reason, &info); #else sz_reason = size_object(reason); #endif mp = erts_alloc_message_heap(to, to_locksp, sz_reason+sz_from+4, &hp, &ohp); #ifdef SHCOPY_SEND mess = copy_shared_perform(reason, sz_reason, &info, &hp, ohp); DESTROY_SHCOPY(info); #else mess = copy_struct(reason, sz_reason, &hp, ohp); #endif from_copy = (IS_CONST(from) ? from : copy_struct(from, sz_from, &hp, ohp)); save = TUPLE3(hp, am_EXIT, from_copy, mess); erts_queue_message(to, *to_locksp, mp, save, am_system); } }
Eterm erl_create_process(ErlProcess* parent, Eterm module, Eterm function, Eterm args, ErlSpawnOpts* opts) { int i; uint16_t last_proc; for(i=0; i<MAX_PROCESSES; i++) { if(proc_tab[i].active == 0) { last_proc = i; break; } } if(last_proc == MAX_PROCESSES) { erl_exit("maximum number of processes reached!"); //@todo return NIL; } ErlProcess* p = (ErlProcess*)&proc_tab[last_proc]; Eterm pid = pix2pid(last_proc); p->parent = parent; p->id = pid; p->timer.active = 0; p->timer.arg = NULL; p->timer.cancel = NULL; p->timer.count = 0; p->timer.next = NULL; p->timer.prev = NULL; p->timer.slot = 0; p->timer.timeout = NULL; //@todo throw an error if exported was not found p->arity = 3; p->arg_reg = p->def_arg_reg; p->max_arg_reg = sizeof(p->def_arg_reg)/sizeof(p->def_arg_reg[0]); for(i=0; i<p->max_arg_reg; i++) { p->def_arg_reg[i] = 0; } p->fcalls = REDUCTIONS; p->active = 1; p->flags = 0; p->msg.len = 0; p->msg.first = NULL; p->msg.last = &p->msg.first; p->msg.save = &p->msg.first; p->msg.saved_last = NULL; p->links = NULL; if(opts && (opts->flags & SPO_LINK)) { erts_add_link(&p->links, parent->id); erts_add_link(&parent->links, p->id); } // initialize heap unsigned int arg_size = size_object(args); unsigned int heap_need = arg_size; unsigned int sz = erts_next_heap_size(heap_need); p->heap = (Eterm*)pvPortMalloc(sz * sizeof(Eterm)); p->stop = p->hend = p->heap + sz; p->htop = p->heap; p->heap_sz = sz; p->i = (BeamInstr*)(beam_apply); p->cp = (BeamInstr*)(beam_apply + 1); p->saved_i = (BeamInstr*)(beam_apply + 2); p->arg_reg[0] = module; p->arg_reg[1] = function; p->arg_reg[2] = copy_struct(args, arg_size, &p->htop); //start process inside the FreeRTOS scheduler vTaskResume(*(p->handle)); return pid; }
/*
 * Send 'message' from 'sender' to 'receiver' (ErtsMessage-era
 * implementation with literal-purge / SHCOPY support).
 *
 * Copies the message (and seq-trace token / dtrace tag, when present)
 * onto the receiver's heap or a message fragment, then enqueues it via
 * queue_message(). Self-sends to a process without off-heap message
 * queues skip the copy entirely — the term already lives on the
 * sender's own heap.
 *
 * Returns queue_message()'s result (queue length, used for reductions).
 */
Sint
erts_send_message(Process* sender,
                  Process* receiver,
                  ErtsProcLocks *receiver_locks,
                  Eterm message,
                  unsigned flags)
{
    Uint msize;
    ErtsMessage* mp;
    ErlOffHeap *ohp;
    Eterm token = NIL;
    Sint res = 0;
#ifdef USE_VM_PROBES
    DTRACE_CHARBUF(sender_name, 64);
    DTRACE_CHARBUF(receiver_name, 64);
    Sint tok_label = 0;
    Sint tok_lastcnt = 0;
    Sint tok_serial = 0;
    Eterm utag = NIL;
#endif
    erts_aint32_t receiver_state;
#ifdef SHCOPY_SEND
    erts_shcopy_t info;
#else
    erts_literal_area_t litarea;
    INITIALIZE_LITERAL_PURGE_AREA(litarea);
#endif

#ifdef USE_VM_PROBES
    *sender_name = *receiver_name = '\0';
    if (DTRACE_ENABLED(message_send)) {
        erts_snprintf(sender_name, sizeof(DTRACE_CHARBUF_NAME(sender_name)),
                      "%T", sender->common.id);
        erts_snprintf(receiver_name, sizeof(DTRACE_CHARBUF_NAME(receiver_name)),
                      "%T", receiver->common.id);
    }
#endif

    receiver_state = erts_atomic32_read_nob(&receiver->state);

    if (SEQ_TRACE_TOKEN(sender) != NIL && !(flags & ERTS_SND_FLG_NO_SEQ_TRACE)) {
        /* Seq-trace path: token (a 5-tuple) travels with the message. */
        Eterm* hp;
        Eterm stoken = SEQ_TRACE_TOKEN(sender);
        Uint seq_trace_size = 0;
#ifdef USE_VM_PROBES
        Uint dt_utag_size = 0;
#endif
        /* SHCOPY corrupts the heap between
         * copy_shared_calculate, and
         * copy_shared_perform. (it inserts move_markers like the gc).
         * Make sure we don't use the heap between those instances.
         */
        if (have_seqtrace(stoken)) {
            seq_trace_update_send(sender);
            seq_trace_output(stoken, message, SEQ_TRACE_SEND,
                             receiver->common.id, sender);
            seq_trace_size = 6; /* TUPLE5 */
        }
#ifdef USE_VM_PROBES
        if (DT_UTAG_FLAGS(sender) & DT_UTAG_SPREADING) {
            dt_utag_size = size_object(DT_UTAG(sender));
        } else if (stoken == am_have_dt_utag ) {
            /* Tag present but not spreading: no real token to send. */
            stoken = NIL;
        }
#endif
#ifdef SHCOPY_SEND
        INITIALIZE_SHCOPY(info);
        msize = copy_shared_calculate(message, &info);
#else
        msize = size_object_litopt(message, &litarea);
#endif
        /* Allocate room for message + token (+ dtrace tag) in one go. */
        mp = erts_alloc_message_heap_state(receiver, &receiver_state,
                                           receiver_locks,
                                           (msize
#ifdef USE_VM_PROBES
                                            + dt_utag_size
#endif
                                            + seq_trace_size),
                                           &hp, &ohp);
#ifdef SHCOPY_SEND
        if (is_not_immed(message))
            message = copy_shared_perform(message, msize, &info, &hp, ohp);
        DESTROY_SHCOPY(info);
#else
        if (is_not_immed(message))
            message = copy_struct_litopt(message, msize, &hp, ohp, &litarea);
#endif
        if (is_immed(stoken))
            token = stoken;
        else
            token = copy_struct(stoken, seq_trace_size, &hp, ohp);
#ifdef USE_VM_PROBES
        if (DT_UTAG_FLAGS(sender) & DT_UTAG_SPREADING) {
            if (is_immed(DT_UTAG(sender)))
                utag = DT_UTAG(sender);
            else
                utag = copy_struct(DT_UTAG(sender), dt_utag_size, &hp, ohp);
        }
        if (DTRACE_ENABLED(message_send)) {
            if (have_seqtrace(stoken)) {
                tok_label = signed_val(SEQ_TRACE_T_LABEL(stoken));
                tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(stoken));
                tok_serial = signed_val(SEQ_TRACE_T_SERIAL(stoken));
            }
            DTRACE6(message_send, sender_name, receiver_name,
                    msize, tok_label, tok_lastcnt, tok_serial);
        }
#endif
    } else {
        Eterm *hp;

        if (receiver == sender && !(receiver_state & ERTS_PSFLG_OFF_HEAP_MSGQ)) {
            /* Self-send with on-heap queue: no copy needed, just a cell. */
            mp = erts_alloc_message(0, NULL);
            msize = 0;
        }
        else {
#ifdef SHCOPY_SEND
            INITIALIZE_SHCOPY(info);
            msize = copy_shared_calculate(message, &info);
#else
            msize = size_object_litopt(message, &litarea);
#endif
            mp = erts_alloc_message_heap_state(receiver, &receiver_state,
                                               receiver_locks, msize,
                                               &hp, &ohp);
#ifdef SHCOPY_SEND
            if (is_not_immed(message))
                message = copy_shared_perform(message, msize, &info, &hp, ohp);
            DESTROY_SHCOPY(info);
#else
            if (is_not_immed(message))
                message = copy_struct_litopt(message, msize, &hp, ohp, &litarea);
#endif
        }
#ifdef USE_VM_PROBES
        DTRACE6(message_send, sender_name, receiver_name,
                msize, tok_label, tok_lastcnt, tok_serial);
#endif
    }

    ERL_MESSAGE_TOKEN(mp) = token;
#ifdef USE_VM_PROBES
    ERL_MESSAGE_DT_UTAG(mp) = utag;
#endif
    res = queue_message(receiver, &receiver_state, *receiver_locks,
                        mp, message, sender->common.id);

    return res;
}
/*
 * Enqueue a message that arrived on the distribution channel, still in
 * external (undecoded) format, on process 'rcvr'.
 *
 * Normally the external blob is linked into the queue as-is and decoded
 * lazily by the receiver. If the receiver is traced (F_TRACE_RECEIVE)
 * the message must be decoded here so it can be traced; if the receiver
 * is exiting the message is dropped and freed.
 *
 * 'rcvr_locks' names the locks the caller holds; MSGQ is taken here if
 * absent and released again before returning.
 */
void
erts_queue_dist_message(Process *rcvr,
                        ErtsProcLocks *rcvr_locks,
                        ErtsDistExternal *dist_ext,
                        Eterm token)
{
    ErlMessage* mp;
#ifdef USE_VM_PROBES
    Sint tok_label = 0;
    Sint tok_lastcnt = 0;
    Sint tok_serial = 0;
#endif
#ifdef ERTS_SMP
    erts_aint_t state;
#endif

    ERTS_SMP_LC_ASSERT(*rcvr_locks == erts_proc_lc_my_proc_locks(rcvr));

    mp = message_alloc();

#ifdef ERTS_SMP
    /* Acquire MSGQ if we don't hold it, releasing/re-taking STATUS in the
     * correct lock order when the trylock is contended. */
    if (!(*rcvr_locks & ERTS_PROC_LOCK_MSGQ)) {
        if (erts_smp_proc_trylock(rcvr, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
            ErtsProcLocks need_locks = ERTS_PROC_LOCK_MSGQ;
            if (*rcvr_locks & ERTS_PROC_LOCK_STATUS) {
                erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_STATUS);
                need_locks |= ERTS_PROC_LOCK_STATUS;
            }
            erts_smp_proc_lock(rcvr, need_locks);
        }
    }

    state = erts_smp_atomic32_read_acqb(&rcvr->state);
    if (state & (ERTS_PSFLG_PENDING_EXIT|ERTS_PSFLG_EXITING)) {
        if (!(*rcvr_locks & ERTS_PROC_LOCK_MSGQ))
            erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);
        /* Drop message if receiver is exiting or has a pending exit ... */
        if (is_not_nil(token)) {
            ErlHeapFragment *heap_frag;
            /* BUG FIX: previously read mp->data.dist_ext, which is never
             * assigned before this point (mp comes fresh from
             * message_alloc()); the trailer belongs to 'dist_ext'. */
            heap_frag = erts_dist_ext_trailer(dist_ext);
            erts_cleanup_offheap(&heap_frag->off_heap);
        }
        erts_free_dist_ext_copy(dist_ext);
        message_free(mp);
    }
    else
#endif
    if (IS_TRACED_FL(rcvr, F_TRACE_RECEIVE)) {
        /* Ahh... need to decode it in order to trace it... */
        ErlHeapFragment *mbuf;
        Eterm msg;
        if (!(*rcvr_locks & ERTS_PROC_LOCK_MSGQ))
            erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);
        message_free(mp);
        msg = erts_msg_distext2heap(rcvr, rcvr_locks, &mbuf, &token, dist_ext);
        /* BUG FIX: braces added. Without them, when USE_VM_PROBES is
         * defined the DTRACE 'if' became the sole body of this 'if', and
         * erts_queue_message() ran unconditionally — enqueueing
         * THE_NON_VALUE when decoding failed. */
        if (is_value(msg)) {
#ifdef USE_VM_PROBES
            if (DTRACE_ENABLED(message_queued)) {
                DTRACE_CHARBUF(receiver_name, DTRACE_TERM_BUF_SIZE);

                dtrace_proc_str(rcvr, receiver_name);
                if (token != NIL && token != am_have_dt_utag) {
                    tok_label = signed_val(SEQ_TRACE_T_LABEL(token));
                    tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(token));
                    tok_serial = signed_val(SEQ_TRACE_T_SERIAL(token));
                }
                DTRACE6(message_queued,
                        receiver_name, size_object(msg), rcvr->msg.len,
                        tok_label, tok_lastcnt, tok_serial);
            }
#endif
            erts_queue_message(rcvr, rcvr_locks, mbuf, msg, token);
        }
    }
    else {
        /* Enqueue message on external format */
        ERL_MESSAGE_TERM(mp) = THE_NON_VALUE;
#ifdef USE_VM_PROBES
        ERL_MESSAGE_DT_UTAG(mp) = NIL;
        if (token == am_have_dt_utag) {
            ERL_MESSAGE_TOKEN(mp) = NIL;
        } else {
#endif
            ERL_MESSAGE_TOKEN(mp) = token;
#ifdef USE_VM_PROBES
        }
#endif
        mp->next = NULL;

#ifdef USE_VM_PROBES
        if (DTRACE_ENABLED(message_queued)) {
            DTRACE_CHARBUF(receiver_name, DTRACE_TERM_BUF_SIZE);

            dtrace_proc_str(rcvr, receiver_name);
            if (token != NIL && token != am_have_dt_utag) {
                tok_label = signed_val(SEQ_TRACE_T_LABEL(token));
                tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(token));
                tok_serial = signed_val(SEQ_TRACE_T_SERIAL(token));
            }
            /*
             * TODO: We don't know the real size of the external message here.
             *       -1 will appear to a D script as 4294967295.
             */
            DTRACE6(message_queued, receiver_name, -1, rcvr->msg.len + 1,
                    tok_label, tok_lastcnt, tok_serial);
        }
#endif
        mp->data.dist_ext = dist_ext;
        LINK_MESSAGE(rcvr, mp);

        if (!(*rcvr_locks & ERTS_PROC_LOCK_MSGQ))
            erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);

        erts_proc_notify_new_message(rcvr,
#ifdef ERTS_SMP
                                     *rcvr_locks
#else
                                     0
#endif
            );
    }
}
ErlHeapFragment* erts_resize_message_buffer(ErlHeapFragment *bp, Uint size, Eterm *brefs, Uint brefs_size) { #ifdef DEBUG int i; #endif #ifdef HARD_DEBUG ErlHeapFragment *dbg_bp; Eterm *dbg_brefs; Uint dbg_size; Uint dbg_tot_size; Eterm *dbg_hp; #endif ErlHeapFragment* nbp; #ifdef DEBUG { Uint off_sz = size < bp->used_size ? size : bp->used_size; for (i = 0; i < brefs_size; i++) { Eterm *ptr; if (is_immed(brefs[i])) continue; ptr = ptr_val(brefs[i]); ASSERT(&bp->mem[0] <= ptr && ptr < &bp->mem[0] + off_sz); } } #endif if (size >= (bp->used_size - bp->used_size / 16)) { bp->used_size = size; return bp; } #ifdef HARD_DEBUG dbg_brefs = erts_alloc(ERTS_ALC_T_UNDEF, sizeof(Eterm *)*brefs_size); dbg_bp = new_message_buffer(bp->used_size); dbg_hp = dbg_bp->mem; dbg_tot_size = 0; for (i = 0; i < brefs_size; i++) { dbg_size = size_object(brefs[i]); dbg_tot_size += dbg_size; dbg_brefs[i] = copy_struct(brefs[i], dbg_size, &dbg_hp, &dbg_bp->off_heap); } ASSERT(dbg_tot_size == (size < bp->used_size ? size : bp->used_size)); #endif nbp = (ErlHeapFragment*) ERTS_HEAP_REALLOC(ERTS_ALC_T_HEAP_FRAG, (void *) bp, ERTS_HEAP_FRAG_SIZE(bp->alloc_size), ERTS_HEAP_FRAG_SIZE(size)); if (bp != nbp) { Uint off_sz = size < nbp->used_size ? size : nbp->used_size; Eterm *sp = &bp->mem[0]; Eterm *ep = sp + off_sz; Sint offs = &nbp->mem[0] - sp; erts_offset_off_heap(&nbp->off_heap, offs, sp, ep); erts_offset_heap(&nbp->mem[0], off_sz, offs, sp, ep); if (brefs && brefs_size) erts_offset_heap_ptr(brefs, brefs_size, offs, sp, ep); #ifdef DEBUG for (i = 0; i < brefs_size; i++) { Eterm *ptr; if (is_immed(brefs[i])) continue; ptr = ptr_val(brefs[i]); ASSERT(&nbp->mem[0] <= ptr && ptr < &nbp->mem[0] + off_sz); } #endif } nbp->alloc_size = size; nbp->used_size = size; #ifdef HARD_DEBUG for (i = 0; i < brefs_size; i++) ASSERT(eq(dbg_brefs[i], brefs[i])); free_message_buffer(dbg_bp); erts_free(ERTS_ALC_T_UNDEF, dbg_brefs); #endif return nbp; }
static Eterm do_get_all(Process* c_p, TrapData* trap_data, Eterm res) { HashTable* hash_table; Uint remaining; Uint idx; Uint max_iter; Uint i; Eterm* hp; Uint heap_size; struct copy_term { Uint key_size; Eterm* tuple_ptr; } *copy_data; hash_table = trap_data->table; idx = trap_data->idx; #if defined(DEBUG) || defined(VALGRIND) max_iter = 50; #else max_iter = ERTS_BIF_REDS_LEFT(c_p); #endif remaining = trap_data->remaining < max_iter ? trap_data->remaining : max_iter; trap_data->remaining -= remaining; copy_data = (struct copy_term *) erts_alloc(ERTS_ALC_T_TMP, remaining * sizeof(struct copy_term)); i = 0; heap_size = (2 + 3) * remaining; while (remaining != 0) { Eterm term = hash_table->term[idx]; if (is_tuple(term)) { Uint key_size; Eterm* tup_val; ASSERT(is_tuple_arity(term, 2)); tup_val = tuple_val(term); key_size = size_object(tup_val[1]); copy_data[i].key_size = key_size; copy_data[i].tuple_ptr = tup_val; heap_size += key_size; i++; remaining--; } idx++; } trap_data->idx = idx; hp = HAlloc(c_p, heap_size); remaining = i; for (i = 0; i < remaining; i++) { Eterm* tuple_ptr; Uint key_size; Eterm key; Eterm tup; tuple_ptr = copy_data[i].tuple_ptr; key_size = copy_data[i].key_size; key = copy_struct(tuple_ptr[1], key_size, &hp, &c_p->off_heap); tup = TUPLE2(hp, key, tuple_ptr[2]); hp += 3; res = CONS(hp, tup, res); hp += 2; } erts_free(ERTS_ALC_T_TMP, copy_data); return res; }
Sint erts_move_messages_off_heap(Process *c_p) { int reds = 1; /* * Move all messages off heap. This *only* occurs when the * process had off heap message disabled and just enabled * it... */ ErtsMessage *mp; reds += c_p->msg.len / 10; ASSERT(erts_atomic32_read_nob(&c_p->state) & ERTS_PSFLG_OFF_HEAP_MSGQ); ASSERT(c_p->flags & F_OFF_HEAP_MSGQ_CHNG); for (mp = c_p->msg.first; mp; mp = mp->next) { Uint msg_sz, token_sz; #ifdef USE_VM_PROBES Uint utag_sz; #endif Eterm *hp; ErlHeapFragment *hfrag; if (mp->data.attached) continue; if (is_immed(ERL_MESSAGE_TERM(mp)) #ifdef USE_VM_PROBES && is_immed(ERL_MESSAGE_DT_UTAG(mp)) #endif && is_not_immed(ERL_MESSAGE_TOKEN(mp))) continue; /* * The message refers into the heap. Copy the message * from the heap into a heap fragment and attach * it to the message... */ msg_sz = size_object(ERL_MESSAGE_TERM(mp)); #ifdef USE_VM_PROBES utag_sz = size_object(ERL_MESSAGE_DT_UTAG(mp)); #endif token_sz = size_object(ERL_MESSAGE_TOKEN(mp)); hfrag = new_message_buffer(msg_sz #ifdef USE_VM_PROBES + utag_sz #endif + token_sz); hp = hfrag->mem; if (is_not_immed(ERL_MESSAGE_TERM(mp))) ERL_MESSAGE_TERM(mp) = copy_struct(ERL_MESSAGE_TERM(mp), msg_sz, &hp, &hfrag->off_heap); if (is_not_immed(ERL_MESSAGE_TOKEN(mp))) ERL_MESSAGE_TOKEN(mp) = copy_struct(ERL_MESSAGE_TOKEN(mp), token_sz, &hp, &hfrag->off_heap); #ifdef USE_VM_PROBES if (is_not_immed(ERL_MESSAGE_DT_UTAG(mp))) ERL_MESSAGE_DT_UTAG(mp) = copy_struct(ERL_MESSAGE_DT_UTAG(mp), utag_sz, &hp, &hfrag->off_heap); #endif mp->data.heap_frag = hfrag; reds += 1; } return reds; }
void erts_send_message(Process* sender, Process* receiver, ErtsProcLocks *receiver_locks, Eterm message, unsigned flags) { Uint msize; ErlHeapFragment* bp = NULL; Eterm token = NIL; BM_STOP_TIMER(system); BM_MESSAGE(message,sender,receiver); BM_START_TIMER(send); if (SEQ_TRACE_TOKEN(sender) != NIL && !(flags & ERTS_SND_FLG_NO_SEQ_TRACE)) { Eterm* hp; BM_SWAP_TIMER(send,size); msize = size_object(message); BM_SWAP_TIMER(size,send); seq_trace_update_send(sender); seq_trace_output(SEQ_TRACE_TOKEN(sender), message, SEQ_TRACE_SEND, receiver->id, sender); bp = new_message_buffer(msize + 6 /* TUPLE5 */); hp = bp->mem; BM_SWAP_TIMER(send,copy); token = copy_struct(SEQ_TRACE_TOKEN(sender), 6 /* TUPLE5 */, &hp, &bp->off_heap); message = copy_struct(message, msize, &hp, &bp->off_heap); BM_MESSAGE_COPIED(msize); BM_SWAP_TIMER(copy,send); erts_queue_message(receiver, receiver_locks, bp, message, token); BM_SWAP_TIMER(send,system); #ifdef HYBRID } else { ErlMessage* mp = message_alloc(); BM_SWAP_TIMER(send,copy); #ifdef INCREMENTAL /* TODO: During GC activate processes if the message relies in * the fromspace and the sender is active. During major * collections add the message to the gray stack if it relies * in the old generation and the sender is active and the * receiver is inactive. 
if (!IS_CONST(message) && (ma_gc_flags & GC_CYCLE) && (ptr_val(message) >= inc_fromspc && ptr_val(message) < inc_fromend) && INC_IS_ACTIVE(sender)) INC_ACTIVATE(receiver); else if (!IS_CONST(message) && (ma_gc_flags & GC_CYCLE) && (ptr_val(message) >= global_old_heap && ptr_val(message) < global_old_hend) && INC_IS_ACTIVE(sender) && !INC_IS_ACTIVE(receiver)) Mark message in blackmap and add it to the gray stack */ if (!IS_CONST(message)) INC_ACTIVATE(receiver); #endif LAZY_COPY(sender,message); BM_SWAP_TIMER(copy,send); ERL_MESSAGE_TERM(mp) = message; ERL_MESSAGE_TOKEN(mp) = NIL; mp->next = NULL; LINK_MESSAGE(receiver, mp); ACTIVATE(receiver); if (receiver->status == P_WAITING) { erts_add_to_runq(receiver); } else if (receiver->status == P_SUSPENDED) { receiver->rstatus = P_RUNABLE; } if (IS_TRACED_FL(receiver, F_TRACE_RECEIVE)) { trace_receive(receiver, message); } BM_SWAP_TIMER(send,system); return; #else } else if (sender == receiver) {