void erts_deliver_exit_message(Eterm from, Process *to, ErtsProcLocks *to_locksp, Eterm reason, Eterm token) { Eterm mess; Eterm save; Eterm from_copy; Uint sz_reason; Uint sz_token; Uint sz_from; Eterm* hp; Eterm temptoken; ErlHeapFragment* bp = NULL; if (token != NIL #ifdef USE_VM_PROBES && token != am_have_dt_utag #endif ) { ASSERT(is_tuple(token)); sz_reason = size_object(reason); sz_token = size_object(token); sz_from = size_object(from); bp = new_message_buffer(sz_reason + sz_from + sz_token + 4); hp = bp->mem; mess = copy_struct(reason, sz_reason, &hp, &bp->off_heap); from_copy = copy_struct(from, sz_from, &hp, &bp->off_heap); save = TUPLE3(hp, am_EXIT, from_copy, mess); hp += 4; /* the trace token must in this case be updated by the caller */ seq_trace_output(token, save, SEQ_TRACE_SEND, to->common.id, NULL); temptoken = copy_struct(token, sz_token, &hp, &bp->off_heap); erts_queue_message(to, to_locksp, bp, save, temptoken); } else { ErlOffHeap *ohp; sz_reason = size_object(reason); sz_from = IS_CONST(from) ? 0 : size_object(from); hp = erts_alloc_message_heap(sz_reason+sz_from+4, &bp, &ohp, to, to_locksp); mess = copy_struct(reason, sz_reason, &hp, ohp); from_copy = (IS_CONST(from) ? from : copy_struct(from, sz_from, &hp, ohp)); save = TUPLE3(hp, am_EXIT, from_copy, mess); erts_queue_message(to, to_locksp, bp, save, NIL); } }
static widget_handler_status_T
init_listbox(struct dialog_data *dlg_data, struct widget_data *widget_data)
{
    struct hierbox_browser *browser = (struct hierbox_browser *)dlg_data->dlg->udata2;
    struct listbox_data *box = get_listbox_widget_data(widget_data);

    /* Try to restore the position from last time */
    if (!list_empty(browser->root.child) && browser->box_data.items) {
        copy_struct(box, &browser->box_data);
        traverse_listbox_items_list((struct listbox_item *)browser->root.child.next,
                                    box, 0, 0, check_old_state, box);
        box->sel = (!box->sel) ? browser->box_data.sel : NULL;
        box->top = (!box->top) ? browser->box_data.top : NULL;
        if (!box->sel) box->sel = box->top;
        if (!box->top) box->top = box->sel;
    }

    box->ops = browser->ops;
    box->items = &browser->root.child;
    add_to_list(browser->boxes, box);

    return EVENT_PROCESSED;
}
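/* The ELinks snippets in this collection (init_listbox, deflate_open, copy_opt,
 * setraw, ...) call copy_struct(dst, src) with two pointers to objects of the
 * same type. The helper itself is not included here; a minimal sketch of what
 * such a macro could look like (an assumption for illustration, not the
 * project's verbatim definition) is: */
#include <string.h>

/* Copy the whole structure *src over *dst; sizeof(*(dst)) keeps the copy size
 * tied to the destination's type. */
#define copy_struct(dst, src) \
    memcpy((dst), (src), sizeof(*(dst)))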
BIF_RETTYPE port_set_data_2(BIF_ALIST_2) { Port* prt; Eterm portid = BIF_ARG_1; Eterm data = BIF_ARG_2; prt = id_or_name2port(BIF_P, portid); if (!prt) { BIF_ERROR(BIF_P, BADARG); } if (prt->bp != NULL) { free_message_buffer(prt->bp); prt->bp = NULL; } if (IS_CONST(data)) { prt->data = data; } else { Uint size; ErlHeapFragment* bp; Eterm* hp; size = size_object(data); prt->bp = bp = new_message_buffer(size); hp = bp->mem; prt->data = copy_struct(data, size, &hp, &bp->off_heap); } erts_smp_port_unlock(prt); BIF_RET(am_true); }
BIF_RETTYPE port_get_data_1(BIF_ALIST_1) { /* * This is not a signal. See comment above. */ Eterm res; erts_aint_t data; Port* prt; prt = data_lookup_port(BIF_P, BIF_ARG_1); if (!prt) BIF_ERROR(BIF_P, BADARG); data = erts_smp_atomic_read_ddrb(&prt->data); if (data == (erts_aint_t)NULL) BIF_ERROR(BIF_P, BADARG); /* Port terminated by racing thread */ if ((data & 0x3) != 0) { res = (Eterm) (UWord) data; ASSERT(is_immed(res)); } else { ErtsPortDataHeap *pdhp = (ErtsPortDataHeap *) data; Eterm *hp = HAlloc(BIF_P, pdhp->hsize); res = copy_struct(pdhp->data, pdhp->hsize, &hp, &MSO(BIF_P)); } BIF_RET(res); }
static int
deflate_open(int window_size, struct stream_encoded *stream, int fd)
{
    /* A zero-initialized z_stream. The compiler ensures that all
     * pointer members in it are null. (Can't do this with memset
     * because C99 does not require all-bits-zero to be a null
     * pointer.) */
    static const z_stream null_z_stream = {0};
    int err;
    struct deflate_enc_data *data = mem_alloc(sizeof(*data));

    stream->data = NULL;
    if (!data) {
        return -1;
    }

    /* Initialize all members of *data, except data->buf[], which
     * will be initialized on demand by deflate_read. */
    copy_struct(&data->deflate_stream, &null_z_stream);
    data->fdread = fd;
    data->last_read = 0;
    data->after_first_read = 0;
    data->after_end = 0;

    err = inflateInit2(&data->deflate_stream, window_size);
    if (err != Z_OK) {
        mem_free(data);
        return -1;
    }

    stream->data = data;
    return 0;
}
/** If @a loaded_in_frame is set, this was called just to indicate a move inside * a frameset, and we basically just reset the appropriate frame's view_state in * that case. When clicking on a link inside a frame, the frame URI is somehow * updated and added to the files-to-load queue, then ses_forward() is called * with @a loaded_in_frame unset, duplicating the whole frameset's location, * then later the file-to-load callback calls it for the particular frame with * @a loaded_in_frame set. */ struct view_state * ses_forward(struct session *ses, int loaded_in_frame) { struct location *loc = NULL; struct view_state *vs; if (!loaded_in_frame) { free_files(ses); mem_free_set(&ses->search_word, NULL); } x: if (!loaded_in_frame) { loc = mem_calloc(1, sizeof(*loc)); if (!loc) return NULL; copy_struct(&loc->download, &ses->loading); } if (ses->task.target.frame && *ses->task.target.frame) { struct frame *frame; assertm(have_location(ses), "no location yet"); if_assert_failed return NULL; if (!loaded_in_frame) { copy_location(loc, cur_loc(ses)); add_to_history(&ses->history, loc); } frame = ses_find_frame(ses, ses->task.target.frame); if (!frame) { if (!loaded_in_frame) { del_from_history(&ses->history, loc); destroy_location(loc); } mem_free_set(&ses->task.target.frame, NULL); goto x; } vs = &frame->vs; if (!loaded_in_frame) { destroy_vs(vs, 1); init_vs(vs, ses->loading_uri, vs->plain); } else { done_uri(vs->uri); vs->uri = get_uri_reference(ses->loading_uri); if (vs->doc_view) { /* vs->doc_view itself will get detached in * render_document_frames(), but that's too * late for us. */ vs->doc_view->vs = NULL; vs->doc_view = NULL; } #ifdef CONFIG_ECMASCRIPT vs->ecmascript_fragile = 1; #endif } } else {
NONSTATIC_INLINE void
copy_opt(struct document_options *o1, struct document_options *o2)
{
    copy_struct(o1, o2);
    o1->framename = stracpy(o2->framename);
    o1->image_link.prefix = stracpy(get_opt_str("document.browse.images.image_link_prefix", NULL));
    o1->image_link.suffix = stracpy(get_opt_str("document.browse.images.image_link_suffix", NULL));
}
ERL_NIF_TERM enif_make_copy(ErlNifEnv* dst_env, ERL_NIF_TERM src_term)
{
    Uint sz;
    Eterm* hp;

    sz = size_object(src_term);
    hp = alloc_heap(dst_env, sz);
    return copy_struct(src_term, sz, &hp, &MSO(dst_env->proc));
}
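/* enif_make_copy() above deep-copies a term into the heap owned by dst_env.
 * A minimal usage sketch (hypothetical NIF; the function and variable names
 * are invented for illustration): copy a caller-supplied term into a
 * process-independent environment so it survives after the calling
 * environment is invalidated. Not thread-safe; it only illustrates the
 * enif_alloc_env/enif_make_copy pairing. */
#include <erl_nif.h>

static ErlNifEnv *saved_env = NULL;
static ERL_NIF_TERM saved_term;

static ERL_NIF_TERM
remember_term(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[])
{
    if (saved_env)
        enif_free_env(saved_env);
    saved_env = enif_alloc_env();                     /* process-independent heap */
    saved_term = enif_make_copy(saved_env, argv[0]);  /* deep copy into that heap */
    return enif_make_atom(env, "ok");
}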
static inline void parse_sgml_attributes(struct dom_stack *stack, struct dom_scanner *scanner) { struct dom_scanner_token name; assert(dom_scanner_has_tokens(scanner) && (get_dom_scanner_token(scanner)->type == SGML_TOKEN_ELEMENT_BEGIN || (get_dom_stack_top(stack)->node->type == DOM_NODE_PROCESSING_INSTRUCTION))); if (get_dom_scanner_token(scanner)->type == SGML_TOKEN_ELEMENT_BEGIN) skip_dom_scanner_token(scanner); while (dom_scanner_has_tokens(scanner)) { struct dom_scanner_token *token = get_dom_scanner_token(scanner); assert(token); switch (token->type) { case SGML_TOKEN_TAG_END: skip_dom_scanner_token(scanner); /* and return */ case SGML_TOKEN_ELEMENT: case SGML_TOKEN_ELEMENT_BEGIN: case SGML_TOKEN_ELEMENT_END: case SGML_TOKEN_ELEMENT_EMPTY_END: return; case SGML_TOKEN_IDENT: copy_struct(&name, token); /* Skip the attribute name token */ token = get_next_dom_scanner_token(scanner); if (token && token->type == '=') { /* If the token is not a valid value token * ignore it. */ token = get_next_dom_scanner_token(scanner); if (token && token->type != SGML_TOKEN_IDENT && token->type != SGML_TOKEN_ATTRIBUTE && token->type != SGML_TOKEN_STRING) token = NULL; } else { token = NULL; } add_sgml_attribute(stack, &name, token); /* Skip the value token */ if (token) skip_dom_scanner_token(scanner); break; default: skip_dom_scanner_token(scanner); } } }
Eterm erts_msg_distext2heap(Process *pp, ErtsProcLocks *plcksp, ErlHeapFragment **bpp, Eterm *tokenp, ErtsDistExternal *dist_extp) { Eterm msg; Uint tok_sz = 0; Eterm *hp = NULL; ErtsHeapFactory factory; Sint sz; *bpp = NULL; sz = erts_decode_dist_ext_size(dist_extp); if (sz < 0) goto decode_error; if (is_not_nil(*tokenp)) { ErlHeapFragment *heap_frag = erts_dist_ext_trailer(dist_extp); tok_sz = heap_frag->used_size; sz += tok_sz; } if (pp) { ErlOffHeap *ohp; hp = erts_alloc_message_heap(sz, bpp, &ohp, pp, plcksp); } else { *bpp = new_message_buffer(sz); hp = (*bpp)->mem; } erts_factory_message_init(&factory, pp, hp, *bpp); msg = erts_decode_dist_ext(&factory, dist_extp); if (is_non_value(msg)) goto decode_error; if (is_not_nil(*tokenp)) { ErlHeapFragment *heap_frag = erts_dist_ext_trailer(dist_extp); hp = erts_produce_heap(&factory, tok_sz, 0); *tokenp = copy_struct(*tokenp, tok_sz, &hp, factory.off_heap); erts_cleanup_offheap(&heap_frag->off_heap); } erts_free_dist_ext_copy(dist_extp); erts_factory_close(&factory); return msg; decode_error: if (is_not_nil(*tokenp)) { ErlHeapFragment *heap_frag = erts_dist_ext_trailer(dist_extp); erts_cleanup_offheap(&heap_frag->off_heap); } erts_free_dist_ext_copy(dist_extp); *bpp = NULL; return THE_NON_VALUE; }
static void hfrag_literal_copy(Eterm **hpp, ErlOffHeap *ohp, Eterm *start, Eterm *end, char *lit_start, Uint lit_size) { Eterm* p; Eterm val; Uint sz; for (p = start; p < end; p++) { val = *p; switch (primary_tag(val)) { case TAG_PRIMARY_BOXED: case TAG_PRIMARY_LIST: if (ErtsInArea(val, lit_start, lit_size)) { sz = size_object(val); val = copy_struct(val, sz, hpp, ohp); *p = val; } break; case TAG_PRIMARY_HEADER: if (!header_is_transparent(val)) { Eterm* new_p; /* matchstate in message, not possible. */ if (header_is_bin_matchstate(val)) { ErlBinMatchState *ms = (ErlBinMatchState*) p; ErlBinMatchBuffer *mb = &(ms->mb); if (ErtsInArea(mb->orig, lit_start, lit_size)) { sz = size_object(mb->orig); mb->orig = copy_struct(mb->orig, sz, hpp, ohp); } } new_p = p + thing_arityval(val); ASSERT(start <= new_p && new_p < end); p = new_p; } } } }
/*
 * Copy object "obj" to process p.
 */
Eterm
copy_object(Eterm obj, Process* to)
{
    Uint size = size_object(obj);
    Eterm* hp = HAlloc(to, size);
    Eterm res;

    res = copy_struct(obj, size, &hp, &to->off_heap);
#ifdef DEBUG
    if (eq(obj, res) == 0) {
        erl_exit(ERTS_ABORT_EXIT, "copy not equal to source\n");
    }
#endif
    return res;
}
/*
 * InitBuffers()
 *
 * This function initializes the message buffers.
 */
void InitBuffers()
{
    static SListBase MsgBase = { NULL, ChkCC, NULL, free, strdup };

    copy_struct(MsgBase, msgBuf.mbCC);
    copy_struct(MsgBase, msgBuf.mbOverride);
    copy_struct(MsgBase, msgBuf.mbInternal);
    copy_struct(MsgBase, msgBuf.mbForeign);
    copy_struct(MsgBase, tempMess.mbOverride);
    copy_struct(MsgBase, tempMess.mbCC);
    copy_struct(MsgBase, tempMess.mbInternal);
    copy_struct(MsgBase, tempMess.mbForeign);
    msgBuf.mbtext = GetDynamic(MAXTEXT);
    tempMess.mbtext = GetDynamic(MAXTEXT);
}
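/* Unlike the pointer-based ELinks calls elsewhere in this collection,
 * InitBuffers() passes struct lvalues to copy_struct(src, dst) directly, with
 * the source first. That codebase's macro is not shown here; one definition
 * consistent with this calling convention (an assumption, given only to make
 * the argument order explicit) is: */
#define copy_struct(s, d)  memcpy(&(d), &(s), sizeof(d))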
/**
 * Print out the Sesame Street letter and number of the day, making
 * sure to free the allocated memory when you're done with it.
 *
 * This code asks for a day, letter, and number, and then prints
 * them out as "<day> is sponsored by the letter <letter> and the number <number>".
 *
 * This code should work, but doesn't, because some of the helper
 * functions are broken.
 */
int main()
{
    sponsorships *temp_day, *today;

    temp_day = init_day();
    today = init_day();
    printf("day letter number: ");
    scanf("%s %c %d", temp_day->day, &(temp_day->letter), &(temp_day->number));
    copy_struct(today, temp_day);
    destroy(temp_day);
    print_struct(today);
    destroy(today);
    return 0;
}
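/* The helpers used by main() above (init_day, copy_struct, print_struct,
 * destroy) are the parts described as broken and are not shown. One plausible
 * shape for them, assuming a struct layout inferred from the scanf() format
 * string (all of this is illustrative, not the original code), is: */
#include <stdio.h>
#include <stdlib.h>

typedef struct sponsorships {
    char day[32];   /* read with %s */
    char letter;    /* read with %c */
    int  number;    /* read with %d */
} sponsorships;

sponsorships *init_day(void)
{
    return calloc(1, sizeof(sponsorships));   /* zeroed; caller must destroy() */
}

void copy_struct(sponsorships *dst, const sponsorships *src)
{
    *dst = *src;   /* plain struct assignment copies every field */
}

void print_struct(const sponsorships *s)
{
    printf("%s is sponsored by the letter %c and the number %d\n",
           s->day, s->letter, s->number);
}

void destroy(sponsorships *s)
{
    free(s);
}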
/** Decode an escape sequence that begins with SS3 (SINGLE SHIFT 3). * These are used for application cursor keys and the application keypad. * @returns one of: * - -1 if the escape sequence is not yet complete; the caller sets a timer. * - 0 if the escape sequence should be parsed by some other function. * - The length of the escape sequence otherwise. * Returning >0 does not imply this function has altered @a *ev. */ static int decode_terminal_application_key(struct itrm *itrm, struct interlink_event *ev) { unsigned char c; struct interlink_event_keyboard kbd = { KBD_UNDEF, KBD_MOD_NONE }; assert(itrm->in.queue.len >= 2); assert(itrm->in.queue.data[0] == ASCII_ESC); assert(itrm->in.queue.data[1] == 0x4F); /* == 'O', incidentally */ if_assert_failed return 0; if (itrm->in.queue.len < 3) return -1; /* According to ECMA-35 section 8.4, a single (possibly multibyte) * character follows the SS3. We now assume the code identifies * GL as the single-shift area and the designated set has 94 * characters. */ c = itrm->in.queue.data[2]; if (c < 0x21 || c > 0x7E) return 0; switch (c) { /* Terminfo $TERM */ case ' ': kbd.key = ' '; break; /* xterm */ case 'A': kbd.key = KBD_UP; break; /* kcuu1 vt100 */ case 'B': kbd.key = KBD_DOWN; break; /* kcud1 vt100 */ case 'C': kbd.key = KBD_RIGHT; break; /* kcuf1 vt100 */ case 'D': kbd.key = KBD_LEFT; break; /* kcub1 vt100 */ case 'F': kbd.key = KBD_END; break; /* kend xterm */ case 'H': kbd.key = KBD_HOME; break; /* khome xterm */ case 'I': kbd.key = KBD_TAB; break; /* xterm */ case 'M': kbd.key = KBD_ENTER; break; /* kent vt100 */ /* FIXME: xterm generates ESC O 2 P for Shift-PF1 */ case 'P': kbd.key = KBD_F1; break; /* kf1 vt100 */ case 'Q': kbd.key = KBD_F2; break; /* kf2 vt100 */ case 'R': kbd.key = KBD_F3; break; /* kf3 vt100 */ case 'S': kbd.key = KBD_F4; break; /* kf4 vt100 */ case 'X': kbd.key = '='; break; /* xterm */ case 'j': case 'k': case 'l': case 'm': /* *+,- xterm */ case 'n': case 'o': case 'p': case 'q': /* ./01 xterm */ case 'r': case 's': case 't': case 'u': /* 2345 xterm */ case 'v': case 'w': case 'x': case 'y': /* 6789 xterm */ kbd.key = c - 'p' + '0'; break; } if (kbd.key != KBD_UNDEF) copy_struct(&ev->info.keyboard, &kbd); return 3; /* even if we didn't recognize it */ }
BIF_RETTYPE port_set_data_2(BIF_ALIST_2) { /* * This is not a signal. See comment above. */ erts_aint_t data; Port* prt; prt = data_lookup_port(BIF_P, BIF_ARG_1); if (!prt) BIF_ERROR(BIF_P, BADARG); if (is_immed(BIF_ARG_2)) { data = (erts_aint_t) BIF_ARG_2; ASSERT((data & 0x3) != 0); } else { ErtsPortDataHeap *pdhp; Uint hsize; Eterm *hp; hsize = size_object(BIF_ARG_2); pdhp = erts_alloc(ERTS_ALC_T_PORT_DATA_HEAP, sizeof(ErtsPortDataHeap) + (hsize-1)*sizeof(Eterm)); hp = &pdhp->heap[0]; pdhp->off_heap.first = NULL; pdhp->off_heap.overhead = 0; pdhp->hsize = hsize; pdhp->data = copy_struct(BIF_ARG_2, hsize, &hp, &pdhp->off_heap); data = (erts_aint_t) pdhp; ASSERT((data & 0x3) == 0); } data = erts_smp_atomic_xchg_wb(&prt->data, data); if (data == (erts_aint_t)NULL) { /* Port terminated by racing thread */ data = erts_smp_atomic_xchg_wb(&prt->data, data); ASSERT(data != (erts_aint_t)NULL); cleanup_old_port_data(data); BIF_ERROR(BIF_P, BADARG); } cleanup_old_port_data(data); BIF_RET(am_true); }
BIF_RETTYPE port_get_data_1(BIF_ALIST_1)
{
    BIF_RETTYPE res;
    Port* prt;
    Eterm portid = BIF_ARG_1;

    prt = id_or_name2port(BIF_P, portid);
    if (!prt) {
        BIF_ERROR(BIF_P, BADARG);
    }
    if (prt->bp == NULL) {      /* MUST be CONST! */
        res = prt->data;
    } else {
        Eterm* hp = HAlloc(BIF_P, prt->bp->used_size);
        res = copy_struct(prt->data, prt->bp->used_size, &hp, &MSO(BIF_P));
    }
    erts_smp_port_unlock(prt);
    BIF_RET(res);
}
static void insert_node_referrer(ReferredNode *referred_node, int type, Eterm id) { NodeReferrer *nrp; for(nrp = referred_node->referrers; nrp; nrp = nrp->next) if(EQ(id, nrp->id)) break; if(!nrp) { nrp = (NodeReferrer *) erts_alloc(ERTS_ALC_T_NC_TMP, sizeof(NodeReferrer)); nrp->next = referred_node->referrers; referred_node->referrers = nrp; if(IS_CONST(id)) nrp->id = id; else { Uint *hp = &nrp->id_heap[0]; ASSERT(is_big(id) || is_tuple(id)); nrp->id = copy_struct(id, size_object(id), &hp, NULL); } nrp->heap_ref = 0; nrp->link_ref = 0; nrp->monitor_ref = 0; nrp->ets_ref = 0; nrp->bin_ref = 0; nrp->timer_ref = 0; nrp->system_ref = 0; } switch (type) { case HEAP_REF: nrp->heap_ref++; break; case LINK_REF: nrp->link_ref++; break; case ETS_REF: nrp->ets_ref++; break; case BIN_REF: nrp->bin_ref++; break; case MONITOR_REF: nrp->monitor_ref++; break; case TIMER_REF: nrp->timer_ref++; break; case SYSTEM_REF: nrp->system_ref++; break; default: ASSERT(0); } }
static void insert_dist_referrer(ReferredDist *referred_dist, int type, Eterm id, Uint creation) { DistReferrer *drp; for(drp = referred_dist->referrers; drp; drp = drp->next) if(id == drp->id && (type == CTRL_REF || creation == drp->creation)) break; if(!drp) { drp = (DistReferrer *) erts_alloc(ERTS_ALC_T_NC_TMP, sizeof(DistReferrer)); drp->next = referred_dist->referrers; referred_dist->referrers = drp; if(IS_CONST(id)) drp->id = id; else { Uint *hp = &drp->id_heap[0]; ASSERT(is_tuple(id)); drp->id = copy_struct(id, size_object(id), &hp, NULL); } drp->creation = creation; drp->heap_ref = 0; drp->node_ref = 0; drp->ctrl_ref = 0; drp->system_ref = 0; } switch (type) { case NODE_REF: drp->node_ref++; break; case CTRL_REF: drp->ctrl_ref++; break; case HEAP_REF: drp->heap_ref++; break; case SYSTEM_REF: drp->system_ref++; break; default: ASSERT(0); } }
/*
 * Copy object "obj" to process p.
 */
Eterm
copy_object(Eterm obj, Process* to)
{
    Uint size = size_object(obj);
    Eterm* hp = HAlloc(to, size);
    Eterm res;

#ifdef USE_VM_PROBES
    if (DTRACE_ENABLED(copy_object)) {
        DTRACE_CHARBUF(proc_name, 64);

        erts_snprintf(proc_name, sizeof(proc_name), "%T", to->common.id);
        DTRACE2(copy_object, proc_name, size);
    }
#endif
    res = copy_struct(obj, size, &hp, &to->off_heap);
#ifdef DEBUG
    if (eq(obj, res) == 0) {
        erl_exit(ERTS_ABORT_EXIT, "copy not equal to source\n");
    }
#endif
    return res;
}
static void sort_and_display_entries(FSP_DIR *dir, const unsigned char dircolor[]) { /* fsp_readdir_native in fsplib 0.9 and earlier requires * the third parameter to point to a non-null pointer * even though it does not dereference that pointer * and overwrites it with another one anyway. * http://sourceforge.net/tracker/index.php?func=detail&aid=1875210&group_id=93841&atid=605738 * Work around the bug by using non-null &tmp. * Nothing will actually read or write tmp. */ FSP_RDENTRY fentry, tmp, *table = NULL; FSP_RDENTRY *fresult = &tmp; int size = 0; int i; while (!fsp_readdir_native(dir, &fentry, &fresult)) { FSP_RDENTRY *new_table; if (!fresult) break; if (!strcmp(fentry.name, ".")) continue; new_table = mem_realloc(table, (size + 1) * sizeof(*table)); if (!new_table) continue; table = new_table; copy_struct(&table[size], &fentry); size++; } /* If size==0, then table==NULL. According to ISO/IEC 9899:1999 * 7.20.5p1, the NULL must not be given to qsort. */ if (size > 0) qsort(table, size, sizeof(*table), compare); for (i = 0; i < size; i++) { display_entry(&table[i], dircolor); } }
static int
setraw(struct itrm *itrm, int save_orig)
{
    struct termios t;
    long vdisable = -1;

    memset(&t, 0, sizeof(t));
    if (tcgetattr(itrm->in.ctl, &t)) return -1;

    if (save_orig) copy_struct(&itrm->t, &t);

#ifdef _POSIX_VDISABLE
    vdisable = _POSIX_VDISABLE;
#elif defined(HAVE_FPATHCONF)
    vdisable = fpathconf(itrm->in.ctl, _PC_VDISABLE);
#endif

#ifdef VERASE
    /* Is VERASE defined on Windows? */
    if (vdisable != -1 && t.c_cc[VERASE] == vdisable)
        itrm->verase = -1;
    else
        itrm->verase = (unsigned char) t.c_cc[VERASE];
#else
    itrm->verase = -1;
#endif

    elinks_cfmakeraw(&t);
    t.c_lflag |= ISIG;
#ifdef TOSTOP
    t.c_lflag |= TOSTOP;
#endif
    t.c_oflag |= OPOST;
    if (tcsetattr(itrm->in.ctl, TCSANOW, &t)) return -1;

    return 0;
}
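/* setraw() above relies on elinks_cfmakeraw(), which is not part of this
 * collection. It presumably mirrors the classic cfmakeraw(3) behaviour; a
 * sketch of that documented behaviour (an assumption about the helper, named
 * _sketch to avoid claiming it is the project's version) is: */
#include <termios.h>

static void
elinks_cfmakeraw_sketch(struct termios *t)
{
    t->c_iflag &= ~(IGNBRK | BRKINT | PARMRK | ISTRIP
                    | INLCR | IGNCR | ICRNL | IXON);
    t->c_oflag &= ~OPOST;
    t->c_lflag &= ~(ECHO | ECHONL | ICANON | ISIG | IEXTEN);
    t->c_cflag &= ~(CSIZE | PARENB);
    t->c_cflag |= CS8;
    t->c_cc[VMIN] = 1;   /* read() returns after at least one byte... */
    t->c_cc[VTIME] = 0;  /* ...with no inter-byte timeout */
}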
void
erts_move_msg_attached_data_to_heap(ErtsHeapFactory* factory, ErlMessage *msg)
{
    if (is_value(ERL_MESSAGE_TERM(msg)))
        erts_move_msg_mbuf_to_heap(&factory->hp, factory->off_heap, msg);
    else if (msg->data.dist_ext) {
        ASSERT(msg->data.dist_ext->heap_size >= 0);
        if (is_not_nil(ERL_MESSAGE_TOKEN(msg))) {
            ErlHeapFragment *heap_frag;
            heap_frag = erts_dist_ext_trailer(msg->data.dist_ext);
            ERL_MESSAGE_TOKEN(msg) = copy_struct(ERL_MESSAGE_TOKEN(msg),
                                                 heap_frag->used_size,
                                                 &factory->hp,
                                                 factory->off_heap);
            erts_cleanup_offheap(&heap_frag->off_heap);
        }
        ERL_MESSAGE_TERM(msg) = erts_decode_dist_ext(factory, msg->data.dist_ext);
        erts_free_dist_ext_copy(msg->data.dist_ext);
        msg->data.dist_ext = NULL;
    }
    /* else: bad external detected when calculating size */
}
Sint
erts_move_messages_off_heap(Process *c_p)
{
    int reds = 1;
    /*
     * Move all messages off heap. This *only* occurs when the
     * process had the off-heap message queue disabled and just
     * enabled it...
     */
    ErtsMessage *mp;

    reds += c_p->msg.len / 10;

    ASSERT(erts_atomic32_read_nob(&c_p->state)
           & ERTS_PSFLG_OFF_HEAP_MSGQ);
    ASSERT(c_p->flags & F_OFF_HEAP_MSGQ_CHNG);

    for (mp = c_p->msg.first; mp; mp = mp->next) {
        Uint msg_sz, token_sz;
#ifdef USE_VM_PROBES
        Uint utag_sz;
#endif
        Eterm *hp;
        ErlHeapFragment *hfrag;

        if (mp->data.attached)
            continue;

        /* Skip the message only when no part of it refers into the heap. */
        if (is_immed(ERL_MESSAGE_TERM(mp))
#ifdef USE_VM_PROBES
            && is_immed(ERL_MESSAGE_DT_UTAG(mp))
#endif
            && is_immed(ERL_MESSAGE_TOKEN(mp)))
            continue;

        /*
         * The message refers into the heap. Copy the message
         * from the heap into a heap fragment and attach
         * it to the message...
         */
        msg_sz = size_object(ERL_MESSAGE_TERM(mp));
#ifdef USE_VM_PROBES
        utag_sz = size_object(ERL_MESSAGE_DT_UTAG(mp));
#endif
        token_sz = size_object(ERL_MESSAGE_TOKEN(mp));

        hfrag = new_message_buffer(msg_sz
#ifdef USE_VM_PROBES
                                   + utag_sz
#endif
                                   + token_sz);
        hp = hfrag->mem;

        if (is_not_immed(ERL_MESSAGE_TERM(mp)))
            ERL_MESSAGE_TERM(mp) = copy_struct(ERL_MESSAGE_TERM(mp),
                                               msg_sz, &hp,
                                               &hfrag->off_heap);
        if (is_not_immed(ERL_MESSAGE_TOKEN(mp)))
            ERL_MESSAGE_TOKEN(mp) = copy_struct(ERL_MESSAGE_TOKEN(mp),
                                                token_sz, &hp,
                                                &hfrag->off_heap);
#ifdef USE_VM_PROBES
        if (is_not_immed(ERL_MESSAGE_DT_UTAG(mp)))
            ERL_MESSAGE_DT_UTAG(mp) = copy_struct(ERL_MESSAGE_DT_UTAG(mp),
                                                  utag_sz, &hp,
                                                  &hfrag->off_heap);
#endif
        mp->data.heap_frag = hfrag;
        reds += 1;
    }

    return reds;
}
static BIF_RETTYPE dirty_test(Process *c_p, Eterm type, Eterm arg1, Eterm arg2, UWord *I) { BIF_RETTYPE ret; if (am_scheduler == arg1) { ErtsSchedulerData *esdp; if (arg2 != am_type) goto badarg; esdp = erts_proc_sched_data(c_p); if (!esdp) goto scheduler_type_error; switch (esdp->type) { case ERTS_SCHED_NORMAL: ERTS_BIF_PREP_RET(ret, am_normal); break; case ERTS_SCHED_DIRTY_CPU: ERTS_BIF_PREP_RET(ret, am_dirty_cpu); break; case ERTS_SCHED_DIRTY_IO: ERTS_BIF_PREP_RET(ret, am_dirty_io); break; default: scheduler_type_error: ERTS_BIF_PREP_RET(ret, am_error); break; } } else if (am_error == arg1) { switch (arg2) { case am_notsup: ERTS_BIF_PREP_ERROR(ret, c_p, EXC_NOTSUP); break; case am_undef: ERTS_BIF_PREP_ERROR(ret, c_p, EXC_UNDEF); break; case am_badarith: ERTS_BIF_PREP_ERROR(ret, c_p, EXC_BADARITH); break; case am_noproc: ERTS_BIF_PREP_ERROR(ret, c_p, EXC_NOPROC); break; case am_system_limit: ERTS_BIF_PREP_ERROR(ret, c_p, SYSTEM_LIMIT); break; case am_badarg: default: goto badarg; } } else if (am_copy == arg1) { int i; Eterm res; for (res = NIL, i = 0; i < 1000; i++) { Eterm *hp, sz; Eterm cpy; /* We do not want this to be optimized, but rather the oposite... */ sz = size_object(arg2); hp = HAlloc(c_p, sz); cpy = copy_struct(arg2, sz, &hp, &c_p->off_heap); hp = HAlloc(c_p, 2); res = CONS(hp, cpy, res); } ERTS_BIF_PREP_RET(ret, res); } else if (am_send == arg1) { dirty_send_message(c_p, arg2, am_ok); ERTS_BIF_PREP_RET(ret, am_ok); } else if (ERTS_IS_ATOM_STR("wait", arg1)) { if (!ms_wait(c_p, arg2, type == am_dirty_cpu)) goto badarg; ERTS_BIF_PREP_RET(ret, am_ok); } else if (ERTS_IS_ATOM_STR("reschedule", arg1)) { /* * Reschedule operation after decrement of two until we reach * zero. Switch between dirty scheduler types when 'n' is * evenly divided by 4. If the initial value wasn't evenly * dividable by 2, throw badarg exception. 
*/ Eterm next_type; Sint n; if (!term_to_Sint(arg2, &n) || n < 0) goto badarg; if (n == 0) ERTS_BIF_PREP_RET(ret, am_ok); else { Eterm argv[3]; Eterm eint = erts_make_integer((Uint) (n - 2), c_p); if (n % 4 != 0) next_type = type; else { switch (type) { case am_dirty_cpu: next_type = am_dirty_io; break; case am_dirty_io: next_type = am_normal; break; case am_normal: next_type = am_dirty_cpu; break; default: goto badarg; } } switch (next_type) { case am_dirty_io: argv[0] = arg1; argv[1] = eint; ret = erts_schedule_bif(c_p, argv, I, erts_debug_dirty_io_2, ERTS_SCHED_DIRTY_IO, am_erts_debug, am_dirty_io, 2); break; case am_dirty_cpu: argv[0] = arg1; argv[1] = eint; ret = erts_schedule_bif(c_p, argv, I, erts_debug_dirty_cpu_2, ERTS_SCHED_DIRTY_CPU, am_erts_debug, am_dirty_cpu, 2); break; case am_normal: argv[0] = am_normal; argv[1] = arg1; argv[2] = eint; ret = erts_schedule_bif(c_p, argv, I, erts_debug_dirty_3, ERTS_SCHED_NORMAL, am_erts_debug, am_dirty, 3); break; default: goto badarg; } } } else if (ERTS_IS_ATOM_STR("ready_wait6_done", arg1)) { ERTS_DECL_AM(ready); ERTS_DECL_AM(done); dirty_send_message(c_p, arg2, AM_ready); ms_wait(c_p, make_small(6000), 0); dirty_send_message(c_p, arg2, AM_done); ERTS_BIF_PREP_RET(ret, am_ok); } else if (ERTS_IS_ATOM_STR("alive_waitexiting", arg1)) { Process *real_c_p = erts_proc_shadow2real(c_p); Eterm *hp, *hp2; Uint sz; int i; ErtsSchedulerData *esdp = erts_proc_sched_data(c_p); int dirty_io = esdp->type == ERTS_SCHED_DIRTY_IO; if (ERTS_PROC_IS_EXITING(real_c_p)) goto badarg; dirty_send_message(c_p, arg2, am_alive); /* Wait until dead */ while (!ERTS_PROC_IS_EXITING(real_c_p)) { if (dirty_io) ms_wait(c_p, make_small(100), 0); else erts_thr_yield(); } ms_wait(c_p, make_small(1000), 0); /* Should still be able to allocate memory */ hp = HAlloc(c_p, 3); /* Likely on heap */ sz = 10000; hp2 = HAlloc(c_p, sz); /* Likely in heap fragment */ *hp2 = make_pos_bignum_header(sz); for (i = 1; i < sz; i++) hp2[i] = (Eterm) 4711; ERTS_BIF_PREP_RET(ret, TUPLE2(hp, am_ok, make_big(hp2))); } else { badarg: ERTS_BIF_PREP_ERROR(ret, c_p, BADARG); } return ret; }
void erts_deliver_exit_message(Eterm from, Process *to, ErtsProcLocks *to_locksp, Eterm reason, Eterm token) { Eterm mess; Eterm save; Eterm from_copy; Uint sz_reason; Uint sz_token; Uint sz_from; Eterm* hp; Eterm temptoken; ErtsMessage* mp; ErlOffHeap *ohp; #ifdef SHCOPY_SEND erts_shcopy_t info; #endif if (have_seqtrace(token)) { ASSERT(is_tuple(token)); sz_token = size_object(token); sz_from = size_object(from); #ifdef SHCOPY_SEND INITIALIZE_SHCOPY(info); sz_reason = copy_shared_calculate(reason, &info); #else sz_reason = size_object(reason); #endif mp = erts_alloc_message_heap(to, to_locksp, sz_reason + sz_from + sz_token + 4, &hp, &ohp); #ifdef SHCOPY_SEND mess = copy_shared_perform(reason, sz_reason, &info, &hp, ohp); DESTROY_SHCOPY(info); #else mess = copy_struct(reason, sz_reason, &hp, ohp); #endif from_copy = copy_struct(from, sz_from, &hp, ohp); save = TUPLE3(hp, am_EXIT, from_copy, mess); hp += 4; /* the trace token must in this case be updated by the caller */ seq_trace_output(token, save, SEQ_TRACE_SEND, to->common.id, NULL); temptoken = copy_struct(token, sz_token, &hp, ohp); ERL_MESSAGE_TOKEN(mp) = temptoken; erts_queue_message(to, *to_locksp, mp, save, am_system); } else { sz_from = IS_CONST(from) ? 0 : size_object(from); #ifdef SHCOPY_SEND INITIALIZE_SHCOPY(info); sz_reason = copy_shared_calculate(reason, &info); #else sz_reason = size_object(reason); #endif mp = erts_alloc_message_heap(to, to_locksp, sz_reason+sz_from+4, &hp, &ohp); #ifdef SHCOPY_SEND mess = copy_shared_perform(reason, sz_reason, &info, &hp, ohp); DESTROY_SHCOPY(info); #else mess = copy_struct(reason, sz_reason, &hp, ohp); #endif from_copy = (IS_CONST(from) ? from : copy_struct(from, sz_from, &hp, ohp)); save = TUPLE3(hp, am_EXIT, from_copy, mess); erts_queue_message(to, *to_locksp, mp, save, am_system); } }
ErlHeapFragment* erts_resize_message_buffer(ErlHeapFragment *bp, Uint size, Eterm *brefs, Uint brefs_size) { #ifdef DEBUG int i; #endif #ifdef HARD_DEBUG ErlHeapFragment *dbg_bp; Eterm *dbg_brefs; Uint dbg_size; Uint dbg_tot_size; Eterm *dbg_hp; #endif ErlHeapFragment* nbp; #ifdef DEBUG { Uint off_sz = size < bp->used_size ? size : bp->used_size; for (i = 0; i < brefs_size; i++) { Eterm *ptr; if (is_immed(brefs[i])) continue; ptr = ptr_val(brefs[i]); ASSERT(&bp->mem[0] <= ptr && ptr < &bp->mem[0] + off_sz); } } #endif if (size >= (bp->used_size - bp->used_size / 16)) { bp->used_size = size; return bp; } #ifdef HARD_DEBUG dbg_brefs = erts_alloc(ERTS_ALC_T_UNDEF, sizeof(Eterm *)*brefs_size); dbg_bp = new_message_buffer(bp->used_size); dbg_hp = dbg_bp->mem; dbg_tot_size = 0; for (i = 0; i < brefs_size; i++) { dbg_size = size_object(brefs[i]); dbg_tot_size += dbg_size; dbg_brefs[i] = copy_struct(brefs[i], dbg_size, &dbg_hp, &dbg_bp->off_heap); } ASSERT(dbg_tot_size == (size < bp->used_size ? size : bp->used_size)); #endif nbp = (ErlHeapFragment*) ERTS_HEAP_REALLOC(ERTS_ALC_T_HEAP_FRAG, (void *) bp, ERTS_HEAP_FRAG_SIZE(bp->alloc_size), ERTS_HEAP_FRAG_SIZE(size)); if (bp != nbp) { Uint off_sz = size < nbp->used_size ? size : nbp->used_size; Eterm *sp = &bp->mem[0]; Eterm *ep = sp + off_sz; Sint offs = &nbp->mem[0] - sp; erts_offset_off_heap(&nbp->off_heap, offs, sp, ep); erts_offset_heap(&nbp->mem[0], off_sz, offs, sp, ep); if (brefs && brefs_size) erts_offset_heap_ptr(brefs, brefs_size, offs, sp, ep); #ifdef DEBUG for (i = 0; i < brefs_size; i++) { Eterm *ptr; if (is_immed(brefs[i])) continue; ptr = ptr_val(brefs[i]); ASSERT(&nbp->mem[0] <= ptr && ptr < &nbp->mem[0] + off_sz); } #endif } nbp->alloc_size = size; nbp->used_size = size; #ifdef HARD_DEBUG for (i = 0; i < brefs_size; i++) ASSERT(eq(dbg_brefs[i], brefs[i])); free_message_buffer(dbg_bp); erts_free(ERTS_ALC_T_UNDEF, dbg_brefs); #endif return nbp; }
Sint erts_send_message(Process* sender, Process* receiver, ErtsProcLocks *receiver_locks, Eterm message, unsigned flags) { Uint msize; ErtsMessage* mp; ErlOffHeap *ohp; Eterm token = NIL; Sint res = 0; #ifdef USE_VM_PROBES DTRACE_CHARBUF(sender_name, 64); DTRACE_CHARBUF(receiver_name, 64); Sint tok_label = 0; Sint tok_lastcnt = 0; Sint tok_serial = 0; Eterm utag = NIL; #endif erts_aint32_t receiver_state; #ifdef SHCOPY_SEND erts_shcopy_t info; #else erts_literal_area_t litarea; INITIALIZE_LITERAL_PURGE_AREA(litarea); #endif #ifdef USE_VM_PROBES *sender_name = *receiver_name = '\0'; if (DTRACE_ENABLED(message_send)) { erts_snprintf(sender_name, sizeof(DTRACE_CHARBUF_NAME(sender_name)), "%T", sender->common.id); erts_snprintf(receiver_name, sizeof(DTRACE_CHARBUF_NAME(receiver_name)), "%T", receiver->common.id); } #endif receiver_state = erts_atomic32_read_nob(&receiver->state); if (SEQ_TRACE_TOKEN(sender) != NIL && !(flags & ERTS_SND_FLG_NO_SEQ_TRACE)) { Eterm* hp; Eterm stoken = SEQ_TRACE_TOKEN(sender); Uint seq_trace_size = 0; #ifdef USE_VM_PROBES Uint dt_utag_size = 0; #endif /* SHCOPY corrupts the heap between * copy_shared_calculate, and * copy_shared_perform. (it inserts move_markers like the gc). * Make sure we don't use the heap between those instances. */ if (have_seqtrace(stoken)) { seq_trace_update_send(sender); seq_trace_output(stoken, message, SEQ_TRACE_SEND, receiver->common.id, sender); seq_trace_size = 6; /* TUPLE5 */ } #ifdef USE_VM_PROBES if (DT_UTAG_FLAGS(sender) & DT_UTAG_SPREADING) { dt_utag_size = size_object(DT_UTAG(sender)); } else if (stoken == am_have_dt_utag ) { stoken = NIL; } #endif #ifdef SHCOPY_SEND INITIALIZE_SHCOPY(info); msize = copy_shared_calculate(message, &info); #else msize = size_object_litopt(message, &litarea); #endif mp = erts_alloc_message_heap_state(receiver, &receiver_state, receiver_locks, (msize #ifdef USE_VM_PROBES + dt_utag_size #endif + seq_trace_size), &hp, &ohp); #ifdef SHCOPY_SEND if (is_not_immed(message)) message = copy_shared_perform(message, msize, &info, &hp, ohp); DESTROY_SHCOPY(info); #else if (is_not_immed(message)) message = copy_struct_litopt(message, msize, &hp, ohp, &litarea); #endif if (is_immed(stoken)) token = stoken; else token = copy_struct(stoken, seq_trace_size, &hp, ohp); #ifdef USE_VM_PROBES if (DT_UTAG_FLAGS(sender) & DT_UTAG_SPREADING) { if (is_immed(DT_UTAG(sender))) utag = DT_UTAG(sender); else utag = copy_struct(DT_UTAG(sender), dt_utag_size, &hp, ohp); } if (DTRACE_ENABLED(message_send)) { if (have_seqtrace(stoken)) { tok_label = signed_val(SEQ_TRACE_T_LABEL(stoken)); tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(stoken)); tok_serial = signed_val(SEQ_TRACE_T_SERIAL(stoken)); } DTRACE6(message_send, sender_name, receiver_name, msize, tok_label, tok_lastcnt, tok_serial); } #endif } else { Eterm *hp; if (receiver == sender && !(receiver_state & ERTS_PSFLG_OFF_HEAP_MSGQ)) { mp = erts_alloc_message(0, NULL); msize = 0; } else { #ifdef SHCOPY_SEND INITIALIZE_SHCOPY(info); msize = copy_shared_calculate(message, &info); #else msize = size_object_litopt(message, &litarea); #endif mp = erts_alloc_message_heap_state(receiver, &receiver_state, receiver_locks, msize, &hp, &ohp); #ifdef SHCOPY_SEND if (is_not_immed(message)) message = copy_shared_perform(message, msize, &info, &hp, ohp); DESTROY_SHCOPY(info); #else if (is_not_immed(message)) message = copy_struct_litopt(message, msize, &hp, ohp, &litarea); #endif } #ifdef USE_VM_PROBES DTRACE6(message_send, sender_name, receiver_name, msize, tok_label, tok_lastcnt, 
tok_serial); #endif } ERL_MESSAGE_TOKEN(mp) = token; #ifdef USE_VM_PROBES ERL_MESSAGE_DT_UTAG(mp) = utag; #endif res = queue_message(receiver, &receiver_state, *receiver_locks, mp, message, sender->common.id); return res; }
int erts_decode_dist_message(Process *proc, ErtsProcLocks proc_locks, ErtsMessage *msgp, int force_off_heap) { ErtsHeapFactory factory; Eterm msg; ErlHeapFragment *bp; Sint need; int decode_in_heap_frag; decode_in_heap_frag = (force_off_heap || !(proc_locks & ERTS_PROC_LOCK_MAIN) || (proc->flags & F_OFF_HEAP_MSGQ)); if (msgp->data.dist_ext->heap_size >= 0) need = msgp->data.dist_ext->heap_size; else { need = erts_decode_dist_ext_size(msgp->data.dist_ext); if (need < 0) { /* bad msg; remove it... */ if (is_not_immed(ERL_MESSAGE_TOKEN(msgp))) { bp = erts_dist_ext_trailer(msgp->data.dist_ext); erts_cleanup_offheap(&bp->off_heap); } erts_free_dist_ext_copy(msgp->data.dist_ext); msgp->data.dist_ext = NULL; return 0; } msgp->data.dist_ext->heap_size = need; } if (is_not_immed(ERL_MESSAGE_TOKEN(msgp))) { bp = erts_dist_ext_trailer(msgp->data.dist_ext); need += bp->used_size; } if (decode_in_heap_frag) erts_factory_heap_frag_init(&factory, new_message_buffer(need)); else erts_factory_proc_prealloc_init(&factory, proc, need); ASSERT(msgp->data.dist_ext->heap_size >= 0); if (is_not_immed(ERL_MESSAGE_TOKEN(msgp))) { ErlHeapFragment *heap_frag; heap_frag = erts_dist_ext_trailer(msgp->data.dist_ext); ERL_MESSAGE_TOKEN(msgp) = copy_struct(ERL_MESSAGE_TOKEN(msgp), heap_frag->used_size, &factory.hp, factory.off_heap); erts_cleanup_offheap(&heap_frag->off_heap); } msg = erts_decode_dist_ext(&factory, msgp->data.dist_ext); ERL_MESSAGE_TERM(msgp) = msg; erts_free_dist_ext_copy(msgp->data.dist_ext); msgp->data.attached = NULL; if (is_non_value(msg)) { erts_factory_undo(&factory); return 0; } erts_factory_trim_and_close(&factory, msgp->m, ERL_MESSAGE_REF_ARRAY_SZ); ASSERT(!msgp->data.heap_frag); if (decode_in_heap_frag) msgp->data.heap_frag = factory.heap_frags; return 1; }
static Eterm reference_table_term(Uint **hpp, Uint *szp) { #undef MK_2TUP #undef MK_3TUP #undef MK_CONS #undef MK_UINT #define MK_2TUP(E1, E2) erts_bld_tuple(hpp, szp, 2, (E1), (E2)) #define MK_3TUP(E1, E2, E3) erts_bld_tuple(hpp, szp, 3, (E1), (E2), (E3)) #define MK_CONS(CAR, CDR) erts_bld_cons(hpp, szp, (CAR), (CDR)) #define MK_UINT(UI) erts_bld_uint(hpp, szp, (UI)) int i; Eterm tup; Eterm tup2; Eterm nl = NIL; Eterm dl = NIL; Eterm nrid; for(i = 0; i < no_referred_nodes; i++) { NodeReferrer *nrp; Eterm nril = NIL; for(nrp = referred_nodes[i].referrers; nrp; nrp = nrp->next) { Eterm nrl = NIL; /* NodeReferenceList = [{ReferenceType,References}] */ if(nrp->heap_ref) { tup = MK_2TUP(AM_heap, MK_UINT(nrp->heap_ref)); nrl = MK_CONS(tup, nrl); } if(nrp->link_ref) { tup = MK_2TUP(AM_link, MK_UINT(nrp->link_ref)); nrl = MK_CONS(tup, nrl); } if(nrp->monitor_ref) { tup = MK_2TUP(AM_monitor, MK_UINT(nrp->monitor_ref)); nrl = MK_CONS(tup, nrl); } if(nrp->ets_ref) { tup = MK_2TUP(AM_ets, MK_UINT(nrp->ets_ref)); nrl = MK_CONS(tup, nrl); } if(nrp->bin_ref) { tup = MK_2TUP(AM_binary, MK_UINT(nrp->bin_ref)); nrl = MK_CONS(tup, nrl); } if(nrp->timer_ref) { tup = MK_2TUP(AM_timer, MK_UINT(nrp->timer_ref)); nrl = MK_CONS(tup, nrl); } if(nrp->system_ref) { tup = MK_2TUP(AM_system, MK_UINT(nrp->system_ref)); nrl = MK_CONS(tup, nrl); } nrid = nrp->id; if (!IS_CONST(nrp->id)) { Uint nrid_sz = size_object(nrp->id); if (szp) *szp += nrid_sz; if (hpp) nrid = copy_struct(nrp->id, nrid_sz, hpp, NULL); } if (is_internal_pid(nrid) || nrid == am_error_logger) { ASSERT(!nrp->ets_ref && !nrp->bin_ref && !nrp->system_ref); tup = MK_2TUP(AM_process, nrid); } else if (is_tuple(nrid)) { Eterm *t; ASSERT(!nrp->ets_ref && !nrp->bin_ref); t = tuple_val(nrid); ASSERT(2 == arityval(t[0])); tup = MK_2TUP(t[1], t[2]); } else if(is_internal_port(nrid)) { ASSERT(!nrp->heap_ref && !nrp->ets_ref && !nrp->bin_ref && !nrp->timer_ref && !nrp->system_ref); tup = MK_2TUP(AM_port, nrid); } else if(nrp->ets_ref) { ASSERT(!nrp->heap_ref && !nrp->link_ref && !nrp->monitor_ref && !nrp->bin_ref && !nrp->timer_ref && !nrp->system_ref); tup = MK_2TUP(AM_ets, nrid); } else if(nrp->bin_ref) { ASSERT(is_small(nrid) || is_big(nrid)); ASSERT(!nrp->heap_ref && !nrp->ets_ref && !nrp->link_ref && !nrp->monitor_ref && !nrp->timer_ref && !nrp->system_ref); tup = MK_2TUP(AM_match_spec, nrid); } else { ASSERT(!nrp->heap_ref && !nrp->ets_ref && !nrp->bin_ref); ASSERT(is_atom(nrid)); tup = MK_2TUP(AM_dist, nrid); } tup = MK_2TUP(tup, nrl); /* NodeReferenceIdList = [{{ReferrerType, ID}, NodeReferenceList}] */ nril = MK_CONS(tup, nril); } /* NodeList = [{{Node, Creation}, Refc, NodeReferenceIdList}] */ tup = MK_2TUP(referred_nodes[i].node->sysname, MK_UINT(referred_nodes[i].node->creation)); tup = MK_3TUP(tup, MK_UINT(erts_refc_read(&referred_nodes[i].node->refc, 1)), nril); nl = MK_CONS(tup, nl); } for(i = 0; i < no_referred_dists; i++) { DistReferrer *drp; Eterm dril = NIL; for(drp = referred_dists[i].referrers; drp; drp = drp->next) { Eterm drl = NIL; /* DistReferenceList = [{ReferenceType,References}] */ if(drp->node_ref) { tup = MK_2TUP(AM_node, MK_UINT(drp->node_ref)); drl = MK_CONS(tup, drl); } if(drp->ctrl_ref) { tup = MK_2TUP(AM_control, MK_UINT(drp->ctrl_ref)); drl = MK_CONS(tup, drl); } if (is_internal_pid(drp->id)) { ASSERT(drp->ctrl_ref && !drp->node_ref); tup = MK_2TUP(AM_process, drp->id); } else if(is_internal_port(drp->id)) { ASSERT(drp->ctrl_ref && !drp->node_ref); tup = MK_2TUP(AM_port, drp->id); } else { ASSERT(!drp->ctrl_ref && 
drp->node_ref); ASSERT(is_atom(drp->id)); tup = MK_2TUP(drp->id, MK_UINT(drp->creation)); tup = MK_2TUP(AM_node, tup); } tup = MK_2TUP(tup, drl); /* DistReferenceIdList = [{{ReferrerType, ID}, DistReferenceList}] */ dril = MK_CONS(tup, dril); } /* DistList = [{Dist, Refc, ReferenceIdList}] */ tup = MK_3TUP(referred_dists[i].dist->sysname, MK_UINT(erts_refc_read(&referred_dists[i].dist->refc, 1)), dril); dl = MK_CONS(tup, dl); } /* {{node_references, NodeList}, {dist_references, DistList}} */ tup = MK_2TUP(AM_node_references, nl); tup2 = MK_2TUP(AM_dist_references, dl); tup = MK_2TUP(tup, tup2); return tup; #undef MK_2TUP #undef MK_3TUP #undef MK_CONS #undef MK_UINT }