// // Flatten the valid iolist to the buffer of // appropriate size pointed to by ptr // uint8_t *iolist_flatten(term_t l, uint8_t *ptr) { if (is_nil(l)) return ptr; if (is_cons(l)) { do { uint32_t *term_data = peel_cons(l); term_t e = term_data[0]; if (is_int(e)) *ptr++ = int_value(e); else { assert(is_list(e) || (is_boxed(e) && is_binary(peel_boxed(e)))); ptr = iolist_flatten(e, ptr); } l = term_data[1]; if (is_boxed(l) && is_binary(peel_boxed(l))) return iolist_flatten(l, ptr); } while (is_cons(l)); assert(is_nil(l)); } else // is_binary() { bits_t bs, to; bits_get_real(peel_boxed(l), &bs); bits_init_buf(ptr, (bs.ends +7) /8, &to); ptr += (bs.ends - bs.starts) /8; bits_copy(&bs, &to); assert(bs.starts == bs.ends); } return ptr; }
// Print a rough stack trace to the console.
//
// ip   - current instruction pointer
// cp   - continuation pointer; 0 means we are between allocate and
//        call/apply and the real cp must be recovered from the stack
// sp   - top of the Erlang stack
// sbot - bottom of the Erlang stack
void __ts(uint32_t *ip, uint32_t *cp, term_t *sp, term_t *sbot)
{
	//term_t *sbot = proc_stack_bottom(proc);
	uint32_t *fi = backstep_to_func_info(ip);
	if (fi == 0)
	{
		printk("No current function\n");
		return;
	}
	else
		printk("* %pt:%pt/%d +%ld\n", T(fi[1]), T(fi[2]), fi[3], ip-fi);

	if (cp == 0)	// after allocate, before call/apply
	{
		// the saved cp is masqueraded on top of the stack; then skip
		// ahead to the next saved continuation pointer
		cp = demasquerade_pointer(sp[0]);
		while (++sp < sbot)
			if (is_boxed(*sp) && is_cp(peel_boxed(*sp)))
				break;
	}

	do {
		// stop at the bottommost frame
		if (cp[0] == shrink_ptr(int_code_end_label))
			break;
		// NOTE: intentionally shadows the outer 'fi'
		uint32_t *fi = backstep_to_func_info(cp);
		printk(" %pt:%pt/%d\n", T(fi[1]), T(fi[2]), fi[3]);
		cp = demasquerade_pointer(sp[0]);
		while (++sp < sbot)
			if (is_boxed(*sp) && is_cp(peel_boxed(*sp)))
				break;
	} while (sp < sbot);
}
//
// Compute the size, in bytes, of a valid iolist, recursing at most
// IOLIST_MAX_DEPTH levels deep.
//
// Returns the byte size, or a negative error code:
//   -TOO_DEEP  nesting exceeds IOLIST_MAX_DEPTH
//   -BAD_ARG   an element is not an octet/list/byte-aligned binary
//
static int64_t iolist_size2(int depth, term_t l)
{
	if (depth > IOLIST_MAX_DEPTH)
		return -TOO_DEEP;
	if (is_nil(l))
		return 0;
	if (is_cons(l))
	{
		int64_t size = 0;
		do {
			uint32_t *term_data = peel_cons(l);
			term_t e = term_data[0];
			if (is_int(e))
			{
				// integer elements must be octets (0..255)
				if (int_value(e) < 0 || int_value(e) > 255)
					return -BAD_ARG;
				size++;
			}
			else
			{
				if (!is_list(e) && (!is_boxed(e) || !is_binary(peel_boxed(e))))
					return -BAD_ARG;
				int64_t s = iolist_size2(depth+1, e);
				if (s < 0)
					return s;
				size += s;
			}
			l = term_data[1];
			if (is_boxed(l) && is_binary(peel_boxed(l)))
			{
				// odd list with binary tail allowed
				int64_t s = iolist_size2(depth+1, l);
				if (s < 0)
					return s;
				return size +s;
			}
		} while (is_cons(l));
		if (!is_nil(l))
			return -BAD_ARG;
		return size;
	}
	else if (is_boxed_binary(l))
	{
		bits_t bs;
		bits_get_real(peel_boxed(l), &bs);
		// fixed: was 'int64_t bit_size = bit_size = ...' (duplicated
		// assignment typo)
		int64_t bit_size = bs.ends - bs.starts;
		// binaries in an iolist must be byte-aligned; report this as a
		// bad argument like every other validation failure (was a bare
		// -1 that did not match any error-code convention)
		if ((bit_size & 7) != 0)
			return -BAD_ARG;
		return bit_size /8;
	}
	else
		return -BAD_ARG;
}
//
// Compute the size, in bits, of a valid bits list (similar to an
// iolist, but binaries need not be byte-aligned), recursing at most
// BITS_LIST_MAX_DEPTH levels deep.
//
// Returns the bit size, or a negative error code:
//   -TOO_DEEP   nesting exceeds BITS_LIST_MAX_DEPTH
//   -BAD_ARG    an element is not an octet, a list, or a binary
//   -TOO_LONG   the total size exceeds MAX_BIT_SIZE
//
static int64_t bits_list_size2(int depth, term_t l)
{
	if (depth > BITS_LIST_MAX_DEPTH)
		return -TOO_DEEP;
	if (is_nil(l))
		return 0;
	if (is_cons(l))
	{
		int64_t size = 0;
		do {
			uint32_t *term_data = peel_cons(l);
			term_t e = term_data[0];
			if (is_int(e))
			{
				// integer elements must be octets; each adds 8 bits
				if (int_value(e) < 0 || int_value(e) > 255)
					return -BAD_ARG;
				size += 8;
			}
			else
			{
				if (!is_list(e) && (!is_boxed(e) || !is_binary(peel_boxed(e))))
					return -BAD_ARG;
				int64_t s = bits_list_size2(depth+1, e);
				if (s < 0)
					return s;
				size += s;
			}
			l = term_data[1];
			if (is_boxed(l) && is_binary(peel_boxed(l)))
			{
				// odd list with binary tail allowed
				int64_t s = bits_list_size2(depth+1, l);
				if (s < 0)
					return s;
				size += s;
				if (size > MAX_BIT_SIZE)
					return -TOO_LONG;
				return size;
			}
		} while (is_cons(l));
		if (!is_nil(l))
			return -BAD_ARG;
		if (size > MAX_BIT_SIZE)
			return -TOO_LONG;
		return size;
	}
	else // is_binary()
	{
		bits_t bs;
		bits_get_real(peel_boxed(l), &bs);
		if (bs.ends - bs.starts > MAX_BIT_SIZE)
			return -TOO_LONG;
		return bs.ends - bs.starts;
	}
}
/* Print a one-line description of a lock to stderr. The lock's
 * "extra" word is printed as a raw pointer when it is a boxed term
 * and as a formatted term (%T) otherwise. Unknown lock-order ids are
 * printed as "unknown". */
static void
print_lock2(char *prefix, Sint16 id, Eterm extra, Uint16 flags, char *suffix)
{
    char *lname;

    if (0 <= id && id < ERTS_LOCK_ORDER_SIZE)
        lname = erts_lock_order[id].name;
    else
        lname = "unknown";

    if (is_boxed(extra)) {
        erts_fprintf(stderr, "%s'%s:%p%s'%s%s", prefix, lname,
                     boxed_val(extra), lock_type(flags),
                     rw_op_str(flags), suffix);
    }
    else {
        erts_fprintf(stderr, "%s'%s:%T%s'%s%s", prefix, lname,
                     extra, lock_type(flags), rw_op_str(flags), suffix);
    }
}
/*
 * Return all {Key,Value} tuples of the process dictionary as a list.
 * The needed heap (one cons cell, i.e. two words, per element) is
 * allocated up front. Each hash slot holds either a single tuple
 * (is_boxed) or a list of tuples when keys collide (is_list).
 */
static Eterm pd_hash_get_all(Process *p, ProcDict *pd)
{
    Eterm* hp;
    Eterm res = NIL;
    Eterm tmp, tmp2;
    unsigned int i;
    unsigned int num;

    if (pd == NULL) {
	return res;
    }

    num = HASH_RANGE(pd);
    hp = HAlloc(p, pd->numElements * 2);	/* one cons cell per element */

    for (i = 0; i < num; ++i) {
	tmp = ARRAY_GET(pd, i);
	if (is_boxed(tmp)) {		/* single {Key,Value} tuple */
	    ASSERT(is_tuple(tmp));
	    res = CONS(hp, tmp, res);
	    hp += 2;
	} else if (is_list(tmp)) {	/* collision list of tuples */
	    while (tmp != NIL) {
		tmp2 = TCAR(tmp);
		res = CONS(hp, tmp2, res);
		hp += 2;
		tmp = TCDR(tmp);
	    }
	}
    }
    return res;
}
// spawn_monitor/1: spawn a new process running the fun and atomically
// establish a monitor from the caller to it. Returns {Pid,MonRef}.
// Raises badarg for non-fun arguments, system_limit/not_spawned on
// spawn failure.
term_t cbif_spawn_monitor1(proc_t *proc, term_t *regs)
{
	term_t Fun = regs[0];
	if (!is_boxed(Fun))
		badarg(Fun);
	uint32_t *fdata = peel_boxed(Fun);
	if (boxed_tag(fdata) != SUBTAG_FUN)
		badarg(Fun);
	t_fun_t *f = (t_fun_t *)fdata;
	if (f->fe == 0)
		not_implemented("unloaded funs");
	// NOTE(review): unlike cbif_spawn1 there is no
	// fun_arity()/fun_num_free() check here - confirm that funs
	// expecting arguments are rejected elsewhere
	term_t ref = heap_make_ref(&proc->hp);
	proc_t *new_proc = proc_make(proc->group_leader);
	int x = proc_spawn_fun0_N(new_proc, f);
	if (x == 0)
	{
		uint64_t ref_id = local_ref_id(ref);
		x = monitor(ref_id, proc->pid, new_proc->pid);
	}
	if (x < 0)
	{
		// no need to demonitor
		proc_destroy(new_proc);
		if (x == -TOO_DEEP)
			fail(A_SYSTEM_LIMIT);
		else
			fail(A_NOT_SPAWNED);
	}
	return heap_tuple2(&proc->hp, new_proc->pid, ref);
}
/*
 * Called from process_info/1,2.
 *
 * Build a list of all {Key,Value} tuples of the dictionary in a
 * temporary C buffer, then copy the finished list onto the heap of
 * process p. Returns NIL when there is no dictionary.
 */
Eterm erts_dictionary_copy(Process *p, ProcDict *pd)
{
    Eterm* hp;
    Eterm* heap_start;
    Eterm res = NIL;
    Eterm tmp, tmp2;
    unsigned int i, num;

    if (pd == NULL) {
	return res;
    }

    PD_CHECK(pd);
    num = HASH_RANGE(pd);
    /* temporary off-heap buffer: one cons cell (2 words) per element */
    heap_start = hp = (Eterm *) erts_alloc(ERTS_ALC_T_TMP,
					   sizeof(Eterm) * pd->numElements * 2);
    for (i = 0; i < num; ++i) {
	tmp = ARRAY_GET(pd, i);
	if (is_boxed(tmp)) {		/* single {Key,Value} tuple */
	    ASSERT(is_tuple(tmp));
	    res = CONS(hp, tmp, res);
	    hp += 2;
	} else if (is_list(tmp)) {	/* collision list of tuples */
	    while (tmp != NIL) {
		tmp2 = TCAR(tmp);
		res = CONS(hp, tmp2, res);
		hp += 2;
		tmp = TCDR(tmp);
	    }
	}
    }
    /* move the result onto the process heap and release the buffer */
    res = copy_object(res, p);
    erts_free(ERTS_ALC_T_TMP, (void *) heap_start);
    return res;
}
/* Dump one term in the compact crash-dump notation: heap references
 * as "H<addr>", atoms as "A<len>:<name>", small integers as "I<val>",
 * pids as "P<pid>", ports as "p<chan.num>" and [] as a single 'N'.
 * Non-immediate, non-heap terms produce no output. */
static void
dump_element(int to, void *to_arg, Eterm x)
{
    if (is_list(x)) {
	erts_print(to, to_arg, "H" WORD_FMT, list_val(x));
	return;
    }
    if (is_boxed(x)) {
	erts_print(to, to_arg, "H" WORD_FMT, boxed_val(x));
	return;
    }
    if (!is_immed(x))
	return;

    if (is_atom(x)) {
	unsigned char* name = atom_tab(atom_val(x))->name;
	int n = atom_tab(atom_val(x))->len;
	int k;

	erts_print(to, to_arg, "A%X:", atom_tab(atom_val(x))->len);
	for (k = 0; k < n; k++)
	    erts_putc(to, to_arg, name[k]);
    } else if (is_small(x)) {
	erts_print(to, to_arg, "I%T", x);
    } else if (is_pid(x)) {
	erts_print(to, to_arg, "P%T", x);
    } else if (is_port(x)) {
	erts_print(to, to_arg, "p<%bpu.%bpu>",
		   port_channel_no(x), port_number(x));
    } else if (is_nil(x)) {
	erts_putc(to, to_arg, 'N');
    }
}
// Pretty-print a term to stdout. Returns the number of characters
// written (the sum of the underlying printf return values).
int alm_print_term(ATERM t)
{
	int n = 0;

	if (is_num(t))
		return printf("%.1lf", num_val(t));
	if (is_nil(t))
		return printf("[]");
	if (is_cons(t)) {
		ATERM cell = t;
		n += printf("[");
		while (is_cons(cell)) {
			ATERM tail = CDR(cell);
			n += alm_print_term(CAR(cell));
			if (is_cons(tail))
				n += printf(",");
			else if (!is_nil(tail)) {
				// improper list: print the tail after a bar
				n += printf("|");
				n += alm_print_term(tail);
			}
			cell = tail;
		}
		n += printf("]");
		return n;
	}
	if (is_boxed(t)) {
		ATERM *box = boxed_ptr(t);
		// only atoms have a printable payload here
		if (is_atom(*box))
			n += printf("%.*s", (int) box[1].bin, (char*) (box + 2));
		return n;
	}
	if (is_frame(t))
		return printf("<frame/0x%.3llX>",frame_val(t));
	return n;
}
/*
Convert this MonoError to an exception if it's faulty or return NULL.
The error object is cleaned after.
*/
MonoException*
mono_error_convert_to_exception (MonoError *target_error)
{
	ERROR_DECL (error);
	MonoException *ex;

	/* Mempool stored error shouldn't be cleaned up */
	g_assert (!is_boxed ((MonoErrorInternal*)target_error));

	/* no fault - nothing to convert */
	if (mono_error_ok (target_error))
		return NULL;

	ex = mono_error_prepare_exception (target_error, error);
	if (!mono_error_ok (error)) {
		ERROR_DECL (second_chance);
		/*Try to produce the exception for the second error. FIXME maybe we should log about the original one*/
		ex = mono_error_prepare_exception (error, second_chance);

		// We cannot reasonably handle double faults, maybe later.
		g_assert (mono_error_ok (second_chance));
		mono_error_cleanup (error);
	}
	/* always clean up the incoming error, per the contract above */
	mono_error_cleanup (target_error);
	return ex;
}
/*
 * get/1: look up `id` in the process dictionary of `p`. A hash slot
 * holds NIL (empty), a single {Key,Value} tuple, or a list of tuples
 * when keys collide. Returns the value or am_undefined if not found.
 */
Eterm erts_pd_hash_get(Process *p, Eterm id)
{
    unsigned int hval;
    Eterm tmp;
    ProcDict *pd = p->dictionary;

    if (pd == NULL)
	return am_undefined;
    hval = pd_hash_value(pd, id);
    tmp = ARRAY_GET(pd, hval);
    if (is_boxed(tmp)) {	/* Tuple */
	ASSERT(is_tuple(tmp));
	if (EQ(tuple_val(tmp)[1], id)) {
	    return tuple_val(tmp)[2];
	}
    } else if (is_list(tmp)) {
	/* scan the collision list for a tuple whose key equals id */
	for (; tmp != NIL && !EQ(tuple_val(TCAR(tmp))[1], id); tmp = TCDR(tmp)) {
	    ;
	}
	if (tmp != NIL) {
	    return tuple_val(TCAR(tmp))[2];
	}
    } else if (is_not_nil(tmp)) {
	/* neither NIL, tuple, nor list: the dictionary is corrupt */
#ifdef DEBUG
	erts_fprintf(stderr,
		     "Process dictionary for process %T is broken, trying to "
		     "display term found in line %d:\n"
		     "%T\n", p->common.id, __LINE__, tmp);
#endif
	erl_exit(1, "Damaged process dictionary found during get/1.");
    }
    return am_undefined;
}
// spawn_link/1: spawn a new process running the fun and atomically
// link it with the caller in both directions. Returns the new pid.
term_t cbif_spawn_link1(proc_t *proc, term_t *regs)
{
	term_t Fun = regs[0];
	if (!is_boxed(Fun))
		badarg(Fun);
	uint32_t *fdata = peel_boxed(Fun);
	if (boxed_tag(fdata) != SUBTAG_FUN)
		badarg(Fun);
	t_fun_t *f = (t_fun_t *)fdata;
	if (f->fe == 0)
		not_implemented("unloaded funs");
	// NOTE(review): no fun_arity()/fun_num_free() check here, unlike
	// cbif_spawn1 - confirm this is intentional
	proc_t *new_proc = proc_make(proc->group_leader);
	int x = proc_spawn_fun0_N(new_proc, f);
	// establish the link in both directions; either step may fail
	if (x == 0)
		x = inter_link_establish_N(&new_proc->links, proc->pid);
	if (x == 0)
		x = inter_link_establish_N(&proc->links, new_proc->pid);
	if (x < 0)
	{
		proc_destroy(new_proc);
		// no need to unlink, new_proc might have a link to proc but it was destroyed anyway
		if (x == -TOO_DEEP)
			fail(A_SYSTEM_LIMIT);
		else
			fail(A_NOT_SPAWNED);
	}
	return new_proc->pid;
}
/*
 * get_keys/1: return a list of all keys whose associated value equals
 * `value`. Heap space is allocated per match (one cons cell each).
 */
static Eterm pd_hash_get_keys(Process *p, Eterm value)
{
    Eterm *hp;
    Eterm res = NIL;
    ProcDict *pd = p->dictionary;
    unsigned int i, num;
    Eterm tmp, tmp2;

    if (pd == NULL) {
	return res;
    }

    num = HASH_RANGE(pd);
    for (i = 0; i < num; ++i) {
	tmp = ARRAY_GET(pd, i);
	if (is_boxed(tmp)) {		/* single {Key,Value} tuple */
	    ASSERT(is_tuple(tmp));
	    if (EQ(tuple_val(tmp)[2], value)) {
		hp = HAlloc(p, 2);
		res = CONS(hp, tuple_val(tmp)[1], res);
	    }
	} else if (is_list(tmp)) {	/* collision list of tuples */
	    while (tmp != NIL) {
		tmp2 = TCAR(tmp);
		if (EQ(tuple_val(tmp2)[2], value)) {
		    hp = HAlloc(p, 2);
		    res = CONS(hp, tuple_val(tmp2)[1], res);
		}
		tmp = TCDR(tmp);
	    }
	}
    }
    return res;
}
// spawn/1: spawn a new process running the fun, which is invoked with
// no arguments. Returns the new pid. Raises badarg for non-fun
// arguments or funs that still expect arguments.
term_t cbif_spawn1(proc_t *proc, term_t *regs)
{
	term_t Fun = regs[0];
	if (!is_boxed(Fun))
		badarg(Fun);
	uint32_t *fdata = peel_boxed(Fun);
	if (boxed_tag(fdata) != SUBTAG_FUN)
		badarg(Fun);
	t_fun_t *f = (t_fun_t *)fdata;
	if (f->fe == 0)
		not_implemented("unloaded funs");
	// the fun is called with zero arguments, so all of its formal
	// parameters must already be satisfied by frozen variables
	if (fun_arity(fdata) != fun_num_free(fdata))
		badarg();
	proc_t *new_proc = proc_make(proc->group_leader);
	int x = proc_spawn_fun0_N(new_proc, f);
	if (x < 0)
	{
		proc_destroy(new_proc);
		if (x == -TOO_DEEP)
			fail(A_SYSTEM_LIMIT);
		else
			fail(A_NOT_SPAWNED);
	}
	return new_proc->pid;
}
/*
 * BIF implementations
 */

/*
 * erase/1: remove `id` from the dictionary of `p`, storing the erased
 * value (or am_undefined) in *ret. A slot holds NIL, a single
 * {Key,Value} tuple, or a collision list of tuples. The table is
 * shrunk when it becomes sufficiently sparse.
 */
static void pd_hash_erase(Process *p, Eterm id, Eterm *ret)
{
    unsigned int hval;
    Eterm old;
    Eterm tmp;
    unsigned int range;

    *ret = am_undefined;
    if (p->dictionary == NULL) {
	return;
    }
    hval = pd_hash_value(p->dictionary, id);
    old = ARRAY_GET(p->dictionary, hval);
    if (is_boxed(old)) {	/* Tuple */
	ASSERT(is_tuple(old));
	if (EQ(tuple_val(old)[1], id)) {
	    array_put(&(p->dictionary), hval, NIL);
	    --(p->dictionary->numElements);
	    *ret = tuple_val(old)[2];
	}
    } else if (is_list(old)) {
	/* Find cons cell for identical value */
	Eterm* prev = &p->dictionary->data[hval];

	for (tmp = *prev; tmp != NIL; prev = &TCDR(tmp), tmp = *prev) {
	    if (EQ(tuple_val(TCAR(tmp))[1], id)) {
		*prev = TCDR(tmp);	/* unlink the matching cell */
		*ret = tuple_val(TCAR(tmp))[2];
		--(p->dictionary->numElements);
	    }
	}

	/* If there is only one element left in the list we must remove the list. */
	old = ARRAY_GET(p->dictionary, hval);
	ASSERT(is_list(old));
	if (is_nil(TCDR(old))) {
	    array_put(&p->dictionary, hval, TCAR(old));
	}
    } else if (is_not_nil(old)) {
	/* neither NIL, tuple, nor list: the dictionary is corrupt */
#ifdef DEBUG
	erts_fprintf(stderr,
		     "Process dictionary for process %T is broken, trying to "
		     "display term found in line %d:\n"
		     "%T\n", p->common.id, __LINE__, old);
#endif
	erl_exit(1, "Damaged process dictionary found during erase/1.");
    }
    /* shrink once the table is above its initial size and less than
     * half full */
    if ((range = HASH_RANGE(p->dictionary)) > INITIAL_SIZE &&
	range / 2 > (p->dictionary->numElements)) {
	shrink(p, ret);
    }
}
// demonitor/1: remove the monitor identified by a local reference.
// Raises badarg when the argument is not a local reference or no such
// monitor exists; returns 'true' on success.
term_t cbif_demonitor1(proc_t *proc, term_t *regs)
{
	term_t MonRef = regs[0];

	if (!is_boxed(MonRef) || boxed_tag(peel_boxed(MonRef)) != SUBTAG_REF)
		badarg(MonRef);
	if (!ref_is_local(MonRef))
		badarg(MonRef);

	int rc = demonitor(local_ref_id(MonRef), proc->pid);
	if (rc < 0)
		badarg(MonRef);

	return A_TRUE;
}
// // Flatten the valid bits list to the bits_t context // void bits_list_flatten(term_t l, bits_t *bs) { if (is_nil(l)) return; if (is_cons(l)) { do { uint32_t *term_data = peel_cons(l); term_t e = term_data[0]; if (is_int(e)) { int o = int_value(e); assert(o >= 0 && o < 256); bits_put_octet(bs, (uint8_t)o); } else { assert(is_list(e) || (is_boxed(e) && is_binary(peel_boxed(e)))); bits_list_flatten(e, bs); } l = term_data[1]; if (is_boxed(l) && is_binary(peel_boxed(l))) { bits_list_flatten(l, bs); return; } } while (is_cons(l)); assert(is_nil(l)); } else // is_binary() { bits_t source; bits_get_real(peel_boxed(l), &source); bits_copy(&source, bs); } }
/* Linear-probe the open-addressing hash table for `key`. Returns the
 * index of either the entry whose tuple key equals `key` or the first
 * non-boxed (free) slot encountered. Probing starts one past the raw
 * hash value and wraps via the table mask. */
static Uint lookup(HashTable* hash_table, Eterm key)
{
    Eterm* slots = hash_table->term;
    Uint mask = hash_table->mask;
    Uint32 ix = make_internal_hash(key, 0);

    for (;;) {
        Eterm probed;

        ix++;
        probed = slots[ix & mask];
        if (!is_boxed(probed) || EQ(key, (tuple_val(probed))[1]))
            return ix & mask;
    }
}
/*
 * One trapping step of persistent_term:info/0. Scans up to max_iter
 * occupied slots of the hash table, accumulating the memory used by
 * their literal areas in trap_data->memory. When all entries have
 * been visited, builds and returns the #{count => _, memory => _}
 * result map; otherwise returns am_ok to signal that another trap
 * round is needed.
 */
static Eterm
do_info(Process* c_p, TrapData* trap_data)
{
    HashTable* hash_table;
    Uint remaining;
    Uint idx;
    Uint max_iter;

    hash_table = trap_data->table;
    idx = trap_data->idx;
#if defined(DEBUG) || defined(VALGRIND)
    max_iter = 50;
#else
    max_iter = ERTS_BIF_REDS_LEFT(c_p);
#endif
    remaining = trap_data->remaining < max_iter ? trap_data->remaining : max_iter;
    trap_data->remaining -= remaining;
    while (remaining != 0) {
	if (is_boxed(hash_table->term[idx])) {
	    ErtsLiteralArea* area;
	    area = term_to_area(hash_table->term[idx]);
	    trap_data->memory += sizeof(ErtsLiteralArea) +
		sizeof(Eterm) * (area->end - area->start - 1);
	    remaining--;	/* only occupied slots count as work */
	}
	idx++;
    }
    trap_data->idx = idx;
    if (trap_data->remaining > 0) {
	return am_ok;		/* Dummy return value */
    } else {
	Eterm* hp;
	Eterm count_term;
	Eterm memory_term;
	Eterm res;
	Uint memory;
	Uint hsz = MAP_SZ(2);

	/* total = table struct + slot array + literal areas */
	memory = sizeof(HashTable) + (trap_data->table->allocated-1) *
	    sizeof(Eterm) + trap_data->memory;
	/* first pass sizes the heap, second pass builds the terms */
	(void) erts_bld_uint(NULL, &hsz, hash_table->num_entries);
	(void) erts_bld_uint(NULL, &hsz, memory);
	hp = HAlloc(c_p, hsz);
	count_term = erts_bld_uint(&hp, NULL, hash_table->num_entries);
	memory_term = erts_bld_uint(&hp, NULL, memory);
	res = MAP2(hp, am_count, count_term, am_memory, memory_term);
	return res;
    }
}
BIF_RETTYPE persistent_term_get_1(BIF_ALIST_1) { Eterm key = BIF_ARG_1; HashTable* hash_table = (HashTable *) erts_atomic_read_nob(&the_hash_table); Uint entry_index; Eterm term; entry_index = lookup(hash_table, key); term = hash_table->term[entry_index]; if (is_boxed(term)) { ASSERT(is_tuple_arity(term, 2)); BIF_RET(tuple_val(term)[2]); } BIF_ERROR(BIF_P, BADARG); }
// Map a term to its TERM_ORDER_* class, used when comparing terms of
// different types. Immediates are classified directly; boxed terms by
// their subtag. All four binary subtags share one class, and bignums
// and floats share the number class with small ints.
static int term_order(term_t t)
{
	if (is_cons(t))
		return TERM_ORDER_CONS;
	if (is_tuple(t))
		return TERM_ORDER_TUPLE;
	if (is_nil(t))
		return TERM_ORDER_NIL;
	if (is_int(t))
		return TERM_ORDER_NUMBER;
	if (is_atom(t))
		return TERM_ORDER_ATOM;
	if (is_short_pid(t))
		return TERM_ORDER_PID;
	if (is_short_oid(t))
		return TERM_ORDER_OID;

	assert(is_boxed(t));
	switch (boxed_tag(peel_boxed(t)))
	{
	case SUBTAG_POS_BIGNUM:
	case SUBTAG_NEG_BIGNUM:
	case SUBTAG_FLOAT:
		return TERM_ORDER_NUMBER;
	case SUBTAG_FUN:
		return TERM_ORDER_FUN;
	case SUBTAG_EXPORT:
		return TERM_ORDER_EXPORT;
	case SUBTAG_PID:
		return TERM_ORDER_PID;
	case SUBTAG_OID:
		return TERM_ORDER_OID;
	case SUBTAG_REF:
		return TERM_ORDER_REF;
	case SUBTAG_PROC_BIN:
	case SUBTAG_HEAP_BIN:
	case SUBTAG_MATCH_CTX:
	case SUBTAG_SUB_BIN:
		return TERM_ORDER_BINARY;
	default:
		fatal_error("subtag");	// does not return
	}
}
BIF_RETTYPE persistent_term_get_2(BIF_ALIST_2) { Eterm key = BIF_ARG_1; Eterm result = BIF_ARG_2; HashTable* hash_table = (HashTable *) erts_atomic_read_nob(&the_hash_table); Uint entry_index; Eterm term; entry_index = lookup(hash_table, key); term = hash_table->term[entry_index]; if (is_boxed(term)) { ASSERT(is_tuple_arity(term, 2)); result = tuple_val(term)[2]; } BIF_RET(result); }
/* Move all terms in heap fragments into heap. The terms must be guaranteed to
 * be contained within the fragments. The source terms are destructed with
 * move markers.
 * Typically used to copy a multi-fragmented message (from NIF).
 */
void move_multi_frags(Eterm** hpp, ErlOffHeap* off_heap, ErlHeapFragment* first,
		      Eterm* refs, unsigned nrefs)
{
    ErlHeapFragment* bp;
    Eterm* hp_start = *hpp;
    Eterm* hp_end;
    Eterm* hp;
    unsigned i;

    /* Pass 1: move each fragment's contents into the heap, leaving
     * move markers behind in the fragments. */
    for (bp=first; bp!=NULL; bp=bp->next) {
	move_one_frag(hpp, bp->mem, bp->used_size, off_heap);
	off_heap->overhead += bp->off_heap.overhead;
    }
    hp_end = *hpp;
    /* Pass 2: rewrite pointers within the moved area to follow the
     * move markers left in the fragments. */
    for (hp=hp_start; hp<hp_end; ++hp) {
	Eterm* ptr;
	Eterm val;
	Eterm gval = *hp;
	switch (primary_tag(gval)) {
	case TAG_PRIMARY_BOXED:
	    ptr = boxed_val(gval);
	    val = *ptr;
	    if (IS_MOVED_BOXED(val)) {
		ASSERT(is_boxed(val));
		*hp = val;	/* marker holds the forwarded term */
	    }
	    break;
	case TAG_PRIMARY_LIST:
	    ptr = list_val(gval);
	    val = *ptr;
	    if (IS_MOVED_CONS(val)) {
		*hp = ptr[1];	/* cdr slot holds the forwarded term */
	    }
	    break;
	case TAG_PRIMARY_HEADER:
	    if (header_is_thing(gval)) {
		hp += thing_arityval(gval);	/* skip raw thing words */
	    }
	    break;
	}
    }
    /* Finally redirect the caller's root references. */
    for (i=0; i<nrefs; ++i) {
	refs[i] = follow_moved(refs[i]);
    }
}
/*
 * Release all resources held by oerror and mark it as cleaned up.
 * Must not be called twice in a row without an intervening init, and
 * must not be called on a mempool-stored (boxed) error.
 */
void
mono_error_cleanup (MonoError *oerror)
{
	MonoErrorInternal *error = (MonoErrorInternal*)oerror;
	short int orig_error_code = error->error_code;
	gboolean free_strings = error->flags & MONO_ERROR_FREE_STRINGS;
	gboolean has_instance_handle = is_managed_exception (error);

	/* Two cleanups in a row without an intervening init. */
	g_assert (orig_error_code != MONO_ERROR_CLEANUP_CALLED_SENTINEL);

	/* Mempool stored error shouldn't be cleaned up */
	g_assert (!is_boxed (error));

	/* Mark it as cleaned up. */
	error->error_code = MONO_ERROR_CLEANUP_CALLED_SENTINEL;
	error->flags = 0;

	if (orig_error_code == MONO_ERROR_NONE)
		return;

	if (has_instance_handle)
		mono_gchandle_free (error->exn.instance_handle);

	/* the full messages are always owned by the error object */
	g_free ((char*)error->full_message);
	g_free ((char*)error->full_message_with_fields);
	error->full_message = NULL;
	error->full_message_with_fields = NULL;
	if (!free_strings) //no memory was allocated
		return;

	g_free ((char*)error->type_name);
	g_free ((char*)error->assembly_name);
	g_free ((char*)error->member_name);
	g_free ((char*)error->exception_name_space);
	g_free ((char*)error->exception_name);
	g_free ((char*)error->first_argument);
	error->type_name = error->assembly_name = error->member_name = error->exception_name_space = error->exception_name = error->first_argument = NULL;
	error->exn.klass = NULL;
}
void erts_init_persistent_dumping(void) { HashTable* hash_table = (HashTable *) erts_atomic_read_nob(&the_hash_table); ErtsLiteralArea** area_p; Uint i; /* * Overwrite the array of Eterms in the current hash table * with pointers to literal areas. */ erts_persistent_areas = (ErtsLiteralArea **) hash_table->term; erts_num_persistent_areas = hash_table->num_entries; area_p = erts_persistent_areas; for (i = 0; i < hash_table->allocated; i++) { Eterm term = hash_table->term[i]; if (is_boxed(term)) { *area_p++ = term_to_area(term); } } }
/*
 * hipe_bifs:show_term/1 - debugging aid that prints the raw words of
 * a term: first the tagged word itself, then (for lists and boxed
 * terms) each word of the pointed-to heap object, and finally the
 * term formatted with %T. Always returns 'true'.
 */
BIF_RETTYPE hipe_bifs_show_term_1(BIF_ALIST_1)
{
    Eterm obj = BIF_ARG_1;

    printf("0x%0*lx\r\n", 2*(int)sizeof(long), obj);
    do {
	Eterm *objp;
	int i, ary;

	if (is_list(obj)) {
	    objp = list_val(obj);
	    ary = 2;	/* a cons cell is two words */
	} else if (is_boxed(obj)) {
	    Eterm header;

	    objp = boxed_val(obj);
	    header = objp[0];
	    if (is_thing(header))
		ary = thing_arityval(header);
	    else if (is_arity_value(header))
		ary = arityval(header);
	    else {
		printf("bad header %#lx\r\n", header);
		break;
	    }
	    ary += 1;	/* include the header word itself */
	} else
	    break;	/* immediates have no heap part */
	for (i = 0; i < ary; ++i)
	    printf("0x%0*lx: 0x%0*lx\r\n",
		   2*(int)sizeof(long), (unsigned long)&objp[i],
		   2*(int)sizeof(long), objp[i]);
    } while (0);
    erts_printf("%T", obj);
    printf("\r\n");
    BIF_RET(am_true);
}
// Print a one-cell description of a heap (or stack) word for the heap
// dumper. `stack` selects the stack-frame interpretation of header
// words. Returns the number of words the item occupies: 1, or
// boxed_arity+1 for an atom object header.
uint64_t alm_dump_heap_item(ATERM t, int stack)
{
	if (is_num(t))
		printf("%18.1lf ", num_val(t));
	else if (is_nil(t))
		printf(" [] ");
	else if (is_cons(t)) {
		printf("<cons/0x%.3llX> ", (uint64_t)cons_ptr(t));
	}
	else if (is_boxed(t)) {
		ATERM *box = boxed_ptr(t);
		if (is_atom(*box))
			printf("<atom/0x%.3llX> ", (uint64_t)box);
	}
	else if (is_header(t)) {
		if (stack)
			printf("<fram/0x%.3llX> ",(uint64_t)frame_val(t));
		else if (is_atom(t)) {
			// NOTE(review): is_atom() applied to a header word
			// looks suspicious - presumably this detects an atom
			// object header and skips its body words; confirm
			printf("<atom/0x%.3llX> ",boxed_arity(t));
			return boxed_arity(t)+1;
		}
		else
			printf("<frwd/0x%.3llX> ",(uint64_t)frame_val(t));
	}
	return 1;
}
/*
 * put/2: insert or replace {id,value} in the process dictionary of p,
 * creating the dictionary on first use. Returns the previous value of
 * the key, or am_undefined if it was not present. Heap space is sized
 * up front and a GC is forced if it does not fit, with id/value/old
 * passed as roots so they survive the collection.
 */
static Eterm pd_hash_put(Process *p, Eterm id, Eterm value)
{
    unsigned int hval;
    Eterm *hp;
    Eterm tpl;
    Eterm old;
    Eterm tmp;
    int needed;
    int i = 0;
#ifdef DEBUG
    Eterm *hp_limit;
#endif

    if (p->dictionary == NULL) {
	/* Create it */
	array_put(&(p->dictionary), INITIAL_SIZE - 1, NIL);
	p->dictionary->homeSize = INITIAL_SIZE;
    }
    hval = pd_hash_value(p->dictionary, id);
    old = ARRAY_GET(p->dictionary, hval);

    /*
     * Calculate the number of heap words needed and garbage
     * collect if necessary. (Might be a slight overestimation.)
     */
    needed = 3;			/* {Key,Value} tuple */
    if (is_boxed(old)) {
	/*
	 * We don't want to compare keys twice, so we'll always
	 * reserve the space for two CONS cells.
	 */
	needed += 2+2;
    } else if (is_list(old)) {
	i = 0;
	for (tmp = old; tmp != NIL && !EQ(tuple_val(TCAR(tmp))[1], id); tmp = TCDR(tmp)) {
	    ++i;
	}
	if (is_nil(tmp)) {
	    i = -1;		/* key not in the collision list */
	    needed += 2;
	} else {
	    needed += 2*(i+1);	/* rebuild the list up to the match */
	}
    }
    if (HeapWordsLeft(p) < needed) {
	/* GC with id/value/old as roots so they are updated in place */
	Eterm root[3];
	root[0] = id;
	root[1] = value;
	root[2] = old;
	BUMP_REDS(p, erts_garbage_collect(p, needed, root, 3));
	id = root[0];
	value = root[1];
	old = root[2];
    }
#ifdef DEBUG
    hp_limit = p->htop + needed;
#endif

    /*
     * Create the {Key,Value} tuple.
     */
    hp = HeapOnlyAlloc(p, 3);
    tpl = TUPLE2(hp, id, value);

    /*
     * Update the dictionary.
     */
    if (is_nil(old)) {
	array_put(&(p->dictionary), hval, tpl);
	++(p->dictionary->numElements);
    } else if (is_boxed(old)) {
	ASSERT(is_tuple(old));
	if (EQ(tuple_val(old)[1],id)) {
	    /* same key: replace the tuple in place */
	    array_put(&(p->dictionary), hval, tpl);
	    return tuple_val(old)[2];
	} else {
	    /* collision: convert the slot into a two-element list */
	    hp = HeapOnlyAlloc(p, 4);
	    tmp = CONS(hp, old, NIL);
	    hp += 2;
	    ++(p->dictionary->numElements);
	    array_put(&(p->dictionary), hval, CONS(hp, tpl, tmp));
	    hp += 2;
	    ASSERT(hp <= hp_limit);
	}
    } else if (is_list(old)) {
	if (i == -1) {
	    /*
	     * New key. Simply prepend the tuple to the beginning of the list.
	     */
	    hp = HeapOnlyAlloc(p, 2);
	    array_put(&(p->dictionary), hval, CONS(hp, tpl, old));
	    hp += 2;
	    ASSERT(hp <= hp_limit);
	    ++(p->dictionary->numElements);
	} else {
	    /*
	     * i = Number of CDRs to skip to reach the changed element in the list.
	     *
	     * Replace old value in list. To avoid pointers from the old generation
	     * to the new, we must rebuild the list from the beginning up to and
	     * including the changed element.
	     */
	    Eterm nlist;
	    int j;

	    hp = HeapOnlyAlloc(p, (i+1)*2);

	    /* Find the list element to change. */
	    for (j = 0, nlist = old; j < i; j++, nlist = TCDR(nlist)) {
		;
	    }
	    ASSERT(EQ(tuple_val(TCAR(nlist))[1], id));
	    nlist = TCDR(nlist); /* Unchanged part of list. */

	    /* Rebuild list before the updated element. */
	    for (tmp = old; i-- > 0; tmp = TCDR(tmp)) {
		nlist = CONS(hp, TCAR(tmp), nlist);
		hp += 2;
	    }
	    ASSERT(EQ(tuple_val(TCAR(tmp))[1], id));

	    /* Put the updated element first in the new list. */
	    nlist = CONS(hp, tpl, nlist);
	    hp += 2;
	    ASSERT(hp <= hp_limit);
	    array_put(&(p->dictionary), hval, nlist);
	    return tuple_val(TCAR(tmp))[2];
	}
    } else {
	/* neither NIL, tuple, nor list: the dictionary is corrupt */
#ifdef DEBUG
	erts_fprintf(stderr,
		     "Process dictionary for process %T is broken, trying to "
		     "display term found in line %d:\n"
		     "%T\n", p->common.id, __LINE__, old);
#endif
	erl_exit(1, "Damaged process dictionary found during put/2.");
    }
    /* grow the table when the load factor reaches 1 */
    if (HASH_RANGE(p->dictionary) <= p->dictionary->numElements) {
	grow(p);
    }
    return am_undefined;
}
// Deep-copy `num` terms from the array `terms` into the heap area at
// htop, overwriting each source slot with its copy. Uses an explicit
// deferred-copy stack instead of C recursion for nested terms.
// Proc_bins encountered are linked into *pbs (their refcounts are
// accounted by proc_bin_link). Returns the advanced htop.
static uint32_t *terms_copy(stack_t *stack, term_t *terms, int num, uint32_t *htop, t_proc_bin_t **pbs)
{
next_term:
	if (num == 0)
	{
		// current batch done: resume a deferred batch, if any
		if (stack_is_empty(stack))
			return htop;
		ets_deferred_copy_t *pop = (ets_deferred_copy_t *)stack_pop(stack);
		terms = pop->terms;
		num = pop->num;
		goto next_term;
	}

	term_t t = terms[0];
	if (is_immed(t))
	{
		// immediates need no copying
		terms++;
		num--;
		goto next_term;
	}

	term_t copy = noval;
	if (is_cons(t))
	{
		// copy the cons spine iteratively; non-immediate heads (and a
		// non-cons tail) are deferred to the stack
		term_t *cons = peel_cons(t);
		copy = tag_cons(htop);
		term_t *new_cons = htop;
		do {
			new_cons[0] = cons[0];
			new_cons[1] = cons[1];
			htop += 2;
			if (!is_immed(new_cons[0]))
				DEFER_COPY(stack, new_cons, 1);
			term_t tail = new_cons[1];
			if (is_immed(tail))
				break;
			if (!is_cons(tail))
			{
				DEFER_COPY(stack, new_cons +1, 1);
				break;
			}
			new_cons[1] = tag_cons(htop);
			cons = peel_cons(tail);
			new_cons = htop;
		} while (1);
	}
	else if (is_tuple(t))
	{
		uint32_t *p = peel_tuple(t);
		int arity = *p++;
		if (arity == 0)
			copy = ZERO_TUPLE;
		else
		{
			// shallow-copy the elements, then defer fixing them up
			copy = tag_tuple(htop);
			*htop++ = arity;
			memcpy(htop, p, arity *sizeof(term_t));
			DEFER_COPY(stack, htop, arity);
			htop += arity;
		}
	}
	else
	{
		assert(is_boxed(t));
		uint32_t *tdata = peel_boxed(t);
		copy = tag_boxed(htop);
		switch (boxed_tag(tdata))
		{
		case SUBTAG_POS_BIGNUM:
		case SUBTAG_NEG_BIGNUM:
		{
			bignum_t *bn = (bignum_t *)tdata;
			int wsize = WSIZE(bignum_t) + (bn->used*sizeof(uint16_t) +3) /4;
			memcpy(htop, tdata, wsize *sizeof(uint32_t));
			htop += wsize;
			break;
		}
		case SUBTAG_FLOAT:
			EASY_COPY(t_float_t);
			break;
		case SUBTAG_FUN:
		{
			// frozen (free) variables are terms and must be deep-copied
			t_fun_t *new_fun = (t_fun_t *)htop;
			int num_free = fun_num_free(tdata);
			int wsize = WSIZE(t_fun_t) + num_free;
			memcpy(new_fun, tdata, wsize *sizeof(uint32_t));
			DEFER_COPY(stack, new_fun->frozen, num_free);
			htop += wsize;
			break;
		}
		case SUBTAG_EXPORT:
			EASY_COPY(t_export_t);
			break;
		case SUBTAG_PID:
			EASY_COPY(t_long_pid_t);
			break;
		case SUBTAG_OID:
			EASY_COPY(t_long_oid_t);
			break;
		case SUBTAG_REF:
			EASY_COPY(t_long_ref_t);
			break;
		case SUBTAG_PROC_BIN:
		{
			t_proc_bin_t *pb = (t_proc_bin_t *)htop;
			memcpy(htop, tdata, sizeof(t_proc_bin_t));
			// 1+ bin node refc
			proc_bin_link(pbs, pb, 0);
			htop += WSIZE(t_proc_bin_t);
			break;
		}
		case SUBTAG_HEAP_BIN:
		{
			t_heap_bin_t *hb = (t_heap_bin_t *)tdata;
			int wsize = WSIZE(t_heap_bin_t) + (hb->byte_size +3) /4;
			memcpy(htop, tdata, wsize*sizeof(uint32_t));
			htop += wsize;
			break;
		}
		case SUBTAG_MATCH_CTX:
		{
			// the parent binary is a term and must be deep-copied
			t_match_ctx_t *new_mc = (t_match_ctx_t *)htop;
			memcpy(new_mc, tdata, sizeof(t_match_ctx_t));
			DEFER_COPY(stack, &new_mc->parent, 1);
			htop += WSIZE(t_match_ctx_t);
			break;
		}
		default: // SUBTAG_SUB_BIN
		{
			assert(boxed_tag(tdata) == SUBTAG_SUB_BIN);
			t_sub_bin_t *new_sb = (t_sub_bin_t *)htop;
			memcpy(new_sb, tdata, sizeof(t_sub_bin_t));
			DEFER_COPY(stack, &new_sb->parent, 1);
			htop += WSIZE(t_sub_bin_t);
			break;
		}
		}
	}

	assert(copy != noval);
	// overwrite the source slot with the copy
	*terms++ = copy;
	num--;
	goto next_term;
}