/*
 * Read both the monotonic clock (mid) and the system/wall clock (sid)
 * via clock_gettime() and store the results as 64-bit integer values.
 * Either clock failing to read is a fatal error that aborts the
 * emulator.
 */
static ERTS_INLINE void
posix_clock_gettime_times(clockid_t mid, char *mname,
                          ErtsMonotonicTime *mtimep,
                          clockid_t sid, char *sname,
                          ErtsSystemTime *stimep)
{
    struct timespec mts, sts;
    int mres, sres, merr, serr;

    /* Both clocks are sampled before any error handling so the two
       readings lie as close together in time as possible. */
    mres = clock_gettime(mid, &mts);
    merr = errno; /* save errno before the second call can clobber it */
    sres = clock_gettime(sid, &sts);
    serr = errno;

    if (mres != 0) {
        char *errstr = merr ? strerror(merr) : "unknown";
        erl_exit(ERTS_ABORT_EXIT,
                 "clock_gettime(%s, _) failed: %s (%d)\n",
                 mname, errstr, merr);
    }
    if (sres != 0) {
        char *errstr = serr ? strerror(serr) : "unknown";
        erl_exit(ERTS_ABORT_EXIT,
                 "clock_gettime(%s, _) failed: %s (%d)\n",
                 sname, errstr, serr);
    }

    *mtimep = (ErtsMonotonicTime) ERTS_TimeSpec2Sint64(&mts);
    *stimep = (ErtsSystemTime) ERTS_TimeSpec2Sint64(&sts);
}
/*
 * Sanity-check a range of heap words [start, end).  Thing headers are
 * skipped together with their payload; every other word must pass
 * verify_eterm(), otherwise the emulator is taken down.  In DEBUG
 * builds a word still equal to DEBUG_BAD_WORD (uninitialized
 * HAlloc'ed memory) is also fatal.
 */
void erts_check_memory(Process *p, Eterm *start, Eterm *end)
{
    Eterm *pos = start;

    while (pos < end) {
        Eterm hval = *pos++;

#ifdef DEBUG
        if (hval == DEBUG_BAD_WORD) {
            print_untagged_memory(start, end);
            erl_exit(1, "Uninitialized HAlloc'ed memory found @ 0x%0*lx!\n",
                     PTR_SIZE,(unsigned long)(pos - 1));
        }
#endif

        if (is_thing(hval)) {
            /* Skip the thing's payload words (binary, bignum, ...). */
            pos += thing_arityval(hval);
            continue;
        }

        if (verify_eterm(p,hval))
            continue;

        erl_exit(1, "Wild pointer found @ 0x%0*lx!\n",
                 PTR_SIZE,(unsigned long)(pos - 1));
    }
}
static void load_preloaded(void) { int i; Eterm res; Preload* preload_p; Eterm module_name; byte* code; char* name; int length; if ((preload_p = sys_preloaded()) == NULL) { return; } i = 0; while ((name = preload_p[i].name) != NULL) { length = preload_p[i].size; module_name = am_atom_put(name, sys_strlen(name)); if ((code = sys_preload_begin(&preload_p[i])) == 0) erl_exit(1, "Failed to find preloaded code for module %s\n", name); res = erts_preload_module(NULL, 0, NIL, &module_name, code, length); sys_preload_end(&preload_p[i]); if (res != NIL) erl_exit(1,"Failed loading preloaded module %s (%T)\n", name, res); i++; } }
/*
 * Verify the integrity of process p's stack: the stack pointer must
 * lie within [htop, heap + heap_sz], and every list/boxed term on the
 * stack must point into the process heap or one of its heap fragments
 * (mbufs).  Any violation terminates the emulator.
 */
void erts_check_stack(Process *p)
{
    Eterm *elemp;
    Eterm *stack_start = p->heap + p->heap_sz; /* highest stack address */
    Eterm *stack_end = p->htop;                /* stack may grow down to htop */

    if (p->stop > stack_start)
        erl_exit(1, "<%lu.%lu.%lu>: Stack underflow\n",
                 internal_pid_channel_no(p->common.id),
                 internal_pid_number(p->common.id),
                 internal_pid_serial(p->common.id));

    if (p->stop < stack_end)
        erl_exit(1, "<%lu.%lu.%lu>: Stack overflow\n",
                 internal_pid_channel_no(p->common.id),
                 internal_pid_number(p->common.id),
                 internal_pid_serial(p->common.id));

    for (elemp = p->stop; elemp < stack_start; elemp++) {
        int in_mbuf = 0;
        Eterm *ptr;
        ErlHeapFragment* mbuf;

        switch (primary_tag(*elemp)) {
        case TAG_PRIMARY_LIST: ptr = list_val(*elemp); break;
        case TAG_PRIMARY_BOXED: ptr = boxed_val(*elemp); break;
        default: /* Immediate or cp */ continue;
        }

        if (IN_HEAP(p, ptr))
            continue;

        /* Not on the heap proper; accept pointers into heap fragments. */
        for (mbuf = p->mbuf; mbuf; mbuf = mbuf->next)
            if (WITHIN(ptr, &mbuf->mem[0], &mbuf->mem[0] + mbuf->used_size)) {
                in_mbuf = 1;
                break;
            }
        if (in_mbuf)
            continue;

        erl_exit(1, "<%lu.%lu.%lu>: Wild stack pointer\n",
                 internal_pid_channel_no(p->common.id),
                 internal_pid_number(p->common.id),
                 internal_pid_serial(p->common.id));
    }
}
/*
 * Report the options of a best-fit allocator instance, either printed
 * via *print_to_p or built as an Erlang term list (when hpp/szp are
 * given, erts_bld-style two-pass size/build).
 *
 * Fix: removed a stray empty statement (double ';') after erl_exit().
 */
static Eterm
info_options(Allctr_t *allctr,
             char *prefix,
             int *print_to_p,
             void *print_to_arg,
             Uint **hpp,
             Uint *szp)
{
    BFAllctr_t *bfallctr = (BFAllctr_t *) allctr;
    Eterm res = THE_NON_VALUE;

    if (print_to_p) {
        erts_print(*print_to_p, print_to_arg, "%sas: %s\n", prefix,
                   bfallctr->address_order ? "aobf" : "bf");
    }

    if (hpp || szp) {
        if (!atoms_initialized)
            erl_exit(1, "%s:%d: Internal error: Atoms not initialized",
                     __FILE__, __LINE__);
        res = NIL;
        add_2tup(hpp, szp, &res, am.as,
                 bfallctr->address_order ? am.aobf : am.bf);
    }
    return res;
}
/*
 * Insert a new monitor (type/ref/pid/name) into the search tree rooted
 * at *root, keyed on ref.  The path taken down the tree is recorded in
 * tstack (visited node links) and dstack (directions) so that
 * insertion_rotation() can rebalance the tree afterwards.  Inserting a
 * ref that is already present is a fatal error.
 */
void erts_add_monitor(ErtsMonitor **root, Uint type, Eterm ref, Eterm pid, Eterm name)
{
    void *tstack[STACK_NEED];  /* node links visited on the way down */
    int tpos = 0;
    int dstack[STACK_NEED+1];  /* direction taken at each visited node */
    int dpos = 1;
    int state = 0;             /* set to 1 when a new node was created */
    ErtsMonitor **this = root;
    Sint c;

    dstack[0] = DIR_END;
    for (;;) {
        if (!*this) { /* Found our place */
            state = 1;
            *this = create_monitor(type,ref,pid,name);
            break;
        } else if ((c = CMP_MON_REF(ref,(*this)->ref)) < 0) {
            /* go left */
            dstack[dpos++] = DIR_LEFT;
            tstack[tpos++] = this;
            this = &((*this)->left);
        } else if (c > 0) { /* go right */
            dstack[dpos++] = DIR_RIGHT;
            tstack[tpos++] = this;
            this = &((*this)->right);
        } else { /* Equal key is an error for monitors */
            erl_exit(1,"Insertion of already present monitor!");
            break;
        }
    }
    insertion_rotation(dstack, dpos, tstack, tpos, state);
}
/*
 * Debug consistency check of a process dictionary: every non-nil slot
 * must hold either a 2-tuple or a (collision) list of 2-tuples.  The
 * element count is verified against pd->numElements.
 *
 * Fix: the erl_exit() format used %08x while the argument is cast to
 * unsigned long; a mismatched length modifier in a varargs call is
 * undefined behavior on LP64 platforms.  Use %08lx instead.
 */
static void pd_check(ProcDict *pd)
{
    unsigned int i;
    Uint num;

    if (pd == NULL)
        return;

    ASSERT(pd->size >= pd->used);
    ASSERT(HASH_RANGE(pd) <= MAX_HASH);

    for (i = 0, num = 0; i < pd->used; ++i) {
        Eterm t = pd->data[i];
        if (is_nil(t)) {
            continue;
        } else if (is_tuple(t)) {
            ++num;
            ASSERT(arityval(*tuple_val(t)) == 2);
            continue;
        } else if (is_list(t)) {
            /* Count every element of the collision chain. */
            while (t != NIL) {
                ++num;
                ASSERT(is_tuple(TCAR(t)));
                ASSERT(arityval(*(tuple_val(TCAR(t)))) == 2);
                t = TCDR(t);
            }
            continue;
        } else {
            erl_exit(1,
                     "Found tag 0x%08lx in process dictionary at position %d",
                     (unsigned long) t, (int) i);
        }
    }
    ASSERT(num == pd->numElements);
    ASSERT(pd->splitPosition <= pd->homeSize);
}
/*
 * Build the URI part of a decoded HTTP request line as an Erlang term:
 *   '*'                                    for URI_STAR
 *   {abs_path, S1}                         for URI_ABS_PATH
 *   {absoluteURI, http|https, S1, Port|undefined, S2}
 *                                          for URI_HTTP / URI_HTTPS
 *   S1                                     for URI_STRING
 *   {scheme, S1, S2}                       for URI_SCHEME
 * Any other uri->type is a fatal internal error.
 */
static Eterm http_bld_uri(struct packet_callback_args* pca,
                          Eterm** hpp, Uint* szp, const PacketHttpURI* uri)
{
    Eterm s1, s2;
    if (uri->type == URI_STAR) {
        return am_Times; /* '*' */
    }

    s1 = http_bld_string(pca, hpp, szp, uri->s1_ptr, uri->s1_len);

    switch (uri->type) {
    case URI_ABS_PATH:
        return erts_bld_tuple(hpp, szp, 2, am_abs_path, s1);
    case URI_HTTP:
    case URI_HTTPS:
        s2 = http_bld_string(pca, hpp, szp, uri->s2_ptr, uri->s2_len);
        return erts_bld_tuple
            (hpp, szp, 5, am_absoluteURI,
             ((uri->type==URI_HTTP) ? am_http : am_https),
             s1,
             ((uri->port==0) ? am_undefined : make_small(uri->port)),
             s2);
    case URI_STRING:
        return s1;
    case URI_SCHEME:
        s2 = http_bld_string(pca, hpp, szp, uri->s2_ptr, uri->s2_len);
        return erts_bld_tuple(hpp, szp, 3, am_scheme, s1, s2);
    default:
        /* Unreachable for well-formed input; abort rather than
           return a bogus term. */
        erl_exit(1, "%s, line %d: type=%u\n", __FILE__, __LINE__, uri->type);
    }
}
/*
 * Report the options of a good-fit allocator instance (max block
 * search depth and strategy), either printed via *print_to_p or built
 * as an Erlang term list (when hpp/szp are given).
 *
 * Fix: removed a stray empty statement (double ';') after erl_exit().
 */
static Eterm
info_options(Allctr_t *allctr,
             char *prefix,
             int *print_to_p,
             void *print_to_arg,
             Uint **hpp,
             Uint *szp)
{
    GFAllctr_t *gfallctr = (GFAllctr_t *) allctr;
    Eterm res = THE_NON_VALUE;

    if (print_to_p) {
        erts_print(*print_to_p,
                   print_to_arg,
                   "%smbsd: %lu\n"
                   "%sas: gf\n",
                   prefix, gfallctr->max_blk_search,
                   prefix);
    }

    if (hpp || szp) {
        if (!atoms_initialized)
            erl_exit(1, "%s:%d: Internal error: Atoms not initialized",
                     __FILE__, __LINE__);
        res = NIL;
        add_2tup(hpp, szp, &res, am.as, am.gf);
        add_2tup(hpp, szp, &res,
                 am.mbsd,
                 bld_uint(hpp, szp, gfallctr->max_blk_search));
    }
    return res;
}
/*
** init a pre allocated or static hash structure
** and allocate buckets.
*/
Hash* hash_init(ErtsAlcType_t type, Hash* h, char* name, int size, HashFunctions fun)
{
    int table_ix = 0;
    int bytes;

    h->type = type;

    /* Round the requested size up to the nearest supported table size;
       the size table is terminated by -1. */
    while (h_size_table[table_ix] != -1 && h_size_table[table_ix] < size)
        table_ix++;
    if (h_size_table[table_ix] == -1)
        erl_exit(1, "panic: too large hash table size (%d)\n", size);

    size = h_size_table[table_ix];
    bytes = size * sizeof(HashBucket*);

    h->bucket = (HashBucket**) erts_alloc(h->type, bytes);
    sys_memzero(h->bucket, bytes);

    h->is_allocated = 0;  /* the Hash struct itself is owned by the caller */
    h->name = name;
    h->fun = fun;
    h->size = size;
    h->size20percent = h->size / 5;
    h->size80percent = (4 * h->size) / 5;
    h->ix = table_ix;
    h->used = 0;
    return h;
}
/*
 * Report the options of an a-fit allocator instance, either printed
 * via *print_to_p or built as an Erlang term list (when hpp/szp are
 * given).
 *
 * Fix: removed a stray empty statement (double ';') after erl_exit().
 */
static Eterm
info_options(Allctr_t *allctr,
             char *prefix,
             int *print_to_p,
             void *print_to_arg,
             Uint **hpp,
             Uint *szp)
{
    Eterm res = THE_NON_VALUE;

    if (print_to_p) {
        erts_print(*print_to_p, print_to_arg, "%sas: af\n", prefix);
    }

    if (hpp || szp) {
        if (!atoms_initialized)
            erl_exit(1, "%s:%d: Internal error: Atoms not initialized",
                     __FILE__, __LINE__);
        res = NIL;
        add_2tup(hpp, szp, &res, am.as, am.af);
    }
    return res;
}
/*
 * Look up `id` in process p's dictionary.  A hash slot holds either a
 * single {Key, Value} tuple or a list of such tuples (collision
 * chain).  Returns the stored value, or am_undefined when the key is
 * absent or there is no dictionary.  Any other slot content means the
 * dictionary is corrupted and is fatal.
 */
Eterm erts_pd_hash_get(Process *p, Eterm id)
{
    unsigned int hval;
    Eterm tmp;
    ProcDict *pd = p->dictionary;

    if (pd == NULL)
        return am_undefined;

    hval = pd_hash_value(pd, id);
    tmp = ARRAY_GET(pd, hval);
    if (is_boxed(tmp)) { /* Tuple */
        ASSERT(is_tuple(tmp));
        if (EQ(tuple_val(tmp)[1], id)) {
            return tuple_val(tmp)[2];
        }
    } else if (is_list(tmp)) {
        /* Walk the collision chain looking for a matching key. */
        for (; tmp != NIL && !EQ(tuple_val(TCAR(tmp))[1], id); tmp = TCDR(tmp)) {
            ;
        }
        if (tmp != NIL) {
            return tuple_val(TCAR(tmp))[2];
        }
    } else if (is_not_nil(tmp)) {
#ifdef DEBUG
        erts_fprintf(stderr,
                     "Process dictionary for process %T is broken, trying to "
                     "display term found in line %d:\n"
                     "%T\n", p->common.id, __LINE__, tmp);
#endif
        erl_exit(1, "Damaged process dictionary found during get/1.");
    }
    return am_undefined;
}
/*
 * Initialize the timer wheel: perform the system dependent time
 * initialization, validate (or record) the timer resolution, create
 * the wheel lock, allocate and clear the wheel slots, and reset the
 * wheel position/counters.
 */
void
erts_init_time(void)
{
    int i, itime;

    /* system dependent init; must be done before do_time_init()
       if timer thread is enabled */
    itime = erts_init_time_sup();
#ifdef TIW_ITIME_IS_CONSTANT
    if (itime != TIW_ITIME) {
        erl_exit(ERTS_ABORT_EXIT, "timer resolution mismatch %d != %d", itime, TIW_ITIME);
    }
#else
    tiw_itime = itime;
#endif

    erts_smp_mtx_init(&tiw_lock, "timer_wheel");

    /* Allocate the wheel and mark every slot empty. */
    tiw = (ErlTimer**) erts_alloc(ERTS_ALC_T_TIMER_WHEEL,
                                  TIW_SIZE * sizeof(ErlTimer*));
    for(i = 0; i < TIW_SIZE; i++)
        tiw[i] = NULL;
    do_time_init();
    tiw_pos = tiw_nto = 0;
    tiw_min_ptr = NULL;
    tiw_min = 0;
}
/*
 * Produce a time-of-day value derived from the high resolution timer
 * plus a slowly adjusted correction (hr_correction) that tracks drift
 * against the real sys_gettimeofday() clock, so that wall-clock steps
 * are smoothed out.  When tolerant timeofday is disabled, fall
 * straight through to sys_gettimeofday().
 */
static void get_tolerant_timeofday(SysTimeval *tv)
{
    SysHrTime diff_time, curr;

    if (erts_disable_tolerant_timeofday) {
        sys_gettimeofday(tv);
        return;
    }
    *tv = inittv;
    /* Microseconds elapsed since init, per the corrected hrtimer. */
    diff_time = ((curr = sys_gethrtime()) + hr_correction - hr_init_time) / 1000;

    if (curr < hr_init_time) {
        erl_exit(1,"Unexpected behaviour from operating system high "
                 "resolution timer");
    }

    /* Re-check the correction roughly once per second (in hrtimer time). */
    if ((curr - hr_last_correction_check) / 1000 > 1000000) {
        /* Check the correction need */
        SysHrTime tv_diff, diffdiff;
        SysTimeval tmp;
        int done = 0;

        sys_gettimeofday(&tmp);
        tv_diff = ((SysHrTime) tmp.tv_sec) * 1000000 + tmp.tv_usec;
        tv_diff -= ((SysHrTime) inittv.tv_sec) * 1000000 + inittv.tv_usec;
        diffdiff = diff_time - tv_diff;
        if (diffdiff > 10000) {
            /* Corrected hrtimer runs ahead of gettimeofday(): correct
               in one step if small enough, otherwise slew gradually. */
            SysHrTime corr = (curr - hr_last_time) / 100;
            if (corr / 1000 >= diffdiff) {
                ++done;
                hr_correction -= ((SysHrTime)diffdiff) * 1000;
            } else {
                hr_correction -= corr;
            }
            diff_time = (curr + hr_correction - hr_init_time) / 1000;
        } else if (diffdiff < -10000) {
            /* Corrected hrtimer lags behind gettimeofday(): mirror case. */
            SysHrTime corr = (curr - hr_last_time) / 100;
            if (corr / 1000 >= -diffdiff) {
                ++done;
                hr_correction -= ((SysHrTime)diffdiff) * 1000;
            } else {
                hr_correction += corr;
            }
            diff_time = (curr + hr_correction - hr_init_time) / 1000;
        } else {
            ++done;
        }
        if (done) {
            /* Fully corrected (or within tolerance): postpone next check. */
            hr_last_correction_check = curr;
        }
    }
    tv->tv_sec += (int) (diff_time / ((SysHrTime) 1000000));
    tv->tv_usec += (int) (diff_time % ((SysHrTime) 1000000));
    if (tv->tv_usec >= 1000000) {
        tv->tv_usec -= 1000000;
        tv->tv_sec += 1;
    }
    hr_last_time = curr;
}
/*
 * Destroy a thread queue.  The queue must have been created (it must
 * own a queue block); destroying an uncreated queue aborts the
 * emulator.  Returns the clean state from finalization.
 */
ErtsThrQCleanState_t
erts_thr_q_destroy(ErtsThrQ_t *q)
{
    if (q->q.blk == NULL) {
        erl_exit(ERTS_ABORT_EXIT,
                 "Trying to destroy not created thread queue\n");
    }
    return erts_thr_q_finalize(q);
}
/*
 * Allocation callback for the export tables.  For a template entry
 * (slot.index == -1) a fresh export blob is allocated and initialized
 * so that calls trap into the error handler until real code is
 * loaded; one table entry per code index is prepared in the blob.
 * For an entry that already exists in another table, a free entry
 * slot in its existing blob is reused.  Not available on the slave
 * emulator.
 */
static struct export_entry*
export_alloc(struct export_entry* tmpl_e)
{
#ifndef ERTS_SLAVE
    struct export_blob* blob;
    unsigned ix;

    if (tmpl_e->slot.index == -1) {  /* Template, allocate blob */
        Export* tmpl = tmpl_e->ep;
        Export* obj;

        blob = (struct export_blob*) erts_alloc(ERTS_ALC_T_EXPORT, sizeof(*blob));
        erts_smp_atomic_add_nob(&total_entries_bytes, sizeof(*blob));
        obj = &blob->exp;
        obj->fake_op_func_info_for_hipe[0] = 0;
        obj->fake_op_func_info_for_hipe[1] = 0;
        obj->code[0] = tmpl->code[0];
        obj->code[1] = tmpl->code[1];
        obj->code[2] = tmpl->code[2];
        /* Until code is loaded, calls go to the error handler. */
        obj->code[3] = (BeamInstr) em_call_error_handler;
        obj->code[4] = 0;

#ifdef ERTS_SLAVE_EMU_ENABLED
        obj->slave_fake_op_func_info_for_hipe[0] = 0;
        obj->slave_fake_op_func_info_for_hipe[1] = 0;
        obj->slave_code[0] = tmpl->code[0];
        obj->slave_code[1] = tmpl->code[1];
        obj->slave_code[2] = tmpl->code[2];
        /* If the slave is not online yet, we don't know its opcodes.
         * slave_code[3] will be touched on all export entries once it comes
         * online */
        if (slave_initialised)
            obj->slave_code[3] = (BeamInstr) SlaveOp(op_call_error_handler);
        obj->slave_code[4] = 0;
#endif

        for (ix=0; ix<ERTS_NUM_CODE_IX; ix++) {
            obj->addressv[ix] = obj->code+3;
#ifdef ERTS_SLAVE_EMU_ENABLED
            obj->slave_addressv[ix] = obj->slave_code+3;
#endif
            blob->entryv[ix].slot.index = -1;
            blob->entryv[ix].ep = &blob->exp;
        }
        ix = 0;
    }
    else { /* Existing entry in another table, use free entry in blob */
        blob = entry_to_blob(tmpl_e);
        /* Find the first unused entry (slot.index still -1 ... < 0). */
        for (ix = 0; blob->entryv[ix].slot.index >= 0; ix++) {
            ASSERT(ix < ERTS_NUM_CODE_IX);
        }
    }
    return &blob->entryv[ix];
#else
    erl_exit(1, "Cannot alloc export entry from slave");
#endif
}
/*
 * Look up the Mach clock services used for monotonic and wall time
 * (compiled in via the OS_*_USING_MACH_CLOCK_GET_TIME defines) and
 * register mach_clocks_fini() to run at exit.  Any failure here is
 * fatal.
 */
static void
mach_clocks_init(void)
{
    kern_return_t kret;
    host_name_port_t host;
    clock_id_t id;
    clock_serv_t *clck_srv_p;
    char *name;

    host = internal_state.r.o.mach.host = mach_host_self();

#if defined(OS_MONOTONIC_TIME_USING_MACH_CLOCK_GET_TIME) \
    || defined(SYS_HRTIME_USING_MACH_CLOCK_GET_TIME)
    id = internal_state.r.o.mach.clock.monotonic.id = MONOTONIC_CLOCK_ID;
    name = internal_state.r.o.mach.clock.monotonic.name = MONOTONIC_CLOCK_ID_STR;
    clck_srv_p = &internal_state.r.o.mach.clock.monotonic.srv;
    kret = host_get_clock_service(host, id, clck_srv_p);
    if (kret != KERN_SUCCESS) {
        erl_exit(ERTS_ABORT_EXIT,
                 "host_get_clock_service(_, %s, _) failed\n",
                 name);
    }
#endif

#if defined(OS_SYSTEM_TIME_USING_MACH_CLOCK_GET_TIME)
    id = internal_state.r.o.mach.clock.wall.id = WALL_CLOCK_ID;
    name = internal_state.r.o.mach.clock.wall.name = WALL_CLOCK_ID_STR;
    clck_srv_p = &internal_state.r.o.mach.clock.wall.srv;
    kret = host_get_clock_service(host, id, clck_srv_p);
    if (kret != KERN_SUCCESS) {
        erl_exit(ERTS_ABORT_EXIT,
                 "host_get_clock_service(_, %s, _) failed\n",
                 name);
    }
#endif

    /* Make sure the clock services are released at emulator exit. */
    if (atexit(mach_clocks_fini) != 0) {
        int err = errno;
        char *errstr = err ? strerror(err) : "unknown";
        erl_exit(ERTS_ABORT_EXIT,
                 "Failed to register mach_clocks_fini() "
                 "for call at exit: %s (%d)\n",
                 errstr, err);
    }
}
/*
 * Break-handler utility: iterate over all processes (highest index
 * first), print each one, and interactively ask whether to (k)ill it,
 * go to the (n)ext one, or (r)eturn.  Only processes not in any
 * active/running/exiting state may be killed this way.
 */
static void
process_killer(void)
{
    int i, j, max = erts_ptab_max(&erts_proc);
    Process* rp;

    erts_printf("\n\nProcess Information\n\n");
    erts_printf("--------------------------------------------------\n");
    for (i = max-1; i >= 0; i--) {
        rp = erts_pix2proc(i);
        if (rp && rp->i != ENULL) {
            int br;
            print_process_info(ERTS_PRINT_STDOUT, NULL, rp);
            erts_printf("(k)ill (n)ext (r)eturn:\n");
            while(1) {
                /* EOF or read error on the console: shut down. */
                if ((j = sys_get_key(0)) <= 0)
                    erl_exit(0, "");
                switch(j) {
                case 'k': {
                    ErtsProcLocks rp_locks = ERTS_PROC_LOCKS_XSIG_SEND;
                    erts_aint32_t state;
                    erts_proc_inc_refc(rp);
                    erts_smp_proc_lock(rp, rp_locks);
                    state = erts_smp_atomic32_read_acqb(&rp->state);
                    if (state & (ERTS_PSFLG_FREE
                                 | ERTS_PSFLG_EXITING
                                 | ERTS_PSFLG_ACTIVE
                                 | ERTS_PSFLG_ACTIVE_SYS
                                 | ERTS_PSFLG_IN_RUNQ
                                 | ERTS_PSFLG_RUNNING
                                 | ERTS_PSFLG_RUNNING_SYS)) {
                        erts_printf("Can only kill WAITING processes this way\n");
                    } else {
                        (void) erts_send_exit_signal(NULL,
                                                     NIL,
                                                     rp,
                                                     &rp_locks,
                                                     am_kill,
                                                     NIL,
                                                     NULL,
                                                     0);
                    }
                    erts_smp_proc_unlock(rp, rp_locks);
                    erts_proc_dec_refc(rp);
                }
                /* fall through: after a kill attempt, advance to the
                   next process just like 'n' */
                case 'n': br = 1; break;
                case 'r': return;
                default: return;
                }
                if (br == 1) break;
            }
        }
    }
}
/*
 * os:putenv/2 BIF: set environment variable `key` to `value`.  Both
 * are Erlang strings (lists of character codes); they are flattened
 * into a single "KEY=VALUE" C string handed to erts_sys_putenv().
 * A nil value yields an empty value string.  Fails with badarg when
 * key or value is not a proper string or when erts_sys_putenv()
 * fails.
 */
Eterm
os_putenv_2(Process* p, Eterm key, Eterm value)
{
    char def_buf[1024];  /* stack buffer used when the string fits */
    char *buf = NULL;
    int sep_ix, i, key_len, value_len, tot_len;

    key_len = is_string(key);  /* is_string() yields the length, 0 on failure */
    if (!key_len) {
    error:
        /* buf is NULL until a buffer was chosen, so this is safe to
           reach from any point. */
        if (buf)
            erts_free(ERTS_ALC_T_TMP, (void *) buf);
        BIF_ERROR(p, BADARG);
    }
    if (is_nil(value))
        value_len = 0;
    else {
        value_len = is_string(value);
        if (!value_len)
            goto error;
    }
    tot_len = key_len + 1 + value_len + 1;  /* KEY '=' VALUE '\0' */
    if (tot_len <= sizeof(def_buf))
        buf = &def_buf[0];
    else
        buf = erts_alloc(ERTS_ALC_T_TMP, tot_len);

    i = intlist_to_buf(key, buf, key_len);
    if (i != key_len)
        erl_exit(1, "%s:%d: Internal error\n", __FILE__, __LINE__);
    sep_ix = i;  /* index of the '=' separator, passed to erts_sys_putenv() */
    buf[i++] = '=';
    if (is_not_nil(value))
        i += intlist_to_buf(value, &buf[i], value_len);
    if (i != key_len + 1 + value_len)
        erl_exit(1, "%s:%d: Internal error\n", __FILE__, __LINE__);
    buf[i] = '\0';
    if (erts_sys_putenv(buf, sep_ix)) {
        goto error;
    }
    if (buf != &def_buf[0])
        erts_free(ERTS_ALC_T_TMP, (void *) buf);
    BIF_RET(am_true);
}
/*
 * os:getenv/1 BIF: look up environment variable `key` and return its
 * value as a string, or false when not set.  A single stack buffer is
 * shared between key and (small) values; larger values are fetched
 * into a temporary allocation, retried while erts_sys_getenv()
 * reports the buffer as too small (the variable may change between
 * calls).
 */
Eterm
os_getenv_1(Process* p, Eterm key)
{
    Eterm str;
    int len, res;
    char *key_str, *val;
    char buf[1024];
    size_t val_size = sizeof(buf);

    len = is_string(key);  /* is_string() yields the length, 0 on failure */
    if (!len) {
        BIF_ERROR(p, BADARG);
    }
    /* Leave at least one byte in buf for value */
    key_str = len < sizeof(buf)-2 ? &buf[0] : erts_alloc(ERTS_ALC_T_TMP, len+1);
    if (intlist_to_buf(key, key_str, len) != len)
        erl_exit(1, "%s:%d: Internal error\n", __FILE__, __LINE__);
    key_str[len] = '\0';
    if (key_str != &buf[0])
        val = &buf[0];  /* key got its own allocation: use all of buf for value */
    else {
        /* Key shares buf: the value goes after the key's terminator. */
        val_size -= len + 1;
        val = &buf[len + 1];
    }
    res = erts_sys_getenv(key_str, val, &val_size);
    if (res < 0) {  /* variable not set */
    no_var:
        str = am_false;
    } else {
        Eterm* hp;
        if (res > 0) {
            /* Value did not fit; val_size was updated to the needed
               size.  Retry with a heap buffer until it fits. */
            val = erts_alloc(ERTS_ALC_T_TMP, val_size);
            while (1) {
                res = erts_sys_getenv(key_str, val, &val_size);
                if (res == 0)
                    break;
                else if (res < 0)
                    goto no_var;
                else
                    val = erts_realloc(ERTS_ALC_T_TMP, val, val_size);
            }
        }
        if (val_size)
            hp = HAlloc(p, val_size*2);
        str = buf_to_intlist(&hp, val, val_size, NIL);
    }
    if (key_str != &buf[0])
        erts_free(ERTS_ALC_T_TMP, key_str);
    /* val was heap-allocated iff it points outside buf. */
    if (val < &buf[0] || &buf[sizeof(buf)-1] < val)
        erts_free(ERTS_ALC_T_TMP, val);
    BIF_RET(str);
}
/*
 * Allocation callback for the registered-name table: allocate a new
 * RegProc and copy the template's fields into it.
 *
 * Fix: the erl_exit() format used %d while the argument was
 * sizeof(RegProc), a size_t; a mismatched type in a varargs call is
 * undefined behavior.  Cast to unsigned long and use %lu.
 */
static RegProc* reg_alloc(RegProc *tmpl)
{
    RegProc* obj = (RegProc*) erts_alloc(ERTS_ALC_T_REG_PROC, sizeof(RegProc));
    if (!obj) {
        erl_exit(1, "Can't allocate %lu bytes of memory\n",
                 (unsigned long) sizeof(RegProc));
    }
    obj->name = tmpl->name;
    obj->p = tmpl->p;
    obj->pt = tmpl->pt;
    return obj;
}
/*
 * Register the calling thread as an unmanaged thread with the thread
 * progress subsystem.  If the thread already carries temporary
 * progress data, its blocking state is carried over and the temporary
 * data is returned; double registration of a non-temporary thread is
 * fatal.  The callbacks (at least `wakeup`) are stored so the thread
 * can be woken later.
 */
void
erts_thr_progress_register_unmanaged_thread(ErtsThrPrgrCallbacks *callbacks)
{
    ErtsThrPrgrData *tpd = perhaps_thr_prgr_data(NULL);
    int is_blocking = 0;

    if (tpd) {
        if (!tpd->is_temporary)
            erl_exit(ERTS_ABORT_EXIT,
                     "%s:%d:%s(): Double register of thread\n",
                     __FILE__, __LINE__, __func__);
        is_blocking = tpd->is_blocking;
        return_tmp_thr_prgr_data(tpd);
    }

    /*
     * We only allocate the part up to the leader field
     * which is the first field only used by managed threads
     */
    tpd = erts_alloc(ERTS_ALC_T_THR_PRGR_DATA,
                     offsetof(ErtsThrPrgrData, leader));
    /* Unmanaged ids are handed out from a shared atomic counter. */
    tpd->id = (int) erts_atomic32_inc_read_nob(&intrnl->misc.data.unmanaged_id);
    tpd->is_managed = 0;
    tpd->is_blocking = is_blocking;
    tpd->is_temporary = 0;
#ifdef ERTS_ENABLE_LOCK_CHECK
    tpd->is_delaying = 0;
#endif
    ASSERT(tpd->id >= 0);
    if (tpd->id >= intrnl->unmanaged.no)
        erl_exit(ERTS_ABORT_EXIT,
                 "%s:%d:%s(): Too many unmanaged registered threads\n",
                 __FILE__, __LINE__, __func__);

    init_wakeup_request_array(&tpd->wakeup_request[0]);
    /* Stash the progress data in thread-specific storage. */
    erts_tsd_set(erts_thr_prgr_data_key__, (void *) tpd);

    ASSERT(callbacks->wakeup);
    intrnl->unmanaged.callbacks[tpd->id] = *callbacks;
}
static void fatal_error(int err, char *func) { char *estr = strerror(err); if (!estr) { if (err == ENOTSUP) estr = "Not supported"; else estr = "Unknown error"; } erl_exit(ERTS_ABORT_EXIT, "Fatal error in %s: %s [%d]\n", func, estr, err); }
/*
 * Read a Mach clock service and return its value converted to a
 * 64-bit integer via ERTS_TimeSpec2Sint64().  A failed read aborts
 * the emulator.
 */
static ERTS_INLINE Sint64
mach_clock_get_time(ErtsMachClock *clk)
{
    mach_timespec_t time_spec;
    kern_return_t kret = clock_get_time(clk->srv, &time_spec);

    if (kret != KERN_SUCCESS) {
        erl_exit(ERTS_ABORT_EXIT,
                 "clock_get_time(%s, _) failed\n",
                 clk->name);
    }

    return ERTS_TimeSpec2Sint64(&time_spec);
}
/*
 * BIF implementations
 */

/*
 * erase/1 helper: remove `id` from process p's dictionary, storing
 * the erased value (or am_undefined) in *ret.  Collision chains are
 * unlinked in place; a chain shrunk to a single element is collapsed
 * back into a plain tuple slot.  Shrinks the table afterwards when it
 * has become sparse enough.
 */
static void pd_hash_erase(Process *p, Eterm id, Eterm *ret)
{
    unsigned int hval;
    Eterm old;
    Eterm tmp;
    unsigned int range;

    *ret = am_undefined;
    if (p->dictionary == NULL) {
        return;
    }
    hval = pd_hash_value(p->dictionary, id);
    old = ARRAY_GET(p->dictionary, hval);
    if (is_boxed(old)) { /* Tuple */
        ASSERT(is_tuple(old));
        if (EQ(tuple_val(old)[1], id)) {
            array_put(&(p->dictionary), hval, NIL);
            --(p->dictionary->numElements);
            *ret = tuple_val(old)[2];
        }
    } else if (is_list(old)) {
        /* Find cons cell for identical value */
        Eterm* prev = &p->dictionary->data[hval];

        for (tmp = *prev; tmp != NIL; prev = &TCDR(tmp), tmp = *prev) {
            if (EQ(tuple_val(TCAR(tmp))[1], id)) {
                /* Unlink the cell from the chain. */
                *prev = TCDR(tmp);
                *ret = tuple_val(TCAR(tmp))[2];
                --(p->dictionary->numElements);
            }
        }

        /* If there is only one element left in the list we must remove the list. */
        old = ARRAY_GET(p->dictionary, hval);
        ASSERT(is_list(old));
        if (is_nil(TCDR(old))) {
            array_put(&p->dictionary, hval, TCAR(old));
        }
    } else if (is_not_nil(old)) {
#ifdef DEBUG
        erts_fprintf(stderr,
                     "Process dictionary for process %T is broken, trying to "
                     "display term found in line %d:\n"
                     "%T\n", p->common.id, __LINE__, old);
#endif
        erl_exit(1, "Damaged process dictionary found during erase/1.");
    }
    /* Shrink the table when its range is large relative to the number
       of stored elements. */
    if ((range = HASH_RANGE(p->dictionary)) > INITIAL_SIZE &&
        range / 2 > (p->dictionary->numElements)) {
        shrink(p, ret);
    }
}
/*
 * Read the monotonic and wall clocks through the Mach clock services.
 * Both clocks are sampled before either result is checked, keeping
 * the two readings as close together as possible; a failed read
 * aborts the emulator.
 */
void
erts_os_times(ErtsMonotonicTime *mtimep, ErtsSystemTime *stimep)
{
    mach_timespec_t mono_ts, wall_ts;
    kern_return_t mono_res, wall_res;

    mono_res = clock_get_time(internal_state.r.o.mach.clock.monotonic.srv,
                              &mono_ts);
    wall_res = clock_get_time(internal_state.r.o.mach.clock.wall.srv,
                              &wall_ts);

    if (mono_res != KERN_SUCCESS) {
        erl_exit(ERTS_ABORT_EXIT,
                 "clock_get_time(%s, _) failed\n",
                 internal_state.r.o.mach.clock.monotonic.name);
    }
    if (wall_res != KERN_SUCCESS) {
        erl_exit(ERTS_ABORT_EXIT,
                 "clock_get_time(%s, _) failed\n",
                 internal_state.r.o.mach.clock.wall.name);
    }

    *mtimep = (ErtsMonotonicTime) ERTS_TimeSpec2Sint64(&mono_ts);
    *stimep = (ErtsSystemTime) ERTS_TimeSpec2Sint64(&wall_ts);
}
/*
 * Read POSIX clock `id` with clock_gettime() and return the value as
 * a 64-bit integer.  `name` is only used in the abort message when
 * the read fails.
 */
static ERTS_INLINE Sint64
posix_clock_gettime(clockid_t id, char *name)
{
    struct timespec t;
    int res = clock_gettime(id, &t);

    if (res != 0) {
        int saved_errno = errno;
        char *reason = saved_errno ? strerror(saved_errno) : "unknown";
        erl_exit(ERTS_ABORT_EXIT,
                 "clock_gettime(%s, _) failed: %s (%d)\n",
                 name, reason, saved_errno);
    }

    return ERTS_TimeSpec2Sint64(&t);
}
/*
 * Copy object "obj" to process p.
 * Allocates room on the destination process heap and deep-copies the
 * term; in DEBUG builds the copy is verified to compare equal to the
 * original.
 */
Eterm copy_object(Eterm obj, Process* to)
{
    Uint size = size_object(obj);
    Eterm* hp = HAlloc(to, size);
    Eterm copy = copy_struct(obj, size, &hp, &to->off_heap);

#ifdef DEBUG
    if (!eq(obj, copy)) {
        erl_exit(ERTS_ABORT_EXIT, "copy not equal to source\n");
    }
#endif
    return copy;
}
/*
 * Dispose of all tasks still queued on a port being taken down: both
 * the executing task queue and the normal task queue are drained.
 * I/O tasks are reported to the driver layer as stale selects;
 * FREE/TIMEOUT/DIST_CMD tasks are simply dropped.  The run queue lock
 * is released while each task is handled and re-taken before the next
 * pop.
 */
static void
handle_remaining_tasks(ErtsRunQueue *runq, Port *pp)
{
    int i;
    ErtsPortTask *ptp;
    ErtsPortTaskQueue *ptqps[] = {pp->sched.exe_taskq, pp->sched.taskq};

    ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));

    for (i = 0; i < sizeof(ptqps)/sizeof(ErtsPortTaskQueue *); i++) {
        if (!ptqps[i])
            continue;

        ptp = pop_task(ptqps[i]);
        while (ptp) {
            reset_handle(ptp);
            /* Drop the run queue lock while handling the task. */
            erts_smp_runq_unlock(runq);

            switch (ptp->type) {
            case ERTS_PORT_TASK_FREE:
            case ERTS_PORT_TASK_TIMEOUT:
                break;
            case ERTS_PORT_TASK_INPUT:
                erts_stale_drv_select(pp->id, ptp->event, DO_READ, 1);
                break;
            case ERTS_PORT_TASK_OUTPUT:
                erts_stale_drv_select(pp->id, ptp->event, DO_WRITE, 1);
                break;
            case ERTS_PORT_TASK_EVENT:
                erts_stale_drv_select(pp->id, ptp->event, 0, 1);
                break;
            case ERTS_PORT_TASK_DIST_CMD:
                break;
            default:
                erl_exit(ERTS_ABORT_EXIT,
                         "Invalid port task type: %d\n",
                         (int) ptp->type);
            }

            port_task_free(ptp);

            erts_smp_runq_lock(runq);
            ptp = pop_task(ptqps[i]);
        }
    }

    ASSERT(!pp->sched.taskq || !pp->sched.taskq->first);
}
/*
 * Initialize the timer machinery: perform the system dependent time
 * initialization and validate (or record) the timer resolution
 * (itime) it reports.
 */
void
erts_init_time(int time_correction, ErtsTimeWarpMode time_warp_mode)
{
    int itime;

    /* system dependent init; must be done before do_time_init()
       if timer thread is enabled */
    itime = erts_init_time_sup(time_correction, time_warp_mode);
#ifdef TIW_ITIME_IS_CONSTANT
    if (itime != TIW_ITIME) {
        erl_exit(ERTS_ABORT_EXIT, "timer resolution mismatch %d != %d", itime, TIW_ITIME);
    }
#else
    tiw_itime = itime;
#endif
}