static void
table_end_staging_ranges(ErtsAlcType_t alctr, struct ranges* r, int commit)
{
    ErtsCodeIndex dst = erts_staging_code_ix();

    if (commit && r[dst].modules == NULL) {
        Sint i;
        Sint n;

        /* No modules added, just clone src and remove purged code. */
        ErtsCodeIndex src = erts_active_code_ix();

        erts_smp_atomic_add_nob(&mem_used, r[src].n);
        r[dst].modules = erts_alloc(alctr, r[src].n * sizeof(Range));
        r[dst].allocated = r[src].n;
        n = 0;
        for (i = 0; i < r[src].n; i++) {
            Range* rp = r[src].modules+i;
            if (rp->start < RANGE_END(rp)) {
                /* Only insert a module that has not been purged. */
                r[dst].modules[n] = *rp;
                n++;
            }
        }
        r[dst].n = n;
        erts_smp_atomic_set_nob(&r[dst].mid,
                                (erts_aint_t) (r[dst].modules + n / 2));
    }
}
static ErtsLink *create_link(Uint type, Eterm pid)
{
    Uint lnk_size = ERTS_LINK_SIZE;
    ErtsLink *n;
    Eterm *hp;

    if (!IS_CONST(pid)) {
        lnk_size += NC_HEAP_SIZE(pid);
    }
    if (lnk_size <= ERTS_LINK_SH_SIZE) {
        n = (ErtsLink *) erts_alloc(ERTS_ALC_T_NLINK_SH,
                                    lnk_size*sizeof(Uint));
    } else {
        n = (ErtsLink *) erts_alloc(ERTS_ALC_T_NLINK_LH,
                                    lnk_size*sizeof(Uint));
        erts_smp_atomic_add_nob(&tot_link_lh_size, lnk_size*sizeof(Uint));
    }
    hp = n->heap;

    n->left = n->right = NULL; /* Always the same initial value */
    n->type = (Uint16) type;
    n->balance = 0;            /* Always the same initial value */
    if (n->type == LINK_NODE) {
        ERTS_LINK_REFC(n) = 0;
    } else {
        ERTS_LINK_ROOT(n) = NULL;
    }
    CP_LINK_VAL(n->pid, hp, pid);

    return n;
}
void erts_destroy_monitor(ErtsMonitor *mon)
{
    Uint mon_size = ERTS_MONITOR_SIZE;
    ErlNode *node;

    ASSERT(!IS_CONST(mon->ref));
    mon_size += NC_HEAP_SIZE(mon->ref);
    if (is_external(mon->ref)) {
        node = external_thing_ptr(mon->ref)->node;
        erts_deref_node_entry(node);
    }
    if (!IS_CONST(mon->pid)) {
        mon_size += NC_HEAP_SIZE(mon->pid);
        if (is_external(mon->pid)) {
            node = external_thing_ptr(mon->pid)->node;
            erts_deref_node_entry(node);
        }
    }
    if (mon_size <= ERTS_MONITOR_SH_SIZE) {
        erts_free(ERTS_ALC_T_MONITOR_SH, (void *) mon);
    } else {
        erts_free(ERTS_ALC_T_MONITOR_LH, (void *) mon);
        erts_smp_atomic_add_nob(&tot_link_lh_size,
                                -1*mon_size*sizeof(Uint));
    }
}
static ErtsMonitor *create_monitor(Uint type, Eterm ref, Eterm pid,
                                   Eterm name)
{
    Uint mon_size = ERTS_MONITOR_SIZE;
    ErtsMonitor *n;
    Eterm *hp;

    mon_size += NC_HEAP_SIZE(ref);
    if (!IS_CONST(pid)) {
        mon_size += NC_HEAP_SIZE(pid);
    }

    if (mon_size <= ERTS_MONITOR_SH_SIZE) {
        n = (ErtsMonitor *) erts_alloc(ERTS_ALC_T_MONITOR_SH,
                                       mon_size*sizeof(Uint));
    } else {
        n = (ErtsMonitor *) erts_alloc(ERTS_ALC_T_MONITOR_LH,
                                       mon_size*sizeof(Uint));
        erts_smp_atomic_add_nob(&tot_link_lh_size, mon_size*sizeof(Uint));
    }
    hp = n->heap;

    n->left = n->right = NULL; /* Always the same initial value */
    n->type = (Uint16) type;
    n->balance = 0;            /* Always the same initial value */
    n->name = name;            /* atom() or [] */
    CP_LINK_VAL(n->ref, hp, ref); /* XXX Unnecessary check, never immediate */
    CP_LINK_VAL(n->pid, hp, pid);

    return n;
}
static ErtsMonitor *create_monitor(Uint type, Eterm ref, UWord entity,
                                   Eterm name)
{
    Uint mon_size = ERTS_MONITOR_SIZE;
    ErtsMonitor *n;
    Eterm *hp;

    mon_size += NC_HEAP_SIZE(ref);
    if (type != MON_NIF_TARGET && is_not_immed(entity)) {
        mon_size += NC_HEAP_SIZE(entity);
    }

    if (mon_size <= ERTS_MONITOR_SH_SIZE) {
        n = (ErtsMonitor *) erts_alloc(ERTS_ALC_T_MONITOR_SH,
                                       mon_size*sizeof(Uint));
    } else {
        n = (ErtsMonitor *) erts_alloc(ERTS_ALC_T_MONITOR_LH,
                                       mon_size*sizeof(Uint));
        erts_smp_atomic_add_nob(&tot_link_lh_size, mon_size*sizeof(Uint));
    }
    hp = n->heap;

    n->left = n->right = NULL; /* Always the same initial value */
    n->type = (Uint16) type;
    n->balance = 0;            /* Always the same initial value */
    n->name = name;            /* atom() or [] */
    CP_LINK_VAL(n->ref, hp, ref); /* XXX Unnecessary check, never immediate */
    if (type == MON_NIF_TARGET)
        n->u.resource = (ErtsResource*)entity;
    else
        CP_LINK_VAL(n->u.pid, hp, (Eterm)entity);

    return n;
}
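/*
 * A minimal usage sketch (hypothetical caller, not part of the original
 * source) showing how the allocation accounting above is meant to balance:
 * whatever create_monitor() adds to tot_link_lh_size for a large-heap
 * node, erts_destroy_monitor() subtracts again when the node is freed, so
 * the counter tracks live large-heap bytes only.
 */
static void example_monitor_roundtrip(Eterm ref, Eterm pid)
{
    /* MON_ORIGIN and NIL are real ERTS constants; the round trip itself
     * is illustrative only. */
    ErtsMonitor *mon = create_monitor(MON_ORIGIN, ref, (UWord) pid, NIL);
    /* ... insert into a monitor tree and use it ... */
    erts_destroy_monitor(mon);
}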
static struct export_entry* export_alloc(struct export_entry* tmpl_e)
{
#ifndef ERTS_SLAVE
    struct export_blob* blob;
    unsigned ix;

    if (tmpl_e->slot.index == -1) {  /* Template, allocate blob */
        Export* tmpl = tmpl_e->ep;
        Export* obj;

        blob = (struct export_blob*) erts_alloc(ERTS_ALC_T_EXPORT,
                                                sizeof(*blob));
        erts_smp_atomic_add_nob(&total_entries_bytes, sizeof(*blob));
        obj = &blob->exp;
        obj->fake_op_func_info_for_hipe[0] = 0;
        obj->fake_op_func_info_for_hipe[1] = 0;
        obj->code[0] = tmpl->code[0];
        obj->code[1] = tmpl->code[1];
        obj->code[2] = tmpl->code[2];
        obj->code[3] = (BeamInstr) em_call_error_handler;
        obj->code[4] = 0;
#ifdef ERTS_SLAVE_EMU_ENABLED
        obj->slave_fake_op_func_info_for_hipe[0] = 0;
        obj->slave_fake_op_func_info_for_hipe[1] = 0;
        obj->slave_code[0] = tmpl->code[0];
        obj->slave_code[1] = tmpl->code[1];
        obj->slave_code[2] = tmpl->code[2];
        /* If the slave is not online yet, we don't know its opcodes.
         * slave_code[3] will be touched on all export entries once it
         * comes online. */
        if (slave_initialised)
            obj->slave_code[3] = (BeamInstr) SlaveOp(op_call_error_handler);
        obj->slave_code[4] = 0;
#endif

        for (ix=0; ix<ERTS_NUM_CODE_IX; ix++) {
            obj->addressv[ix] = obj->code+3;
#ifdef ERTS_SLAVE_EMU_ENABLED
            obj->slave_addressv[ix] = obj->slave_code+3;
#endif
            blob->entryv[ix].slot.index = -1;
            blob->entryv[ix].ep = &blob->exp;
        }
        ix = 0;
    }
    else {  /* Existing entry in another table, use free entry in blob */
        blob = entry_to_blob(tmpl_e);
        for (ix = 0; blob->entryv[ix].slot.index >= 0; ix++) {
            ASSERT(ix < ERTS_NUM_CODE_IX);
        }
    }
    return &blob->entryv[ix];
#else
    erl_exit(1, "Cannot alloc export entry from slave");
#endif
}
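/*
 * export_alloc() and export_free() both rely on an entry_to_blob() helper
 * that is not shown here. A hedged reconstruction of its likely shape
 * (hypothetical; the real definition lives elsewhere in this file): every
 * entryv[] element points at the blob's single embedded Export, so the
 * enclosing blob can be recovered from any of its entries with plain
 * pointer arithmetic.
 */
#include <stddef.h>  /* offsetof */

static struct export_blob* entry_to_blob(struct export_entry* ee)
{
    return (struct export_blob*)
        ((char*)ee->ep - offsetof(struct export_blob, exp));
}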
static void
table_start_staging_ranges(ErtsAlcType_t alctr, struct ranges* r)
{
    ErtsCodeIndex dst = erts_staging_code_ix();

    if (r[dst].modules) {
        erts_smp_atomic_add_nob(&mem_used, -r[dst].allocated);
        erts_free(alctr, r[dst].modules);
        r[dst].modules = NULL;
    }
}
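/*
 * A hedged sketch (hypothetical caller, not in the original source) of
 * the staging protocol the table_* functions assume during normal
 * operation, i.e. after init when staging != active: the loader clears
 * the staging slot, optionally builds a new table around the module
 * being loaded, and then commits. If nothing was inserted,
 * table_end_staging_ranges() clones the active table minus purged
 * modules; if table_update_ranges() already built the staging table, the
 * commit step is a no-op there.
 */
static void table_update_ranges(ErtsAlcType_t, struct ranges*,
                                BeamInstr*, Uint);  /* defined below */

static void example_stage_and_commit(ErtsAlcType_t alctr, struct ranges* r,
                                     BeamInstr* code, Uint size)
{
    table_start_staging_ranges(alctr, r);       /* empty the staging slot */
    if (code != NULL)
        table_update_ranges(alctr, r, code, size);
    table_end_staging_ranges(alctr, r, 1);      /* 1 = commit */
}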
static void export_free(struct export_entry* obj)
{
#ifndef ERTS_SLAVE
    struct export_blob* blob = entry_to_blob(obj);
    int i;

    obj->slot.index = -1;
    for (i=0; i < ERTS_NUM_CODE_IX; i++) {
        if (blob->entryv[i].slot.index >= 0) {
            return;  /* Other entries still in use; keep the blob alive. */
        }
    }
    erts_free(ERTS_ALC_T_EXPORT, blob);
    erts_smp_atomic_add_nob(&total_entries_bytes, -sizeof(*blob));
#else
    erl_exit(1, "Cannot free export entry from slave");
#endif
}
void erts_destroy_link(ErtsLink *lnk)
{
    Uint lnk_size = ERTS_LINK_SIZE;
    ErlNode *node;

    ASSERT(lnk->type == LINK_NODE || ERTS_LINK_ROOT(lnk) == NULL);

    if (!IS_CONST(lnk->pid)) {
        lnk_size += NC_HEAP_SIZE(lnk->pid);
        if (is_external(lnk->pid)) {
            node = external_thing_ptr(lnk->pid)->node;
            erts_deref_node_entry(node);
        }
    }
    if (lnk_size <= ERTS_LINK_SH_SIZE) {
        erts_free(ERTS_ALC_T_NLINK_SH, (void *) lnk);
    } else {
        erts_free(ERTS_ALC_T_NLINK_LH, (void *) lnk);
        erts_smp_atomic_add_nob(&tot_link_lh_size,
                                -1*lnk_size*sizeof(Uint));
    }
}
int erts_sys_putenv(char *key, char *value)
{
    int res;
    char *env;
    Uint need = strlen(key) + strlen(value) + 2;

#ifdef HAVE_COPYING_PUTENV
    env = erts_alloc(ERTS_ALC_T_TMP, need);
#else
    env = erts_alloc(ERTS_ALC_T_PUTENV_STR, need);
    erts_smp_atomic_add_nob(&sys_misc_mem_sz, need);
#endif
    strcpy(env,key);
    strcat(env,"=");
    strcat(env,value);

    erts_smp_rwmtx_rwlock(&environ_rwmtx);
    res = putenv(env);
    erts_smp_rwmtx_rwunlock(&environ_rwmtx);

#ifdef HAVE_COPYING_PUTENV
    erts_free(ERTS_ALC_T_TMP, env);
#endif
    return res;
}
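/*
 * A minimal usage sketch (hypothetical call site). The design point worth
 * noting: on platforms where putenv() copies its argument
 * (HAVE_COPYING_PUTENV), the "key=value" buffer is temporary and freed
 * right away; elsewhere the buffer itself becomes part of the
 * environment, so it is allocated as ERTS_ALC_T_PUTENV_STR and charged to
 * sys_misc_mem_sz for as long as the variable lives.
 */
static void example_putenv(void)
{
    /* Sets ERL_EXAMPLE_VAR=1 under environ_rwmtx; returns putenv()'s
     * result (0 on success). */
    int res = erts_sys_putenv("ERL_EXAMPLE_VAR", "1");
    (void) res;
}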
static void
table_update_ranges(ErtsAlcType_t alctr, struct ranges* r, BeamInstr* code,
                    Uint size)
{
    ErtsCodeIndex dst = erts_staging_code_ix();
    ErtsCodeIndex src = erts_active_code_ix();
    Sint i;
    Sint n;
    Sint need;

    if (src == dst) {
        ASSERT(!erts_initialized);

        /*
         * During start-up of system, the indices are the same.
         * Handle this by faking a source area.
         */
        src = (src+1) % ERTS_NUM_CODE_IX;
        if (r[src].modules) {
            erts_smp_atomic_add_nob(&mem_used, -r[src].allocated);
            erts_free(alctr, r[src].modules);
        }
        r[src] = r[dst];
        r[dst].modules = 0;
    }

    CHECK(&r[src]);
    ASSERT(r[dst].modules == NULL);
    need = r[dst].allocated = r[src].n + 1;
    erts_smp_atomic_add_nob(&mem_used, need);
    r[dst].modules = (Range *) erts_alloc(alctr, need * sizeof(Range));
    n = 0;
    for (i = 0; i < r[src].n; i++) {
        Range* rp = r[src].modules+i;
        if (code < rp->start) {
            /* The new module sorts before the remaining ranges;
             * insert it here. */
            r[dst].modules[n].start = code;
            erts_smp_atomic_init_nob(&r[dst].modules[n].end,
                                     (erts_aint_t)(((byte *)code) + size));
            ASSERT(!n || RANGE_END(&r[dst].modules[n-1]) < code);
            n++;
            break;
        }
        if (rp->start < RANGE_END(rp)) {
            /* Only insert a module that has not been purged. */
            r[dst].modules[n].start = rp->start;
            erts_smp_atomic_init_nob(&r[dst].modules[n].end,
                                     (erts_aint_t)(RANGE_END(rp)));
            ASSERT(!n || RANGE_END(&r[dst].modules[n-1]) < rp->start);
            n++;
        }
    }

    /* Copy the ranges after the insertion point. */
    while (i < r[src].n) {
        Range* rp = r[src].modules+i;
        if (rp->start < RANGE_END(rp)) {
            /* Only insert a module that has not been purged. */
            r[dst].modules[n].start = rp->start;
            erts_smp_atomic_init_nob(&r[dst].modules[n].end,
                                     (erts_aint_t)(RANGE_END(rp)));
            ASSERT(!n || RANGE_END(&r[dst].modules[n-1]) < rp->start);
            n++;
        }
        i++;
    }

    if (n == 0 || code > r[dst].modules[n-1].start) {
        /* The new module sorts last. */
        r[dst].modules[n].start = code;
        erts_smp_atomic_init_nob(&r[dst].modules[n].end,
                                 (erts_aint_t)(((byte *)code) + size));
        ASSERT(!n || RANGE_END(&r[dst].modules[n-1]) < code);
        n++;
    }

    ASSERT(n <= r[src].n+1);
    r[dst].n = n;
    erts_smp_atomic_set_nob(&r[dst].mid,
                            (erts_aint_t) (r[dst].modules + n / 2));

    CHECK(&r[dst]);
    CHECK(&r[src]);
}
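/*
 * For context, a hedged sketch of the reader side that the mid field is
 * maintained for (a hypothetical reconstruction following the shape the
 * update code implies; the real lookup lives elsewhere in this file): a
 * binary search over the sorted, non-overlapping ranges, seeded with the
 * cached midpoint and re-caching the last hit so repeated lookups in the
 * same module converge quickly. Reading end atomically via RANGE_END()
 * is what lets purging shrink a range concurrently with lookups.
 */
static Range* example_find_range(struct ranges* r, BeamInstr* pc)
{
    ErtsCodeIndex active = erts_active_code_ix();
    Range* low = r[active].modules;
    Range* high = low + r[active].n;
    Range* mid = (Range *) erts_smp_atomic_read_nob(&r[active].mid);

    while (low < high) {
        if (pc < mid->start) {
            high = mid;
        } else if (pc > RANGE_END(mid)) {
            low = mid + 1;
        } else {
            /* Cache the hit; nearby lookups will start here. */
            erts_smp_atomic_set_nob(&r[active].mid, (erts_aint_t) mid);
            return mid;
        }
        mid = low + (high-low) / 2;
    }
    return NULL;
}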