static char *pick_list_or_atom(Eterm name_term)
{
    char *name = NULL;
    ErlDrvSizeT name_len;
    if (is_atom(name_term)) {
        Atom *ap = atom_tab(atom_val(name_term));
        if (ap->len == 0) {
            /* Zero-length io_lists are not allowed, so the empty
               atom is rejected as well. */
            goto error;
        }
        name = erts_alloc(ERTS_ALC_T_DDLL_TMP_BUF, ap->len + 1);
        memcpy(name, ap->name, ap->len);
        name[ap->len] = '\0';
    } else {
        if (erts_iolist_size(name_term, &name_len)) {
            goto error;
        }
        name = erts_alloc(ERTS_ALC_T_DDLL_TMP_BUF, name_len + 1);
        if (erts_iolist_to_buf(name_term, name, name_len) != 0) {
            goto error;
        }
        name[name_len] = '\0';
    }
    return name;
 error:
    if (name != NULL) {
        erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
    }
    return NULL;
}
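/* Illustration (not part of ERTS): the single-exit "goto error" cleanup
 * pattern used above, in a minimal self-contained form. The function name
 * read_file_to_buf is hypothetical. */
#include <stdio.h>
#include <stdlib.h>

static char *read_file_to_buf(const char *path, long *size_out)
{
    char *buf = NULL;
    FILE *f = fopen(path, "rb");
    long size;

    if (f == NULL)
        goto error;
    if (fseek(f, 0, SEEK_END) != 0 || (size = ftell(f)) < 0)
        goto error;
    rewind(f);
    buf = malloc((size_t) size + 1);
    if (buf == NULL)
        goto error;
    if (fread(buf, 1, (size_t) size, f) != (size_t) size)
        goto error;
    buf[size] = '\0';
    fclose(f);
    *size_out = size;
    return buf;
 error:
    /* Every failure path funnels here; free whatever was acquired. */
    free(buf);               /* free(NULL) is a no-op */
    if (f != NULL)
        fclose(f);
    return NULL;
}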
static WCHAR *arg_to_env(WCHAR **arg)
{
    WCHAR *block;
    WCHAR *ptr;
    int i;
    int totlen = 1;		/* extra '\0' */

    for (i = 0; arg[i] != NULL; ++i) {
        totlen += wcslen(arg[i]) + 1;
    }

    /* sort the environment vector */
    qsort(arg, i, sizeof(WCHAR *), &compare);

    if (totlen == 1) {
        block = erts_alloc(ERTS_ALC_T_ENVIRONMENT, 2 * sizeof(WCHAR));
        block[0] = block[1] = '\0';
    } else {
        block = erts_alloc(ERTS_ALC_T_ENVIRONMENT, totlen * sizeof(WCHAR));
        ptr = block;
        for (i = 0; arg[i] != NULL; ++i) {
            wcscpy(ptr, arg[i]);
            ptr += wcslen(ptr) + 1;
        }
        *ptr = '\0';
    }
    return block;
}
static char *arg_to_env(char **arg)
{
    char *block;
    char *ptr;
    int i;
    int totlen = 1;		/* extra '\0' */

    for (i = 0; arg[i] != NULL; ++i) {
        totlen += strlen(arg[i]) + 1;
    }

    /* sort the environment vector */
    qsort(arg, i, sizeof(char *), &compare);

    if (totlen == 1) {
        block = erts_alloc(ERTS_ALC_T_ENVIRONMENT, 2);
        block[0] = block[1] = '\0';
    } else {
        block = erts_alloc(ERTS_ALC_T_ENVIRONMENT, totlen);
        ptr = block;
        for (i = 0; arg[i] != NULL; ++i) {
            strcpy(ptr, arg[i]);
            ptr += strlen(ptr) + 1;
        }
        *ptr = '\0';
    }
    return block;
}
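/* Illustration (not part of ERTS): the block built by arg_to_env above is a
 * sequence of NUL-terminated strings ending in an extra NUL, the layout the
 * Windows environment-block APIs expect. A minimal, self-contained walker
 * over such a block: */
#include <stdio.h>
#include <string.h>

static void print_env_block(const char *block)
{
    const char *p = block;
    while (*p != '\0') {             /* empty string == end marker */
        printf("%s\n", p);
        p += strlen(p) + 1;          /* skip past this entry's NUL */
    }
}

int main(void)
{
    /* Two entries, then the terminating empty string (the array's own
       implicit NUL supplies the final '\0' of the double terminator). */
    const char block[] = "HOME=/home/x\0PATH=/bin\0";
    print_env_block(block);
    return 0;
}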
static ErtsMonitor *create_monitor(Uint type, Eterm ref, UWord entity,
                                   Eterm name)
{
    Uint mon_size = ERTS_MONITOR_SIZE;
    ErtsMonitor *n;
    Eterm *hp;

    mon_size += NC_HEAP_SIZE(ref);
    if (type != MON_NIF_TARGET && is_not_immed(entity)) {
        mon_size += NC_HEAP_SIZE(entity);
    }

    if (mon_size <= ERTS_MONITOR_SH_SIZE) {
        n = (ErtsMonitor *) erts_alloc(ERTS_ALC_T_MONITOR_SH,
                                       mon_size*sizeof(Uint));
    } else {
        n = (ErtsMonitor *) erts_alloc(ERTS_ALC_T_MONITOR_LH,
                                       mon_size*sizeof(Uint));
        erts_smp_atomic_add_nob(&tot_link_lh_size, mon_size*sizeof(Uint));
    }
    hp = n->heap;

    n->left = n->right = NULL; /* Always the same initial value */
    n->type = (Uint16) type;
    n->balance = 0;            /* Always the same initial value */
    n->name = name;            /* atom() or [] */
    CP_LINK_VAL(n->ref, hp, ref); /* XXX Unnecessary check, never immediate */
    if (type == MON_NIF_TARGET)
        n->u.resource = (ErtsResource *) entity;
    else
        CP_LINK_VAL(n->u.pid, hp, (Eterm) entity);

    return n;
}
static ErtsMonitor *create_monitor(Uint type, Eterm ref, Eterm pid, Eterm name)
{
    Uint mon_size = ERTS_MONITOR_SIZE;
    ErtsMonitor *n;
    Eterm *hp;

    mon_size += NC_HEAP_SIZE(ref);
    if (!IS_CONST(pid)) {
        mon_size += NC_HEAP_SIZE(pid);
    }

    if (mon_size <= ERTS_MONITOR_SH_SIZE) {
        n = (ErtsMonitor *) erts_alloc(ERTS_ALC_T_MONITOR_SH,
                                       mon_size*sizeof(Uint));
    } else {
        n = (ErtsMonitor *) erts_alloc(ERTS_ALC_T_MONITOR_LH,
                                       mon_size*sizeof(Uint));
        erts_smp_atomic_add_nob(&tot_link_lh_size, mon_size*sizeof(Uint));
    }
    hp = n->heap;

    n->left = n->right = NULL; /* Always the same initial value */
    n->type = (Uint16) type;
    n->balance = 0;            /* Always the same initial value */
    n->name = name;            /* atom() or [] */
    CP_LINK_VAL(n->ref, hp, ref); /* XXX Unnecessary check, never immediate */
    CP_LINK_VAL(n->pid, hp, pid);

    return n;
}
static ErtsLink *create_link(Uint type, Eterm pid)
{
    Uint lnk_size = ERTS_LINK_SIZE;
    ErtsLink *n;
    Eterm *hp;

    if (!IS_CONST(pid)) {
        lnk_size += NC_HEAP_SIZE(pid);
    }

    if (lnk_size <= ERTS_LINK_SH_SIZE) {
        n = (ErtsLink *) erts_alloc(ERTS_ALC_T_NLINK_SH,
                                    lnk_size*sizeof(Uint));
    } else {
        n = (ErtsLink *) erts_alloc(ERTS_ALC_T_NLINK_LH,
                                    lnk_size*sizeof(Uint));
        erts_smp_atomic_add_nob(&tot_link_lh_size, lnk_size*sizeof(Uint));
    }
    hp = n->heap;

    n->left = n->right = NULL; /* Always the same initial value */
    n->type = (Uint16) type;
    n->balance = 0;            /* Always the same initial value */
    if (n->type == LINK_NODE) {
        ERTS_LINK_REFC(n) = 0;
    } else {
        ERTS_LINK_ROOT(n) = NULL;
    }
    CP_LINK_VAL(n->pid, hp, pid);

    return n;
}
Eterm
os_getenv_1(Process* p, Eterm key)
{
    Eterm str;
    int len, res;
    char *key_str, *val;
    char buf[1024];
    size_t val_size = sizeof(buf);

    len = is_string(key);
    if (!len) {
        BIF_ERROR(p, BADARG);
    }
    /* Leave at least one byte in buf for value */
    key_str = len < sizeof(buf)-2 ? &buf[0] : erts_alloc(ERTS_ALC_T_TMP, len+1);
    if (intlist_to_buf(key, key_str, len) != len)
        erl_exit(1, "%s:%d: Internal error\n", __FILE__, __LINE__);
    key_str[len] = '\0';

    if (key_str != &buf[0])
        val = &buf[0];
    else {
        val_size -= len + 1;
        val = &buf[len + 1];
    }
    res = erts_sys_getenv(key_str, val, &val_size);

    if (res < 0) {
    no_var:
        str = am_false;
    } else {
        Eterm* hp;
        if (res > 0) {
            val = erts_alloc(ERTS_ALC_T_TMP, val_size);
            while (1) {
                res = erts_sys_getenv(key_str, val, &val_size);
                if (res == 0)
                    break;
                else if (res < 0)
                    goto no_var;
                else
                    val = erts_realloc(ERTS_ALC_T_TMP, val, val_size);
            }
        }
        if (val_size)
            hp = HAlloc(p, val_size*2);
        str = buf_to_intlist(&hp, val, val_size, NIL);
    }
    if (key_str != &buf[0])
        erts_free(ERTS_ALC_T_TMP, key_str);
    if (val < &buf[0] || &buf[sizeof(buf)-1] < val)
        erts_free(ERTS_ALC_T_TMP, val);
    BIF_RET(str);
}
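/* Illustration (not part of ERTS): the probe-and-retry buffer-sizing loop
 * used above around erts_sys_getenv, shown with standard snprintf, whose
 * return value reports how much space the formatted string needs. */
#include <stdio.h>
#include <stdlib.h>

static char *format_message(int errcode)
{
    size_t cap = 32;                       /* optimistic first guess */
    char *buf = malloc(cap);
    if (buf == NULL)
        return NULL;
    for (;;) {
        int need = snprintf(buf, cap, "operation failed with code %d",
                            errcode);
        if (need < 0) {                    /* hard failure: give up */
            free(buf);
            return NULL;
        }
        if ((size_t) need < cap)           /* it fit, including the NUL */
            return buf;
        cap = (size_t) need + 1;           /* grow to the reported size */
        {
            char *tmp = realloc(buf, cap);
            if (tmp == NULL) {
                free(buf);
                return NULL;
            }
            buf = tmp;
        }
    }
}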
ErlDrvTid
erl_drv_thread_self(void)
{
#ifdef USE_THREADS
    struct ErlDrvTid_ *dtid = ethr_tsd_get(tid_key);
    if (!dtid) {
        int res;
        /* This is a thread not spawned by this interface.
           thread_exit_handler() will clean it up when it terminates. */
        dtid = erts_alloc(ERTS_ALC_T_DRV_TID, sizeof(struct ErlDrvTid_));
        dtid->drv_thr = 0; /* Not a driver thread */
        dtid->tid = ethr_self();
        dtid->func = NULL;
        dtid->arg = NULL;
        dtid->tsd = NULL;
        dtid->tsd_len = 0;
        dtid->name = no_name;
        res = ethr_tsd_set(tid_key, (void *) dtid);
        if (res != 0)
            fatal_error(res, "erl_drv_thread_self()");
    }
    return (ErlDrvTid) dtid;
#else
    return (ErlDrvTid) NULL;
#endif
}
static void
table_end_staging_ranges(ErtsAlcType_t alctr, struct ranges* r, int commit)
{
    ErtsCodeIndex dst = erts_staging_code_ix();

    if (commit && r[dst].modules == NULL) {
        Sint i;
        Sint n;
        /* No modules added, just clone src and remove purged code. */
        ErtsCodeIndex src = erts_active_code_ix();

        erts_smp_atomic_add_nob(&mem_used, r[src].n);
        r[dst].modules = erts_alloc(alctr, r[src].n * sizeof(Range));
        r[dst].allocated = r[src].n;
        n = 0;
        for (i = 0; i < r[src].n; i++) {
            Range* rp = r[src].modules+i;
            if (rp->start < RANGE_END(rp)) {
                /* Only insert a module that has not been purged. */
                r[dst].modules[n] = *rp;
                n++;
            }
        }
        r[dst].n = n;
        erts_smp_atomic_set_nob(&r[dst].mid,
                                (erts_aint_t) (r[dst].modules + n / 2));
    }
}
void
erl_drv_tsd_set(ErlDrvTSDKey key, void *data)
{
#ifdef USE_THREADS
    struct ErlDrvTid_ *dtid = (struct ErlDrvTid_ *) erl_drv_thread_self();
#endif
    if (key < 0 || max_used_tsd_key < key || !used_tsd_keys[key])
        fatal_error(EINVAL, "erl_drv_tsd_set()");

    if (!ERL_DRV_TSD__) {
        ErlDrvTSDKey i;
        ERL_DRV_TSD_LEN__ = key + ERL_DRV_TSD_EXTRA;
        ERL_DRV_TSD__ = erts_alloc(ERTS_ALC_T_DRV_TSD,
                                   sizeof(void *)*ERL_DRV_TSD_LEN__);
        for (i = 0; i < ERL_DRV_TSD_LEN__; i++)
            ERL_DRV_TSD__[i] = NULL;
    }
    else if (ERL_DRV_TSD_LEN__ <= key) {
        ErlDrvTSDKey i = ERL_DRV_TSD_LEN__;
        ERL_DRV_TSD_LEN__ = key + ERL_DRV_TSD_EXTRA;
        ERL_DRV_TSD__ = erts_realloc(ERTS_ALC_T_DRV_TSD,
                                     ERL_DRV_TSD__,
                                     sizeof(void *)*ERL_DRV_TSD_LEN__);
        for (; i < ERL_DRV_TSD_LEN__; i++)
            ERL_DRV_TSD__[i] = NULL;
    }
    ERL_DRV_TSD__[key] = data;
}
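/* Illustration (not part of ERTS): growing a slot array on demand and
 * NULL-filling only the newly added tail, as erl_drv_tsd_set does above.
 * GROW_EXTRA is a hypothetical headroom constant standing in for
 * ERL_DRV_TSD_EXTRA. */
#include <stdlib.h>

#define GROW_EXTRA 8   /* assumed headroom beyond the requested key */

static int set_slot(void ***slots, int *len, int key, void *data)
{
    if (*slots == NULL || *len <= key) {
        int i = (*slots == NULL) ? 0 : *len;
        int new_len = key + GROW_EXTRA;
        /* realloc(NULL, ...) behaves as malloc, so both the first
           allocation and later growth go through the same call. */
        void **p = realloc(*slots, sizeof(void *) * new_len);
        if (p == NULL)
            return -1;
        for (; i < new_len; i++)   /* only the fresh tail is cleared */
            p[i] = NULL;
        *slots = p;
        *len = new_len;
    }
    (*slots)[key] = data;
    return 0;
}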
Export *
erts_suspend_process_on_pending_purge_lambda(Process *c_p)
{
    erts_smp_mtx_lock(&purge_state.mtx);
    if (is_value(purge_state.module)) {
        /*
         * The process c_p is about to call a fun in the code
         * that we are trying to purge. Suspend it and call
         * erts_code_purger:pending_purge_lambda/3. The process
         * will be resumed when the purge completes or aborts,
         * and will then try to do the call again.
         */
        if (purge_state.sp_ix >= purge_state.sp_size) {
            Eterm *sprocs;
            purge_state.sp_size += 100;
            sprocs = erts_alloc(ERTS_ALC_T_PURGE_DATA,
                                (sizeof(ErlFunEntry *)
                                 * purge_state.sp_size));
            sys_memcpy((void *) sprocs,
                       (void *) purge_state.sprocs,
                       purge_state.sp_ix*sizeof(ErlFunEntry *));
            if (purge_state.sprocs != &purge_state.def_sprocs[0])
                erts_free(ERTS_ALC_T_PURGE_DATA, purge_state.sprocs);
            purge_state.sprocs = sprocs;
        }
        purge_state.sprocs[purge_state.sp_ix++] = c_p->common.id;
        erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
        ERTS_VBUMP_ALL_REDS(c_p);
    }
    erts_smp_mtx_unlock(&purge_state.mtx);
    return purge_state.pending_purge_lambda;
}
static void*
dist_table_alloc(void *dep_tmpl)
{
    Eterm chnl_nr;
    Eterm sysname;
    DistEntry *dep;
    erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
    rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;

    sysname = ((DistEntry *) dep_tmpl)->sysname;
    chnl_nr = make_small((Uint) atom_val(sysname));
    dep = (DistEntry *) erts_alloc(ERTS_ALC_T_DIST_ENTRY, sizeof(DistEntry));

    dist_entries++;

    dep->prev = NULL;
    erts_refc_init(&dep->refc, -1);
    erts_smp_rwmtx_init_opt_x(&dep->rwmtx, &rwmtx_opt, "dist_entry", chnl_nr);
    dep->sysname = sysname;
    dep->cid = NIL;
    dep->connection_id = 0;
    dep->status = 0;
    dep->flags = 0;
    dep->version = 0;

    erts_smp_mtx_init_x(&dep->lnk_mtx, "dist_entry_links", chnl_nr);
    dep->node_links = NULL;
    dep->nlinks = NULL;
    dep->monitors = NULL;

    erts_smp_mtx_init_x(&dep->qlock, "dist_entry_out_queue", chnl_nr);
    dep->qflgs = 0;
    dep->qsize = 0;
    dep->out_queue.first = NULL;
    dep->out_queue.last = NULL;
    dep->suspended = NULL;

    dep->finalized_out_queue.first = NULL;
    dep->finalized_out_queue.last = NULL;

    erts_smp_atomic_init_nob(&dep->dist_cmd_scheduled, 0);
    erts_port_task_handle_init(&dep->dist_cmd);
    dep->send = NULL;
    dep->cache = NULL;

    /* Link in */

    /* All new dist entries are "not connected".
     * erts_this_dist_entry is also always included among "not connected"
     */
    dep->next = erts_not_connected_dist_entries;
    if (erts_not_connected_dist_entries) {
        ASSERT(erts_not_connected_dist_entries->prev == NULL);
        erts_not_connected_dist_entries->prev = dep;
    }
    erts_not_connected_dist_entries = dep;
    erts_no_of_not_connected_dist_entries++;

    return (void *) dep;
}
ErtsThrQ_t *
erts_thr_q_create(ErtsThrQInit_t *qi)
{
    ErtsAlcType_t atype;
    ErtsThrQ_t *q, *qblk;
    UWord qw;

    switch (qi->live.queue) {
    case ERTS_THR_Q_LIVE_SHORT:
        atype = ERTS_ALC_T_THR_Q_SL;
        break;
    case ERTS_THR_Q_LIVE_LONG:
        atype = ERTS_ALC_T_THR_Q_LL;
        break;
    default:
        atype = ERTS_ALC_T_THR_Q;
        break;
    }

    qw = (UWord) erts_alloc(atype,
                            sizeof(ErtsThrQ_t) + (ERTS_CACHE_LINE_SIZE-1));
    qblk = (ErtsThrQ_t *) qw;
    if (qw & ERTS_CACHE_LINE_MASK)
        qw = (qw & ~ERTS_CACHE_LINE_MASK) + ERTS_CACHE_LINE_SIZE;
    ASSERT((qw & ERTS_CACHE_LINE_MASK) == 0);
    q = (ErtsThrQ_t *) qw;
    erts_thr_q_initialize(q, qi);
    q->q.blk = qblk;
    return q;
}
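/* Illustration (not part of ERTS): the over-allocate-and-round-up trick
 * used above to place a structure on a cache-line boundary while keeping
 * the original block pointer around for the eventual free. CACHE_LINE is
 * an assumed constant here. */
#include <stdint.h>
#include <stdlib.h>

#define CACHE_LINE 64u   /* assumed; must be a power of two */

struct thing {
    void *orig_block;    /* what free() must eventually receive */
    int payload;
};

static struct thing *aligned_thing_create(void)
{
    /* Over-allocate by CACHE_LINE-1 so a boundary is always reachable. */
    void *blk = malloc(sizeof(struct thing) + (CACHE_LINE - 1));
    uintptr_t p = (uintptr_t) blk;
    struct thing *t;
    if (blk == NULL)
        return NULL;
    if (p & (CACHE_LINE - 1))                    /* not aligned yet? */
        p = (p & ~(uintptr_t)(CACHE_LINE - 1)) + CACHE_LINE;
    t = (struct thing *) p;
    t->orig_block = blk;
    return t;
}

static void aligned_thing_destroy(struct thing *t)
{
    free(t->orig_block);   /* free the block, not the rounded pointer */
}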
unsigned beam_catches_cons(BeamInstr *cp, unsigned cdr)
{
    int i;
    struct bc_pool* p = &bccix[erts_staging_code_ix()];

    ASSERT(p->is_staging);
    /*
     * Allocate from free_list while it is non-empty.
     * If free_list is empty, allocate at high_mark.
     */
    if (p->free_list >= 0) {
        i = p->free_list;
        p->free_list = p->beam_catches[i].cdr;
    }
    else {
        if (p->high_mark >= p->tabsize) {
            /* No free slots and table is full: realloc table */
            beam_catch_t* prev_vec = p->beam_catches;
            unsigned newsize = p->tabsize*2;

            p->beam_catches = erts_alloc(ERTS_ALC_T_CODE,
                                         newsize*sizeof(beam_catch_t));
            sys_memcpy(p->beam_catches, prev_vec,
                       p->tabsize*sizeof(beam_catch_t));
            gc_old_vec(prev_vec);
            p->tabsize = newsize;
        }
        i = p->high_mark++;
    }
    p->beam_catches[i].cp  = cp;
    p->beam_catches[i].cdr = cdr;

    return i;
}
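/* Illustration (not part of ERTS): a slot allocator combining a free list
 * threaded through the slots themselves with a high-water mark and table
 * doubling, the same strategy as beam_catches_cons above. Note the original
 * copies into a fresh table and defers freeing the old one (gc_old_vec)
 * because concurrent lookups may be in flight; plain realloc suffices for
 * this single-threaded sketch. */
#include <stdlib.h>

struct slot { int cdr; /* free-list link while the slot is free */ };

struct pool {
    struct slot *tab;
    int tabsize;
    int free_list;   /* -1 when empty */
    int high_mark;
};

static int pool_alloc_slot(struct pool *p)
{
    int i;
    if (p->free_list >= 0) {               /* reuse a freed slot first */
        i = p->free_list;
        p->free_list = p->tab[i].cdr;
    } else {
        if (p->high_mark >= p->tabsize) {  /* full: double the table */
            int newsize = p->tabsize ? p->tabsize * 2 : 8;
            struct slot *t = realloc(p->tab, newsize * sizeof(struct slot));
            if (t == NULL)
                return -1;
            p->tab = t;
            p->tabsize = newsize;
        }
        i = p->high_mark++;
    }
    return i;
}

static void pool_free_slot(struct pool *p, int i)
{
    p->tab[i].cdr = p->free_list;          /* push onto the free list */
    p->free_list = i;
}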
static ERTS_INLINE erts_tse_t *
tse_fetch(erts_pix_lock_t *pix_lock)
{
    erts_tse_t *tse = erts_tse_fetch();
    if (!tse->udata) {
        erts_proc_lock_queues_t *qs;
#if ERTS_PROC_LOCK_SPINLOCK_IMPL && !ERTS_PROC_LOCK_ATOMIC_IMPL
        if (pix_lock)
            erts_pix_unlock(pix_lock);
#endif
        erts_smp_spin_lock(&qs_lock);
        qs = queue_free_list;
        if (qs) {
            queue_free_list = queue_free_list->next;
            erts_smp_spin_unlock(&qs_lock);
        }
        else {
            erts_smp_spin_unlock(&qs_lock);
            qs = erts_alloc(ERTS_ALC_T_PROC_LCK_QS,
                            sizeof(erts_proc_lock_queues_t));
            sys_memcpy((void *) qs,
                       (void *) &zeroqs,
                       sizeof(erts_proc_lock_queues_t));
        }
        tse->udata = qs;
#if ERTS_PROC_LOCK_SPINLOCK_IMPL && !ERTS_PROC_LOCK_ATOMIC_IMPL
        if (pix_lock)
            erts_pix_lock(pix_lock);
#endif
    }
    tse->uflgs = 0;
    return tse;
}
int enif_inspect_iolist_as_binary(ErlNifEnv* env, Eterm term, ErlNifBinary* bin)
{
    struct enif_tmp_obj_t* tobj;
    ErtsAlcType_t allocator;
    Uint sz;

    if (is_binary(term)) {
        return enif_inspect_binary(env, term, bin);
    }
    if (is_nil(term)) {
        bin->data = (unsigned char*) &bin->data; /* dummy non-NULL */
        bin->size = 0;
        bin->bin_term = THE_NON_VALUE;
        bin->ref_bin = NULL;
        return 1;
    }
    if (erts_iolist_size(term, &sz)) {
        return 0;
    }

    allocator = is_proc_bound(env) ? ERTS_ALC_T_TMP : ERTS_ALC_T_NIF;
    tobj = erts_alloc(allocator, sz + sizeof(struct enif_tmp_obj_t));
    tobj->allocator = allocator;
    tobj->next = env->tmp_obj_list;
    tobj->dtor = &tmp_alloc_dtor;
    env->tmp_obj_list = tobj;

    bin->data = (unsigned char*) &tobj[1];
    bin->size = sz;
    bin->bin_term = THE_NON_VALUE;
    bin->ref_bin = NULL;
    io_list_to_buf(term, (char*) bin->data, sz);
    ADD_READONLY_CHECK(env, bin->data, bin->size);
    return 1;
}
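/* Illustration (not part of ERTS): allocating a control header and its
 * payload in one block, with the payload starting right after the header
 * (the &tobj[1] idiom above). The struct name tmp_buf is hypothetical. */
#include <stdlib.h>
#include <string.h>

struct tmp_buf {
    struct tmp_buf *next;   /* intrusive list of temporaries */
    size_t size;
};

static struct tmp_buf *tmp_buf_new(const char *src, size_t n)
{
    /* One allocation: header immediately followed by n payload bytes. */
    struct tmp_buf *t = malloc(sizeof(struct tmp_buf) + n);
    if (t == NULL)
        return NULL;
    t->next = NULL;
    t->size = n;
    memcpy(&t[1], src, n);  /* payload lives just past the header */
    return t;
}

static unsigned char *tmp_buf_data(struct tmp_buf *t)
{
    return (unsigned char *) &t[1];
}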
void
erts_update_ranges(BeamInstr* code, Uint size)
{
    ErtsCodeIndex dst = erts_staging_code_ix();
    ErtsCodeIndex src = erts_active_code_ix();

    if (src == dst) {
        ASSERT(!erts_initialized);

        /*
         * During start-up of system, the indices are the same
         * and erts_start_staging_ranges() has not been called.
         */
        if (r[dst].modules == NULL) {
            Sint need = 128;
            erts_atomic_add_nob(&mem_used, need);
            r[dst].modules = erts_alloc(ERTS_ALC_T_MODULE_REFS,
                                        need * sizeof(Range));
            r[dst].allocated = need;
            write_ptr = r[dst].modules;
        }
    }

    ASSERT(r[dst].modules);
    write_ptr->start = code;
    erts_atomic_init_nob(&(write_ptr->end),
                         (erts_aint_t)(((byte *)code) + size));
    write_ptr++;
}
/*
** Initialize the timer wheel: clear all wheel slots and mark the
** timer queue as empty.
*/
void
erts_init_time(void)
{
    int i, itime;

    /* system dependent init; must be done before do_time_init()
       if timer thread is enabled */
    itime = erts_init_time_sup();
#ifdef TIW_ITIME_IS_CONSTANT
    if (itime != TIW_ITIME) {
        erl_exit(ERTS_ABORT_EXIT,
                 "timer resolution mismatch %d != %d", itime, TIW_ITIME);
    }
#else
    tiw_itime = itime;
#endif

    erts_smp_mtx_init(&tiw_lock, "timer_wheel");

    tiw = (ErlTimer **) erts_alloc(ERTS_ALC_T_TIMER_WHEEL,
                                   TIW_SIZE * sizeof(ErlTimer *));
    for (i = 0; i < TIW_SIZE; i++)
        tiw[i] = NULL;
    do_time_init();
    tiw_pos = tiw_nto = 0;
    tiw_min_ptr = NULL;
    tiw_min = 0;
}
static void
insert_offheap(ErlOffHeap *oh, int type, Eterm id)
{
    union erl_off_heap_ptr u;
    struct insert_offheap2_arg a;
    a.type = BIN_REF;

    for (u.hdr = oh->first; u.hdr; u.hdr = u.hdr->next) {
        switch (thing_subtag(u.hdr->thing_word)) {
        case REFC_BINARY_SUBTAG:
            if (IsMatchProgBinary(u.pb->val)) {
                InsertedBin *ib;
                int insert_bin = 1;
                for (ib = inserted_bins; ib; ib = ib->next)
                    if (ib->bin_val == u.pb->val) {
                        insert_bin = 0;
                        break;
                    }
                if (insert_bin) {
#if HALFWORD_HEAP
                    UWord val = (UWord) u.pb->val;
                    DeclareTmpHeapNoproc(id_heap,BIG_UINT_HEAP_SIZE*2); /* extra place allocated */
#else
                    DeclareTmpHeapNoproc(id_heap,BIG_UINT_HEAP_SIZE);
#endif
                    Uint *hp = &id_heap[0];
                    InsertedBin *nib;
#if HALFWORD_HEAP
                    int actual_need = BIG_UWORD_HEAP_SIZE(val);
                    ASSERT(actual_need <= (BIG_UINT_HEAP_SIZE*2));
                    UseTmpHeapNoproc(actual_need);
                    a.id = erts_bld_uword(&hp, NULL, (UWord) val);
#else
                    UseTmpHeapNoproc(BIG_UINT_HEAP_SIZE);
                    a.id = erts_bld_uint(&hp, NULL, (Uint) u.pb->val);
#endif
                    erts_match_prog_foreach_offheap(u.pb->val,
                                                    insert_offheap2,
                                                    (void *) &a);
                    nib = erts_alloc(ERTS_ALC_T_NC_TMP, sizeof(InsertedBin));
                    nib->bin_val = u.pb->val;
                    nib->next = inserted_bins;
                    inserted_bins = nib;
#if HALFWORD_HEAP
                    UnUseTmpHeapNoproc(actual_need);
#else
                    UnUseTmpHeapNoproc(BIG_UINT_HEAP_SIZE);
#endif
                }
            }
            break;
        case FUN_SUBTAG:
            break; /* No need to insert anything for funs */
        default:
            ASSERT(is_external_header(u.hdr->thing_word));
            insert_node(u.ext->node, type, id);
            break;
        }
    }
}
static ErtsFlxCtrDecentralizedCtrArray*
create_decentralized_ctr_array(ErtsAlcType_t alloc_type, Uint nr_of_counters)
{
    /* Allocate an ErtsFlxCtrDecentralizedCtrArray and make sure that
       the array field is located at the start of a cache line */
    char* bytes =
        erts_alloc(alloc_type,
                   sizeof(ErtsFlxCtrDecentralizedCtrArray) +
                   (sizeof(ErtsFlxCtrDecentralizedCtrArrayElem) *
                    ERTS_FLXCTR_DECENTRALIZED_NO_SLOTS) +
                   ERTS_CACHE_LINE_SIZE);
    void* block_start = bytes;
    int bytes_to_next_cacheline_border;
    ErtsFlxCtrDecentralizedCtrArray* array;
    int i, sched;
    bytes = &bytes[offsetof(ErtsFlxCtrDecentralizedCtrArray, array)];
    bytes_to_next_cacheline_border =
        ERTS_CACHE_LINE_SIZE - (((Uint)bytes) % ERTS_CACHE_LINE_SIZE);
    array = (ErtsFlxCtrDecentralizedCtrArray*)
        (&bytes[bytes_to_next_cacheline_border -
                (int)offsetof(ErtsFlxCtrDecentralizedCtrArray, array)]);
    ASSERT(((Uint)array->array) % ERTS_CACHE_LINE_SIZE == 0);
    ASSERT(((Uint)array - (Uint)block_start) <= ERTS_CACHE_LINE_SIZE);
    /* Initialize fields */
    erts_atomic_init_nob(&array->snapshot_status, ERTS_FLXCTR_SNAPSHOT_ONGOING);
    for (sched = 0; sched < ERTS_FLXCTR_DECENTRALIZED_NO_SLOTS; sched++) {
        for (i = 0; i < nr_of_counters; i++) {
            erts_atomic_init_nob(&array->array[sched].counters[i], 0);
        }
    }
    array->block_start = block_start;
    return array;
}
static int
try_seize_update_permission(Process* c_p)
{
    int success;

    ASSERT(!erts_thr_progress_is_blocking()); /* to avoid deadlock */
    ASSERT(c_p != NULL);

    erts_mtx_lock(&update_table_permission_mtx);
    ASSERT(updater_process != c_p);
    success = (updater_process == NULL);
    if (success) {
        updater_process = c_p;
    }
    else {
        struct update_queue_item* qitem;
        qitem = erts_alloc(ERTS_ALC_T_PERSISTENT_LOCK_Q, sizeof(*qitem));
        qitem->p = c_p;
        erts_proc_inc_refc(c_p);
        qitem->next = update_queue;
        update_queue = qitem;
        erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
    }
    erts_mtx_unlock(&update_table_permission_mtx);
    return success;
}
static void
insert_dist_referrer(ReferredDist *referred_dist,
                     int type,
                     Eterm id,
                     Uint creation)
{
    DistReferrer *drp;

    for (drp = referred_dist->referrers; drp; drp = drp->next)
        if (id == drp->id && (type == CTRL_REF
                              || creation == drp->creation))
            break;

    if (!drp) {
        drp = (DistReferrer *) erts_alloc(ERTS_ALC_T_NC_TMP,
                                          sizeof(DistReferrer));
        drp->next = referred_dist->referrers;
        referred_dist->referrers = drp;
        drp->id = id;
        drp->creation = creation;
        drp->node_ref = 0;
        drp->ctrl_ref = 0;
    }

    switch (type) {
    case NODE_REF: drp->node_ref++; break;
    case CTRL_REF: drp->ctrl_ref++; break;
    default: ASSERT(0);
    }
}
/*
** Init a pre-allocated or static hash structure
** and allocate buckets.
*/
Hash* hash_init(ErtsAlcType_t type, Hash* h, char* name, int size, HashFunctions fun)
{
    int sz;
    int ix = 0;

    h->type = type;

    while (h_size_table[ix] != -1 && h_size_table[ix] < size)
        ix++;
    if (h_size_table[ix] == -1)
        erl_exit(1, "panic: too large hash table size (%d)\n", size);

    size = h_size_table[ix];
    sz = size*sizeof(HashBucket*);

    h->bucket = (HashBucket**) erts_alloc(h->type, sz);

    sys_memzero(h->bucket, sz);
    h->is_allocated = 0;
    h->name = name;
    h->fun = fun;
    h->size = size;
    h->size20percent = h->size/5;
    h->size80percent = (4*h->size)/5;
    h->ix = ix;
    h->used = 0;
    return h;
}
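/* Illustration (not part of ERTS): picking a bucket count from a table of
 * precomputed sizes and deriving the 20%/80% rehash thresholds, as
 * hash_init does above. The size values here are hypothetical. */
static const int size_table[] = { 16, 32, 64, 128, 256, -1 };

static int pick_size(int wanted, int *lo_pct20, int *hi_pct80)
{
    int ix = 0;
    /* Advance to the first precomputed size >= the requested one. */
    while (size_table[ix] != -1 && size_table[ix] < wanted)
        ix++;
    if (size_table[ix] == -1)
        return -1;                         /* requested size too large */
    *lo_pct20 = size_table[ix] / 5;        /* shrink below 20% load */
    *hi_pct80 = (4 * size_table[ix]) / 5;  /* grow above 80% load */
    return size_table[ix];
}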
/*
 * Called from process_info/1,2.
 */
Eterm erts_dictionary_copy(Process *p, ProcDict *pd)
{
    Eterm* hp;
    Eterm* heap_start;
    Eterm res = NIL;
    Eterm tmp, tmp2;
    unsigned int i, num;

    if (pd == NULL) {
        return res;
    }

    PD_CHECK(pd);
    num = HASH_RANGE(pd);
    heap_start = hp = (Eterm *) erts_alloc(ERTS_ALC_T_TMP,
                                           sizeof(Eterm) * pd->numElements * 2);
    for (i = 0; i < num; ++i) {
        tmp = ARRAY_GET(pd, i);
        if (is_boxed(tmp)) {
            ASSERT(is_tuple(tmp));
            res = CONS(hp, tmp, res);
            hp += 2;
        } else if (is_list(tmp)) {
            while (tmp != NIL) {
                tmp2 = TCAR(tmp);
                res = CONS(hp, tmp2, res);
                hp += 2;
                tmp = TCDR(tmp);
            }
        }
    }
    res = copy_object(res, p);
    erts_free(ERTS_ALC_T_TMP, (void *) heap_start);
    return res;
}
/*
 * Caller _must_ yield if we return 0
 */
int erts_try_seize_code_write_permission(Process* c_p)
{
    int success;
#ifdef ERTS_SMP
    ASSERT(!erts_smp_thr_progress_is_blocking()); /* to avoid deadlock */
#endif
    ASSERT(c_p != NULL);

    erts_smp_mtx_lock(&code_write_permission_mtx);
    success = (code_writing_process == NULL);
    if (success) {
        code_writing_process = c_p;
#ifdef ERTS_ENABLE_LOCK_CHECK
        erts_tsd_set(has_code_write_permission, (void *) 1);
#endif
    }
    else { /* Already locked */
        struct code_write_queue_item* qitem;
        ASSERT(code_writing_process != c_p);
        qitem = erts_alloc(ERTS_ALC_T_CODE_IX_LOCK_Q, sizeof(*qitem));
        qitem->p = c_p;
        erts_proc_inc_refc(c_p);
        qitem->next = code_write_queue;
        code_write_queue = qitem;
        erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
    }
    erts_smp_mtx_unlock(&code_write_permission_mtx);
    return success;
}
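/* Illustration (not part of ERTS): the try-acquire-or-enqueue shape shared
 * by erts_try_seize_code_write_permission above and
 * try_seize_update_permission earlier. The waiter is recorded under the
 * mutex and the caller is expected to yield when 0 is returned; actual
 * suspension and wake-up on release are left abstract in this sketch. */
#include <pthread.h>
#include <stdlib.h>

struct waiter { void *proc; struct waiter *next; };

static pthread_mutex_t perm_mtx = PTHREAD_MUTEX_INITIALIZER;
static void *perm_owner = NULL;
static struct waiter *perm_queue = NULL;

/* Returns 1 if the caller now owns the permission, 0 if it was queued
 * and must yield until the current owner releases. */
static int try_seize_permission(void *proc)
{
    int success;
    pthread_mutex_lock(&perm_mtx);
    success = (perm_owner == NULL);
    if (success) {
        perm_owner = proc;
    } else {
        struct waiter *w = malloc(sizeof(*w));
        if (w == NULL)
            abort();                  /* sketch: no graceful OOM path */
        w->proc = proc;               /* caller keeps proc alive */
        w->next = perm_queue;         /* LIFO push, like the originals */
        perm_queue = w;
    }
    pthread_mutex_unlock(&perm_mtx);
    return success;
}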
static void
init_am_tot(void)
{
    am_tot = (Eterm *) erts_alloc(ERTS_ALC_T_INSTR_INFO, sizeof(Eterm));
    atom_init(am_tot, "total");
}
char *erts_sys_ddll_error(int code)
{
    int actual_code;
    char *local_ptr;
    if (code > ERL_DE_DYNAMIC_ERROR_OFFSET) {
        return "Unspecified error";
    }
    actual_code = -1*(code - ERL_DE_DYNAMIC_ERROR_OFFSET);

    local_ptr = TlsGetValue(tls_index);
    if (local_ptr == NULL) {
        local_ptr = erts_alloc(ERTS_ALC_T_DDLL_ERRCODES, MAX_ERROR);
        TlsSetValue(tls_index, local_ptr);
    }
    if (!FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM,
                       NULL,
                       (DWORD) actual_code,
                       MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
                       local_ptr,
                       MAX_ERROR, NULL)) {
        return "Unspecified error";
    } else {
        char *ptr = local_ptr + strlen(local_ptr) - 1;
        while (ptr >= local_ptr && (*ptr == '\r' || *ptr == '\n')) {
            *ptr-- = '\0';
        }
    }
    return local_ptr;
}
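/* Illustration (not part of ERTS): FormatMessage terminates messages with
 * "\r\n"; the loop above strips such trailing line breaks in place. A
 * self-contained version of that trim, guarded against empty strings: */
#include <stdio.h>
#include <string.h>

static void chomp_crlf(char *s)
{
    size_t n = strlen(s);
    while (n > 0 && (s[n-1] == '\r' || s[n-1] == '\n'))
        s[--n] = '\0';
}

int main(void)
{
    char msg[] = "Access is denied.\r\n";
    chomp_crlf(msg);
    printf("[%s]\n", msg);   /* prints [Access is denied.] */
    return 0;
}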
static int load_driver_entry(DE_Handle **dhp, char *path, char *name)
{
    int res;
    DE_Handle *dh = erts_alloc(ERTS_ALC_T_DDLL_HANDLE, sizeof(DE_Handle));

    assert_drv_list_rwlocked();

    dh->handle = NULL;
    dh->procs = NULL;
    erts_atomic32_init_nob(&dh->port_count, 0);
    erts_refc_init(&(dh->refc), (erts_aint_t) 0);
    dh->status = -1;
    dh->reload_full_path = NULL;
    dh->reload_driver_name = NULL;
    dh->reload_flags = 0;
    dh->full_path = NULL;
    dh->flags = 0;

    if ((res = do_load_driver_entry(dh, path, name)) != ERL_DE_NO_ERROR) {
        erts_free(ERTS_ALC_T_DDLL_HANDLE, (void *) dh);
        dh = NULL;
    }
    *dhp = dh;
    return res;
}
static struct export_entry*
export_alloc(struct export_entry* tmpl_e)
{
#ifndef ERTS_SLAVE
    struct export_blob* blob;
    unsigned ix;

    if (tmpl_e->slot.index == -1) { /* Template, allocate blob */
        Export* tmpl = tmpl_e->ep;
        Export* obj;

        blob = (struct export_blob*) erts_alloc(ERTS_ALC_T_EXPORT,
                                                sizeof(*blob));
        erts_smp_atomic_add_nob(&total_entries_bytes, sizeof(*blob));
        obj = &blob->exp;
        obj->fake_op_func_info_for_hipe[0] = 0;
        obj->fake_op_func_info_for_hipe[1] = 0;
        obj->code[0] = tmpl->code[0];
        obj->code[1] = tmpl->code[1];
        obj->code[2] = tmpl->code[2];
        obj->code[3] = (BeamInstr) em_call_error_handler;
        obj->code[4] = 0;
#ifdef ERTS_SLAVE_EMU_ENABLED
        obj->slave_fake_op_func_info_for_hipe[0] = 0;
        obj->slave_fake_op_func_info_for_hipe[1] = 0;
        obj->slave_code[0] = tmpl->code[0];
        obj->slave_code[1] = tmpl->code[1];
        obj->slave_code[2] = tmpl->code[2];
        /* If the slave is not online yet, we don't know its opcodes.
         * slave_code[3] will be touched on all export entries once it
         * comes online. */
        if (slave_initialised)
            obj->slave_code[3] = (BeamInstr) SlaveOp(op_call_error_handler);
        obj->slave_code[4] = 0;
#endif

        for (ix = 0; ix < ERTS_NUM_CODE_IX; ix++) {
            obj->addressv[ix] = obj->code+3;
#ifdef ERTS_SLAVE_EMU_ENABLED
            obj->slave_addressv[ix] = obj->slave_code+3;
#endif
            blob->entryv[ix].slot.index = -1;
            blob->entryv[ix].ep = &blob->exp;
        }
        ix = 0;
    }
    else { /* Existing entry in another table, use free entry in blob */
        blob = entry_to_blob(tmpl_e);
        for (ix = 0; blob->entryv[ix].slot.index >= 0; ix++) {
            ASSERT(ix < ERTS_NUM_CODE_IX);
        }
    }
    return &blob->entryv[ix];
#else
    erl_exit(1, "Cannot alloc export entry from slave");
#endif
}
static void
set_driver_reloading(DE_Handle *dh, Process *proc, char *path, char *name,
                     Uint flags)
{
    DE_ProcEntry *p;

    assert_drv_list_rwlocked();

    p = erts_alloc(ERTS_ALC_T_DDLL_PROCESS, sizeof(DE_ProcEntry));
    p->proc = proc;
    p->awaiting_status = ERL_DE_OK;
    p->next = dh->procs;
    p->flags = ERL_DE_FL_DEREFERENCED;
    dh->procs = p;
    dh->status = ERL_DE_RELOAD;
    dh->reload_full_path = erts_alloc(ERTS_ALC_T_DDLL_HANDLE,
                                      sys_strlen(path) + 1);
    strcpy(dh->reload_full_path, path);
    dh->reload_driver_name = erts_alloc(ERTS_ALC_T_DDLL_HANDLE,
                                        sys_strlen(name) + 1);
    strcpy(dh->reload_driver_name, name);
    dh->reload_flags = flags;
}