/*
 * Destructor for a registered-name table entry.
 * Returns the RegProc storage to the ERTS_ALC_T_REG_PROC allocator.
 */
static void
reg_free(RegProc *obj)
{
    erts_free(ERTS_ALC_T_REG_PROC, (void *) obj);
}
/*
 * Free a suspend-monitor record previously allocated from the
 * ERTS_ALC_T_SUSPEND_MON pool.
 */
void
erts_destroy_suspend_monitor(ErtsSuspendMonitor *smon)
{
    erts_free(ERTS_ALC_T_SUSPEND_MON, smon);
}
/*
 * Free a Module struct and subtract its size from the global
 * module-bytes counter.
 */
static void
module_free(Module* mod)
{
    erts_free(ERTS_ALC_T_MODULE, mod);
    /* Cast before negating: plain -sizeof(Module) negates an unsigned
     * size_t (wrapping to a huge value) and then relies on implicit
     * conversion to the signed atomic argument type.  Negating the
     * signed value directly is well defined and states the intent. */
    erts_atomic_add_nob(&tot_module_bytes, -(erts_aint_t) sizeof(Module));
}
/*
 * erlang:load_nif/2 BIF.
 *
 * BIF_ARG_1: library path as a flat Erlang byte list.
 * BIF_ARG_2: load-info term handed to the library's
 *            load/reload/upgrade callback.
 *
 * Runs with thread progress blocked (system blocked); the caller's
 * main process lock is released for the duration and re-acquired
 * before returning.
 *
 * Flow: resolve the calling module from BIF_P->cp (load_nif must be
 * called from current, not old, code), dlopen the library, call its
 * init function to obtain the ErlNifEntry, validate ABI major/minor
 * version, vm variant and module name, verify every listed NIF maps
 * to an existing function whose code is large enough to patch, then
 * run reload (module already has a NIF lib), upgrade (old code has
 * one) or load, and finally patch each function with op_call_nif --
 * writing through the breakpoint structure when the function is
 * traced.  On any failure the error path frees the erl_module_nif
 * struct, closes the ddll handle and releases the error descriptor.
 *
 * Returns am_ok or an error term built by load_nif_error(); raises
 * badarg only for a malformed path argument.
 */
BIF_RETTYPE load_nif_2(BIF_ALIST_2) { static const char bad_lib[] = "bad_lib"; static const char reload[] = "reload"; static const char upgrade[] = "upgrade"; char* lib_name = NULL; void* handle = NULL; void* init_func; ErlNifEntry* entry = NULL; ErlNifEnv env; int len, i, err; Module* mod; Eterm mod_atom; Eterm f_atom; BeamInstr* caller; ErtsSysDdllError errdesc = ERTS_SYS_DDLL_ERROR_INIT; Eterm ret = am_ok; int veto; struct erl_module_nif* lib = NULL; len = list_length(BIF_ARG_1); if (len < 0) { BIF_ERROR(BIF_P, BADARG); } lib_name = (char *) erts_alloc(ERTS_ALC_T_TMP, len + 1); if (intlist_to_buf(BIF_ARG_1, lib_name, len) != len) { erts_free(ERTS_ALC_T_TMP, lib_name); BIF_ERROR(BIF_P, BADARG); } lib_name[len] = '\0'; /* Block system (is this the right place to do it?) */ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); erts_smp_thr_progress_block(); /* Find calling module */ ASSERT(BIF_P->current != NULL); ASSERT(BIF_P->current[0] == am_erlang && BIF_P->current[1] == am_load_nif && BIF_P->current[2] == 2); caller = find_function_from_pc(BIF_P->cp); ASSERT(caller != NULL); mod_atom = caller[0]; ASSERT(is_atom(mod_atom)); mod=erts_get_module(mod_atom); ASSERT(mod != NULL); if (!in_area(caller, mod->code, mod->code_length)) { ASSERT(in_area(caller, mod->old_code, mod->old_code_length)); ret = load_nif_error(BIF_P, "old_code", "Calling load_nif from old " "module '%T' not allowed", mod_atom); } else if ((err=erts_sys_ddll_open2(lib_name, &handle, &errdesc)) != ERL_DE_NO_ERROR) { const char slogan[] = "Failed to load NIF library"; if (strstr(errdesc.str, lib_name) != NULL) { ret = load_nif_error(BIF_P, "load_failed", "%s: '%s'", slogan, errdesc.str); } else { ret = load_nif_error(BIF_P, "load_failed", "%s %s: '%s'", slogan, lib_name, errdesc.str); } } else if (erts_sys_ddll_load_nif_init(handle, &init_func, &errdesc) != ERL_DE_NO_ERROR) { ret = load_nif_error(BIF_P, bad_lib, "Failed to find library init" " function: '%s'", errdesc.str); } else if 
((add_taint(mod_atom), (entry = erts_sys_ddll_call_nif_init(init_func)) == NULL)) { ret = load_nif_error(BIF_P, bad_lib, "Library init-call unsuccessful"); } else if (entry->major != ERL_NIF_MAJOR_VERSION || entry->minor > ERL_NIF_MINOR_VERSION) { ret = load_nif_error(BIF_P, bad_lib, "Library version (%d.%d) not compatible (with %d.%d).", entry->major, entry->minor, ERL_NIF_MAJOR_VERSION, ERL_NIF_MINOR_VERSION); } else if (entry->minor >= 1 && sys_strcmp(entry->vm_variant, ERL_NIF_VM_VARIANT) != 0) { ret = load_nif_error(BIF_P, bad_lib, "Library (%s) not compiled for " "this vm variant (%s).", entry->vm_variant, ERL_NIF_VM_VARIANT); } else if (!erts_is_atom_str((char*)entry->name, mod_atom)) { ret = load_nif_error(BIF_P, bad_lib, "Library module name '%s' does not" " match calling module '%T'", entry->name, mod_atom); } else { /*erts_fprintf(stderr, "Found module %T\r\n", mod_atom);*/ for (i=0; i < entry->num_of_funcs && ret==am_ok; i++) { BeamInstr** code_pp; ErlNifFunc* f = &entry->funcs[i]; if (!erts_atom_get(f->name, sys_strlen(f->name), &f_atom) || (code_pp = get_func_pp(mod->code, f_atom, f->arity))==NULL) { ret = load_nif_error(BIF_P,bad_lib,"Function not found %T:%s/%u", mod_atom, f->name, f->arity); } else if (code_pp[1] - code_pp[0] < (5+3)) { ret = load_nif_error(BIF_P,bad_lib,"No explicit call to load_nif" " in module (%T:%s/%u to small)", mod_atom, entry->funcs[i].name, entry->funcs[i].arity); } /*erts_fprintf(stderr, "Found NIF %T:%s/%u\r\n", mod_atom, entry->funcs[i].name, entry->funcs[i].arity);*/ } } if (ret != am_ok) { goto error; } /* Call load, reload or upgrade: */ lib = erts_alloc(ERTS_ALC_T_NIF, sizeof(struct erl_module_nif)); lib->handle = handle; lib->entry = entry; erts_refc_init(&lib->rt_cnt, 0); erts_refc_init(&lib->rt_dtor_cnt, 0); lib->mod = mod; env.mod_nif = lib; if (mod->nif != NULL) { /* Reload */ int k; lib->priv_data = mod->nif->priv_data; ASSERT(mod->nif->entry != NULL); if (entry->reload == NULL) { ret = 
load_nif_error(BIF_P,reload,"Reload not supported by this NIF library."); goto error; } /* Check that no NIF is removed */ for (k=0; k < mod->nif->entry->num_of_funcs; k++) { ErlNifFunc* old_func = &mod->nif->entry->funcs[k]; for (i=0; i < entry->num_of_funcs; i++) { if (old_func->arity == entry->funcs[i].arity && sys_strcmp(old_func->name, entry->funcs[i].name) == 0) { break; } } if (i == entry->num_of_funcs) { ret = load_nif_error(BIF_P,reload,"Reloaded library missing " "function %T:%s/%u\r\n", mod_atom, old_func->name, old_func->arity); goto error; } } erts_pre_nif(&env, BIF_P, lib); veto = entry->reload(&env, &lib->priv_data, BIF_ARG_2); erts_post_nif(&env); if (veto) { ret = load_nif_error(BIF_P, reload, "Library reload-call unsuccessful."); } else { mod->nif->entry = NULL; /* to prevent 'unload' callback */ erts_unload_nif(mod->nif); } } else { lib->priv_data = NULL; if (mod->old_nif != NULL) { /* Upgrade */ void* prev_old_data = mod->old_nif->priv_data; if (entry->upgrade == NULL) { ret = load_nif_error(BIF_P, upgrade, "Upgrade not supported by this NIF library."); goto error; } erts_pre_nif(&env, BIF_P, lib); veto = entry->upgrade(&env, &lib->priv_data, &mod->old_nif->priv_data, BIF_ARG_2); erts_post_nif(&env); if (veto) { mod->old_nif->priv_data = prev_old_data; ret = load_nif_error(BIF_P, upgrade, "Library upgrade-call unsuccessful."); } /*else if (mod->old_nif->priv_data != prev_old_data) { refresh_cached_nif_data(mod->old_code, mod->old_nif); }*/ } else if (entry->load != NULL) { /* Initial load */ erts_pre_nif(&env, BIF_P, lib); veto = entry->load(&env, &lib->priv_data, BIF_ARG_2); erts_post_nif(&env); if (veto) { ret = load_nif_error(BIF_P, "load", "Library load-call unsuccessful."); } } } if (ret == am_ok) { /* ** Everything ok, patch the beam code with op_call_nif */ mod->nif = lib; for (i=0; i < entry->num_of_funcs; i++) { BeamInstr* code_ptr; erts_atom_get(entry->funcs[i].name, sys_strlen(entry->funcs[i].name), &f_atom); code_ptr = 
*get_func_pp(mod->code, f_atom, entry->funcs[i].arity); if (code_ptr[1] == 0) { code_ptr[5+0] = (BeamInstr) BeamOp(op_call_nif); } else { /* Function traced, patch the original instruction word */ BpData** bps = (BpData**) code_ptr[1]; BpData* bp = (BpData*) bps[bp_sched2ix()]; bp->orig_instr = (BeamInstr) BeamOp(op_call_nif); } code_ptr[5+1] = (BeamInstr) entry->funcs[i].fptr; code_ptr[5+2] = (BeamInstr) lib; } } else { error: ASSERT(ret != am_ok); if (lib != NULL) { erts_free(ERTS_ALC_T_NIF, lib); } if (handle != NULL) { erts_sys_ddll_close(handle); } erts_sys_ddll_free_error(&errdesc); } erts_smp_thr_progress_unblock(); erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); erts_free(ERTS_ALC_T_TMP, lib_name); BIF_RET(ret); }
/*
 * Destroy a process-independent NIF environment: first release the
 * terms/heap it owns via enif_clear_env(), then free the ErlNifEnv
 * struct itself.
 */
void
enif_free_env(ErlNifEnv* env)
{
    enif_clear_env(env);
    erts_free(ERTS_ALC_T_NIF, env);
}
/*
 * erlang:decode_packet/3 BIF.
 *
 * BIF_ARG_1: packet type (0|1|2|4|raw|asn1|sunrm|cdr|fcgi|line|tpkt|
 *            http|httph|http_bin|httph_bin|ssl_tls).
 * BIF_ARG_2: the binary to split.
 * BIF_ARG_3: option list; {packet_size,N} and {line_length,N} are
 *            recognized (0 = no limit).
 *
 * If the binary is bit-offset (unaligned) its bytes are copied to a
 * temporary aligned buffer, freed again at 'done'.  When no special
 * parsing applies (code == 0) the body and the rest are returned as
 * sub-binaries of the original binary; HTTP/TLS types build their own
 * result via packet_callbacks_erl into pca.res.  Any trailing bits of
 * the original binary go into the rest sub-binary.
 */
/* decode_packet(Type,Bin,Options) Returns: {ok, PacketBodyBin, RestBin} {more, PacketSz | undefined} {error, invalid} */ BIF_RETTYPE decode_packet_3(BIF_ALIST_3) { unsigned max_plen = 0; /* Packet max length, 0=no limit */ unsigned trunc_len = 0; /* Truncate lines if longer, 0=no limit */ int http_state = 0; /* 0=request/response 1=header */ int packet_sz; /*-------Binaries involved: ------------------*/ byte* bin_ptr; /*| orig: original binary */ byte bin_bitsz; /*| bin: BIF_ARG_2, may be sub-binary of orig */ /*| packet: prefix of bin */ char* body_ptr; /*| body: part of packet to return */ int body_sz; /*| rest: bin without packet */ struct packet_callback_args pca; enum PacketParseType type; Eterm* hp; Eterm* hend; ErlSubBin* rest; Eterm res; Eterm options; int code; if (!is_binary(BIF_ARG_2) || (!is_list(BIF_ARG_3) && !is_nil(BIF_ARG_3))) { BIF_ERROR(BIF_P, BADARG); } switch (BIF_ARG_1) { case make_small(0): case am_raw: type = TCP_PB_RAW; break; case make_small(1): type = TCP_PB_1; break; case make_small(2): type = TCP_PB_2; break; case make_small(4): type = TCP_PB_4; break; case am_asn1: type = TCP_PB_ASN1; break; case am_sunrm: type = TCP_PB_RM; break; case am_cdr: type = TCP_PB_CDR; break; case am_fcgi: type = TCP_PB_FCGI; break; case am_line: type = TCP_PB_LINE_LF; break; case am_tpkt: type = TCP_PB_TPKT; break; case am_http: type = TCP_PB_HTTP; break; case am_httph: type = TCP_PB_HTTPH; break; case am_http_bin: type = TCP_PB_HTTP_BIN; break; case am_httph_bin: type = TCP_PB_HTTPH_BIN; break; case am_ssl_tls: type = TCP_PB_SSL_TLS; break; default: BIF_ERROR(BIF_P, BADARG); } options = BIF_ARG_3; while (!is_nil(options)) { Eterm* cons = list_val(options); if (is_tuple(CAR(cons))) { Eterm* tpl = tuple_val(CAR(cons)); Uint val; if (tpl[0] == make_arityval(2) && term_to_Uint(tpl[2],&val) && val <= UINT_MAX) { switch (tpl[1]) { case am_packet_size: max_plen = val; goto next_option; case am_line_length: trunc_len = val; goto next_option; } } } BIF_ERROR(BIF_P, 
BADARG); next_option: options = CDR(cons); } pca.bin_sz = binary_size(BIF_ARG_2); ERTS_GET_BINARY_BYTES(BIF_ARG_2, bin_ptr, pca.bin_bitoffs, bin_bitsz); if (pca.bin_bitoffs != 0) { pca.aligned_ptr = erts_alloc(ERTS_ALC_T_TMP, pca.bin_sz); erts_copy_bits(bin_ptr, pca.bin_bitoffs, 1, pca.aligned_ptr, 0, 1, pca.bin_sz*8); } else { pca.aligned_ptr = bin_ptr; } packet_sz = packet_get_length(type, (char*)pca.aligned_ptr, pca.bin_sz, max_plen, trunc_len, &http_state); if (!(packet_sz > 0 && packet_sz <= pca.bin_sz)) { if (packet_sz < 0) { goto error; } else { /* not enough data */ Eterm plen = (packet_sz==0) ? am_undefined : erts_make_integer(packet_sz, BIF_P); Eterm* hp = HAlloc(BIF_P,3); res = TUPLE2(hp, am_more, plen); goto done; } } /* We got a whole packet */ body_ptr = (char*) pca.aligned_ptr; body_sz = packet_sz; packet_get_body(type, (const char**) &body_ptr, &body_sz); ERTS_GET_REAL_BIN(BIF_ARG_2, pca.orig, pca.bin_offs, pca.bin_bitoffs, bin_bitsz); pca.p = BIF_P; pca.res = THE_NON_VALUE; pca.string_as_bin = (type == TCP_PB_HTTP_BIN || type == TCP_PB_HTTPH_BIN); code = packet_parse(type, (char*)pca.aligned_ptr, packet_sz, &http_state, &packet_callbacks_erl, &pca); if (code == 0) { /* no special packet parsing, make plain binary */ ErlSubBin* body; Uint hsz = 2*ERL_SUB_BIN_SIZE + 4; hp = HAlloc(BIF_P, hsz); hend = hp + hsz; body = (ErlSubBin *) hp; body->thing_word = HEADER_SUB_BIN; body->size = body_sz; body->offs = pca.bin_offs + (body_ptr - (char*)pca.aligned_ptr); body->orig = pca.orig; body->bitoffs = pca.bin_bitoffs; body->bitsize = 0; body->is_writable = 0; hp += ERL_SUB_BIN_SIZE; pca.res = make_binary(body); } else if (code > 0) { Uint hsz = ERL_SUB_BIN_SIZE + 4; ASSERT(pca.res != THE_NON_VALUE); hp = HAlloc(BIF_P, hsz); hend = hp + hsz; } else { error: hp = HAlloc(BIF_P,3); res = TUPLE2(hp, am_error, am_invalid); goto done; } rest = (ErlSubBin *) hp; rest->thing_word = HEADER_SUB_BIN; rest->size = pca.bin_sz - packet_sz; rest->offs = pca.bin_offs + 
packet_sz; rest->orig = pca.orig; rest->bitoffs = pca.bin_bitoffs; rest->bitsize = bin_bitsz; /* The extra bits go into the rest. */ rest->is_writable = 0; hp += ERL_SUB_BIN_SIZE; res = TUPLE3(hp, am_ok, pca.res, make_binary(rest)); hp += 4; ASSERT(hp==hend); (void)hend; done: if (pca.aligned_ptr != bin_ptr) { erts_free(ERTS_ALC_T_TMP, pca.aligned_ptr); } BIF_RET(res); }
/*
 * Implementation behind erlang:open_port/2.
 *
 * name:     the port name term -- an atom/string (vanilla driver), a
 *           {spawn|spawn_driver|spawn_executable, Cmd} tuple (spawn
 *           driver) or {fd, In, Out} (fd driver).
 * settings: the option list; tuple options (packet, line, env, args,
 *           arg0, cd, parallelism) and flag atoms (stream, use_stdio,
 *           stderr_to_stdout, line, nouse_stdio, binary, in, out, eof,
 *           hide, exit_status, overlapped_io) are parsed into
 *           SysDriverOpts and port state flags.
 *
 * Releases the caller's main lock around erts_open_driver() and
 * re-acquires it afterwards.  All temporary buffers (name_buf, argv,
 * wd) are freed on every exit path via 'do_return'.
 *
 * Returns the opened Port, or NULL with *err_typep/*err_nump set
 * (badarg errors use err_type -3 / BADARG).
 *
 * NOTE(review): 'dir' is declared but opts.wd is only ever set by
 * erts_convert_filename_to_native, so the 'opts.wd != (char*)dir'
 * guard in the cleanup appears to be vestigial -- confirm against the
 * file's history before removing.
 */
static Port * open_port(Process* p, Eterm name, Eterm settings, int *err_typep, int *err_nump) { int i; Eterm option; Uint arity; Eterm* tp; Uint* nargs; erts_driver_t* driver; char* name_buf = NULL; SysDriverOpts opts; Sint linebuf; Eterm edir = NIL; byte dir[MAXPATHLEN]; erts_aint32_t sflgs = 0; Port *port; /* These are the defaults */ opts.packet_bytes = 0; opts.use_stdio = 1; opts.redir_stderr = 0; opts.read_write = 0; opts.hide_window = 0; opts.wd = NULL; opts.envir = NULL; opts.exit_status = 0; opts.overlapped_io = 0; opts.spawn_type = ERTS_SPAWN_ANY; opts.argv = NULL; opts.parallelism = erts_port_parallelism; linebuf = 0; *err_nump = 0; if (is_not_list(settings) && is_not_nil(settings)) { goto badarg; } /* * Parse the settings. */ if (is_not_nil(settings)) { nargs = list_val(settings); while (1) { if (is_tuple_arity(*nargs, 2)) { tp = tuple_val(*nargs); arity = *tp++; option = *tp++; if (option == am_packet) { if (is_not_small(*tp)) { goto badarg; } opts.packet_bytes = signed_val(*tp); switch (opts.packet_bytes) { case 1: case 2: case 4: break; default: goto badarg; } } else if (option == am_line) { if (is_not_small(*tp)) { goto badarg; } linebuf = signed_val(*tp); if (linebuf <= 0) { goto badarg; } } else if (option == am_env) { byte* bytes; if ((bytes = convert_environment(p, *tp)) == NULL) { goto badarg; } opts.envir = (char *) bytes; } else if (option == am_args) { char **av; char **oav = opts.argv; if ((av = convert_args(*tp)) == NULL) { goto badarg; } opts.argv = av; if (oav) { opts.argv[0] = oav[0]; oav[0] = erts_default_arg0; free_args(oav); } } else if (option == am_arg0) { char *a0; if ((a0 = erts_convert_filename_to_native(*tp, NULL, 0, ERTS_ALC_T_TMP, 1, 1, NULL)) == NULL) { goto badarg; } if (opts.argv == NULL) { opts.argv = erts_alloc(ERTS_ALC_T_TMP, 2 * sizeof(char **)); opts.argv[0] = a0; opts.argv[1] = NULL; } else { if (opts.argv[0] != erts_default_arg0) { erts_free(ERTS_ALC_T_TMP, opts.argv[0]); } opts.argv[0] = a0; } } else if (option == 
am_cd) { edir = *tp; } else if (option == am_parallelism) { if (*tp == am_true) opts.parallelism = 1; else if (*tp == am_false) opts.parallelism = 0; else goto badarg; } else { goto badarg; } } else if (*nargs == am_stream) { opts.packet_bytes = 0; } else if (*nargs == am_use_stdio) { opts.use_stdio = 1; } else if (*nargs == am_stderr_to_stdout) { opts.redir_stderr = 1; } else if (*nargs == am_line) { linebuf = 512; } else if (*nargs == am_nouse_stdio) { opts.use_stdio = 0; } else if (*nargs == am_binary) { sflgs |= ERTS_PORT_SFLG_BINARY_IO; } else if (*nargs == am_in) { opts.read_write |= DO_READ; } else if (*nargs == am_out) { opts.read_write |= DO_WRITE; } else if (*nargs == am_eof) { sflgs |= ERTS_PORT_SFLG_SOFT_EOF; } else if (*nargs == am_hide) { opts.hide_window = 1; } else if (*nargs == am_exit_status) { opts.exit_status = 1; } else if (*nargs == am_overlapped_io) { opts.overlapped_io = 1; } else { goto badarg; } if (is_nil(*++nargs)) break; if (is_not_list(*nargs)) { goto badarg; } nargs = list_val(*nargs); } } if (opts.read_write == 0) /* implement default */ opts.read_write = DO_READ|DO_WRITE; /* Mutually exclusive arguments. */ if((linebuf && opts.packet_bytes) || (opts.redir_stderr && !opts.use_stdio)) { goto badarg; } /* * Parse the first argument and start the appropriate driver. 
*/ if (is_atom(name) || (i = is_string(name))) { /* a vanilla port */ if (is_atom(name)) { name_buf = (char *) erts_alloc(ERTS_ALC_T_TMP, atom_tab(atom_val(name))->len+1); sys_memcpy((void *) name_buf, (void *) atom_tab(atom_val(name))->name, atom_tab(atom_val(name))->len); name_buf[atom_tab(atom_val(name))->len] = '\0'; } else { name_buf = (char *) erts_alloc(ERTS_ALC_T_TMP, i + 1); if (intlist_to_buf(name, name_buf, i) != i) erl_exit(1, "%s:%d: Internal error\n", __FILE__, __LINE__); name_buf[i] = '\0'; } driver = &vanilla_driver; } else { if (is_not_tuple(name)) { goto badarg; /* Not a process or fd port */ } tp = tuple_val(name); arity = *tp++; if (arity == make_arityval(0)) { goto badarg; } if (*tp == am_spawn || *tp == am_spawn_driver || *tp == am_spawn_executable) { /* A process port */ int encoding; if (arity != make_arityval(2)) { goto badarg; } name = tp[1]; encoding = erts_get_native_filename_encoding(); /* Do not convert the command to utf-16le yet, do that in win32 specific code */ /* since the cmd is used for comparsion with drivers names and copied to port info */ if (encoding == ERL_FILENAME_WIN_WCHAR) { encoding = ERL_FILENAME_UTF8; } if ((name_buf = erts_convert_filename_to_encoding(name, NULL, 0, ERTS_ALC_T_TMP,0,1, encoding, NULL)) == NULL) { goto badarg; } if (*tp == am_spawn_driver) { opts.spawn_type = ERTS_SPAWN_DRIVER; } else if (*tp == am_spawn_executable) { opts.spawn_type = ERTS_SPAWN_EXECUTABLE; } driver = &spawn_driver; } else if (*tp == am_fd) { /* An fd port */ int n; struct Sint_buf sbuf; char* p; if (arity != make_arityval(3)) { goto badarg; } if (is_not_small(tp[1]) || is_not_small(tp[2])) { goto badarg; } opts.ifd = unsigned_val(tp[1]); opts.ofd = unsigned_val(tp[2]); /* Syntesize name from input and output descriptor. 
*/ name_buf = erts_alloc(ERTS_ALC_T_TMP, 2*sizeof(struct Sint_buf) + 2); p = Sint_to_buf(opts.ifd, &sbuf); n = sys_strlen(p); sys_strncpy(name_buf, p, n); name_buf[n] = '/'; p = Sint_to_buf(opts.ofd, &sbuf); sys_strcpy(name_buf+n+1, p); driver = &fd_driver; } else { goto badarg; } } if ((driver != &spawn_driver && opts.argv != NULL) || (driver == &spawn_driver && opts.spawn_type != ERTS_SPAWN_EXECUTABLE && opts.argv != NULL)) { /* Argument vector only if explicit spawn_executable */ goto badarg; } if (edir != NIL) { if ((opts.wd = erts_convert_filename_to_native(edir, NULL, 0, ERTS_ALC_T_TMP,0,1,NULL)) == NULL) { goto badarg; } } if (driver != &spawn_driver && opts.exit_status) { goto badarg; } if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) { trace_virtual_sched(p, am_out); } erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN); port = erts_open_driver(driver, p->common.id, name_buf, &opts, err_typep, err_nump); #ifdef USE_VM_PROBES if (port && DTRACE_ENABLED(port_open)) { DTRACE_CHARBUF(process_str, DTRACE_TERM_BUF_SIZE); DTRACE_CHARBUF(port_str, DTRACE_TERM_BUF_SIZE); dtrace_proc_str(p, process_str); erts_snprintf(port_str, sizeof(port_str), "%T", port->common.id); DTRACE3(port_open, process_str, name_buf, port_str); } #endif erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN); if (!port) { DEBUGF(("open_driver returned (%d:%d)\n", err_typep ? *err_typep : 4711, err_nump ? 
*err_nump : 4711)); if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) { trace_virtual_sched(p, am_in); } goto do_return; } if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) { trace_virtual_sched(p, am_in); } if (linebuf && port->linebuf == NULL){ port->linebuf = allocate_linebuf(linebuf); sflgs |= ERTS_PORT_SFLG_LINEBUF_IO; } if (sflgs) erts_atomic32_read_bor_relb(&port->state, sflgs); do_return: if (name_buf) erts_free(ERTS_ALC_T_TMP, (void *) name_buf); if (opts.argv) { free_args(opts.argv); } if (opts.wd && opts.wd != ((char *)dir)) { erts_free(ERTS_ALC_T_TMP, (void *) opts.wd); } return port; badarg: if (err_typep) *err_typep = -3; if (err_nump) *err_nump = BADARG; port = NULL; goto do_return; }
/*
 * Best-effort resource cleanup run at emulator exit.
 *
 * Skipped entirely for abnormal exits (exit_code != 0), before
 * initialization has completed, or (threaded non-SMP build) when
 * called from a thread other than the main thread.  In SMP builds the
 * system is blocked and intentionally never released again.  The
 * HYBRID/INCREMENTAL sections free the global heap machinery of those
 * build variants; async threads and mseg are shut down last.
 */
static void system_cleanup(int exit_code) { /* No cleanup wanted if ... * 1. we are about to do an abnormal exit * 2. we haven't finished initializing, or * 3. another thread than the main thread is performing the exit * (in threaded non smp case). */ if (exit_code != 0 || !erts_initialized #if defined(USE_THREADS) && !defined(ERTS_SMP) || !erts_equal_tids(main_thread, erts_thr_self()) #endif ) return; #ifdef ERTS_SMP #ifdef ERTS_ENABLE_LOCK_CHECK erts_lc_check_exact(NULL, 0); #endif erts_smp_block_system(ERTS_BS_FLG_ALLOW_GC); /* We never release it... */ #endif #ifdef HYBRID if (ma_src_stack) erts_free(ERTS_ALC_T_OBJECT_STACK, (void *)ma_src_stack); if (ma_dst_stack) erts_free(ERTS_ALC_T_OBJECT_STACK, (void *)ma_dst_stack); if (ma_offset_stack) erts_free(ERTS_ALC_T_OBJECT_STACK, (void *)ma_offset_stack); ma_src_stack = NULL; ma_dst_stack = NULL; ma_offset_stack = NULL; erts_cleanup_offheap(&erts_global_offheap); #endif #if defined(HYBRID) && !defined(INCREMENTAL) if (global_heap) { ERTS_HEAP_FREE(ERTS_ALC_T_HEAP, (void*) global_heap, sizeof(Eterm) * global_heap_sz); } global_heap = NULL; #endif #ifdef INCREMENTAL erts_cleanup_incgc(); #endif #if defined(USE_THREADS) exit_async(); #endif #if HAVE_ERTS_MSEG erts_mseg_exit(); #endif /* * A lot more cleaning could/should have been done... */ }
/*
 * ethread standard-priority free callback; forwards to erts_free with
 * the ERTS_ALC_T_ETHR_STD allocation type.
 */
static void
ethr_std_free(void *ptr)
{
    erts_free(ERTS_ALC_T_ETHR_STD, ptr);
}
/*
 * Main loop of one async worker thread.
 *
 * Pops ErlAsync jobs off its queue forever; a job with port == NIL is
 * the shutdown signal (the job is freed and the thread exits).  For a
 * normal job async_invoke runs here on the worker.  In SMP builds the
 * completion (async_ready / async_free) also happens here, guarded by
 * a port lookup that may fail if the port died meanwhile; in non-SMP
 * builds the finished job is instead chained onto async_ready_list
 * under async_ready_mtx and the scheduler is woken via
 * sys_async_ready().  Always returns NULL.
 */
static void* async_main(void* arg) { AsyncQueue* q = (AsyncQueue*) arg; #ifdef ERTS_ENABLE_LOCK_CHECK { char buf[27]; erts_snprintf(&buf[0], 27, "async %d", q->no); erts_lc_set_thread_name(&buf[0]); } #endif while(1) { ErlAsync* a = async_get(q); if (a->port == NIL) { /* TIME TO DIE SIGNAL */ erts_free(ERTS_ALC_T_ASYNC, (void *) a); break; } else { (*a->async_invoke)(a->async_data); /* Major problem if the code for async_invoke or async_free is removed during a blocking operation */ #ifdef ERTS_SMP { Port *p; p = erts_id2port_sflgs(a->port, NULL, 0, ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP); if (!p) { if (a->async_free) (*a->async_free)(a->async_data); } else { if (async_ready(p, a->async_data)) { if (a->async_free) (*a->async_free)(a->async_data); } async_detach(a->hndl); erts_port_release(p); } if (a->pdl) { driver_pdl_dec_refc(a->pdl); } erts_free(ERTS_ALC_T_ASYNC, (void *) a); } #else if (a->pdl) { driver_pdl_dec_refc(a->pdl); } erts_mtx_lock(&async_ready_mtx); a->next = async_ready_list; async_ready_list = a; erts_mtx_unlock(&async_ready_mtx); sys_async_ready(q->hndl); #endif } } return NULL; }
/*
 * driver_async() -- ErlAsync variant.
 *
 * Allocates the job, assigns it a wrapping positive id (guarded by
 * async_id_lock), picks a queue either round-robin by id or by *key,
 * and either enqueues it on a worker thread (USE_THREADS with
 * erts_async_max_threads > 0, bumping the port data-lock refc) or runs
 * invoke + ready synchronously right here.  Returns the job id, or -1
 * if the port is invalid.
 *
 * NOTE(review): a second, ErtsAsync-based definition of driver_async
 * appears later in this SOURCE -- presumably they come from different
 * OTP versions; confirm only one is compiled into a build.
 */
/* ** Schedule async_invoke on a worker thread ** NOTE will be syncrounous when threads are unsupported ** return values: ** 0 completed ** -1 error ** N handle value (used with async_cancel) ** arguments: ** ix driver index ** key pointer to secedule queue (NULL means round robin) ** async_invoke function to run in thread ** async_data data to pass to invoke function ** async_free function for relase async_data in case of failure */ long driver_async(ErlDrvPort ix, unsigned int* key, void (*async_invoke)(void*), void* async_data, void (*async_free)(void*)) { ErlAsync* a = (ErlAsync*) erts_alloc(ERTS_ALC_T_ASYNC, sizeof(ErlAsync)); Port* prt = erts_drvport2port(ix); long id; unsigned int qix; if (!prt) return -1; ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); a->next = NULL; a->prev = NULL; a->hndl = (DE_Handle*)prt->drv_ptr->handle; a->port = prt->id; a->pdl = NULL; a->async_data = async_data; a->async_invoke = async_invoke; a->async_free = async_free; erts_smp_spin_lock(&async_id_lock); async_id = (async_id + 1) & 0x7fffffff; if (async_id == 0) async_id++; id = async_id; erts_smp_spin_unlock(&async_id_lock); a->async_id = id; if (key == NULL) { qix = (erts_async_max_threads > 0) ? (id % erts_async_max_threads) : 0; } else { qix = (erts_async_max_threads > 0) ? (*key % erts_async_max_threads) : 0; *key = qix; } #ifdef USE_THREADS if (erts_async_max_threads > 0) { if (prt->port_data_lock) { driver_pdl_inc_refc(prt->port_data_lock); a->pdl = prt->port_data_lock; } async_add(a, &async_q[qix]); return id; } #endif (*a->async_invoke)(a->async_data); if (async_ready(prt, a->async_data)) { if (a->async_free != NULL) (*a->async_free)(a->async_data); } erts_free(ERTS_ALC_T_ASYNC, (void *) a); return id; }
/*
 * driver_async() -- ErtsAsync variant (newer interface).
 *
 * Differences from the ErlAsync variant above: the job carries the
 * scheduler id when ERTS_USE_ASYNC_READY_Q is set, the port is
 * validated against ERTS_INVALID_ERL_DRV_PORT before allocation, and
 * the id comes from the lock-free atomic counter in async->init.data.id
 * (negative values are mirrored positive; 0 is skipped).  Queueing and
 * the synchronous no-thread fallback behave as in the older variant.
 *
 * NOTE(review): this duplicates the earlier driver_async definition --
 * presumably different OTP versions; confirm only one is compiled.
 */
/* ** Schedule async_invoke on a worker thread ** NOTE will be syncrounous when threads are unsupported ** return values: ** 0 completed ** -1 error ** N handle value (used with async_cancel) ** arguments: ** ix driver index ** key pointer to secedule queue (NULL means round robin) ** async_invoke function to run in thread ** async_data data to pass to invoke function ** async_free function for relase async_data in case of failure */ long driver_async(ErlDrvPort ix, unsigned int* key, void (*async_invoke)(void*), void* async_data, void (*async_free)(void*)) { ErtsAsync* a; Port* prt; long id; unsigned int qix; #if ERTS_USE_ASYNC_READY_Q Uint sched_id; sched_id = erts_get_scheduler_id(); if (!sched_id) sched_id = 1; #endif prt = erts_drvport2port(ix); if (prt == ERTS_INVALID_ERL_DRV_PORT) return -1; ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); a = (ErtsAsync*) erts_alloc(ERTS_ALC_T_ASYNC, sizeof(ErtsAsync)); #if ERTS_USE_ASYNC_READY_Q a->sched_id = sched_id; #endif a->hndl = (DE_Handle*)prt->drv_ptr->handle; a->port = prt->common.id; a->pdl = NULL; a->async_data = async_data; a->async_invoke = async_invoke; a->async_free = async_free; if (!async) id = 0; else { do { id = erts_atomic_inc_read_nob(&async->init.data.id); } while (id == 0); if (id < 0) id *= -1; ASSERT(id > 0); } a->async_id = id; if (key == NULL) { qix = (erts_async_max_threads > 0) ? (id % erts_async_max_threads) : 0; } else { qix = (erts_async_max_threads > 0) ? (*key % erts_async_max_threads) : 0; *key = qix; } #ifdef USE_THREADS if (erts_async_max_threads > 0) { if (prt->port_data_lock) { driver_pdl_inc_refc(prt->port_data_lock); a->pdl = prt->port_data_lock; } async_add(a, async_q(qix)); return id; } #endif (*a->async_invoke)(a->async_data); if (async_ready(prt, a->async_data)) { if (a->async_free != NULL) (*a->async_free)(a->async_data); } erts_free(ERTS_ALC_T_ASYNC, (void *) a); return id; }
/*
 * erlang:finish_after_on_load/2 BIF (pre-code_ix variant).
 *
 * BIF_ARG_1: module atom whose on_load function just ran.
 * BIF_ARG_2: am_true if on_load succeeded, am_false otherwise.
 *
 * Runs with the system blocked.  On success, export entries that were
 * stubbed for the module (code[4] != 0) are pointed at their real
 * addresses and the on_load marker in the module header is cleared.
 * On failure, the freshly loaded current code is purged: fun entries
 * cleaned, catch indices removed, code memory freed, and the module's
 * address range dropped from the lookup table.  Raises badarg for an
 * unknown module, a module without a pending on_load, or a BIF_ARG_2
 * that is not a boolean.
 *
 * NOTE(review): a second, code_ix-based definition of this BIF
 * appears later in this SOURCE -- presumably from a different OTP
 * version; confirm only one is compiled.
 */
BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2) { Module* modp = erts_get_module(BIF_ARG_1); Eterm on_load; if (!modp || modp->code == 0) { error: BIF_ERROR(BIF_P, BADARG); } if ((on_load = modp->code[MI_ON_LOAD_FUNCTION_PTR]) == 0) { goto error; } if (BIF_ARG_2 != am_false && BIF_ARG_2 != am_true) { goto error; } erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); erts_smp_block_system(0); if (BIF_ARG_2 == am_true) { int i; /* * The on_load function succeded. Fix up export entries. */ for (i = 0; i < export_list_size(); i++) { Export *ep = export_list(i); if (ep != NULL && ep->code[0] == BIF_ARG_1 && ep->code[4] != 0) { ep->address = (void *) ep->code[4]; ep->code[3] = 0; ep->code[4] = 0; } } modp->code[MI_ON_LOAD_FUNCTION_PTR] = 0; set_default_trace_pattern(BIF_ARG_1); } else if (BIF_ARG_2 == am_false) { Eterm* code; Eterm* end; /* * The on_load function failed. Remove the loaded code. * This is an combination of delete and purge. We purge * the current code; the old code is not touched. */ erts_total_code_size -= modp->code_length; code = modp->code; end = (Eterm *)((char *)code + modp->code_length); erts_cleanup_funs_on_purge(code, end); beam_catches_delmod(modp->catches, code, modp->code_length); erts_free(ERTS_ALC_T_CODE, (void *) code); modp->code = NULL; modp->code_length = 0; modp->catches = BEAM_CATCHES_NIL; remove_from_address_table(code); } erts_smp_release_system(); erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); BIF_RET(am_true); }
/*
 * Build an Erlang term describing instrumentation statistics.
 *
 * what selects the view: am.total (single entry), am.allocators,
 * am.classes or am.types (index ranges over the respective ALC
 * enums); any other atom yields THE_NON_VALUE.  Returns am_false when
 * instrumentation stats are disabled.
 *
 * The selected stats are snapshotted under instr_mutex (updating and,
 * if begin_max_period, resetting the max-ever values), then the
 * result is built with the classic ERTS two-pass pattern: the first
 * pass (hpp == NULL) only accumulates the needed heap size into hsz
 * via hszp; after HAlloc the 'goto restart_bld' second pass writes
 * the real terms through hpp.  Temporary stat/name/value arrays are
 * freed before returning.
 */
Eterm erts_instr_get_stat(Process *proc, Eterm what, int begin_max_period) { int i, len, max, min, allctr; Eterm *names, *values, res; Uint arr_size, stat_size, hsz, *hszp, *hp, **hpp; Stat_t *stat_src, *stat; if (!erts_instr_stat) return am_false; if (!atoms_initialized) init_atoms(); if (what == am.total) { min = 0; max = 0; allctr = 0; stat_size = sizeof(Stat_t); stat_src = &stats->tot; if (!am_tot) init_am_tot(); names = am_tot; } else if (what == am.allocators) { min = ERTS_ALC_A_MIN; max = ERTS_ALC_A_MAX; allctr = 1; stat_size = sizeof(Stat_t)*(ERTS_ALC_A_MAX+1); stat_src = stats->a; if (!am_a) init_am_a(); names = am_a; } else if (what == am.classes) { min = ERTS_ALC_C_MIN; max = ERTS_ALC_C_MAX; allctr = 0; stat_size = sizeof(Stat_t)*(ERTS_ALC_C_MAX+1); stat_src = stats->c; if (!am_c) init_am_c(); names = &am_c[ERTS_ALC_C_MIN]; } else if (what == am.types) { min = ERTS_ALC_N_MIN; max = ERTS_ALC_N_MAX; allctr = 0; stat_size = sizeof(Stat_t)*(ERTS_ALC_N_MAX+1); stat_src = stats->n; if (!am_n) init_am_n(); names = &am_n[ERTS_ALC_N_MIN]; } else { return THE_NON_VALUE; } stat = (Stat_t *) erts_alloc(ERTS_ALC_T_TMP, stat_size); arr_size = (max - min + 1)*sizeof(Eterm); if (allctr) names = (Eterm *) erts_alloc(ERTS_ALC_T_TMP, arr_size); values = (Eterm *) erts_alloc(ERTS_ALC_T_TMP, arr_size); erts_mtx_lock(&instr_mutex); update_max_ever_values(stat_src, min, max); sys_memcpy((void *) stat, (void *) stat_src, stat_size); if (begin_max_period) begin_new_max_period(stat_src, min, max); erts_mtx_unlock(&instr_mutex); hsz = 0; hszp = &hsz; hpp = NULL; restart_bld: len = 0; for (i = min; i <= max; i++) { if (!allctr || erts_allctrs_info[i].enabled) { Eterm s[2]; if (allctr) names[len] = am_a[i]; s[0] = bld_tuple(hpp, hszp, 4, am.sizes, bld_uint(hpp, hszp, stat[i].size), bld_uint(hpp, hszp, stat[i].max_size), bld_uint(hpp, hszp, stat[i].max_size_ever)); s[1] = bld_tuple(hpp, hszp, 4, am.blocks, bld_uint(hpp, hszp, stat[i].blocks), bld_uint(hpp, hszp, stat[i].max_blocks), 
bld_uint(hpp, hszp, stat[i].max_blocks_ever)); values[len] = bld_list(hpp, hszp, 2, s); len++; } } res = bld_2tup_list(hpp, hszp, len, names, values); if (!hpp) { hp = HAlloc(proc, hsz); hszp = NULL; hpp = &hp; goto restart_bld; } erts_free(ERTS_ALC_T_TMP, (void *) stat); erts_free(ERTS_ALC_T_TMP, (void *) values); if (allctr) erts_free(ERTS_ALC_T_TMP, (void *) names); return res; }
/*
 * erlang:purge_module/1 BIF (code_ix variant).
 *
 * Removes the old code of module BIF_ARG_1.  Requires the code write
 * permission (yields and retries if another process holds it).  Old
 * code is accessed under the old-code rwlock; unloading a NIF library
 * still needs the whole system blocked, so the lock is temporarily
 * dropped and re-taken around erts_smp_thr_progress_block().  The
 * purge itself: adjust erts_total_code_size, clean fun entries, drop
 * catch indices, release literal area and code memory, and remove the
 * address range.  Raises badarg for a non-atom, unknown module, or a
 * module with no old code.
 *
 * NOTE(review): decrement_refc(modp->old.code_hdr) runs before
 * old.code_hdr->literals_start is read and before the code memory is
 * freed -- presumably decrement_refc only drops refcounts on literal
 * off-heap bins and does not free the header; confirm against
 * beam_bif_load.c history.
 */
BIF_RETTYPE purge_module_1(BIF_ALIST_1) { ErtsCodeIndex code_ix; BeamInstr* code; BeamInstr* end; Module* modp; int is_blocking = 0; Eterm ret; if (is_not_atom(BIF_ARG_1)) { BIF_ERROR(BIF_P, BADARG); } if (!erts_try_seize_code_write_permission(BIF_P)) { ERTS_BIF_YIELD1(bif_export[BIF_purge_module_1], BIF_P, BIF_ARG_1); } code_ix = erts_active_code_ix(); /* * Correct module? */ if ((modp = erts_get_module(BIF_ARG_1, code_ix)) == NULL) { ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG); } else { erts_rwlock_old_code(code_ix); /* * Any code to purge? */ if (!modp->old.code_hdr) { ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG); } else { /* * Unload any NIF library */ if (modp->old.nif != NULL) { /* ToDo: Do unload nif without blocking */ erts_rwunlock_old_code(code_ix); erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); erts_smp_thr_progress_block(); is_blocking = 1; erts_rwlock_old_code(code_ix); erts_unload_nif(modp->old.nif); modp->old.nif = NULL; } /* * Remove the old code. */ ASSERT(erts_total_code_size >= modp->old.code_length); erts_total_code_size -= modp->old.code_length; code = (BeamInstr*) modp->old.code_hdr; end = (BeamInstr *)((char *)code + modp->old.code_length); erts_cleanup_funs_on_purge(code, end); beam_catches_delmod(modp->old.catches, code, modp->old.code_length, code_ix); decrement_refc(modp->old.code_hdr); if (modp->old.code_hdr->literals_start) { erts_free(ERTS_ALC_T_LITERAL, modp->old.code_hdr->literals_start); } erts_free(ERTS_ALC_T_CODE, (void *) code); modp->old.code_hdr = NULL; modp->old.code_length = 0; modp->old.catches = BEAM_CATCHES_NIL; erts_remove_from_ranges(code); ERTS_BIF_PREP_RET(ret, am_true); } erts_rwunlock_old_code(code_ix); } if (is_blocking) { erts_smp_thr_progress_unblock(); erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); } erts_release_code_write_permission(); return ret; }
/*
 * ethread low-level free callback; forwards to erts_free with the
 * ERTS_ALC_T_ETHR_LL allocation type.
 */
static void
ethr_ll_free(void *ptr)
{
    erts_free(ERTS_ALC_T_ETHR_LL, ptr);
}
/*
 * erlang:finish_after_on_load/2 BIF (code_ix variant).
 *
 * BIF_ARG_1: module atom whose on_load function just ran.
 * BIF_ARG_2: am_true if on_load succeeded, am_false otherwise.
 *
 * Seizes the code write permission (yielding if unavailable), then --
 * as the ToDo notes, pending a staged-code_ix rewrite -- blocks thread
 * progress for the duration.  On success, stubbed export entries
 * (code[4] != 0) get their real addresses in addressv[code_ix] and the
 * header's on_load_function_ptr is cleared.  On failure, the current
 * code is purged: fun entries cleaned, catches removed, literal area
 * and code header freed, ranges updated.  badarg for unknown module,
 * no pending on_load, or non-boolean BIF_ARG_2; all error paths
 * unblock, relock and release the write permission.
 *
 * NOTE(review): duplicates the earlier pre-code_ix definition of this
 * BIF -- presumably different OTP versions; confirm only one is
 * compiled.
 */
BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2) { ErtsCodeIndex code_ix; Module* modp; if (!erts_try_seize_code_write_permission(BIF_P)) { ERTS_BIF_YIELD2(bif_export[BIF_finish_after_on_load_2], BIF_P, BIF_ARG_1, BIF_ARG_2); } /* ToDo: Use code_ix staging instead of thread blocking */ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); erts_smp_thr_progress_block(); code_ix = erts_active_code_ix(); modp = erts_get_module(BIF_ARG_1, code_ix); if (!modp || !modp->curr.code_hdr) { error: erts_smp_thr_progress_unblock(); erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); erts_release_code_write_permission(); BIF_ERROR(BIF_P, BADARG); } if (modp->curr.code_hdr->on_load_function_ptr == NULL) { goto error; } if (BIF_ARG_2 != am_false && BIF_ARG_2 != am_true) { goto error; } if (BIF_ARG_2 == am_true) { int i; /* * The on_load function succeded. Fix up export entries. */ for (i = 0; i < export_list_size(code_ix); i++) { Export *ep = export_list(i,code_ix); if (ep != NULL && ep->code[0] == BIF_ARG_1 && ep->code[4] != 0) { ep->addressv[code_ix] = (void *) ep->code[4]; ep->code[4] = 0; } } modp->curr.code_hdr->on_load_function_ptr = NULL; set_default_trace_pattern(BIF_ARG_1); } else if (BIF_ARG_2 == am_false) { BeamInstr* code; BeamInstr* end; /* * The on_load function failed. Remove the loaded code. * This is an combination of delete and purge. We purge * the current code; the old code is not touched. 
*/ erts_total_code_size -= modp->curr.code_length; code = (BeamInstr*) modp->curr.code_hdr; end = (BeamInstr *) ((char *)code + modp->curr.code_length); erts_cleanup_funs_on_purge(code, end); beam_catches_delmod(modp->curr.catches, code, modp->curr.code_length, erts_active_code_ix()); if (modp->curr.code_hdr->literals_start) { erts_free(ERTS_ALC_T_LITERAL, modp->curr.code_hdr->literals_start); } erts_free(ERTS_ALC_T_CODE, modp->curr.code_hdr); modp->curr.code_hdr = NULL; modp->curr.code_length = 0; modp->curr.catches = BEAM_CATCHES_NIL; erts_remove_from_ranges(code); } erts_smp_thr_progress_unblock(); erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); erts_release_code_write_permission(); BIF_RET(am_true); }
/*
 * Feed up to 'left' bytes of the iolist 'ioterm' into the checksum
 * callback 'sumfun' (state in 'sum').
 *
 * On return *res holds the number of bytes consumed and *err is set to 1
 * on a type error (non-byte, non-binary element or bit-level binary).
 * The return value is the remainder of the iolist that was NOT consumed
 * (NIL when everything fit within 'left'); a partially consumed binary is
 * returned as a freshly built sub-binary.  Recurses for nested binaries.
 *
 * NOTE(review): the visible text of this function appears truncated —
 * the final cleanup/return after the while loop is missing, and the last
 * 'if is_binary(ioterm)' lacks parentheses; both look like extraction
 * artifacts rather than intentional code.  Verify against the original
 * erl_bif_chksum.c before relying on this listing.
 */
static Eterm do_chksum(ChksumFun sumfun, Process *p, Eterm ioterm, int left,
                       void *sum, int *res, int *err)
{
    Eterm *objp;
    Eterm obj;
    int c;                        /* NOTE(review): apparently unused */
    DECLARE_ESTACK(stack);        /* work stack for nested sublists */
    unsigned char *bytes = NULL;  /* growable staging buffer for byte runs */
    int numbytes = 0;             /* current capacity of 'bytes' */

    *err = 0;
    if (left <= 0 || is_nil(ioterm)) {
        DESTROY_ESTACK(stack);
        *res = 0;
        return ioterm;
    }
    if(is_binary(ioterm)) {
        Uint bitoffs;
        Uint bitsize;
        Uint size;
        Eterm res_term = NIL;
        unsigned char *bytes;     /* shadows outer 'bytes' intentionally */
        byte *temp_alloc = NULL;

        ERTS_GET_BINARY_BYTES(ioterm, bytes, bitoffs, bitsize);
        if (bitsize != 0) {
            /* Bitstrings (non-whole-byte) are not valid iolist data. */
            *res = 0;
            *err = 1;
            DESTROY_ESTACK(stack);
            return NIL;
        }
        if (bitoffs != 0) {
            bytes = erts_get_aligned_binary_bytes(ioterm, &temp_alloc);
            /* The call to erts_get_aligned_binary_bytes cannot fail as
               we've already checked bitsize and that this is a binary */
        }

        size = binary_size(ioterm);
        if (size > left) {
            Eterm *hp;
            ErlSubBin *sb;
            Eterm orig;
            Uint offset;
            /* Split the binary in two parts, of which we only process
               the first; the remainder is returned as a sub-binary. */
            hp = HAlloc(p, ERL_SUB_BIN_SIZE);
            sb = (ErlSubBin *) hp;
            ERTS_GET_REAL_BIN(ioterm, orig, offset, bitoffs, bitsize);
            sb->thing_word = HEADER_SUB_BIN;
            sb->size = size - left;
            sb->offs = offset + left;
            sb->orig = orig;
            sb->bitoffs = bitoffs;
            sb->bitsize = bitsize;
            sb->is_writable = 0;
            res_term = make_binary(sb);
            size = left;
        }
        (*sumfun)(sum, bytes, size);
        *res = size;
        DESTROY_ESTACK(stack);
        erts_free_aligned_binary_bytes(temp_alloc);
        return res_term;
    }

    if (!is_list(ioterm)) {
        *res = 0;
        *err = 1;
        DESTROY_ESTACK(stack);
        return NIL;
    }

    /* OK a list, needs to be processed in order, handling each flat
       list-level as they occur, just like io_list_to_binary would */
    *res = 0;
    ESTACK_PUSH(stack,ioterm);
    while (!ESTACK_ISEMPTY(stack) && left) {
        ioterm = ESTACK_POP(stack);
        if (is_nil(ioterm)) {
            /* ignore empty lists */
            continue;
        }
        if(is_list(ioterm)) {
L_Again:    /* Restart with sublist, old listend was pushed on stack */
            objp = list_val(ioterm);
            obj = CAR(objp);
            for(;;) { /* loop over one flat list of bytes and binaries
                         until sublist or list end is encountered */
                if (is_byte(obj)) {
                    int bsize = 0;
                    /* Batch consecutive bytes into 'bytes' before a
                       single sumfun call; grow the buffer as needed
                       (bounded by 'left' once past it). */
                    for(;;) {
                        if (bsize >= numbytes) {
                            if (!bytes) {
                                bytes = erts_alloc(ERTS_ALC_T_TMP,
                                                   numbytes = 500);
                            } else {
                                if (numbytes > left) {
                                    numbytes += left;
                                } else {
                                    numbytes *= 2;
                                }
                                bytes = erts_realloc(ERTS_ALC_T_TMP, bytes,
                                                     numbytes);
                            }
                        }
                        bytes[bsize++] = (unsigned char) unsigned_val(obj);
                        --left;
                        ioterm = CDR(objp);
                        if (!is_list(ioterm)) {
                            break;
                        }
                        objp = list_val(ioterm);
                        obj = CAR(objp);
                        if (!is_byte(obj))
                            break;
                        if (!left) {
                            break;
                        }
                    }
                    (*sumfun)(sum, bytes, bsize);
                    *res += bsize;
                } else if (is_nil(obj)) {
                    ioterm = CDR(objp);
                    if (!is_list(ioterm)) {
                        break;
                    }
                    objp = list_val(ioterm);
                    obj = CAR(objp);
                } else if (is_list(obj)) {
                    /* push rest of list for later processing, start
                       again with sublist */
                    ESTACK_PUSH(stack,CDR(objp));
                    ioterm = obj;
                    goto L_Again;
                } else if (is_binary(obj)) {
                    int sres, serr;
                    Eterm rest_term;
                    /* Recurse for the embedded binary; it may be only
                       partially consumed when 'left' runs out. */
                    rest_term = do_chksum(sumfun, p, obj, left, sum,
                                          &sres, &serr);
                    *res += sres;
                    if (serr != 0) {
                        *err = 1;
                        DESTROY_ESTACK(stack);
                        if (bytes != NULL)
                            erts_free(ERTS_ALC_T_TMP, bytes);
                        return NIL;
                    }
                    left -= sres;
                    if (rest_term != NIL) {
                        /* Rebuild the unconsumed tail as a new cons cell
                           [RestBin | Tail] and stop. */
                        Eterm *hp;
                        hp = HAlloc(p, 2);
                        obj = CDR(objp);
                        ioterm = CONS(hp, rest_term, obj);
                        left = 0;
                        break;
                    }
                    ioterm = CDR(objp);
                    if (is_list(ioterm)) {
                        /* objp and obj need to be updated if loop is to
                           continue */
                        objp = list_val(ioterm);
                        obj = CAR(objp);
                    }
                } else {
                    /* Not a byte, list, nil or binary: type error. */
                    *err = 1;
                    DESTROY_ESTACK(stack);
                    if (bytes != NULL)
                        erts_free(ERTS_ALC_T_TMP, bytes);
                    return NIL;
                }
                if (!left || is_nil(ioterm) || !is_list(ioterm)) {
                    break;
                }
            } /* for(;;) */
        } /* is_list(ioterm) */

        if (!left) {
#ifdef ALLOW_BYTE_TAIL
            if (is_byte(ioterm)) {
                /* improper list with byte tail */
                Eterm *hp;
                hp = HAlloc(p, 2);
                ioterm = CONS(hp, ioterm, NIL);
            }
#else
            ;
#endif
        } else if (!is_list(ioterm) && !is_nil(ioterm)) {
            /* improper list end */
#ifdef ALLOW_BYTE_TAIL
            if (is_byte(ioterm)) {
                unsigned char b[1];
                b[0] = (unsigned char) unsigned_val(ioterm);
                (*sumfun)(sum, b, 1);
                ++(*res);
                --left;
                ioterm = NIL;
            } else
#endif
            /* NOTE(review): parentheses around the condition appear to
               have been lost here — likely should read
               'if (is_binary(ioterm))'. */
            if is_binary(ioterm) {
                int sres, serr;
                ioterm = do_chksum(sumfun, p, ioterm, left, sum,
                                   &sres, &serr);
                *res +=sres;
                if (serr != 0) {
                    *err = 1;
                    DESTROY_ESTACK(stack);
                    if (bytes != NULL)
                        erts_free(ERTS_ALC_T_TMP, bytes);
                    return NIL;
                }
                left -= sres;
            } else {
                *err = 1;
                DESTROY_ESTACK(stack);
                if (bytes != NULL)
                    erts_free(ERTS_ALC_T_TMP, bytes);
                return NIL;
            }
        }
    } /* while left and not estack empty */
static void free_port_data_heap(void *vpdhp) { erts_cleanup_offheap(&((ErtsPortDataHeap *) vpdhp)->off_heap); erts_free(ERTS_ALC_T_PORT_DATA_HEAP, vpdhp); }
/*
 * This function is responsible for enabling, disabling, resetting and
 * gathering data related to microstate accounting.
 *
 * Managed threads and unmanaged threads are handled differently.
 *   - managed threads get a misc_aux job telling them to switch on msacc
 *   - unmanaged have some fields protected by a mutex that has to be taken
 *     before any values can be updated
 *
 * For performance reasons there is also a global value erts_msacc_enabled
 * that controls the state of all threads. Statistics gathering is only on
 * if erts_msacc_enabled && msacc is true.
 */
/*
 * Returns a fresh reference that the caller should wait for replies on,
 * and writes the expected reply count (as a small) into *threads.
 * Returns THE_NON_VALUE when the request is a no-op (double enable/disable,
 * or msacc compiled out).
 */
Eterm
erts_msacc_request(Process *c_p, int action, Eterm *threads)
{
#ifdef ERTS_ENABLE_MSACC
    ErtsMsAcc *msacc = ERTS_MSACC_TSD_GET();
    ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
    Eterm ref;
    ErtsMSAccReq *msaccrp;
    Eterm *hp;

#ifdef ERTS_MSACC_ALWAYS_ON
    /* Cannot be toggled when compiled always-on. */
    if (action == ERTS_MSACC_ENABLE || action == ERTS_MSACC_DISABLE)
        return THE_NON_VALUE;
#else
    /* take care of double enable, and double disable here */
    if (msacc && action == ERTS_MSACC_ENABLE) {
        return THE_NON_VALUE;
    } else if (!msacc && action == ERTS_MSACC_DISABLE) {
        return THE_NON_VALUE;
    }
#endif

    ref = erts_make_ref(c_p);

    /* Request block shared by all repliers; freed when refc drops
       (presumably inside reply_msacc — confirm in its definition). */
    msaccrp = erts_alloc(ERTS_ALC_T_MSACC, sizeof(ErtsMSAccReq));
    hp = &msaccrp->ref_heap[0];

    msaccrp->action = action;
    msaccrp->proc = c_p;
    msaccrp->ref = STORE_NC(&hp, NULL, ref);
    msaccrp->req_sched = esdp->no;

#ifdef ERTS_SMP
    *threads = erts_no_schedulers;
    *threads += 1; /* aux thread */
#else
    *threads = 1;
#endif

    erts_smp_atomic32_init_nob(&msaccrp->refc,(erts_aint32_t)*threads);

    /* Keep the requesting process alive until all replies are in. */
    erts_proc_add_refc(c_p, *threads);

    if (erts_no_schedulers > 1)
        erts_schedule_multi_misc_aux_work(1, erts_no_schedulers,
                                          reply_msacc,
                                          (void *) msaccrp);
#ifdef ERTS_SMP
    /* aux thread */
    erts_schedule_misc_aux_work(0, reply_msacc, (void *) msaccrp);
#endif

#ifdef USE_THREADS
    /* Manage unmanaged threads */
    switch (action) {
    case ERTS_MSACC_GATHER: {
        Uint unmanaged_count;
        ErtsMsAcc *msacc, **unmanaged;  /* shadows outer 'msacc' */
        int i = 0;

        /* we copy a list of pointers here so that we do not have to have
           the msacc_mutex when sending messages */
        erts_rwmtx_rlock(&msacc_mutex);
        unmanaged_count = msacc_unmanaged_count;
        unmanaged = erts_alloc(ERTS_ALC_T_MSACC,
                               sizeof(ErtsMsAcc*)*unmanaged_count);

        for (i = 0, msacc = msacc_unmanaged;
             i < unmanaged_count;
             i++, msacc = msacc->next) {
            unmanaged[i] = msacc;
        }
        erts_rwmtx_runlock(&msacc_mutex);

        for (i = 0; i < unmanaged_count; i++) {
            erts_mtx_lock(&unmanaged[i]->mtx);
            if (unmanaged[i]->perf_counter) {
                ErtsSysPerfCounter perf_counter;
                /* if enabled update stats */
                perf_counter = erts_sys_perf_counter();
                unmanaged[i]->perf_counters[unmanaged[i]->state] +=
                    perf_counter - unmanaged[i]->perf_counter;
                unmanaged[i]->perf_counter = perf_counter;
            }
            erts_mtx_unlock(&unmanaged[i]->mtx);
            send_reply(unmanaged[i],msaccrp);
        }
        erts_free(ERTS_ALC_T_MSACC,unmanaged);
        /* We have just sent unmanaged_count messages, so bump no of
           threads */
        *threads += unmanaged_count;
        break;
    }
    case ERTS_MSACC_RESET: {
        ErtsMsAcc *msacc;  /* shadows outer 'msacc' */
        erts_rwmtx_rlock(&msacc_mutex);
        for (msacc = msacc_unmanaged; msacc != NULL; msacc = msacc->next)
            erts_msacc_reset(msacc);
        erts_rwmtx_runlock(&msacc_mutex);
        break;
    }
    case ERTS_MSACC_ENABLE: {
        /* NOTE(review): this case reuses the function-scope 'msacc'
           (the caller's TSD pointer) as the loop variable, clobbering
           it — harmless here since it is not read afterwards, but
           worth confirming. */
        erts_rwmtx_rlock(&msacc_mutex);
        for (msacc = msacc_unmanaged; msacc != NULL; msacc = msacc->next) {
            erts_mtx_lock(&msacc->mtx);
            msacc->perf_counter = erts_sys_perf_counter();
            /* we assume the unmanaged thread is sleeping */
            msacc->state = ERTS_MSACC_STATE_SLEEP;
            erts_mtx_unlock(&msacc->mtx);
        }
        erts_rwmtx_runlock(&msacc_mutex);
        break;
    }
    case ERTS_MSACC_DISABLE: {
        ErtsSysPerfCounter perf_counter;
        erts_rwmtx_rlock(&msacc_mutex);
        /* make sure to update stats with latest results */
        for (msacc = msacc_unmanaged; msacc != NULL; msacc = msacc->next) {
            erts_mtx_lock(&msacc->mtx);
            perf_counter = erts_sys_perf_counter();
            msacc->perf_counters[msacc->state] +=
                perf_counter - msacc->perf_counter;
            msacc->perf_counter = 0;
            erts_mtx_unlock(&msacc->mtx);
        }
        erts_rwmtx_runlock(&msacc_mutex);
        break;
    }
    default: { ASSERT(0); }
    }
#endif

    *threads = make_small(*threads);

    /* Reply on behalf of the requesting scheduler itself. */
    reply_msacc((void *) msaccrp);

#ifndef ERTS_MSACC_ALWAYS_ON
    /* enable/disable the global value */
    if (action == ERTS_MSACC_ENABLE) {
        erts_msacc_enabled = 1;
    } else if (action == ERTS_MSACC_DISABLE) {
        erts_msacc_enabled = 0;
    }
#endif

    return ref;
#else
    return THE_NON_VALUE;
#endif
}
/*
 * Convert an environment spec — a list of {Key, Value} tuples — into a
 * native-encoded environment block stored in a fresh heap binary on
 * process 'p'.  A Value of 'false' means "unset": only the key and '='
 * are emitted.  Returns a pointer to the binary's bytes, or NULL on any
 * malformed input.  The temporary term heap is always freed via 'done'.
 */
static byte* convert_environment(Process* p, Eterm env)
{
    Eterm all;
    Eterm* temp_heap;
    Eterm* hp;
    Uint heap_size;
    int n;
    Sint size;
    byte* bytes;
    int encoding = erts_get_native_filename_encoding();

    if ((n = list_length(env)) < 0) {
        return NULL;
    }
    /* Worst case per entry: 5 cons cells (2 words each), plus the
       initial terminating cell. */
    heap_size = 2*(5*n+1);
    temp_heap = hp = (Eterm *) erts_alloc(ERTS_ALC_T_TMP,
                                          heap_size*sizeof(Eterm));
    bytes = NULL;       /* Indicating error */

    /*
     * All errors below are handled by jumping to 'done', to ensure that
     * the memory gets deallocated. Do NOT return directly from this
     * function.
     */

    /* 'all' accumulates an iolist; the leading 0 terminates the whole
       environment block. */
    all = CONS(hp, make_small(0), NIL);
    hp += 2;

    while(is_list(env)) {
        Eterm tmp;
        Eterm* tp;

        tmp = CAR(list_val(env));
        if (is_not_tuple_arity(tmp, 2)) {
            goto done;
        }
        tp = tuple_val(tmp);
        /* Build [Key, '=', Value?, 0] back to front. */
        tmp = CONS(hp, make_small(0), NIL);
        hp += 2;
        if (tp[2] != am_false) {
            tmp = CONS(hp, tp[2], tmp);
            hp += 2;
        }
        tmp = CONS(hp, make_small('='), tmp);
        hp += 2;
        tmp = CONS(hp, tp[1], tmp);
        hp += 2;
        all = CONS(hp, tmp, all);
        hp += 2;
        env = CDR(list_val(env));
    }
    if (is_not_nil(env)) {
        /* Improper list. */
        goto done;
    }

    if ((size = erts_native_filename_need(all,encoding)) < 0) {
        goto done;
    }

    /*
     * Put the result in a binary (no risk for a memory leak that way).
     */
    (void) erts_new_heap_binary(p, NULL, size, &bytes);
    erts_native_filename_put(all,encoding,bytes);

done:
    erts_free(ERTS_ALC_T_TMP, temp_heap);
    return bytes;
}
/*
 * Implementation of erlang:port_call/3: synchronously invoke the port
 * driver's 'call' callback.
 *
 * arg1 = port id or registered name, arg2 = integer operation,
 * arg3 = term to pass (encoded to external format).  The driver's reply
 * (also external format) is decoded back into a term and returned.
 * Raises badarg on any failure; the shared 'error:' label frees the
 * encode buffer and any driver-allocated response before raising.
 */
static BIF_RETTYPE port_call(Process* c_p, Eterm arg1, Eterm arg2, Eterm arg3)
{
    Uint op;
    Port *p;
    Uint size;
    byte *bytes;
    byte *endp;
    ErlDrvSizeT real_size;
    erts_driver_t *drv;
    byte port_input[256];   /* Default input buffer to encode in */
    byte port_result[256];  /* Buffer for result from port. */
    byte* port_resp;        /* Pointer to result buffer. */
    char *prc;
    ErlDrvSSizeT ret;
    Eterm res;
    Sint result_size;
    Eterm *hp;
    Eterm *hp_end;          /* To satisfy hybrid heap architecture */
    unsigned ret_flags = 0U;
    int fpe_was_unmasked;

    bytes = &port_input[0];
    port_resp = port_result;
    /* trace of port scheduling with virtual process descheduling
     * lock wait
     */
    if (IS_TRACED_FL(c_p, F_TRACE_SCHED_PROCS)) {
        trace_virtual_sched(c_p, am_out);
    }

    if (erts_system_profile_flags.runnable_procs
        && erts_system_profile_flags.exclusive) {
        profile_runnable_proc(c_p, am_inactive);
    }

    p = id_or_name2port(c_p, arg1);
    if (!p) {
    error:
        /* Common error exit: release driver response (unless the driver
         * asked to keep its buffer) and our encode buffer, re-trace the
         * process as scheduled in, release the port, then badarg. */
        if (port_resp != port_result &&
            !(ret_flags & DRIVER_CALL_KEEP_BUFFER)) {
            driver_free(port_resp);
        }
        if (bytes != &port_input[0])
            erts_free(ERTS_ALC_T_PORT_CALL_BUF, bytes);
        /* Need to virtual schedule in the process if there
         * was an error.
         */
        if (IS_TRACED_FL(c_p, F_TRACE_SCHED_PROCS)) {
            trace_virtual_sched(c_p, am_in);
        }

        if (erts_system_profile_flags.runnable_procs
            && erts_system_profile_flags.exclusive) {
            profile_runnable_proc(c_p, am_active);
        }

        if (p)
            erts_port_release(p);
#ifdef ERTS_SMP
        ERTS_SMP_BIF_CHK_PENDING_EXIT(c_p, ERTS_PROC_LOCK_MAIN);
#else
        ERTS_BIF_CHK_EXITED(c_p);
#endif
        BIF_ERROR(c_p, BADARG);
    }

    if ((drv = p->drv_ptr) == NULL) {
        goto error;
    }
    if (drv->call == NULL) {
        goto error;
    }
    if (!term_to_Uint(arg2, &op)) {
        goto error;
    }
    p->caller = c_p->id;

    /* Lock taken, virtual schedule of port */
    if (IS_TRACED_FL(p, F_TRACE_SCHED_PORTS)) {
        trace_sched_ports_where(p, am_in, am_call);
    }

    if (erts_system_profile_flags.runnable_ports
        && !erts_port_is_scheduled(p)) {
        profile_runnable_port(p, am_active);
    }
    /* Encode arg3 to external format, spilling to a heap buffer only
       when the 256-byte stack buffer is too small. */
    size = erts_encode_ext_size(arg3);
    if (size > sizeof(port_input))
        bytes = erts_alloc(ERTS_ALC_T_PORT_CALL_BUF, size);

    endp = bytes;
    erts_encode_ext(arg3, &endp);

    real_size = endp - bytes;
    if (real_size > size) {
        erl_exit(1, "%s, line %d: buffer overflow: %d word(s)\n",
                 __FILE__, __LINE__, endp - (bytes + size));
    }
    /* Main lock is dropped around the driver callback. */
    erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
#ifdef USE_VM_PROBES
    if (DTRACE_ENABLED(driver_call)) {
        DTRACE_CHARBUF(process_str, DTRACE_TERM_BUF_SIZE);
        DTRACE_CHARBUF(port_str, DTRACE_TERM_BUF_SIZE);

        dtrace_pid_str(p->connected, process_str);
        dtrace_port_str(p, port_str);
        DTRACE5(driver_call, process_str, port_str, p->name, op, real_size);
    }
#endif
    prc  = (char *) port_resp;
    /* Drivers run with FP exceptions blocked. */
    fpe_was_unmasked = erts_block_fpe();
    ret = drv->call((ErlDrvData)p->drv_data,
                    (unsigned) op,
                    (char *) bytes,
                    (int) real_size,
                    &prc,
                    (int) sizeof(port_result),
                    &ret_flags);
    erts_unblock_fpe(fpe_was_unmasked);
    if (IS_TRACED_FL(p, F_TRACE_SCHED_PORTS)) {
        trace_sched_ports_where(p, am_out, am_call);
    }

    if (erts_system_profile_flags.runnable_ports
        && !erts_port_is_scheduled(p)) {
        profile_runnable_port(p, am_inactive);
    }

    port_resp = (byte *) prc;
    p->caller = NIL;
    erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
#ifdef HARDDEBUG
    {
        ErlDrvSizeT z;
        printf("real_size = %ld,%d, ret = %ld,%d\r\n",
               (unsigned long) real_size, (int) real_size,
               (unsigned long)ret, (int) ret);
        printf("[");
        for(z = 0; z < real_size; ++z) {
            printf("%d, ",(int) bytes[z]);
        }
        printf("]\r\n");
        printf("[");
        for(z = 0; z < ret; ++z) {
            printf("%d, ",(int) port_resp[z]);
        }
        printf("]\r\n");
    }
#endif
    if (ret <= 0 || port_resp[0] != VERSION_MAGIC) {
        /* Error or a binary without magic/ with wrong magic */
        goto error;
    }
    result_size = erts_decode_ext_size(port_resp, ret);
    if (result_size < 0) {
        goto error;
    }
    hp = HAlloc(c_p, result_size);
    hp_end = hp + result_size;
    endp = port_resp;
    res = erts_decode_ext(&hp, &MSO(c_p), &endp);
    if (res == THE_NON_VALUE) {
        goto error;
    }
    /* Return unused heap words reserved for decoding. */
    HRelease(c_p, hp_end, hp);
    if (port_resp != port_result &&
        !(ret_flags & DRIVER_CALL_KEEP_BUFFER)) {
        driver_free(port_resp);
    }
    if (bytes != &port_input[0])
        erts_free(ERTS_ALC_T_PORT_CALL_BUF, bytes);
    if (p)
        erts_port_release(p);
#ifdef ERTS_SMP
    ERTS_SMP_BIF_CHK_PENDING_EXIT(c_p, ERTS_PROC_LOCK_MAIN);
#else
    ERTS_BIF_CHK_EXITED(c_p);
#endif
    if (IS_TRACED_FL(c_p, F_TRACE_SCHED_PROCS)) {
        trace_virtual_sched(c_p, am_in);
    }

    if (erts_system_profile_flags.runnable_procs
        && erts_system_profile_flags.exclusive) {
        profile_runnable_proc(c_p, am_active);
    }
    return res;
}
/*
 * NIF API: release memory previously obtained from the NIF allocator
 * (ERTS_ALC_T_NIF).
 */
void
enif_free(void* ptr)
{
    erts_free(ERTS_ALC_T_NIF, ptr);
}
/*
 * Core of erlang:open_port/2: parse the settings list, resolve the first
 * argument into a driver (vanilla name / {spawn,Cmd} / {spawn_driver,D} /
 * {spawn_executable,Prog} / {fd,In,Out}), then start the driver.
 *
 * Returns the new port number, or a negative value on failure with
 * *err_nump set (BADARG for argument errors).  All exits funnel through
 * 'do_return', which frees name_buf, argv and a heap-allocated wd.
 */
static int open_port(Process* p, Eterm name, Eterm settings, int *err_nump)
{
#define OPEN_PORT_ERROR(VAL) do { port_num = (VAL); goto do_return; } while (0)
    int i, port_num;
    Eterm option;
    Uint arity;
    Eterm* tp;
    Uint* nargs;
    erts_driver_t* driver;
    char* name_buf = NULL;
    SysDriverOpts opts;
    int binary_io;
    int soft_eof;
    Sint linebuf;
    Eterm edir = NIL;
    byte dir[MAXPATHLEN];   /* stack buffer for a non-unicode 'cd' path */

    /* These are the defaults */
    opts.packet_bytes = 0;
    opts.use_stdio = 1;
    opts.redir_stderr = 0;
    opts.read_write = 0;
    opts.hide_window = 0;
    opts.wd = NULL;
    opts.envir = NULL;
    opts.exit_status = 0;
    opts.overlapped_io = 0;
    opts.spawn_type = ERTS_SPAWN_ANY;
    opts.argv = NULL;
    binary_io = 0;
    soft_eof = 0;
    linebuf = 0;

    *err_nump = 0;

    if (is_not_list(settings) && is_not_nil(settings)) {
        goto badarg;
    }
    /*
     * Parse the settings.
     */

    if (is_not_nil(settings)) {
        nargs = list_val(settings);
        while (1) {
            if (is_tuple_arity(*nargs, 2)) {
                /* {Option, Value} pairs. */
                tp = tuple_val(*nargs);
                arity = *tp++;
                option = *tp++;
                if (option == am_packet) {
                    if (is_not_small(*tp)) {
                        goto badarg;
                    }
                    opts.packet_bytes = signed_val(*tp);
                    /* Only 1-, 2- and 4-byte packet headers exist. */
                    switch (opts.packet_bytes) {
                    case 1:
                    case 2:
                    case 4:
                        break;
                    default:
                        goto badarg;
                    }
                } else if (option == am_line) {
                    if (is_not_small(*tp)) {
                        goto badarg;
                    }
                    linebuf = signed_val(*tp);
                    if (linebuf <= 0) {
                        goto badarg;
                    }
                } else if (option == am_env) {
                    byte* bytes;
                    if ((bytes = convert_environment(p, *tp)) == NULL) {
                        goto badarg;
                    }
                    opts.envir = (char *) bytes;
                } else if (option == am_args) {
                    char **av;
                    char **oav = opts.argv;
                    if ((av = convert_args(*tp)) == NULL) {
                        goto badarg;
                    }
                    opts.argv = av;
                    /* Preserve an earlier arg0 override: move it into
                       the new vector before freeing the old one. */
                    if (oav) {
                        opts.argv[0] = oav[0];
                        oav[0] = erts_default_arg0;
                        free_args(oav);
                    }
                } else if (option == am_arg0) {
                    char *a0;
                    if ((a0 = erts_convert_filename_to_native(
                             *tp, ERTS_ALC_T_TMP, 1)) == NULL) {
                        goto badarg;
                    }
                    if (opts.argv == NULL) {
                        opts.argv = erts_alloc(ERTS_ALC_T_TMP,
                                               2 * sizeof(char **));
                        opts.argv[0] = a0;
                        opts.argv[1] = NULL;
                    } else {
                        if (opts.argv[0] != erts_default_arg0) {
                            erts_free(ERTS_ALC_T_TMP, opts.argv[0]);
                        }
                        opts.argv[0] = a0;
                    }
                } else if (option == am_cd) {
                    /* Converted later, once spawn_type is known. */
                    edir = *tp;
                } else {
                    goto badarg;
                }
            } else if (*nargs == am_stream) {
                opts.packet_bytes = 0;
            } else if (*nargs == am_use_stdio) {
                opts.use_stdio = 1;
            } else if (*nargs == am_stderr_to_stdout) {
                opts.redir_stderr = 1;
            } else if (*nargs == am_line) {
                linebuf = 512;  /* default line buffer size */
            } else if (*nargs == am_nouse_stdio) {
                opts.use_stdio = 0;
            } else if (*nargs == am_binary) {
                binary_io = 1;
            } else if (*nargs == am_in) {
                opts.read_write |= DO_READ;
            } else if (*nargs == am_out) {
                opts.read_write |= DO_WRITE;
            } else if (*nargs == am_eof) {
                soft_eof = 1;
            } else if (*nargs == am_hide) {
                opts.hide_window = 1;
            } else if (*nargs == am_exit_status) {
                opts.exit_status = 1;
            } else if (*nargs == am_overlapped_io) {
                opts.overlapped_io = 1;
            } else {
                goto badarg;
            }
            if (is_nil(*++nargs))
                break;
            if (is_not_list(*nargs)) {
                goto badarg;
            }
            nargs = list_val(*nargs);
        }
    }
    if (opts.read_write == 0)   /* implement default */
        opts.read_write = DO_READ|DO_WRITE;

    /* Mutually exclusive arguments. */
    if((linebuf && opts.packet_bytes) ||
       (opts.redir_stderr && !opts.use_stdio)) {
        goto badarg;
    }

    /*
     * Parse the first argument and start the appropriate driver.
     */

    if (is_atom(name) || (i = is_string(name))) {
        /* a vanilla port: name given directly as atom or string */
        if (is_atom(name)) {
            name_buf = (char *) erts_alloc(ERTS_ALC_T_TMP,
                                           atom_tab(atom_val(name))->len+1);
            sys_memcpy((void *) name_buf,
                       (void *) atom_tab(atom_val(name))->name,
                       atom_tab(atom_val(name))->len);
            name_buf[atom_tab(atom_val(name))->len] = '\0';
        } else {
            name_buf = (char *) erts_alloc(ERTS_ALC_T_TMP, i + 1);
            if (intlist_to_buf(name, name_buf, i) != i)
                erl_exit(1, "%s:%d: Internal error\n", __FILE__, __LINE__);
            name_buf[i] = '\0';
        }
        driver = &vanilla_driver;
    } else {
        if (is_not_tuple(name)) {
            goto badarg;        /* Not a process or fd port */
        }
        tp = tuple_val(name);
        arity = *tp++;

        if (arity == make_arityval(0)) {
            goto badarg;
        }

        if (*tp == am_spawn || *tp == am_spawn_driver) {  /* A process port */
            if (arity != make_arityval(2)) {
                goto badarg;
            }
            name = tp[1];
            if (is_atom(name)) {
                name_buf = (char *) erts_alloc(
                    ERTS_ALC_T_TMP, atom_tab(atom_val(name))->len+1);
                sys_memcpy((void *) name_buf,
                           (void *) atom_tab(atom_val(name))->name,
                           atom_tab(atom_val(name))->len);
                name_buf[atom_tab(atom_val(name))->len] = '\0';
            } else if ((i = is_string(name))) {
                name_buf = (char *) erts_alloc(ERTS_ALC_T_TMP, i + 1);
                if (intlist_to_buf(name, name_buf, i) != i)
                    erl_exit(1, "%s:%d: Internal error\n",
                             __FILE__, __LINE__);
                name_buf[i] = '\0';
            } else {
                goto badarg;
            }
            if (*tp == am_spawn_driver) {
                opts.spawn_type = ERTS_SPAWN_DRIVER;
            }
            driver = &spawn_driver;
        } else if (*tp == am_spawn_executable) {  /* A program */
            /*
             * {spawn_executable,Progname}
             */
            if (arity != make_arityval(2)) {
                goto badarg;
            }
            name = tp[1];
            if ((name_buf = erts_convert_filename_to_native(
                     name,ERTS_ALC_T_TMP,0)) == NULL) {
                goto badarg;
            }
            opts.spawn_type = ERTS_SPAWN_EXECUTABLE;
            driver = &spawn_driver;
        } else if (*tp == am_fd) { /* An fd port */
            int n;
            struct Sint_buf sbuf;
            char* p;

            if (arity != make_arityval(3)) {
                goto badarg;
            }
            if (is_not_small(tp[1]) || is_not_small(tp[2])) {
                goto badarg;
            }
            opts.ifd = unsigned_val(tp[1]);
            opts.ofd = unsigned_val(tp[2]);

            /* Synthesize name "Ifd/Ofd" from input and output
               descriptor. */
            name_buf = erts_alloc(ERTS_ALC_T_TMP,
                                  2*sizeof(struct Sint_buf) + 2);
            p = Sint_to_buf(opts.ifd, &sbuf);
            n = sys_strlen(p);
            sys_strncpy(name_buf, p, n);
            name_buf[n] = '/';
            p = Sint_to_buf(opts.ofd, &sbuf);
            sys_strcpy(name_buf+n+1, p);

            driver = &fd_driver;
        } else {
            goto badarg;
        }
    }

    if ((driver != &spawn_driver && opts.argv != NULL) ||
        (driver == &spawn_driver &&
         opts.spawn_type != ERTS_SPAWN_EXECUTABLE &&
         opts.argv != NULL)) {
        /* Argument vector only if explicit spawn_executable */
        goto badarg;
    }

    if (edir != NIL) {
        /* A working directory is expressed differently if
           spawn_executable, i.e. Unicode is handled for
           spawn_executable... */
        if (opts.spawn_type != ERTS_SPAWN_EXECUTABLE) {
            Eterm iolist;
            DeclareTmpHeap(heap,4,p);
            int r;

            /* Flatten [edir, 0] into the stack buffer 'dir'. */
            UseTmpHeap(4,p);
            heap[0] = edir;
            heap[1] = make_list(heap+2);
            heap[2] = make_small(0);
            heap[3] = NIL;
            iolist = make_list(heap);
            r = io_list_to_buf(iolist, (char*) dir, MAXPATHLEN);
            UnUseTmpHeap(4,p);
            if (r < 0) {
                goto badarg;
            }
            opts.wd = (char *) dir;
        } else {
            if ((opts.wd = erts_convert_filename_to_native(
                     edir,ERTS_ALC_T_TMP,0)) == NULL) {
                goto badarg;
            }
        }
    }

    /* exit_status is only meaningful for spawned processes. */
    if (driver != &spawn_driver && opts.exit_status) {
        goto badarg;
    }

    if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) {
        trace_virtual_sched(p, am_out);
    }

    /* Main lock dropped around driver start. */
    erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);

    port_num = erts_open_driver(driver, p->id, name_buf, &opts, err_nump);
#ifdef USE_VM_PROBES
    if (port_num >= 0 && DTRACE_ENABLED(port_open)) {
        DTRACE_CHARBUF(process_str, DTRACE_TERM_BUF_SIZE);
        DTRACE_CHARBUF(port_str, DTRACE_TERM_BUF_SIZE);

        dtrace_proc_str(p, process_str);
        erts_snprintf(port_str, sizeof(port_str), "%T",
                      erts_port[port_num].id);
        DTRACE3(port_open, process_str, name_buf, port_str);
    }
#endif
    erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);

    if (port_num < 0) {
        DEBUGF(("open_driver returned %d(%d)\n", port_num, *err_nump));
        if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) {
            trace_virtual_sched(p, am_in);
        }
        OPEN_PORT_ERROR(port_num);
    }

    if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) {
        trace_virtual_sched(p, am_in);
    }

    /* Apply per-port flags that the driver start did not cover. */
    if (binary_io) {
        erts_port_status_bor_set(&erts_port[port_num],
                                 ERTS_PORT_SFLG_BINARY_IO);
    }
    if (soft_eof) {
        erts_port_status_bor_set(&erts_port[port_num],
                                 ERTS_PORT_SFLG_SOFT_EOF);
    }
    if (linebuf && erts_port[port_num].linebuf == NULL){
        erts_port[port_num].linebuf = allocate_linebuf(linebuf);
        erts_port_status_bor_set(&erts_port[port_num],
                                 ERTS_PORT_SFLG_LINEBUF_IO);
    }

 do_return:
    /* Single cleanup point for all exits (success and failure). */
    if (name_buf)
        erts_free(ERTS_ALC_T_TMP, (void *) name_buf);
    if (opts.argv) {
        free_args(opts.argv);
    }
    if (opts.wd && opts.wd != ((char *)dir)) {
        erts_free(ERTS_ALC_T_TMP, (void *) opts.wd);
    }
    return port_num;

 badarg:
    *err_nump = BADARG;
    OPEN_PORT_ERROR(-3);
    goto do_return;  /* not reached; OPEN_PORT_ERROR already jumps */
#undef OPEN_PORT_ERROR
}
/*
 * Destructor for a temporary NIF object: return it to whichever
 * allocator type it records in obj->allocator.
 */
static void
tmp_alloc_dtor(struct enif_tmp_obj_t* obj)
{
    erts_free(obj->allocator, obj);
}
/*
 * BIF: list subtraction, erlang '--'/2.
 *
 * Copies the elements of BIF_ARG_1 into a vector, marks the first
 * occurrence matching each element of BIF_ARG_2 as deleted
 * (THE_NON_VALUE), then rebuilds the surviving elements in order.
 * O(n*m) comparisons by design.  Raises badarg on improper lists.
 */
BIF_RETTYPE subtract_2(BIF_ALIST_2)
{
    Eterm list;
    Eterm* hp;
    Uint need;
    Eterm res;
    Eterm small_vec[10];    /* Preallocated memory for small lists */
    Eterm* vec_p;
    Eterm* vp;
    int i;
    int n;
    int m;

    if ((n = list_length(BIF_ARG_1)) < 0) {
        BIF_ERROR(BIF_P, BADARG);
    }
    if ((m = list_length(BIF_ARG_2)) < 0) {
        BIF_ERROR(BIF_P, BADARG);
    }

    /* Trivial cases: nothing to subtract from / nothing to subtract. */
    if (n == 0)
        BIF_RET(NIL);
    if (m == 0)
        BIF_RET(BIF_ARG_1);

    /* allocate element vector (heap only when the stack one is too
       small) */
    if (n <= sizeof(small_vec)/sizeof(small_vec[0]))
        vec_p = small_vec;
    else
        vec_p = (Eterm*) erts_alloc(ERTS_ALC_T_TMP, n * sizeof(Eterm));

    /* PUT ALL ELEMENTS IN VP */
    vp = vec_p;
    list = BIF_ARG_1;
    i = n;
    while(i--) {
        Eterm* listp = list_val(list);
        *vp++ = CAR(listp);
        list = CDR(listp);
    }

    /* UNMARK ALL DELETED CELLS: for each rhs element, delete the first
       still-live equal element (THE_NON_VALUE marks deletion). */
    list = BIF_ARG_2;
    m = 0;  /* number of deleted elements */
    while(is_list(list)) {
        Eterm* listp = list_val(list);
        Eterm elem = CAR(listp);
        i = n;
        vp = vec_p;
        while(i--) {
            if (is_value(*vp) && eq(*vp, elem)) {
                *vp = THE_NON_VALUE;
                m++;
                break;
            }
            vp++;
        }
        list = CDR(listp);
    }

    if (m == n)      /* All deleted ? */
        res = NIL;
    else if (m == 0) /* None deleted ? — reuse the input list */
        res = BIF_ARG_1;
    else {           /* REBUILD LIST, consing back to front to keep
                        original order */
        res = NIL;
        need = 2*(n - m);
        hp = HAlloc(BIF_P, need);
        vp = vec_p + n - 1;
        while(vp >= vec_p) {
            if (is_value(*vp)) {
                res = CONS(hp, *vp, res);
                hp += 2;
            }
            vp--;
        }
    }
    if (vec_p != small_vec)
        erts_free(ERTS_ALC_T_TMP, (void *) vec_p);
    BIF_RET(res);
}
/*
 * Shrink a heap fragment 'bp' to 'size' words, relocating the terms it
 * contains and updating the 'brefs_size' root pointers in 'brefs' if the
 * fragment moves.  Growing is not supported; a shrink of less than
 * ~1/16th is skipped (only used_size is adjusted).  Returns the
 * (possibly moved) fragment.
 */
ErlHeapFragment*
erts_resize_message_buffer(ErlHeapFragment *bp, Uint size,
                           Eterm *brefs, Uint brefs_size)
{
#ifdef DEBUG
    int i;
#endif
#ifdef HARD_DEBUG
    ErlHeapFragment *dbg_bp;
    Eterm *dbg_brefs;
    Uint dbg_size;
    Uint dbg_tot_size;
    Eterm *dbg_hp;
#endif
    ErlHeapFragment* nbp;

#ifdef DEBUG
    {
        /* Sanity: every non-immediate root must point into the live
           part of the fragment before the resize. */
        Uint off_sz = size < bp->used_size ? size : bp->used_size;
        for (i = 0; i < brefs_size; i++) {
            Eterm *ptr;
            if (is_immed(brefs[i]))
                continue;
            ptr = ptr_val(brefs[i]);
            ASSERT(&bp->mem[0] <= ptr && ptr < &bp->mem[0] + off_sz);
        }
    }
#endif

    /* Don't bother reallocating for a shrink of less than 1/16th. */
    if (size >= (bp->used_size - bp->used_size / 16)) {
        bp->used_size = size;
        return bp;
    }

#ifdef HARD_DEBUG
    /* Keep deep copies of all roots to verify term equality after the
       move. */
    dbg_brefs = erts_alloc(ERTS_ALC_T_UNDEF, sizeof(Eterm *)*brefs_size);
    dbg_bp = new_message_buffer(bp->used_size);
    dbg_hp = dbg_bp->mem;
    dbg_tot_size = 0;
    for (i = 0; i < brefs_size; i++) {
        dbg_size = size_object(brefs[i]);
        dbg_tot_size += dbg_size;
        dbg_brefs[i] = copy_struct(brefs[i], dbg_size, &dbg_hp,
                                   &dbg_bp->off_heap);
    }
    ASSERT(dbg_tot_size == (size < bp->used_size ? size : bp->used_size));
#endif

    nbp = (ErlHeapFragment*) ERTS_HEAP_REALLOC(ERTS_ALC_T_HEAP_FRAG,
                                               (void *) bp,
                                               ERTS_HEAP_FRAG_SIZE(bp->alloc_size),
                                               ERTS_HEAP_FRAG_SIZE(size));
    if (bp != nbp) {
        /* The fragment moved: patch off-heap chains, internal heap
           pointers and caller-supplied roots by the move offset. */
        Uint off_sz = size < nbp->used_size ? size : nbp->used_size;
        Eterm *sp = &bp->mem[0];
        Eterm *ep = sp + off_sz;
        Sint offs = &nbp->mem[0] - sp;
        erts_offset_off_heap(&nbp->off_heap, offs, sp, ep);
        erts_offset_heap(&nbp->mem[0], off_sz, offs, sp, ep);
        if (brefs && brefs_size)
            erts_offset_heap_ptr(brefs, brefs_size, offs, sp, ep);
#ifdef DEBUG
        for (i = 0; i < brefs_size; i++) {
            Eterm *ptr;
            if (is_immed(brefs[i]))
                continue;
            ptr = ptr_val(brefs[i]);
            ASSERT(&nbp->mem[0] <= ptr && ptr < &nbp->mem[0] + off_sz);
        }
#endif
    }
    nbp->alloc_size = size;
    nbp->used_size = size;

#ifdef HARD_DEBUG
    for (i = 0; i < brefs_size; i++)
        ASSERT(eq(dbg_brefs[i], brefs[i]));
    free_message_buffer(dbg_bp);
    erts_free(ERTS_ALC_T_UNDEF, dbg_brefs);
#endif
    return nbp;
}
/*
 * Release the error string attached to a dynamic-driver-load error
 * descriptor, if one was allocated.  (The guard stays: erts_free, unlike
 * free(), must not be given a NULL pointer.)
 */
void
erts_sys_ddll_free_error(ErtsSysDdllError* err)
{
    char *msg = err->str;

    if (msg != NULL)
        erts_free(ERTS_ALC_T_DDLL_TMP_BUF, msg);
}