static erts_lc_locked_locks_t *
create_locked_locks(char *thread_name)
{
    erts_lc_locked_locks_t *l_lcks = malloc(sizeof(erts_lc_locked_locks_t));
    if (!l_lcks)
        ERTS_INTERNAL_ERROR("Lock checker failed to allocate memory!");

    l_lcks->thread_name = strdup(thread_name ? thread_name : "unknown");
    if (!l_lcks->thread_name)
        ERTS_INTERNAL_ERROR("Lock checker failed to allocate memory!");

    l_lcks->emu_thread = 0;
    l_lcks->tid = erts_thr_self();
    l_lcks->required.first = NULL;
    l_lcks->required.last = NULL;
    l_lcks->locked.first = NULL;
    l_lcks->locked.last = NULL;
    l_lcks->prev = NULL;

    lc_lock();
    l_lcks->next = erts_locked_locks;
    if (erts_locked_locks)
        erts_locked_locks->prev = l_lcks;
    erts_locked_locks = l_lcks;
    lc_unlock();

    erts_tsd_set(locks_key, (void *) l_lcks);

    return l_lcks;
}
static lc_thread_t *
create_thread_data(char *thread_name)
{
    lc_thread_t *thr = malloc(sizeof(lc_thread_t));
    if (!thr)
        ERTS_INTERNAL_ERROR("Lock checker failed to allocate memory!");

    thr->thread_name = strdup(thread_name ? thread_name : "unknown");
    if (!thr->thread_name)
        ERTS_INTERNAL_ERROR("Lock checker failed to allocate memory!");

    thr->emu_thread = 0;
    thr->tid = erts_thr_self();
    thr->required.first = NULL;
    thr->required.last = NULL;
    thr->locked.first = NULL;
    thr->locked.last = NULL;
    thr->prev = NULL;
    thr->free_blocks = NULL;
    thr->chunks = NULL;
    sys_memzero(&thr->matrix, sizeof(thr->matrix));

    lc_lock_threads();
    thr->next = lc_threads;
    if (lc_threads)
        lc_threads->prev = thr;
    lc_threads = thr;
    lc_unlock_threads();

    erts_tsd_set(locks_key, (void *) thr);

    return thr;
}
static lc_locked_lock_t *lc_core_alloc(lc_thread_t* thr)
{
    int i;
    lc_alloc_chunk_t* chunk;
    lc_free_block_t* fbs;

    chunk = (lc_alloc_chunk_t*) malloc(sizeof(lc_alloc_chunk_t));
    if (!chunk) {
        ERTS_INTERNAL_ERROR("Lock checker failed to allocate memory!");
    }
    chunk->next = thr->chunks;
    thr->chunks = chunk;

    fbs = chunk->array;

    /* Blocks 1..N-1 are linked onto the thread's free list; block 0 is
     * handed straight to the caller. In debug builds each block is
     * poisoned with 0xdf before it goes on the list. */
    for (i = 1; i < ERTS_LC_FB_CHUNK_SIZE - 1; i++) {
#ifdef DEBUG
        sys_memset((void *) &fbs[i], 0xdf, sizeof(lc_free_block_t));
#endif
        fbs[i].next = &fbs[i+1];
    }
#ifdef DEBUG
    sys_memset((void *) &fbs[ERTS_LC_FB_CHUNK_SIZE-1],
               0xdf, sizeof(lc_free_block_t));
#endif
    fbs[ERTS_LC_FB_CHUNK_SIZE-1].next = thr->free_blocks;
    thr->free_blocks = &fbs[1];

    return &fbs[0].lock;
}
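/* Editor's note: for context, a minimal sketch of the fast-path allocator
 * that would consume the free list built above. The lc_alloc name is an
 * assumption, not part of the code shown here; only the lc_free_block_t
 * layout (.next and .lock) and lc_core_alloc() are taken from above. */
static ERTS_INLINE lc_locked_lock_t *lc_alloc(lc_thread_t *thr)
{
    lc_locked_lock_t *res;
    if (!thr->free_blocks)
        res = lc_core_alloc(thr);      /* slow path: carve out a new chunk */
    else {
        res = &thr->free_blocks->lock; /* pop the head of the free list */
        thr->free_blocks = thr->free_blocks->next;
    }
    return res;
}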
void
erts_lc_init(void)
{
#ifdef ERTS_LC_STATIC_ALLOC
    int i;
    static erts_lc_free_block_t fbs[ERTS_LC_FB_CHUNK_SIZE];
    for (i = 0; i < ERTS_LC_FB_CHUNK_SIZE - 1; i++) {
#ifdef DEBUG
        memset((void *) &fbs[i], 0xdf, sizeof(erts_lc_free_block_t));
#endif
        fbs[i].next = &fbs[i+1];
    }
#ifdef DEBUG
    memset((void *) &fbs[ERTS_LC_FB_CHUNK_SIZE-1],
           0xdf, sizeof(erts_lc_free_block_t));
#endif
    fbs[ERTS_LC_FB_CHUNK_SIZE-1].next = NULL;
    free_blocks = &fbs[0];
#else /* #ifdef ERTS_LC_STATIC_ALLOC */
    free_blocks = NULL;
#endif /* #ifdef ERTS_LC_STATIC_ALLOC */

    if (ethr_spinlock_init(&free_blocks_lock) != 0)
        ERTS_INTERNAL_ERROR("spinlock_init failed");

    erts_tsd_key_create(&locks_key, "erts_lock_check_key");
}
/* Unix implementation. */
int
efile_seek(efile_data_t *d, enum efile_seek_t seek, Sint64 offset,
           Sint64 *new_position)
{
    efile_unix_t *u = (efile_unix_t*)d;
    off_t result;
    int whence;

    switch(seek) {
    case EFILE_SEEK_BOF: whence = SEEK_SET; break;
    case EFILE_SEEK_CUR: whence = SEEK_CUR; break;
    case EFILE_SEEK_EOF: whence = SEEK_END; break;
    default: ERTS_INTERNAL_ERROR("Invalid seek parameter");
    }

    result = lseek(u->fd, offset, whence);

    /*
     * The man page for lseek (on SunOS 5) says:
     *
     * "if fildes is a remote file descriptor and offset is negative,
     * lseek() returns the file pointer even if it is negative."
     */
    if(result < 0 && errno == 0) {
        errno = EINVAL;
    }

    if(result < 0) {
        u->common.posix_errno = errno;
        return 0;
    }

    (*new_position) = result;

    return 1;
}
/*
 * erts_internal:port_close/1 is used by the
 * erlang:port_close/1 BIF.
 */
BIF_RETTYPE erts_internal_port_close_1(BIF_ALIST_1)
{
    Eterm ref;
    Port *prt;

#ifdef DEBUG
    ref = NIL;
#endif

    prt = sig_lookup_port(BIF_P, BIF_ARG_1);
    if (!prt)
        BIF_RET(am_badarg);

    switch (erts_port_exit(BIF_P, 0, prt, prt->common.id, am_normal, &ref)) {
    case ERTS_PORT_OP_CALLER_EXIT:
    case ERTS_PORT_OP_BADARG:
    case ERTS_PORT_OP_DROPPED:
        BIF_RET(am_badarg);
    case ERTS_PORT_OP_SCHEDULED:
        ASSERT(is_internal_ref(ref));
        BIF_RET(ref);
    case ERTS_PORT_OP_DONE:
        BIF_RET(am_true);
    default:
        ERTS_INTERNAL_ERROR("Unexpected erts_port_exit() result");
        BIF_RET(am_internal_error);
    }
}
/*
 * erts_internal:port_connect/2 is used by the
 * erlang:port_connect/2 BIF.
 */
BIF_RETTYPE erts_internal_port_connect_2(BIF_ALIST_2)
{
    Eterm ref;
    Port* prt;

    prt = sig_lookup_port(BIF_P, BIF_ARG_1);
    if (!prt)
        BIF_RET(am_badarg);

#ifdef DEBUG
    ref = NIL;
#endif

    switch (erts_port_connect(BIF_P, 0, prt, BIF_P->common.id,
                              BIF_ARG_2, &ref)) {
    case ERTS_PORT_OP_CALLER_EXIT:
    case ERTS_PORT_OP_BADARG:
    case ERTS_PORT_OP_DROPPED:
        BIF_RET(am_badarg);
    case ERTS_PORT_OP_SCHEDULED:
        ASSERT(is_internal_ordinary_ref(ref));
        BIF_RET(ref);
        break;
    case ERTS_PORT_OP_DONE:
        BIF_RET(am_true);
        break;
    default:
        ERTS_INTERNAL_ERROR("Unexpected erts_port_connect() result");
        BIF_RET(am_internal_error);
    }
}
static void *lc_core_alloc(void)
{
    int i;
    erts_lc_free_block_t *fbs;

    /* Drop the global lock while calling malloc(); it is retaken below,
     * before the new blocks are spliced onto the shared free list. */
    lc_unlock();
    fbs = (erts_lc_free_block_t *) malloc(sizeof(erts_lc_free_block_t)
                                          * ERTS_LC_FB_CHUNK_SIZE);
    if (!fbs) {
        ERTS_INTERNAL_ERROR("Lock checker failed to allocate memory!");
    }
    for (i = 1; i < ERTS_LC_FB_CHUNK_SIZE - 1; i++) {
#ifdef DEBUG
        memset((void *) &fbs[i], 0xdf, sizeof(erts_lc_free_block_t));
#endif
        fbs[i].next = &fbs[i+1];
    }
#ifdef DEBUG
    memset((void *) &fbs[ERTS_LC_FB_CHUNK_SIZE-1],
           0xdf, sizeof(erts_lc_free_block_t));
#endif
    lc_lock();
    fbs[ERTS_LC_FB_CHUNK_SIZE-1].next = free_blocks;
    free_blocks = &fbs[1];
    return (void *) &fbs[0];
}
static const char *rw_op_str(erts_lock_options_t options)
{
    if(options == ERTS_LOCK_OPTIONS_WRITE) {
        ERTS_INTERNAL_ERROR("Only write flag present");
    }

    return erts_lock_options_get_short_desc(options);
}
void
erts_lc_init(void)
{
    if (ethr_spinlock_init(&lc_threads_lock) != 0)
        ERTS_INTERNAL_ERROR("spinlock_init failed");

    erts_tsd_key_create(&locks_key, "erts_lock_check_key");
}
/** @brief Reads an entire file into \c result, stopping after \c size bytes or
 * EOF. It will read until EOF if size is 0. */
static posix_errno_t read_file(efile_data_t *d, size_t size,
                               ErlNifBinary *result) {
    size_t initial_buffer_size;
    ssize_t bytes_read;

    if(size == 0) {
        initial_buffer_size = 16 << 10;
    } else {
        initial_buffer_size = size;
    }

    if(!enif_alloc_binary(initial_buffer_size, result)) {
        return ENOMEM;
    }

    bytes_read = 0;

    for(;;) {
        ssize_t block_bytes_read;
        SysIOVec read_vec[1];

        read_vec[0].iov_base = result->data + bytes_read;
        read_vec[0].iov_len = result->size - bytes_read;

        block_bytes_read = efile_readv(d, read_vec, 1);

        if(block_bytes_read < 0) {
            enif_release_binary(result);
            return d->posix_errno;
        }

        bytes_read += block_bytes_read;

        if(block_bytes_read < (result->size - bytes_read)) {
            /* EOF */
            break;
        } else if(bytes_read == size) {
            break;
        }

        if(!enif_realloc_binary(result, bytes_read * 2)) {
            enif_release_binary(result);
            return ENOMEM;
        }
    }

    /* The file may have shrunk since we queried its size, so we have to do
     * this even when the size is known. */
    if(bytes_read < result->size && !enif_realloc_binary(result, bytes_read)) {
        ERTS_INTERNAL_ERROR("Failed to shrink read_file result.");
    }

    return 0;
}
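/* Editor's note: a worked trace of the loop above for the size == 0
 * (read-to-EOF) case. The binary starts at 16 KiB and doubles every time
 * it is completely filled:
 *
 *     result->size:  16K -> 32K -> 64K -> ...
 *     bytes_read:    16K    32K    64K
 *
 * The EOF test compares block_bytes_read against the space left *after*
 * bytes_read has been advanced, so a short read breaks out immediately
 * only when it returned less than half of what was requested; otherwise
 * one further readv() returning 0 trips the test on the next iteration.
 * Either way the loop terminates, and the final realloc shrinks the
 * binary to the exact number of bytes read. */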
BIF_RETTYPE erts_internal_port_control_3(BIF_ALIST_3)
{
    Port* prt;
    Eterm retval;
    Uint uint_op;
    unsigned int op;
    erts_aint32_t state;

    prt = sig_lookup_port(BIF_P, BIF_ARG_1);
    if (!prt)
        BIF_RET(am_badarg);

    if (!term_to_Uint(BIF_ARG_2, &uint_op))
        BIF_RET(am_badarg);

    if (uint_op > (Uint) UINT_MAX)
        BIF_RET(am_badarg);

    op = (unsigned int) uint_op;

    switch (erts_port_control(BIF_P, prt, op, BIF_ARG_3, &retval)) {
    case ERTS_PORT_OP_CALLER_EXIT:
    case ERTS_PORT_OP_BADARG:
    case ERTS_PORT_OP_DROPPED:
        retval = am_badarg;
        break;
    case ERTS_PORT_OP_SCHEDULED:
        ASSERT(is_internal_ordinary_ref(retval));
        break;
    case ERTS_PORT_OP_DONE:
        ASSERT(is_not_internal_ref(retval));
        break;
    default:
        ERTS_INTERNAL_ERROR("Unexpected erts_port_control() result");
        retval = am_internal_error;
        break;
    }

    state = erts_smp_atomic32_read_acqb(&BIF_P->state);
    if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT)) {
#ifdef ERTS_SMP
        if (state & ERTS_PSFLG_PENDING_EXIT)
            erts_handle_pending_exit(BIF_P, ERTS_PROC_LOCK_MAIN);
#endif
        ERTS_BIF_EXITED(BIF_P);
    }

    BIF_RET(retval);
}
void
erts_lc_set_thread_name(char *thread_name)
{
    lc_thread_t *thr = get_my_locked_locks();
    if (!thr)
        thr = create_thread_data(thread_name);
    else {
        ASSERT(thr->thread_name);
        free((void *) thr->thread_name);
        thr->thread_name = strdup(thread_name ? thread_name : "unknown");
        if (!thr->thread_name)
            ERTS_INTERNAL_ERROR("strdup failed");
    }
    thr->emu_thread = 1;
}
static char *
rw_op_str(Uint16 flags)
{
    switch (flags & ERTS_LC_FLG_LO_READ_WRITE) {
    case ERTS_LC_FLG_LO_READ_WRITE:
        return " (rw)";
    case ERTS_LC_FLG_LO_READ:
        return " (r)";
    case ERTS_LC_FLG_LO_WRITE:
        ERTS_INTERNAL_ERROR("Only write flag present");
    default:
        break;
    }
    return "";
}
void
erts_lc_set_thread_name(char *thread_name)
{
    erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
    if (!l_lcks)
        l_lcks = create_locked_locks(thread_name);
    else {
        ASSERT(l_lcks->thread_name);
        free((void *) l_lcks->thread_name);
        l_lcks->thread_name = strdup(thread_name ? thread_name : "unknown");
        if (!l_lcks->thread_name)
            ERTS_INTERNAL_ERROR("strdup failed");
    }
    l_lcks->emu_thread = 1;
}
static void owner_death_callback(ErlNifEnv* env, void* obj, ErlNifPid* pid,
                                 ErlNifMonitor* mon)
{
    efile_data_t *d = (efile_data_t*)obj;

    (void)env;
    (void)pid;
    (void)mon;

    for(;;) {
        enum efile_state_t previous_state;

        previous_state = erts_atomic32_cmpxchg_acqb(&d->state,
            EFILE_STATE_CLOSED, EFILE_STATE_IDLE);

        switch(previous_state) {
        case EFILE_STATE_IDLE:
            {
                /* We cannot close the file here as that could block a normal
                 * scheduler, so we tell erts_prim_file to do it for us.
                 *
                 * This can in turn become a bottleneck (especially in cases
                 * like NFS failure), but it's less problematic than blocking
                 * thread progress. */
                ERL_NIF_TERM message, file_ref;

                file_ref = enif_make_resource(env, d);
                message = enif_make_tuple2(env, am_close, file_ref);

                if(!enif_send(env, &erts_prim_file_pid, NULL, message)) {
                    ERTS_INTERNAL_ERROR("Failed to defer prim_file close.");
                }

                return;
            }
        case EFILE_STATE_CLOSE_PENDING:
        case EFILE_STATE_CLOSED:
            /* We're either already closed or managed to mark ourselves for
             * closure in the previous iteration. */
            return;
        case EFILE_STATE_BUSY:
            /* Schedule ourselves to be closed once the current operation
             * finishes, retrying the [IDLE -> CLOSED] transition in case we
             * narrowly passed the [BUSY -> IDLE] one. */
            erts_atomic32_cmpxchg_nob(&d->state,
                EFILE_STATE_CLOSE_PENDING, EFILE_STATE_BUSY);
            break;
        }
    }
}
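/* Editor's note: a hedged sketch of the counterpart transition the state
 * machine above relies on; the function name is hypothetical and the real
 * release path lives elsewhere in prim_file. When an operation finishes,
 * the worker attempts [BUSY -> IDLE]; if the owner died mid-operation and
 * left EFILE_STATE_CLOSE_PENDING behind, the worker performs the close
 * that was deferred to it. */
static void example_release_after_op(efile_data_t *d) {
    enum efile_state_t previous_state;

    previous_state = erts_atomic32_cmpxchg_relb(&d->state,
        EFILE_STATE_IDLE, EFILE_STATE_BUSY);

    if(previous_state == EFILE_STATE_CLOSE_PENDING) {
        /* The [BUSY -> IDLE] transition failed because the owner died
         * while we were busy; finish the deferred close ourselves. */
        erts_atomic32_set_nob(&d->state, EFILE_STATE_CLOSED);
        /* ...close the underlying handle here... */
    }
}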
BIF_RETTYPE erts_internal_port_call_3(BIF_ALIST_3)
{
    Port* prt;
    Eterm retval;
    Uint uint_op;
    unsigned int op;
    erts_aint32_t state;

    prt = sig_lookup_port(BIF_P, BIF_ARG_1);
    if (!prt)
        BIF_RET(am_badarg);

    if (!term_to_Uint(BIF_ARG_2, &uint_op))
        BIF_RET(am_badarg);

    if (uint_op > (Uint) UINT_MAX)
        BIF_RET(am_badarg);

    op = (unsigned int) uint_op;

    switch (erts_port_call(BIF_P, prt, op, BIF_ARG_3, &retval)) {
    case ERTS_PORT_OP_DROPPED:
    case ERTS_PORT_OP_BADARG:
        retval = am_badarg;
        break;
    case ERTS_PORT_OP_SCHEDULED:
        ASSERT(is_internal_ordinary_ref(retval));
        break;
    case ERTS_PORT_OP_DONE:
        ASSERT(is_not_internal_ref(retval));
        break;
    default:
        ERTS_INTERNAL_ERROR("Unexpected erts_port_call() result");
        retval = am_internal_error;
        break;
    }

    state = erts_atomic32_read_acqb(&BIF_P->state);
    if (state & ERTS_PSFLG_EXITING)
        ERTS_BIF_EXITED(BIF_P);

    BIF_RET(retval);
}
static ERL_NIF_TERM pread_nif_impl(efile_data_t *d, ErlNifEnv *env, int argc,
                                   const ERL_NIF_TERM argv[]) {
    Sint64 bytes_read, block_size, offset;
    SysIOVec read_vec[1];
    ErlNifBinary result;

    ASSERT(argc == 2);

    if(!enif_is_number(env, argv[0]) || !enif_is_number(env, argv[1])) {
        return enif_make_badarg(env);
    }

    if(!enif_get_int64(env, argv[0], &offset) ||
       !enif_get_int64(env, argv[1], &block_size) ||
       (offset < 0 || block_size < 0)) {
        return posix_error_to_tuple(env, EINVAL);
    }

    if(!enif_alloc_binary(block_size, &result)) {
        return posix_error_to_tuple(env, ENOMEM);
    }

    read_vec[0].iov_base = result.data;
    read_vec[0].iov_len = result.size;

    bytes_read = efile_preadv(d, offset, read_vec, 1);

    if(bytes_read < 0) {
        enif_release_binary(&result);
        return posix_error_to_tuple(env, d->posix_errno);
    } else if(bytes_read == 0) {
        enif_release_binary(&result);
        return am_eof;
    }

    if(bytes_read < block_size && !enif_realloc_binary(&result, bytes_read)) {
        ERTS_INTERNAL_ERROR("Failed to shrink pread result.");
    }

    return enif_make_tuple2(env, am_ok, enif_make_binary(env, &result));
}
BIF_RETTYPE erts_internal_port_info_2(BIF_ALIST_2)
{
    Eterm retval;
    Port* prt;

    if (is_internal_port(BIF_ARG_1) || is_atom(BIF_ARG_1)) {
        prt = sig_lookup_port(BIF_P, BIF_ARG_1);
        if (!prt)
            BIF_RET(am_undefined);
    }
    else if (is_external_port(BIF_ARG_1)) {
        if (external_port_dist_entry(BIF_ARG_1) == erts_this_dist_entry)
            BIF_RET(am_undefined);
        else
            BIF_RET(am_badarg);
    }
    else {
        BIF_RET(am_badarg);
    }

    switch (erts_port_info(BIF_P, prt, BIF_ARG_2, &retval)) {
    case ERTS_PORT_OP_CALLER_EXIT:
    case ERTS_PORT_OP_BADARG:
        BIF_RET(am_badarg);
    case ERTS_PORT_OP_DROPPED:
        BIF_RET(am_undefined);
    case ERTS_PORT_OP_SCHEDULED:
        ASSERT(is_internal_ordinary_ref(retval));
        BIF_RET(retval);
    case ERTS_PORT_OP_DONE:
        ASSERT(is_not_internal_ref(retval));
        BIF_RET(retval);
    default:
        ERTS_INTERNAL_ERROR("Unexpected erts_port_info() result");
        BIF_RET(am_internal_error);
    }
}
/* Windows implementation. */
int
efile_seek(efile_data_t *d, enum efile_seek_t seek, Sint64 offset,
           Sint64 *new_position)
{
    efile_win_t *w = (efile_win_t*)d;

    LARGE_INTEGER large_offset, large_new_position;
    DWORD whence;

    switch(seek) {
    case EFILE_SEEK_BOF: whence = FILE_BEGIN; break;
    case EFILE_SEEK_CUR: whence = FILE_CURRENT; break;
    case EFILE_SEEK_EOF: whence = FILE_END; break;
    default: ERTS_INTERNAL_ERROR("Invalid seek parameter");
    }

    large_offset.QuadPart = offset;

    if(!SetFilePointerEx(w->handle, large_offset, &large_new_position,
                         whence)) {
        w->common.posix_errno = windows_to_posix_errno(GetLastError());
        return 0;
    }

    (*new_position) = large_new_position.QuadPart;

    return 1;
}
Eterm
erts_debug_make_unique_integer(Process *c_p, Eterm etval0, Eterm etval1)
{
    Uint64 val0, val1;
    Uint hsz;
    Eterm res, *hp, *end_hp;

    if (!term_to_Uint64(etval0, &val0))
        return THE_NON_VALUE;

    if (!term_to_Uint64(etval1, &val1))
        return THE_NON_VALUE;

    /* Two-pass build: the first call (NULL heap pointer) only accumulates
     * the required heap size into hsz; the second call builds the term
     * and must consume exactly that many heap words. */
    bld_unique_integer_term(NULL, &hsz, val0, val1, 0);

    hp = HAlloc(c_p, hsz);
    end_hp = hp + hsz;

    res = bld_unique_integer_term(&hp, NULL, val0, val1, 0);
    if (hp != end_hp)
        ERTS_INTERNAL_ERROR("Heap allocation error");

    return res;
}
Eterm
erts_change_message_queue_management(Process *c_p, Eterm new_state)
{
    Eterm res;

#ifdef DEBUG
    if (c_p->flags & F_OFF_HEAP_MSGQ) {
        ASSERT(erts_atomic32_read_nob(&c_p->state)
               & ERTS_PSFLG_OFF_HEAP_MSGQ);
    }
    else {
        if (c_p->flags & F_OFF_HEAP_MSGQ_CHNG) {
            ASSERT(erts_atomic32_read_nob(&c_p->state)
                   & ERTS_PSFLG_OFF_HEAP_MSGQ);
        }
        else {
            ASSERT(!(erts_atomic32_read_nob(&c_p->state)
                     & ERTS_PSFLG_OFF_HEAP_MSGQ));
        }
    }
#endif

    switch (c_p->flags & (F_OFF_HEAP_MSGQ|F_ON_HEAP_MSGQ)) {

    case F_OFF_HEAP_MSGQ:
        res = am_off_heap;

        switch (new_state) {
        case am_off_heap:
            break;
        case am_on_heap:
            c_p->flags |= F_ON_HEAP_MSGQ;
            c_p->flags &= ~F_OFF_HEAP_MSGQ;
            erts_atomic32_read_bor_nob(&c_p->state, ERTS_PSFLG_ON_HEAP_MSGQ);
            /*
             * We are not allowed to clear ERTS_PSFLG_OFF_HEAP_MSGQ
             * if an off heap change is ongoing. It will be adjusted
             * when the change completes...
             */
            if (!(c_p->flags & F_OFF_HEAP_MSGQ_CHNG)) {
                /* Safe to clear ERTS_PSFLG_OFF_HEAP_MSGQ... */
                erts_atomic32_read_band_nob(&c_p->state,
                                            ~ERTS_PSFLG_OFF_HEAP_MSGQ);
            }
            break;
        default:
            res = THE_NON_VALUE; /* badarg */
            break;
        }
        break;

    case F_ON_HEAP_MSGQ:
        res = am_on_heap;

        switch (new_state) {
        case am_on_heap:
            break;
        case am_off_heap:
            c_p->flags &= ~F_ON_HEAP_MSGQ;
            erts_atomic32_read_band_nob(&c_p->state,
                                        ~ERTS_PSFLG_ON_HEAP_MSGQ);
            goto change_to_off_heap;
        default:
            res = THE_NON_VALUE; /* badarg */
            break;
        }
        break;

    default:
        res = am_error;
        ERTS_INTERNAL_ERROR("Inconsistent message queue management state");
        break;
    }

    return res;

change_to_off_heap:

    c_p->flags |= F_OFF_HEAP_MSGQ;

    /*
     * We do not have to schedule a change if
     * we have an ongoing off heap change...
     */
    if (!(c_p->flags & F_OFF_HEAP_MSGQ_CHNG)) {
        ErtsChangeOffHeapMessageQueue *cohmq;
        /*
         * Need to set ERTS_PSFLG_OFF_HEAP_MSGQ and wait for
         * thread progress before completing the change in
         * order to ensure that all senders observe that
         * messages should be passed off heap. When the
         * change has completed, GC does not need to inspect
         * the message queue at all.
         */
        erts_atomic32_read_bor_nob(&c_p->state, ERTS_PSFLG_OFF_HEAP_MSGQ);
        c_p->flags |= F_OFF_HEAP_MSGQ_CHNG;
        cohmq = erts_alloc(ERTS_ALC_T_MSGQ_CHNG,
                           sizeof(ErtsChangeOffHeapMessageQueue));
        cohmq->pid = c_p->common.id;
        erts_schedule_thr_prgr_later_op(change_off_heap_msgq,
                                        (void *) cohmq,
                                        &cohmq->lop);
    }

    return res;
}
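/* Editor's note: for orientation, the function above is the runtime side
 * of the Erlang-level process flag message_queue_data, i.e. roughly:
 *
 *     process_flag(message_queue_data, off_heap | on_heap)
 *
 * It returns the previous setting as an atom; THE_NON_VALUE maps to a
 * badarg at the BIF level. */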
BIF_RETTYPE erts_internal_port_command_3(BIF_ALIST_3)
{
    BIF_RETTYPE res;
    Port *prt;
    int flags = 0;
    Eterm ref;

    if (is_not_nil(BIF_ARG_3)) {
        Eterm l = BIF_ARG_3;
        while (is_list(l)) {
            Eterm* cons = list_val(l);
            Eterm car = CAR(cons);
            if (car == am_force)
                flags |= ERTS_PORT_SIG_FLG_FORCE;
            else if (car == am_nosuspend)
                flags |= ERTS_PORT_SIG_FLG_NOSUSPEND;
            else
                BIF_RET(am_badarg);
            l = CDR(cons);
        }
        if (!is_nil(l))
            BIF_RET(am_badarg);
    }

    prt = sig_lookup_port(BIF_P, BIF_ARG_1);
    if (!prt)
        BIF_RET(am_badarg);

    if (flags & ERTS_PORT_SIG_FLG_FORCE) {
        if (!(prt->drv_ptr->flags & ERL_DRV_FLAG_SOFT_BUSY))
            BIF_RET(am_notsup);
    }

#ifdef DEBUG
    ref = NIL;
#endif

    switch (erts_port_output(BIF_P, flags, prt, prt->common.id,
                             BIF_ARG_2, &ref)) {
    case ERTS_PORT_OP_CALLER_EXIT:
    case ERTS_PORT_OP_BADARG:
    case ERTS_PORT_OP_DROPPED:
        ERTS_BIF_PREP_RET(res, am_badarg);
        break;
    case ERTS_PORT_OP_BUSY:
        ASSERT(!(flags & ERTS_PORT_SIG_FLG_FORCE));
        if (flags & ERTS_PORT_SIG_FLG_NOSUSPEND)
            ERTS_BIF_PREP_RET(res, am_false);
        else {
            erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, prt);
            ERTS_BIF_PREP_YIELD3(res,
                                 bif_export[BIF_erts_internal_port_command_3],
                                 BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
        }
        break;
    case ERTS_PORT_OP_BUSY_SCHEDULED:
        ASSERT(!(flags & ERTS_PORT_SIG_FLG_FORCE));
        /* Fall through... */
    case ERTS_PORT_OP_SCHEDULED:
        ASSERT(is_internal_ordinary_ref(ref));
        ERTS_BIF_PREP_RET(res, ref);
        break;
    case ERTS_PORT_OP_DONE:
        ERTS_BIF_PREP_RET(res, am_true);
        break;
    default:
        ERTS_INTERNAL_ERROR("Unexpected erts_port_output() result");
        break;
    }

    if (ERTS_PROC_IS_EXITING(BIF_P)) {
        KILL_CATCHES(BIF_P); /* Must exit */
        ERTS_BIF_PREP_ERROR(res, BIF_P, EXC_ERROR);
    }

    return res;
}
/* This undocumented function reads a pointer and then reads the data block
 * described by said pointer. It was reverse-engineered from the old
 * implementation so while all tests pass it may not be entirely correct. Our
 * current understanding is as follows:
 *
 * Pointer layout:
 *
 * <<Size:1/integer-unit:32, Offset:1/integer-unit:32>>
 *
 * Where Offset is the -absolute- address to the data block.
 *
 * *) If we fail to read the pointer block in its entirety, we return eof.
 * *) If Size is larger than the provided max_payload_size, we return eof.
 * *) If we fail to read any data whatsoever at Offset, we return
 *    {ok, {Size, Offset, eof}}
 * *) Otherwise, we return {ok, {Size, Offset, Data}}. Note that the size
 *    of Data may be smaller than Size if we encounter EOF before we could
 *    read the entire block.
 *
 * On errors we'll return {error, posix()} regardless of whether they
 * happened before or after reading the pointer block. */
static ERL_NIF_TERM ipread_s32bu_p32bu_nif_impl(efile_data_t *d,
                                                ErlNifEnv *env, int argc,
                                                const ERL_NIF_TERM argv[]) {
    Sint64 payload_offset, payload_size;

    SysIOVec read_vec[1];
    Sint64 bytes_read;

    ErlNifBinary payload;

    if(argc != 2 || !enif_is_number(env, argv[0])
                 || !enif_is_number(env, argv[1])) {
        return enif_make_badarg(env);
    }

    {
        Sint64 max_payload_size, pointer_offset;
        unsigned char pointer_block[8];

        if(!enif_get_int64(env, argv[0], &pointer_offset) ||
           !enif_get_int64(env, argv[1], &max_payload_size) ||
           (pointer_offset < 0 || max_payload_size >= 1u << 31)) {
            return posix_error_to_tuple(env, EINVAL);
        }

        read_vec[0].iov_base = pointer_block;
        read_vec[0].iov_len = sizeof(pointer_block);

        bytes_read = efile_preadv(d, pointer_offset, read_vec, 1);

        if(bytes_read < 0) {
            return posix_error_to_tuple(env, d->posix_errno);
        } else if(bytes_read < sizeof(pointer_block)) {
            return am_eof;
        }

        payload_size = (Uint32)get_int32(&pointer_block[0]);
        payload_offset = (Uint32)get_int32(&pointer_block[4]);

        if(payload_size > max_payload_size) {
            return am_eof;
        }
    }

    if(!enif_alloc_binary(payload_size, &payload)) {
        return posix_error_to_tuple(env, ENOMEM);
    }

    read_vec[0].iov_base = payload.data;
    read_vec[0].iov_len = payload.size;

    bytes_read = efile_preadv(d, payload_offset, read_vec, 1);

    if(bytes_read < 0) {
        return posix_error_to_tuple(env, d->posix_errno);
    } else if(bytes_read == 0) {
        enif_release_binary(&payload);

        return enif_make_tuple2(env, am_ok,
            enif_make_tuple3(env,
                enif_make_uint(env, payload_size),
                enif_make_uint(env, payload_offset),
                am_eof));
    }

    if(bytes_read < payload.size &&
       !enif_realloc_binary(&payload, bytes_read)) {
        ERTS_INTERNAL_ERROR("Failed to shrink ipread payload.");
    }

    return enif_make_tuple2(env, am_ok,
        enif_make_tuple3(env,
            enif_make_uint(env, payload_size),
            enif_make_uint(env, payload_offset),
            enif_make_binary(env, &payload)));
}
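/* Editor's note: a worked example of the pointer-block decoding above.
 * get_int32() reads a big-endian 32-bit integer, so a pointer block of
 *
 *     00 00 00 10  00 00 02 00
 *
 * decodes to Size = 16 and Offset = 512, i.e. "read at most 16 bytes
 * starting at absolute offset 512". An equivalent stand-alone decoder,
 * for illustration only: */
static Uint32 example_read_be32(const unsigned char *p) {
    return ((Uint32)p[0] << 24) | ((Uint32)p[1] << 16) |
           ((Uint32)p[2] <<  8) |  (Uint32)p[3];
}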
/* Static-allocation variant (cf. the ERTS_LC_STATIC_ALLOC pool set up in
 * erts_lc_init() above): the free-block pool is fixed in size, so running
 * out of blocks is fatal. ERTS_INTERNAL_ERROR() aborts and never returns,
 * which is why no value is returned here. */
static void *lc_core_alloc(void)
{
    lc_unlock();
    ERTS_INTERNAL_ERROR("Lock checker out of memory!\n");
}