static void
release_update_permission(int release_updater)
{
    erts_mtx_lock(&update_table_permission_mtx);
    ASSERT(updater_process != NULL);

    if (release_updater) {
        erts_proc_lock(updater_process, ERTS_PROC_LOCK_STATUS);
        if (!ERTS_PROC_IS_EXITING(updater_process)) {
            erts_resume(updater_process, ERTS_PROC_LOCK_STATUS);
        }
        erts_proc_unlock(updater_process, ERTS_PROC_LOCK_STATUS);
    }
    updater_process = NULL;

    while (update_queue != NULL) { /* Unleash the entire herd */
        struct update_queue_item* qitem = update_queue;
        erts_proc_lock(qitem->p, ERTS_PROC_LOCK_STATUS);
        if (!ERTS_PROC_IS_EXITING(qitem->p)) {
            erts_resume(qitem->p, ERTS_PROC_LOCK_STATUS);
        }
        erts_proc_unlock(qitem->p, ERTS_PROC_LOCK_STATUS);
        update_queue = qitem->next;
        erts_proc_dec_refc(qitem->p);
        erts_free(ERTS_ALC_T_PERSISTENT_LOCK_Q, qitem);
    }
    erts_mtx_unlock(&update_table_permission_mtx);
}
static Eterm
notify_when_unloaded(Process *p, Eterm name_term, char *name,
                     ErtsProcLocks plocks, Uint flag)
{
    Eterm r = NIL;
    Eterm immediate_tag = NIL;
    Eterm immediate_type = NIL;
    erts_driver_t *drv;

    ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & plocks);
    lock_drv_list();
    if ((drv = lookup_driver(name)) == NULL) {
        immediate_tag = am_unloaded;
        immediate_type = am_DOWN;
        goto immediate;
    }
    if (drv->handle == NULL || drv->handle->status == ERL_DE_PERMANENT) {
        immediate_tag = am_permanent;
        immediate_type = am_UP;
        goto immediate;
    }

    p->flags |= F_USING_DDLL;
    r = add_monitor(p, drv->handle, flag);
    unlock_drv_list();
    BIF_RET(r);
 immediate:
    r = erts_make_ref(p);
    erts_proc_unlock(p, plocks);
    notify_proc(p, r, name_term, immediate_type, immediate_tag, 0);
    unlock_drv_list();
    erts_proc_lock(p, plocks);
    BIF_RET(r);
}
Sint
erts_complete_off_heap_message_queue_change(Process *c_p)
{
    int reds = 1;

    ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p));
    ASSERT(c_p->flags & F_OFF_HEAP_MSGQ_CHNG);
    ASSERT(erts_atomic32_read_nob(&c_p->state) & ERTS_PSFLG_OFF_HEAP_MSGQ);

    /*
     * This job was first initiated when the process changed to off-heap
     * message queue management. Since then ERTS_PSFLG_OFF_HEAP_MSGQ
     * has been set. However, the management state might have been changed
     * again (multiple times) since then. Check the user's last requested
     * state (the flags F_OFF_HEAP_MSGQ and F_ON_HEAP_MSGQ), and make the
     * state consistent with that.
     */

    if (!(c_p->flags & F_OFF_HEAP_MSGQ))
        erts_atomic32_read_band_nob(&c_p->state, ~ERTS_PSFLG_OFF_HEAP_MSGQ);
    else {
        reds += 2;
        erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ);
        ERTS_MSGQ_MV_INQ2PRIVQ(c_p);
        erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ);
        reds += erts_move_messages_off_heap(c_p);
    }
    c_p->flags &= ~F_OFF_HEAP_MSGQ_CHNG;
    return reds;
}
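/*
 * Illustrative sketch (not part of the original source): the two layers of
 * state the function above reconciles. The process flags F_OFF_HEAP_MSGQ /
 * F_ON_HEAP_MSGQ record what the user last requested via
 * process_flag(message_queue_data, ...), while ERTS_PSFLG_OFF_HEAP_MSGQ in
 * the atomic state word records what is currently in effect; the change
 * job always completes against the latest request, which may have flipped
 * several times since the job was scheduled.
 */
static ERTS_INLINE int
off_heap_msgq_requested(Process *c_p)
{
    return !!(c_p->flags & F_OFF_HEAP_MSGQ);
}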
void
erts_ddll_remove_monitor(Process *p, Eterm ref, ErtsProcLocks plocks)
{
    erts_driver_t *drv;
    erts_proc_unlock(p, plocks);
    lock_drv_list();
    drv = driver_list;
    while (drv != NULL) {
        if (drv->handle != NULL && drv->handle->status != ERL_DE_PERMANENT) {
            DE_ProcEntry **pe = &(drv->handle->procs);
            while ((*pe) != NULL) {
                if ((*pe)->proc == p &&
                    ((*pe)->awaiting_status == ERL_DE_PROC_AWAIT_LOAD ||
                     (*pe)->awaiting_status == ERL_DE_PROC_AWAIT_UNLOAD ||
                     (*pe)->awaiting_status ==
                     ERL_DE_PROC_AWAIT_UNLOAD_ONLY) &&
                    eq(make_internal_ref(&((*pe)->heap)), ref)) {
                    DE_ProcEntry *r = *pe;
                    *pe = r->next;
                    erts_free(ERTS_ALC_T_DDLL_PROCESS, (void *) r);
                    goto done;
                }
                pe = &((*pe)->next);
            }
        }
        drv = drv->next;
    }
 done:
    unlock_drv_list();
    erts_proc_lock(p, plocks);
}
void
erts_queue_dist_message(Process *rcvr,
                        ErtsProcLocks rcvr_locks,
                        ErtsDistExternal *dist_ext,
                        Eterm token,
                        Eterm from)
{
    ErtsMessage* mp;
    erts_aint_t state;

    ERTS_LC_ASSERT(rcvr_locks == erts_proc_lc_my_proc_locks(rcvr));

    mp = erts_alloc_message(0, NULL);
    mp->data.dist_ext = dist_ext;

    ERL_MESSAGE_FROM(mp) = dist_ext->dep->sysname;
    ERL_MESSAGE_TERM(mp) = THE_NON_VALUE;
#ifdef USE_VM_PROBES
    ERL_MESSAGE_DT_UTAG(mp) = NIL;
    if (token == am_have_dt_utag)
        ERL_MESSAGE_TOKEN(mp) = NIL;
    else
#endif
        ERL_MESSAGE_TOKEN(mp) = token;

    if (!(rcvr_locks & ERTS_PROC_LOCK_MSGQ)) {
        if (erts_proc_trylock(rcvr, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
            ErtsProcLocks need_locks = ERTS_PROC_LOCK_MSGQ;
            ErtsProcLocks unlocks =
                rcvr_locks & ERTS_PROC_LOCKS_HIGHER_THAN(ERTS_PROC_LOCK_MSGQ);
            if (unlocks) {
                erts_proc_unlock(rcvr, unlocks);
                need_locks |= unlocks;
            }
            erts_proc_lock(rcvr, need_locks);
        }
    }

    state = erts_atomic32_read_acqb(&rcvr->state);
    if (state & ERTS_PSFLG_EXITING) {
        if (!(rcvr_locks & ERTS_PROC_LOCK_MSGQ))
            erts_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);
        /* Drop message if receiver is exiting or has a pending exit ... */
        erts_cleanup_messages(mp);
    }
    else {
        LINK_MESSAGE(rcvr, mp);

        if (rcvr_locks & ERTS_PROC_LOCK_MAIN)
            erts_proc_sig_fetch(rcvr);

        if (!(rcvr_locks & ERTS_PROC_LOCK_MSGQ))
            erts_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);

        erts_proc_notify_new_message(rcvr, rcvr_locks);
    }
}
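/*
 * Hedged sketch (helper not present in the original source): the
 * lock-order-preserving acquisition of ERTS_PROC_LOCK_MSGQ used by
 * erts_queue_dist_message() above. If the trylock fails, any held locks
 * ordered after MSGQ must be released and re-acquired together with MSGQ;
 * taking MSGQ directly while holding them would violate the lock order
 * and risk deadlock.
 */
static ERTS_INLINE void
lock_msgq_preserving_order(Process *rcvr, ErtsProcLocks held_locks)
{
    if (erts_proc_trylock(rcvr, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
        ErtsProcLocks need_locks = ERTS_PROC_LOCK_MSGQ;
        ErtsProcLocks unlocks =
            held_locks & ERTS_PROC_LOCKS_HIGHER_THAN(ERTS_PROC_LOCK_MSGQ);
        if (unlocks) {
            erts_proc_unlock(rcvr, unlocks);
            need_locks |= unlocks;
        }
        erts_proc_lock(rcvr, need_locks);
    }
}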
Uint
erts_process_memory(Process *p, int include_sigs_in_transit)
{
    Uint size = 0;
    struct saved_calls *scb;

    size += sizeof(Process);

    if ((erts_atomic32_read_nob(&p->state) & ERTS_PSFLG_EXITING) == 0) {
        erts_link_tree_foreach(ERTS_P_LINKS(p), link_size, (void *) &size);
        erts_monitor_tree_foreach(ERTS_P_MONITORS(p), monitor_size,
                                  (void *) &size);
        erts_monitor_list_foreach(ERTS_P_LT_MONITORS(p), monitor_size,
                                  (void *) &size);
    }

    size += (p->heap_sz + p->mbuf_sz) * sizeof(Eterm);
    if (p->abandoned_heap)
        size += (p->hend - p->heap) * sizeof(Eterm);
    if (p->old_hend && p->old_heap)
        size += (p->old_hend - p->old_heap) * sizeof(Eterm);

    if (!include_sigs_in_transit) {
        /*
         * Size of message queue!
         *
         * Note that this assumes that any part of the message queue
         * located in the middle queue has been moved into the inner
         * queue prior to this call. process_info() management ensures
         * this is done.
         */
        ErtsMessage *mp;
        for (mp = p->sig_qs.first; mp; mp = mp->next) {
            ASSERT(ERTS_SIG_IS_MSG((ErtsSignal *) mp));
            size += sizeof(ErtsMessage);
            if (mp->data.attached)
                size += erts_msg_attached_data_size(mp) * sizeof(Eterm);
        }
    }
    else {
        /*
         * Size of message queue plus size of all signals
         * in transit to the process!
         */
        erts_proc_lock(p, ERTS_PROC_LOCK_MSGQ);
        erts_proc_sig_fetch(p);
        erts_proc_unlock(p, ERTS_PROC_LOCK_MSGQ);

        ERTS_FOREACH_SIG_PRIVQS(
            p, mp,
            {
                size += sizeof(ErtsMessage);
                if (ERTS_SIG_IS_NON_MSG((ErtsSignal *) mp))
                    size += erts_proc_sig_signal_size((ErtsSignal *) mp);
                else if (mp->data.attached)
                    size += erts_msg_attached_data_size(mp) * sizeof(Eterm);
            });
    }

    /* The tail of this function was cut off in the excerpt; scb is
     * declared above, so the saved-calls accounting and the return below
     * are an assumed reconstruction. */
    scb = ERTS_PROC_GET_SAVED_CALLS_BUF(p);
    if (scb) {
        size += sizeof(struct saved_calls)
            + (scb->len - 1) * sizeof(scb->ct[0]);
    }

    return size;
}
/*
 * Utilities
 */
static Eterm
notify_when_loaded(Process *p, Eterm name_term, char *name,
                   ErtsProcLocks plocks)
{
    Eterm r = NIL;
    Eterm immediate_tag = NIL;
    Eterm immediate_type = NIL;
    erts_driver_t *drv;

    ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & plocks);
    lock_drv_list();
    if ((drv = lookup_driver(name)) == NULL) {
        immediate_tag = am_unloaded;
        immediate_type = am_DOWN;
        goto immediate;
    }
    if (drv->handle == NULL || drv->handle->status == ERL_DE_PERMANENT) {
        immediate_tag = am_permanent;
        immediate_type = am_UP;
        goto immediate;
    }

    switch (drv->handle->status) {
    case ERL_DE_OK:
        immediate_tag = am_loaded;
        immediate_type = am_UP;
        goto immediate;
    case ERL_DE_UNLOAD:
    case ERL_DE_FORCE_UNLOAD:
        immediate_tag = am_load_cancelled;
        immediate_type = am_DOWN;
        goto immediate;
    case ERL_DE_RELOAD:
    case ERL_DE_FORCE_RELOAD:
        break;
    default:
        erts_exit(ERTS_ERROR_EXIT,
                  "Internal error, unknown state %u in dynamic driver.",
                  drv->handle->status);
    }
    p->flags |= F_USING_DDLL;
    r = add_monitor(p, drv->handle, ERL_DE_PROC_AWAIT_LOAD);
    unlock_drv_list();
    BIF_RET(r);
 immediate:
    r = erts_make_ref(p);
    erts_proc_unlock(p, plocks);
    notify_proc(p, r, name_term, immediate_type, immediate_tag, 0);
    unlock_drv_list();
    erts_proc_lock(p, plocks);
    BIF_RET(r);
}
static void
thr_prg_wake_up_later(void* bin_p)
{
    Binary* bin = bin_p;
    ErtsFlxCtrWakeUpLaterInfo* info = ERTS_MAGIC_BIN_DATA(bin);
    Process* p = info->process;
    /* Resume the requesting process */
    erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
    if (!ERTS_PROC_IS_EXITING(p)) {
        erts_resume(p, ERTS_PROC_LOCK_STATUS);
    }
    erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
    /* Free data */
    erts_proc_dec_refc(p);
    erts_bin_release(bin);
}
static void
thr_prg_wake_up_and_count(void* bin_p)
{
    Binary* bin = bin_p;
    DecentralizedReadSnapshotInfo* info = ERTS_MAGIC_BIN_DATA(bin);
    Process* p = info->process;
    ErtsFlxCtrDecentralizedCtrArray* array = info->array;
    ErtsFlxCtrDecentralizedCtrArray* next = info->next_array;
    int i, sched;
    /* Reset result array */
    for (i = 0; i < info->nr_of_counters; i++) {
        info->result[i] = 0;
    }
    /* Read result from snapshot */
    for (sched = 0; sched < ERTS_FLXCTR_DECENTRALIZED_NO_SLOTS; sched++) {
        for (i = 0; i < info->nr_of_counters; i++) {
            info->result[i] = info->result[i] +
                erts_atomic_read_nob(&array->array[sched].counters[i]);
        }
    }
    /* Update the next decentralized counter array */
    for (i = 0; i < info->nr_of_counters; i++) {
        erts_atomic_add_nob(&next->array[0].counters[i], info->result[i]);
    }
    /* Announce that the snapshot is done */
    {
        Sint expected = ERTS_FLXCTR_SNAPSHOT_ONGOING;
        if (expected != erts_atomic_cmpxchg_mb(&next->snapshot_status,
                                               ERTS_FLXCTR_SNAPSHOT_NOT_ONGOING,
                                               expected)) {
            /* The CAS failed, which means that this thread needs to free
               the next array. */
            erts_free(info->alloc_type, next->block_start);
        }
    }
    /* Resume the process that requested the snapshot */
    erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
    if (!ERTS_PROC_IS_EXITING(p)) {
        erts_resume(p, ERTS_PROC_LOCK_STATUS);
    }
    /* Free the memory that is no longer needed */
    erts_free(info->alloc_type, array->block_start);
    erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
    erts_proc_dec_refc(p);
    erts_bin_release(bin);
}
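/*
 * Hedged sketch (helper not present in the original source): the
 * resume-if-alive pattern shared by release_update_permission(),
 * thr_prg_wake_up_later() and thr_prg_wake_up_and_count() above. The
 * status lock must be held while checking for a racing exit, and the
 * caller is expected to hold a reference on `p` (taken with
 * erts_proc_inc_refc() when the wake-up was scheduled) that it drops
 * afterwards with erts_proc_dec_refc().
 */
static ERTS_INLINE void
resume_process_if_alive(Process *p)
{
    erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
    if (!ERTS_PROC_IS_EXITING(p))
        erts_resume(p, ERTS_PROC_LOCK_STATUS);
    erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
}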
void
erts_queue_dist_message(Process *rcvr,
                        ErtsProcLocks rcvr_locks,
                        ErtsDistExternal *dist_ext,
                        Eterm token,
                        Eterm from)
{
    ErtsMessage* mp;
#ifdef USE_VM_PROBES
    Sint tok_label = 0;
    Sint tok_lastcnt = 0;
    Sint tok_serial = 0;
#endif
    erts_aint_t state;

    ERTS_LC_ASSERT(rcvr_locks == erts_proc_lc_my_proc_locks(rcvr));

    mp = erts_alloc_message(0, NULL);
    mp->data.dist_ext = dist_ext;

    ERL_MESSAGE_TERM(mp) = THE_NON_VALUE;
#ifdef USE_VM_PROBES
    ERL_MESSAGE_DT_UTAG(mp) = NIL;
    if (token == am_have_dt_utag)
        ERL_MESSAGE_TOKEN(mp) = NIL;
    else
#endif
        ERL_MESSAGE_TOKEN(mp) = token;

    if (!(rcvr_locks & ERTS_PROC_LOCK_MSGQ)) {
        if (erts_proc_trylock(rcvr, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
            ErtsProcLocks need_locks = ERTS_PROC_LOCK_MSGQ;
            ErtsProcLocks unlocks =
                rcvr_locks & ERTS_PROC_LOCKS_HIGHER_THAN(ERTS_PROC_LOCK_MSGQ);
            if (unlocks) {
                erts_proc_unlock(rcvr, unlocks);
                need_locks |= unlocks;
            }
            erts_proc_lock(rcvr, need_locks);
        }
    }

    state = erts_atomic32_read_acqb(&rcvr->state);
    if (state & (ERTS_PSFLG_PENDING_EXIT|ERTS_PSFLG_EXITING)) {
        if (!(rcvr_locks & ERTS_PROC_LOCK_MSGQ))
            erts_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);
        /* Drop message if receiver is exiting or has a pending exit ... */
        erts_cleanup_messages(mp);
    }
    else if (IS_TRACED_FL(rcvr, F_TRACE_RECEIVE)) {
        if (from == am_Empty)
            from = dist_ext->dep->sysname;

        /* Ahh... need to decode it in order to trace it... */
        if (!(rcvr_locks & ERTS_PROC_LOCK_MSGQ))
            erts_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);
        if (!erts_decode_dist_message(rcvr, rcvr_locks, mp, 0))
            erts_free_message(mp);
        else {
            Eterm msg = ERL_MESSAGE_TERM(mp);
            token = ERL_MESSAGE_TOKEN(mp);
#ifdef USE_VM_PROBES
            if (DTRACE_ENABLED(message_queued)) {
                DTRACE_CHARBUF(receiver_name, DTRACE_TERM_BUF_SIZE);

                dtrace_proc_str(rcvr, receiver_name);
                if (have_seqtrace(token)) {
                    tok_label = signed_val(SEQ_TRACE_T_LABEL(token));
                    tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(token));
                    tok_serial = signed_val(SEQ_TRACE_T_SERIAL(token));
                }
                DTRACE6(message_queued,
                        receiver_name, size_object(msg), rcvr->msg.len,
                        tok_label, tok_lastcnt, tok_serial);
            }
#endif
            erts_queue_message(rcvr, rcvr_locks, mp, msg, from);
        }
    }
    else {
        /* Enqueue message on external format */
#ifdef USE_VM_PROBES
        if (DTRACE_ENABLED(message_queued)) {
            DTRACE_CHARBUF(receiver_name, DTRACE_TERM_BUF_SIZE);

            dtrace_proc_str(rcvr, receiver_name);
            if (have_seqtrace(token)) {
                tok_label = signed_val(SEQ_TRACE_T_LABEL(token));
                tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(token));
                tok_serial = signed_val(SEQ_TRACE_T_SERIAL(token));
            }
            /*
             * TODO: We don't know the real size of the external message here.
             * -1 will appear to a D script as 4294967295.
             */
            DTRACE6(message_queued, receiver_name, -1, rcvr->msg.len + 1,
                    tok_label, tok_lastcnt, tok_serial);
        }
#endif

        LINK_MESSAGE(rcvr, mp, &mp->next, 1);

        if (!(rcvr_locks & ERTS_PROC_LOCK_MSGQ))
            erts_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);

        erts_proc_notify_new_message(rcvr, rcvr_locks);
    }
}
/*
 * Called from erl_process.c.
 */
void
erts_ddll_proc_dead(Process *p, ErtsProcLocks plocks)
{
    erts_driver_t *drv;
    erts_proc_unlock(p, plocks);
    lock_drv_list();
    drv = driver_list;
    while (drv != NULL) {
        if (drv->handle != NULL && drv->handle->status != ERL_DE_PERMANENT) {
            DE_ProcEntry **pe = &(drv->handle->procs);
            int kill_ports = (drv->handle->flags & ERL_DE_FL_KILL_PORTS);
            int left = 0;
            while ((*pe) != NULL) {
                if ((*pe)->proc == p) {
                    DE_ProcEntry *r = *pe;
                    *pe = r->next;
                    if (!(r->flags & ERL_DE_FL_DEREFERENCED) &&
                        r->awaiting_status == ERL_DE_PROC_LOADED) {
                        erts_ddll_dereference_driver(drv->handle);
                    }
                    erts_free(ERTS_ALC_T_DDLL_PROCESS, (void *) r);
                } else {
                    if ((*pe)->awaiting_status == ERL_DE_PROC_LOADED) {
                        ++left;
                    }
                    pe = &((*pe)->next);
                }
            }
            if (!left) {
                DE_Handle *dh = drv->handle;
                if (dh->status == ERL_DE_RELOAD ||
                    dh->status == ERL_DE_FORCE_RELOAD) {
                    notify_all(dh, drv->name,
                               ERL_DE_PROC_AWAIT_LOAD, am_DOWN,
                               am_load_cancelled);
                    erts_free(ERTS_ALC_T_DDLL_HANDLE, dh->reload_full_path);
                    erts_free(ERTS_ALC_T_DDLL_HANDLE, dh->reload_driver_name);
                    dh->reload_full_path = dh->reload_driver_name = NULL;
                    dh->reload_flags = 0;
                }
                dh->status = ERL_DE_UNLOAD;
            }
            if (!left
                && erts_atomic32_read_nob(&drv->handle->port_count) > 0) {
                if (kill_ports) {
                    DE_Handle *dh = drv->handle;
                    erts_ddll_reference_driver(dh);
                    dh->status = ERL_DE_FORCE_UNLOAD;
                    unlock_drv_list();
                    kill_ports_driver_unloaded(dh);
                    lock_drv_list(); /* Needed for future list operations */
                    drv = drv->next; /* before allowing destruction */
                    erts_ddll_dereference_driver(dh);
                } else {
                    drv = drv->next;
                }
            } else {
                drv = drv->next;
            }
        } else {
            drv = drv->next;
        }
    }
    unlock_drv_list();
    erts_proc_lock(p, plocks);
}
/* Add messages last in message queue */
static void
queue_messages(Process* receiver,
               ErtsProcLocks receiver_locks,
               ErtsMessage* first,
               ErtsMessage** last,
               Uint len)
{
    int locked_msgq = 0;
    erts_aint32_t state;

#ifdef DEBUG
    {
        ErtsMessage* fmsg = ERTS_SIG_IS_MSG(first) ? first : first->next;
        ASSERT(fmsg);
        ASSERT(is_value(ERL_MESSAGE_TERM(fmsg)));
        ASSERT(is_value(ERL_MESSAGE_FROM(fmsg)));
        ASSERT(ERL_MESSAGE_TOKEN(fmsg) == am_undefined ||
               ERL_MESSAGE_TOKEN(fmsg) == NIL ||
               is_tuple(ERL_MESSAGE_TOKEN(fmsg)));
    }
#endif

    ERTS_LC_ASSERT((erts_proc_lc_my_proc_locks(receiver) & ERTS_PROC_LOCK_MSGQ)
                   == (receiver_locks & ERTS_PROC_LOCK_MSGQ));

    if (!(receiver_locks & ERTS_PROC_LOCK_MSGQ)) {
        erts_proc_lock(receiver, ERTS_PROC_LOCK_MSGQ);
        locked_msgq = 1;
    }

    state = erts_atomic32_read_nob(&receiver->state);

    if (state & ERTS_PSFLG_EXITING) {
        /* Drop message if receiver is exiting or has a pending exit... */
        if (locked_msgq)
            erts_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);
        if (ERTS_SIG_IS_NON_MSG(first)) {
            ErtsSchedulerData* esdp = erts_get_scheduler_data();
            ASSERT(esdp);
            ASSERT(!esdp->pending_signal.sig);
            esdp->pending_signal.sig = (ErtsSignal*) first;
            esdp->pending_signal.to = receiver->common.id;
            first = first->next;
        }
        erts_cleanup_messages(first);
        return;
    }

    if (last == &first->next) {
        ASSERT(len == 1);
        LINK_MESSAGE(receiver, first);
    }
    else {
        erts_enqueue_signals(receiver, first, last, NULL, len, state);
    }

    if (receiver_locks & ERTS_PROC_LOCK_MAIN)
        erts_proc_sig_fetch(receiver);

    if (locked_msgq) {
        erts_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);
    }

    if (last == &first->next)
        erts_proc_notify_new_message(receiver, receiver_locks);
    else
        erts_proc_notify_new_sig(receiver, state, ERTS_PSFLG_ACTIVE);
}
/*
 * You have to have loaded the driver, and the pid state must be LOADED or
 * AWAIT_LOAD. You will be removed from the list regardless of driver state.
 *
 * If the driver is loaded by someone else too, the return is
 * {ok, pending_process}.
 * If the driver is loaded but locked by a port, the return is
 * {ok, pending_driver}.
 * If the driver is loaded and free to unload (you're the last one holding
 * it), the return is {ok, unloaded}.
 * If it's not loaded, or not loaded by you, the return is
 * {error, not_loaded} or {error, not_loaded_by_you}.
 *
 * Internally, if it's in state UNLOADING, just return {ok, pending_driver}
 * and remove/decrement this pid (which should be a LOADED-tagged one).
 * If the state is RELOADING, this pid should be in the list as LOADED
 * tagged; only AWAIT_LOAD would be possible but is not allowed for
 * unloading, so remove it and, if it was the last LOADED-tagged entry,
 * change from RELOAD to UNLOAD and notify any AWAIT_LOAD waiters with
 * {'DOWN', ref(), driver, name(), load_cancelled}. If the driver made
 * itself permanent, {'UP', ref(), driver, name(), permanent}.
 */
Eterm
erl_ddll_try_unload_2(BIF_ALIST_2)
{
    Eterm name_term = BIF_ARG_1;
    Eterm options = BIF_ARG_2;
    char *name = NULL;
    Eterm ok_term = NIL;
    Eterm soft_error_term = NIL;
    erts_driver_t *drv;
    DE_Handle *dh;
    DE_ProcEntry *pe;
    Eterm *hp;
    Eterm t;
    int monitor = 0;
    Eterm l;
    int kill_ports = 0;

    erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);

    for (l = options; is_list(l); l = CDR(list_val(l))) {
        Eterm opt = CAR(list_val(l));
        Eterm *tp;
        if (is_not_tuple(opt)) {
            if (opt == am_kill_ports) {
                kill_ports = 1;
                continue;
            } else {
                goto error;
            }
        }
        tp = tuple_val(opt);
        if (*tp != make_arityval(2) || tp[1] != am_monitor) {
            goto error;
        }
        if (tp[2] == am_pending_driver) {
            monitor = 1;
        } else if (tp[2] == am_pending) {
            monitor = 2;
        } else {
            goto error;
        }
    }
    if (is_not_nil(l)) {
        goto error;
    }

    if ((name = pick_list_or_atom(name_term)) == NULL) {
        goto error;
    }

    lock_drv_list();

    if ((drv = lookup_driver(name)) == NULL) {
        soft_error_term = am_not_loaded;
        goto soft_error;
    }

    if (drv->handle == NULL) {
        soft_error_term = am_linked_in_driver;
        goto soft_error;
    } else if (drv->handle->status == ERL_DE_PERMANENT) {
        soft_error_term = am_permanent;
        goto soft_error;
    }
    dh = drv->handle;
    if (dh->flags & ERL_DE_FL_KILL_PORTS) {
        kill_ports = 1;
    }
    if ((pe = find_proc_entry(dh, BIF_P, ERL_DE_PROC_LOADED)) == NULL) {
        if (num_procs(dh, ERL_DE_PROC_LOADED) > 0) {
            soft_error_term = am_not_loaded_by_this_process;
            goto soft_error;
        }
    } else {
        remove_proc_entry(dh, pe);
        if (!(pe->flags & ERL_DE_FL_DEREFERENCED)) {
            erts_ddll_dereference_driver(dh);
        }
        erts_free(ERTS_ALC_T_DDLL_PROCESS, pe);
    }
    if (num_procs(dh, ERL_DE_PROC_LOADED) > 0) {
        ok_term = am_pending_process;
        --monitor;
        goto done;
    }
    if (dh->status == ERL_DE_RELOAD ||
        dh->status == ERL_DE_FORCE_RELOAD) {
        notify_all(dh, drv->name,
                   ERL_DE_PROC_AWAIT_LOAD, am_DOWN, am_load_cancelled);
        erts_free(ERTS_ALC_T_DDLL_HANDLE, dh->reload_full_path);
        erts_free(ERTS_ALC_T_DDLL_HANDLE, dh->reload_driver_name);
        dh->reload_full_path = dh->reload_driver_name = NULL;
        dh->reload_flags = 0;
    }
    if (erts_atomic32_read_nob(&dh->port_count) > 0) {
        ++kill_ports;
    }
    dh->status = ERL_DE_UNLOAD;
    ok_term = am_pending_driver;

 done:
    assert_drv_list_rwlocked();
    if (kill_ports > 1) {
        /* Avoid closing the driver by referencing it */
        erts_ddll_reference_driver(dh);
        dh->status = ERL_DE_FORCE_UNLOAD;
        unlock_drv_list();
        kill_ports_driver_unloaded(dh);
        lock_drv_list();
        erts_ddll_dereference_driver(dh);
    }

    erts_ddll_reference_driver(dh);
    unlock_drv_list();
    erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
    lock_drv_list();
    erts_ddll_dereference_driver(dh);

    erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
    BIF_P->flags |= F_USING_DDLL;
    if (monitor > 0) {
        Eterm mref = add_monitor(BIF_P, dh, ERL_DE_PROC_AWAIT_UNLOAD);
        hp = HAlloc(BIF_P, 4);
        t = TUPLE3(hp, am_ok, ok_term, mref);
    } else {
        hp = HAlloc(BIF_P, 3);
        t = TUPLE2(hp, am_ok, ok_term);
    }
    if (kill_ports > 1) {
        ERTS_BIF_CHK_EXITED(BIF_P); /* May be exited by port killing */
    }
    unlock_drv_list();
    BIF_RET(t);

 soft_error:
    unlock_drv_list();
    erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
    erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
    hp = HAlloc(BIF_P, 3);
    t = TUPLE2(hp, am_error, soft_error_term);
    BIF_RET(t);

 error: /* No lock fiddling before going here */
    assert_drv_list_not_locked();
    if (name != NULL) {
        erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
    }
    erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
    BIF_ERROR(BIF_P, BADARG);
}
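/*
 * Hedged sketch (helper not present in the original source): the lock
 * juggling sequence at the tail of erl_ddll_try_unload_2() above (and of
 * erl_ddll_try_load_3() below). The main process lock may not be taken
 * while the driver list lock is held, so the handle is referenced to keep
 * it alive across the gap, the list lock is dropped, the main lock taken,
 * and the list lock re-acquired before the reference is given up again.
 */
static void
relock_main_keeping_driver(Process *c_p, DE_Handle *dh)
{
    erts_ddll_reference_driver(dh);
    unlock_drv_list();
    erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
    lock_drv_list();
    erts_ddll_dereference_driver(dh);
}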
/*
 * Try to load. If the driver is OK, add as LOADED. If the driver is
 * UNLOAD, possibly change to reload and add as LOADED; there should be
 * no other LOADED-tagged pids. If the driver is RELOAD then add/increment
 * as LOADED (there should be some LOADED pid). If the driver is not
 * present, really load and add as LOADED.
 * Returns {ok,loaded}, {ok,pending_driver},
 * {error,permanent} or {error,load_error()}.
 */
BIF_RETTYPE
erl_ddll_try_load_3(BIF_ALIST_3)
{
    Eterm path_term = BIF_ARG_1;
    Eterm name_term = BIF_ARG_2;
    Eterm options = BIF_ARG_3;
    char *path = NULL;
    Sint path_len;
    char *name = NULL;
    DE_Handle *dh;
    erts_driver_t *drv;
    int res;
    Eterm soft_error_term = NIL;
    Eterm ok_term = NIL;
    Eterm *hp;
    Eterm t;
    int monitor = 0;
    int reload = 0;
    Eterm l;
    Uint flags = 0;
    int kill_ports = 0;
    int do_build_load_error = 0;
    int build_this_load_error = 0;
    int encoding;

    for (l = options; is_list(l); l = CDR(list_val(l))) {
        Eterm opt = CAR(list_val(l));
        Eterm *tp;
        if (is_not_tuple(opt)) {
            goto error;
        }
        tp = tuple_val(opt);
        if (*tp != make_arityval(2) || is_not_atom(tp[1])) {
            goto error;
        }
        switch (tp[1]) {
        case am_driver_options:
        {
            Eterm ll;
            for (ll = tp[2]; is_list(ll); ll = CDR(list_val(ll))) {
                Eterm dopt = CAR(list_val(ll));
                if (dopt == am_kill_ports) {
                    flags |= ERL_DE_FL_KILL_PORTS;
                } else {
                    goto error;
                }
            }
            if (is_not_nil(ll)) {
                goto error;
            }
        }
        break;
        case am_monitor:
            if (tp[2] == am_pending_driver) {
                monitor = 1;
            } else if (tp[2] == am_pending) {
                monitor = 2;
            } else {
                goto error;
            }
            break;
        case am_reload:
            if (tp[2] == am_pending_driver) {
                reload = 1;
            } else if (tp[2] == am_pending) {
                reload = 2;
            } else {
                goto error;
            }
            break;
        default:
            goto error;
        }
    }
    if (is_not_nil(l)) {
        goto error;
    }

    if ((name = pick_list_or_atom(name_term)) == NULL) {
        goto error;
    }

    encoding = erts_get_native_filename_encoding();
    if (encoding == ERL_FILENAME_WIN_WCHAR) {
        /* Do not convert the lib name to utf-16le yet, do that in win32
           specific code, since lib_name is used in error messages. */
        encoding = ERL_FILENAME_UTF8;
    }
    path = erts_convert_filename_to_encoding(
        path_term, NULL, 0, ERTS_ALC_T_DDLL_TMP_BUF, 1, 0, encoding,
        &path_len, sys_strlen(name) + 2); /* might need path separator */
    if (!path) {
        goto error;
    }
    ASSERT(path_len > 0 && path[path_len-1] == 0);
    while (--path_len > 0 && (path[path_len-1] == '\\' ||
                              path[path_len-1] == '/'))
        ;
    path[path_len++] = '/';
    sys_strcpy(path+path_len, name);

    erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
    lock_drv_list();
    if ((drv = lookup_driver(name)) != NULL) {
        if (drv->handle == NULL) {
            /* static_driver */
            soft_error_term = am_linked_in_driver;
            goto soft_error;
        } else {
            dh = drv->handle;
            if (dh->status == ERL_DE_OK) {
                int is_last = is_last_user(dh, BIF_P);
                if (reload == 1 && !is_last) {
                    /* Want reload if no other users, but there are
                       others... */
                    soft_error_term = am_pending_process;
                    goto soft_error;
                }
                if (reload != 0) {
                    DE_ProcEntry *old;
                    if ((dh->flags & ERL_FL_CONSISTENT_MASK) !=
                        (flags & ERL_FL_CONSISTENT_MASK)) {
                        soft_error_term = am_inconsistent;
                        goto soft_error;
                    }
                    if ((old = find_proc_entry(dh, BIF_P,
                                               ERL_DE_PROC_LOADED)) == NULL) {
                        soft_error_term = am_not_loaded_by_this_process;
                        goto soft_error;
                    } else {
                        remove_proc_entry(dh, old);
                        erts_ddll_dereference_driver(dh);
                        erts_free(ERTS_ALC_T_DDLL_PROCESS, old);
                    }
                    /* Reload requested and granted */
                    dereference_all_processes(dh);
                    set_driver_reloading(dh, BIF_P, path, name, flags);
                    if (dh->flags & ERL_DE_FL_KILL_PORTS) {
                        kill_ports = 1;
                    }
                    ok_term = (reload == 1) ? am_pending_driver :
                        am_pending_process;
                } else {
                    /* Already loaded and healthy (might be by me) */
                    if (sys_strcmp(dh->full_path, path) ||
                        (dh->flags & ERL_FL_CONSISTENT_MASK) !=
                        (flags & ERL_FL_CONSISTENT_MASK)) {
                        soft_error_term = am_inconsistent;
                        goto soft_error;
                    }
                    add_proc_loaded(dh, BIF_P);
                    erts_ddll_reference_driver(dh);
                    monitor = 0;
                    ok_term = mkatom("already_loaded");
                }
            } else if (dh->status == ERL_DE_UNLOAD ||
                       dh->status == ERL_DE_FORCE_UNLOAD) {
                /* pending driver */
                if (reload != 0) {
                    soft_error_term = am_not_loaded_by_this_process;
                    goto soft_error;
                }
                if (sys_strcmp(dh->full_path, path) ||
                    (dh->flags & ERL_FL_CONSISTENT_MASK) !=
                    (flags & ERL_FL_CONSISTENT_MASK)) {
                    soft_error_term = am_inconsistent;
                    goto soft_error;
                }
                dh->status = ERL_DE_OK;
                notify_all(dh, drv->name,
                           ERL_DE_PROC_AWAIT_UNLOAD, am_UP,
                           am_unload_cancelled);
                add_proc_loaded(dh, BIF_P);
                erts_ddll_reference_driver(dh);
                monitor = 0;
                ok_term = mkatom("already_loaded");
            } else if (dh->status == ERL_DE_RELOAD ||
                       dh->status == ERL_DE_FORCE_RELOAD) {
                if (reload != 0) {
                    soft_error_term = am_pending_reload;
                    goto soft_error;
                }
                if (sys_strcmp(dh->reload_full_path, path) ||
                    (dh->reload_flags & ERL_FL_CONSISTENT_MASK) !=
                    (flags & ERL_FL_CONSISTENT_MASK)) {
                    soft_error_term = am_inconsistent;
                    goto soft_error;
                }
                /* Load of granted unload... */
                /* Don't reference, will happen after reload */
                add_proc_loaded_deref(dh, BIF_P);
                ++monitor;
                ok_term = am_pending_driver;
            } else { /* ERL_DE_PERMANENT */
                soft_error_term = am_permanent;
                goto soft_error;
            }
        }
    } else { /* driver non-existing */
        if (reload != 0) {
            soft_error_term = am_not_loaded;
            goto soft_error;
        }
        if ((res = load_driver_entry(&dh, path, name)) != ERL_DE_NO_ERROR) {
            build_this_load_error = res;
            do_build_load_error = 1;
            soft_error_term = am_undefined;
            goto soft_error;
        } else {
            dh->flags = flags;
            add_proc_loaded(dh, BIF_P);
            first_ddll_reference(dh);
            monitor = 0;
            ok_term = mkatom("loaded");
        }
    }
    assert_drv_list_rwlocked();
    if (kill_ports) {
        /* Avoid closing the driver by referencing it */
        erts_ddll_reference_driver(dh);
        ASSERT(dh->status == ERL_DE_RELOAD);
        dh->status = ERL_DE_FORCE_RELOAD;
        unlock_drv_list();
        kill_ports_driver_unloaded(dh);
        /* Dereference, eventually causing driver destruction */
        lock_drv_list();
        erts_ddll_dereference_driver(dh);
    }

    erts_ddll_reference_driver(dh);
    unlock_drv_list();
    erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
    lock_drv_list();
    erts_ddll_dereference_driver(dh);

    BIF_P->flags |= F_USING_DDLL;
    if (monitor) {
        Eterm mref = add_monitor(BIF_P, dh, ERL_DE_PROC_AWAIT_LOAD);
        hp = HAlloc(BIF_P, 4);
        t = TUPLE3(hp, am_ok, ok_term, mref);
    } else {
        hp = HAlloc(BIF_P, 3);
        t = TUPLE2(hp, am_ok, ok_term);
    }
    unlock_drv_list();
    erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) path);
    erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
    ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(BIF_P));
    BIF_RET(t);

 soft_error:
    unlock_drv_list();
    erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
    if (do_build_load_error) {
        soft_error_term = build_load_error(BIF_P, build_this_load_error);
    }
    hp = HAlloc(BIF_P, 3);
    t = TUPLE2(hp, am_error, soft_error_term);
    erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) path);
    erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
    ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(BIF_P));
    BIF_RET(t);

 error:
    assert_drv_list_not_locked();
    ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(BIF_P));
    if (path != NULL) {
        erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) path);
    }
    if (name != NULL) {
        erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
    }
    BIF_ERROR(BIF_P, BADARG);
}
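/*
 * Hedged sketch (helper not present in the original source): the
 * option-list walking idiom used by both erl_ddll_try_load_3() and
 * erl_ddll_try_unload_2() above. A proper list is consumed cell by cell
 * with CAR/CDR; whatever remains after the loop must be NIL, and anything
 * else means the list was improper, which both BIFs treat as badarg.
 */
static int
is_proper_option_list(Eterm l)
{
    while (is_list(l))
        l = CDR(list_val(l));
    return is_nil(l);
}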
Process *hipe_mode_switch(Process *p, unsigned cmd, Eterm reg[])
{
    unsigned result;
    Eterm reds_in = p->def_arg_reg[5];
    /*
     * A process is in the normal case scheduled out when its reduction
     * count reaches zero. When "save calls" is enabled the reduction
     * count is lowered by CONTEXT_REDS, i.e. the initial reduction
     * count will be zero or less and the process is scheduled out
     * when -CONTEXT_REDS is reached.
     *
     * HiPE does not support the "save calls" feature, so we switch
     * to using a positive reduction counter when executing in
     * HiPE mode, but need to restore the "save calls" state when
     * returning to BEAM. We also need to hide the save-calls buffer
     * from BIFs. We do that by moving the saved-calls buf to the
     * suspended saved-calls buf.
     *
     * BEAM stores the initial reduction count in p->def_arg_reg[5].
     *
     * BEAM expects -neg_o_reds to be found in p->def_arg_reg[4]
     * on return to BEAM.
     */
    {
        struct saved_calls *scb = ERTS_PROC_SET_SAVED_CALLS_BUF(p, NULL);
        if (scb) {
            reds_in += CONTEXT_REDS;
            p->fcalls += CONTEXT_REDS;
            ERTS_PROC_SET_SUSPENDED_SAVED_CALLS_BUF(p, scb);
        }
    }

    p->flags |= F_HIPE_MODE; /* inform bifs where we are coming from... */

    p->i = NULL;
    /* Set current_function to undefined. stdlib hibernate tests rely
       on it. */
    p->current = NULL;

    DPRINTF("cmd == %#x (%s)", cmd, code_str(cmd));
    HIPE_CHECK_PCB(p);
    p->arity = 0;
    switch (cmd & 0xFF) {
    case HIPE_MODE_SWITCH_CMD_CALL: {
        /* BEAM calls a native code function */
        unsigned arity = cmd >> 8;

        /* p->hipe.u.ncallee set in beam_emu */
        if (p->cp == hipe_beam_pc_return) {
            /* Native called BEAM, which now tailcalls native. */
            hipe_pop_beam_trap_frame(p);
            result = hipe_tailcall_to_native(p, arity, reg);
            break;
        }
        DPRINTF("calling %#lx/%u", (long)p->hipe.u.ncallee, arity);
        result = hipe_call_to_native(p, arity, reg);
        break;
    }
    case HIPE_MODE_SWITCH_CMD_CALL_CLOSURE: {
        /* BEAM calls a native code closure */
        unsigned arity = cmd >> 8; /* #formals + #fvs (closure not
                                      counted) */
        Eterm fun;
        ErlFunThing *funp;

        /* drop the fvs, move the closure, correct arity */
        fun = reg[arity];
        HIPE_ASSERT(is_fun(fun));
        funp = (ErlFunThing*)fun_val(fun);
        HIPE_ASSERT(funp->num_free <= arity);
        arity -= funp->num_free; /* arity == #formals */
        reg[arity] = fun;
        ++arity; /* correct for having added the closure */
        /* HIPE_ASSERT(p->hipe.u.ncallee ==
                       (void(*)(void))funp->native_address); */

        /* just like a normal call from now on */

        /* p->hipe.u.ncallee set in beam_emu */
        if (p->cp == hipe_beam_pc_return) {
            /* Native called BEAM, which now tailcalls native. */
            hipe_pop_beam_trap_frame(p);
            result = hipe_tailcall_to_native(p, arity, reg);
            break;
        }
        DPRINTF("calling %#lx/%u", (long)p->hipe.u.ncallee, arity);
        result = hipe_call_to_native(p, arity, reg);
        break;
    }
    case HIPE_MODE_SWITCH_CMD_THROW: {
        /* BEAM just executed hipe_beam_pc_throw[] */
        /* Native called BEAM, which now throws an exception back to
           native. */
        DPRINTF("beam throws freason %#lx fvalue %#lx",
                p->freason, p->fvalue);
        hipe_pop_beam_trap_frame(p);
    do_throw_to_native:
        p->def_arg_reg[0] = exception_tag[GET_EXC_CLASS(p->freason)];
        hipe_find_handler(p);
        result = hipe_throw_to_native(p);
        break;
    }
    case HIPE_MODE_SWITCH_CMD_RETURN: {
        /* BEAM just executed hipe_beam_pc_return[] */
        /* Native called BEAM, which now returns back to native. */
        /* pop trap frame off estack */
        hipe_pop_beam_trap_frame(p);
        p->def_arg_reg[0] = reg[0];
        result = hipe_return_to_native(p);
        break;
    }
    do_resume:
    case HIPE_MODE_SWITCH_CMD_RESUME: {
        /* BEAM just executed hipe_beam_pc_resume[] */
        /* BEAM called native, which suspended. */
        if (p->flags & F_TIMO) {
            /* XXX: The process will immediately execute 'clear_timeout',
               repeating these two statements. Remove them? */
            p->flags &= ~F_TIMO;
            JOIN_MESSAGE(p);
            p->def_arg_reg[0] = 0; /* make_small(0)? */
        } else
            p->def_arg_reg[0] = 1; /* make_small(1)? */
        result = hipe_return_to_native(p);
        break;
    }
    default:
        erts_exit(ERTS_ERROR_EXIT, "hipe_mode_switch: cmd %#x\r\n", cmd);
    }
 do_return_from_native:
    DPRINTF("result == %#x (%s)", result, code_str(result));
    HIPE_CHECK_PCB(p);
    switch (result) {
    case HIPE_MODE_SWITCH_RES_RETURN: {
        hipe_return_from_native(p);
        reg[0] = p->def_arg_reg[0];
        DPRINTF("returning with r(0) == %#lx", reg[0]);
        break;
    }
    case HIPE_MODE_SWITCH_RES_THROW: {
        DPRINTF("native throws freason %#lx fvalue %#lx",
                p->freason, p->fvalue);
        hipe_throw_from_native(p);
        break;
    }
    case HIPE_MODE_SWITCH_RES_TRAP: {
        /*
         * Native code called a BIF, which "failed" with a TRAP to BEAM.
         * Prior to returning, the BIF stored (see BIF_TRAP<N>):
         * the callee's address in p->i
         * the callee's parameters in reg[0..2]
         * the callee's arity in p->arity (for BEAM gc purposes)
         *
         * We need to remove the BIF's parameters from the native
         * stack: to this end hipe_${ARCH}_glue.S stores the BIF's
         * arity in p->hipe.narity.
         *
         * If the BIF emptied the stack (typically hibernate), p->hipe.nstack
         * is NULL and there is no need to get rid of stacked parameters.
         */
        unsigned int i, is_recursive = 0;

        if (p->hipe.nstack != NULL) {
            ASSERT(p->hipe.nsp != NULL);
            is_recursive = hipe_trap_from_native_is_recursive(p);
        }
        else {
            /* Some architectures (risc) need this re-reset of nsp as the
             * BIF wrapper does not detect stack change and causes an
             * obsolete stack pointer to be saved in p->hipe.nsp before
             * return to us. */
            p->hipe.nsp = NULL;
        }

        /* Schedule next process if current process was hibernated or is
           waiting for messages */
        if (p->flags & F_HIBERNATE_SCHED) {
            p->flags &= ~F_HIBERNATE_SCHED;
            goto do_schedule;
        }

        if (!(erts_atomic32_read_acqb(&p->state) & ERTS_PSFLG_ACTIVE)) {
            for (i = 0; i < p->arity; ++i)
                p->arg_reg[i] = reg[i];
            goto do_schedule;
        }

        if (is_recursive)
            hipe_push_beam_trap_frame(p, reg, p->arity);

        result = HIPE_MODE_SWITCH_RES_CALL_BEAM;
        break;
    }
    case HIPE_MODE_SWITCH_RES_CALL_EXPORTED: {
        /* Native code calls or tailcalls BEAM.
         *
         * p->hipe.u.callee_exp is the callee's export entry
         * p->arity is the callee's arity
         * p->def_arg_reg[] contains the register parameters
         * p->hipe.nsp[] contains the stacked parameters
         */
        if (hipe_call_from_native_is_recursive(p, reg)) {
            /* BEAM called native, which now calls BEAM */
            hipe_push_beam_trap_frame(p, reg, p->arity);
        }
        break;
    }
    case HIPE_MODE_SWITCH_RES_CALL_CLOSURE: {
        /* Native code calls or tailcalls a closure in BEAM
         *
         * In native code a call to a closure of arity n looks like
         * F(A1, ..., AN, Closure),
         * The BEAM code for a closure expects to get:
         * F(A1, ..., AN, FV1, ..., FVM, Closure)
         * (Where Ai is argument i and FVj is free variable j)
         *
         * p->hipe.u.closure contains the closure
         * p->def_arg_reg[] contains the register parameters
         * p->hipe.nsp[] contains the stacked parameters
         */
        ErlFunThing *closure;
        unsigned num_free, arity, i, is_recursive;

        HIPE_ASSERT(is_fun(p->hipe.u.closure));
        closure = (ErlFunThing*)fun_val(p->hipe.u.closure);
        num_free = closure->num_free;
        arity = closure->fe->arity;

        /* Store the arity in p->arity for the stack popping. */
        /* Note: we already have the closure so we only need to move
           arity values to reg[]. However, there are arity+1 parameters
           in the native code state that need to be removed. */
        p->arity = arity+1; /* +1 for the closure */

        /* Get parameters, don't do GC just yet. */
        is_recursive = hipe_call_from_native_is_recursive(p, reg);

        if ((Sint)closure->fe->address[-1] < 0) {
            /* Unloaded. Let beam_emu.c:call_fun() deal with it. */
            result = HIPE_MODE_SWITCH_RES_CALL_CLOSURE;
        } else {
            /* The BEAM code is present. Prepare to call it. */

            /* Append the free vars after the actual parameters. */
            for (i = 0; i < num_free; ++i)
                reg[arity+i] = closure->env[i];

            /* Update arity to reflect the new parameters. */
            arity += i;

            /* Make a call to the closure's BEAM code. */
            p->i = closure->fe->address;

            /* Change result code to the faster plain CALL type. */
            result = HIPE_MODE_SWITCH_RES_CALL_BEAM;
        }
        /* Append the closure as the last parameter. Don't increment
           arity. */
        reg[arity] = p->hipe.u.closure;

        if (is_recursive) {
            /* BEAM called native, which now calls BEAM.
               Need to put a trap-frame on the beam stack.
               This may cause GC, which is safe now that
               the arguments, free vars, and most importantly
               the closure, all are in reg[]. */
            hipe_push_beam_trap_frame(p, reg, arity+1);
        }
        break;
    }
    case HIPE_MODE_SWITCH_RES_SUSPEND: {
        p->i = hipe_beam_pc_resume;
        p->arity = 0;
        goto do_schedule;
    }
    case HIPE_MODE_SWITCH_RES_WAIT:
    case HIPE_MODE_SWITCH_RES_WAIT_TIMEOUT: {
        /* same semantics, different debug trace messages */
        /* XXX: BEAM has different entries for the locked and unlocked
           cases. HiPE doesn't, so we must check dynamically. */
        if (p->flags & F_HIPE_RECV_LOCKED)
            p->flags &= ~F_HIPE_RECV_LOCKED;
        else
            erts_proc_lock(p, ERTS_PROC_LOCKS_MSG_RECEIVE);
        p->i = hipe_beam_pc_resume;
        p->arity = 0;
        if (erts_atomic32_read_nob(&p->state) & ERTS_PSFLG_EXITING)
            ASSERT(erts_atomic32_read_nob(&p->state) & ERTS_PSFLG_ACTIVE);
        else if (!(p->flags & F_HIPE_RECV_YIELD))
            erts_atomic32_read_band_relb(&p->state, ~ERTS_PSFLG_ACTIVE);
        else {
            /* Yielded from receive */
            ERTS_VBUMP_ALL_REDS(p);
            p->flags &= ~F_HIPE_RECV_YIELD;
        }
        erts_proc_unlock(p, ERTS_PROC_LOCKS_MSG_RECEIVE);
    do_schedule:
        {
            struct saved_calls *scb;

            scb = ERTS_PROC_SET_SUSPENDED_SAVED_CALLS_BUF(p, NULL);
            if (scb)
                ERTS_PROC_SET_SAVED_CALLS_BUF(p, scb);

            /* The process may have died while it was executing; if so we
               return out from native code to the interpreter. */
            if (erts_atomic32_read_nob(&p->state) & ERTS_PSFLG_EXITING)
                p->i = beam_exit;
#ifdef DEBUG
            ASSERT(p->debug_reds_in == reds_in);
#endif
            p->flags &= ~F_HIPE_MODE;

            ERTS_UNREQ_PROC_MAIN_LOCK(p);
            p = erts_schedule(NULL, p, reds_in - p->fcalls);
            ERTS_REQ_PROC_MAIN_LOCK(p);
            ASSERT(!(p->flags & F_HIPE_MODE));
            p->flags &= ~F_HIPE_RECV_LOCKED;
            reg = p->scheduler_data->x_reg_array;
        }
        {
            Eterm *argp;
            int i;

            argp = p->arg_reg;
            for (i = p->arity; --i >= 0;)
                reg[i] = argp[i];
        }
        {
            struct saved_calls *scb;

            reds_in = p->fcalls;
            p->def_arg_reg[5] = reds_in;
#ifdef DEBUG
            p->debug_reds_in = reds_in;
#endif
            if (p->i == hipe_beam_pc_resume) {
                p->flags |= F_HIPE_MODE; /* inform bifs where we are
                                            coming from... */
                scb = ERTS_PROC_SET_SAVED_CALLS_BUF(p, NULL);
                if (scb)
                    ERTS_PROC_SET_SUSPENDED_SAVED_CALLS_BUF(p, scb);
                p->i = NULL;
                p->arity = 0;
                goto do_resume;
            }

            scb = ERTS_PROC_GET_SAVED_CALLS_BUF(p);
            if (!scb)
                p->def_arg_reg[4] = 0;
            else {
                p->def_arg_reg[4] = CONTEXT_REDS;
                p->fcalls = -CONTEXT_REDS + reds_in;
            }
        }

        HIPE_CHECK_PCB(p);
        result = HIPE_MODE_SWITCH_RES_CALL_BEAM;
        p->def_arg_reg[3] = result;
        return p;
    }
    case HIPE_MODE_SWITCH_RES_APPLY: {
        Eterm mfa[3], args;
        unsigned int arity;
        void *address;

        hipe_pop_params(p, 3, &mfa[0]);

        /* Unroll the arglist onto reg[]. */
        args = mfa[2];
        arity = 0;
        while (is_list(args)) {
            if (arity < 255) {
                reg[arity++] = CAR(list_val(args));
                args = CDR(list_val(args));
            } else
                goto do_apply_fail;
        }
        if (is_not_nil(args))
            goto do_apply_fail;

        /* find a native code entry point for {M,F,A} for a remote call */
        address = hipe_get_remote_na(mfa[0], mfa[1], arity);
        if (!address)
            goto do_apply_fail;
        p->hipe.u.ncallee = (void(*)(void)) address;
        result = hipe_tailcall_to_native(p, arity, reg);
        goto do_return_from_native;
    do_apply_fail:
        p->freason = BADARG;
        goto do_throw_to_native;
    }
    default:
        erts_exit(ERTS_ERROR_EXIT, "hipe_mode_switch: result %#x\r\n",
                  result);
    }

    {
        struct saved_calls *scb =
            ERTS_PROC_SET_SUSPENDED_SAVED_CALLS_BUF(p, NULL);
        if (!scb)
            p->def_arg_reg[4] = 0;
        else {
            p->def_arg_reg[4] = CONTEXT_REDS;
            p->fcalls -= CONTEXT_REDS;
            ERTS_PROC_SET_SAVED_CALLS_BUF(p, scb);
        }
    }

    HIPE_CHECK_PCB(p);
    p->def_arg_reg[3] = result;
#if NR_ARG_REGS > 5
    /*
     * When NR_ARG_REGS > 5, we need to protect the process' input
     * reduction count (which BEAM stores in def_arg_reg[5]) from
     * being clobbered by the arch glue code.
     */
    p->def_arg_reg[5] = reds_in;
#endif
    p->flags &= ~F_HIPE_MODE;
    return p;
}
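/*
 * Hedged numeric illustration (not part of the original source) of the
 * save-calls reduction accounting described at the top of
 * hipe_mode_switch(): with CONTEXT_REDS == 4000 and "save calls" enabled,
 * a process entering at p->fcalls == -3990 has 10 reductions left, since
 * BEAM schedules it out at -CONTEXT_REDS. Shifting by CONTEXT_REDS lets
 * native code run with the positive counter it expects.
 */
static ERTS_INLINE Sint
hipe_shifted_fcalls(Sint fcalls)
{
    /* e.g. -3990 + 4000 == 10 reductions remaining, scheduled out at 0 */
    return fcalls + CONTEXT_REDS;
}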
BIF_RETTYPE erts_internal_open_port_2(BIF_ALIST_2)
{
    Port *port;
    Eterm res;
    char *str;
    int err_type, err_num;

    port = open_port(BIF_P, BIF_ARG_1, BIF_ARG_2, &err_type, &err_num);
    if (!port) {
        if (err_type == -3) {
            ASSERT(err_num == BADARG || err_num == SYSTEM_LIMIT);
            if (err_num == BADARG)
                res = am_badarg;
            else if (err_num == SYSTEM_LIMIT)
                res = am_system_limit;
            else
                /* this is only here to silence gcc, it should not happen */
                BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
        } else if (err_type == -2) {
            str = erl_errno_id(err_num);
            res = erts_atom_put((byte *) str, strlen(str),
                                ERTS_ATOM_ENC_LATIN1, 1);
        } else {
            res = am_einval;
        }
        BIF_RET(res);
    }

    if (port->drv_ptr->flags & ERL_DRV_FLAG_USE_INIT_ACK) {
        /* Copied from erl_port_task.c */
        port->async_open_port =
            erts_alloc(ERTS_ALC_T_PRTSD, sizeof(*port->async_open_port));
        erts_make_ref_in_array(port->async_open_port->ref);
        port->async_open_port->to = BIF_P->common.id;

        erts_proc_lock(BIF_P,
                       ERTS_PROC_LOCKS_MSG_RECEIVE | ERTS_PROC_LOCK_LINK);
        if (ERTS_PROC_PENDING_EXIT(BIF_P)) {
            /* need to exit caller instead */
            erts_proc_unlock(BIF_P,
                             ERTS_PROC_LOCKS_MSG_RECEIVE | ERTS_PROC_LOCK_LINK);
            KILL_CATCHES(BIF_P);
            BIF_P->freason = EXC_EXIT;
            erts_port_release(port);
            BIF_RET(am_badarg);
        }

        ERTS_MSGQ_MV_INQ2PRIVQ(BIF_P);
        BIF_P->msg.save = BIF_P->msg.last;

        erts_proc_unlock(BIF_P, ERTS_PROC_LOCKS_MSG_RECEIVE);

        res = erts_proc_store_ref(BIF_P, port->async_open_port->ref);
    } else {
        res = port->common.id;
        erts_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK);
    }

    erts_add_link(&ERTS_P_LINKS(port), LINK_PID, BIF_P->common.id);
    erts_add_link(&ERTS_P_LINKS(BIF_P), LINK_PID, port->common.id);

    if (IS_TRACED_FL(BIF_P, F_TRACE_PROCS))
        trace_proc(BIF_P, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK,
                   BIF_P, am_link, port->common.id);

    erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK);

    erts_port_release(port);

    BIF_RET(res);
}
/* Add messages last in message queue */
static Sint
queue_messages(Process* receiver,
               erts_aint32_t *receiver_state,
               ErtsProcLocks receiver_locks,
               ErtsMessage* first,
               ErtsMessage** last,
               Uint len,
               Eterm from)
{
    ErtsTracingEvent* te;
    Sint res;
    int locked_msgq = 0;
    erts_aint32_t state;

    ASSERT(is_value(ERL_MESSAGE_TERM(first)));
    ASSERT(ERL_MESSAGE_TOKEN(first) == am_undefined ||
           ERL_MESSAGE_TOKEN(first) == NIL ||
           is_tuple(ERL_MESSAGE_TOKEN(first)));

#ifdef ERTS_ENABLE_LOCK_CHECK
    ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(receiver) < ERTS_PROC_LOCK_MSGQ ||
                   receiver_locks == erts_proc_lc_my_proc_locks(receiver));
#endif

    if (!(receiver_locks & ERTS_PROC_LOCK_MSGQ)) {
        if (erts_proc_trylock(receiver, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
            ErtsProcLocks need_locks;

            if (receiver_state)
                state = *receiver_state;
            else
                state = erts_atomic32_read_nob(&receiver->state);
            if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT))
                goto exiting;

            need_locks = receiver_locks &
                ERTS_PROC_LOCKS_HIGHER_THAN(ERTS_PROC_LOCK_MSGQ);
            if (need_locks) {
                erts_proc_unlock(receiver, need_locks);
            }
            need_locks |= ERTS_PROC_LOCK_MSGQ;
            erts_proc_lock(receiver, need_locks);
        }
        locked_msgq = 1;
    }

    state = erts_atomic32_read_nob(&receiver->state);

    if (state & (ERTS_PSFLG_PENDING_EXIT|ERTS_PSFLG_EXITING)) {
    exiting:
        /* Drop message if receiver is exiting or has a pending exit... */
        if (locked_msgq)
            erts_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);
        erts_cleanup_messages(first);
        return 0;
    }

    res = receiver->msg.len;
    if (receiver_locks & ERTS_PROC_LOCK_MAIN) {
        /*
         * We move 'in queue' to 'private queue' and place
         * message at the end of 'private queue' in order
         * to ensure that the 'in queue' doesn't contain
         * references into the heap. By ensuring this,
         * we don't need to include the 'in queue' in
         * the root set when garbage collecting.
         */
        res += receiver->msg_inq.len;
        ERTS_MSGQ_MV_INQ2PRIVQ(receiver);
        LINK_MESSAGE_PRIVQ(receiver, first, last, len);
    }
    else {
        LINK_MESSAGE(receiver, first, last, len);
    }

    if (IS_TRACED_FL(receiver, F_TRACE_RECEIVE)
        && (te = &erts_receive_tracing[erts_active_bp_ix()],
            te->on)) {
        ErtsMessage *msg = first;

#ifdef USE_VM_PROBES
        if (DTRACE_ENABLED(message_queued)) {
            DTRACE_CHARBUF(receiver_name, DTRACE_TERM_BUF_SIZE);
            Sint tok_label = 0;
            Sint tok_lastcnt = 0;
            Sint tok_serial = 0;
            Eterm seq_trace_token = ERL_MESSAGE_TOKEN(msg);

            dtrace_proc_str(receiver, receiver_name);
            if (seq_trace_token != NIL && is_tuple(seq_trace_token)) {
                tok_label = signed_val(SEQ_TRACE_T_LABEL(seq_trace_token));
                tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(seq_trace_token));
                tok_serial = signed_val(SEQ_TRACE_T_SERIAL(seq_trace_token));
            }
            DTRACE6(message_queued,
                    receiver_name, size_object(ERL_MESSAGE_TERM(msg)),
                    receiver->msg.len,
                    tok_label, tok_lastcnt, tok_serial);
        }
#endif
        while (msg) {
            trace_receive(receiver, from, ERL_MESSAGE_TERM(msg), te);
            msg = msg->next;
        }
    }

    if (locked_msgq) {
        erts_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);
    }

    erts_proc_notify_new_message(receiver, receiver_locks);
    return res;
}
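/*
 * Hedged sketch (helper not present in the original source): the queue
 * length accounting done by queue_messages() above. The public 'in queue'
 * is only visible (and flushable) when the sender also holds the
 * receiver's main lock; otherwise only the private queue length is known.
 */
static ERTS_INLINE Sint
visible_msgq_len(Process *receiver, ErtsProcLocks locks)
{
    Sint res = receiver->msg.len;
    if (locks & ERTS_PROC_LOCK_MAIN)
        res += receiver->msg_inq.len;
    return res;
}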
void
erts_queue_dist_message(Process *rcvr,
                        ErtsProcLocks rcvr_locks,
                        ErtsDistExternal *dist_ext,
                        ErlHeapFragment *hfrag,
                        Eterm token,
                        Eterm from)
{
    ErtsMessage* mp;
    erts_aint_t state;

    ERTS_LC_ASSERT(rcvr_locks == erts_proc_lc_my_proc_locks(rcvr));

    if (hfrag) {
        /* Fragmented message, allocate a message reference */
        mp = erts_alloc_message(0, NULL);
        mp->data.heap_frag = hfrag;
    } else {
        /* Un-fragmented message, allocate space for token and dist_ext in
           message. */
        Uint dist_ext_sz = erts_dist_ext_size(dist_ext) / sizeof(Eterm);
        Uint token_sz = size_object(token);
        Uint sz = token_sz + dist_ext_sz;
        Eterm *hp;

        mp = erts_alloc_message(sz, &hp);
        mp->data.heap_frag = &mp->hfrag;
        mp->hfrag.used_size = token_sz;

        erts_make_dist_ext_copy(dist_ext,
                                erts_get_dist_ext(mp->data.heap_frag));

        token = copy_struct(token, token_sz, &hp,
                            &mp->data.heap_frag->off_heap);
    }

    ERL_MESSAGE_FROM(mp) = dist_ext->dep->sysname;
    ERL_MESSAGE_TERM(mp) = THE_NON_VALUE;
#ifdef USE_VM_PROBES
    ERL_MESSAGE_DT_UTAG(mp) = NIL;
    if (token == am_have_dt_utag)
        ERL_MESSAGE_TOKEN(mp) = NIL;
    else
#endif
        ERL_MESSAGE_TOKEN(mp) = token;

    if (!(rcvr_locks & ERTS_PROC_LOCK_MSGQ)) {
        if (erts_proc_trylock(rcvr, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
            ErtsProcLocks need_locks = ERTS_PROC_LOCK_MSGQ;
            ErtsProcLocks unlocks =
                rcvr_locks & ERTS_PROC_LOCKS_HIGHER_THAN(ERTS_PROC_LOCK_MSGQ);
            if (unlocks) {
                erts_proc_unlock(rcvr, unlocks);
                need_locks |= unlocks;
            }
            erts_proc_lock(rcvr, need_locks);
        }
    }

    state = erts_atomic32_read_acqb(&rcvr->state);
    if (state & ERTS_PSFLG_EXITING) {
        if (!(rcvr_locks & ERTS_PROC_LOCK_MSGQ))
            erts_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);
        /* Drop message if receiver is exiting or has a pending exit ... */
        erts_cleanup_messages(mp);
    }
    else {
        LINK_MESSAGE(rcvr, mp);

        if (rcvr_locks & ERTS_PROC_LOCK_MAIN)
            erts_proc_sig_fetch(rcvr);

        if (!(rcvr_locks & ERTS_PROC_LOCK_MSGQ))
            erts_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);

        erts_proc_notify_new_message(rcvr, rcvr_locks);
    }
}
BIF_RETTYPE
erts_debug_breakpoint_2(BIF_ALIST_2)
{
    Process* p = BIF_P;
    Eterm MFA = BIF_ARG_1;
    Eterm boolean = BIF_ARG_2;
    Eterm* tp;
    ErtsCodeMFA mfa;
    int i;
    int specified = 0;
    Eterm res;
    BpFunctions f;

    if (boolean != am_true && boolean != am_false)
        goto error;

    if (is_not_tuple(MFA)) {
        goto error;
    }
    tp = tuple_val(MFA);
    if (*tp != make_arityval(3)) {
        goto error;
    }
    if (!is_atom(tp[1]) || !is_atom(tp[2]) ||
        (!is_small(tp[3]) && tp[3] != am_Underscore)) {
        goto error;
    }
    for (i = 0; i < 3 && tp[i+1] != am_Underscore; i++, specified++) {
        /* Empty loop body */
    }
    for (i = specified; i < 3; i++) {
        if (tp[i+1] != am_Underscore) {
            goto error;
        }
    }

    mfa.module = tp[1];
    mfa.function = tp[2];

    if (is_small(tp[3])) {
        mfa.arity = signed_val(tp[3]);
    }

    if (!erts_try_seize_code_write_permission(BIF_P)) {
        ERTS_BIF_YIELD2(bif_export[BIF_erts_debug_breakpoint_2],
                        BIF_P, BIF_ARG_1, BIF_ARG_2);
    }

    erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
    erts_thr_progress_block();

    erts_bp_match_functions(&f, &mfa, specified);
    if (boolean == am_true) {
        erts_set_debug_break(&f);
        erts_install_breakpoints(&f);
        erts_commit_staged_bp();
    } else {
        erts_clear_debug_break(&f);
        erts_commit_staged_bp();
        erts_uninstall_breakpoints(&f);
    }
    erts_consolidate_bp_data(&f, 1);
    res = make_small(f.matched);
    erts_bp_free_matched_functions(&f);

    erts_thr_progress_unblock();
    erts_proc_lock(p, ERTS_PROC_LOCK_MAIN);
    erts_release_code_write_permission();
    return res;

 error:
    BIF_ERROR(p, BADARG);
}
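/*
 * Hedged sketch (not part of the original source) of the global
 * code-modification protocol used by erts_debug_breakpoint_2() above:
 * seize the single code write permission (yielding the BIF if someone
 * else holds it), block thread progress so no other scheduler runs, do
 * the modification, then unwind in reverse order. `do_modify` is a
 * hypothetical callback standing in for the breakpoint staging calls.
 */
static void
with_blocked_code_modification(Process *p, void (*do_modify)(void))
{
    erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
    erts_thr_progress_block(); /* all other schedulers are now stopped */
    do_modify();
    erts_thr_progress_unblock();
    erts_proc_lock(p, ERTS_PROC_LOCK_MAIN);
    erts_release_code_write_permission();
}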
static Port *
open_port(Process* p, Eterm name, Eterm settings,
          int *err_typep, int *err_nump)
{
    int merged_environment = 0;
    Sint i;
    Eterm option;
    Uint arity;
    Eterm* tp;
    Uint* nargs;
    erts_driver_t* driver;
    char* name_buf = NULL;
    SysDriverOpts opts;
    Sint linebuf;
    Eterm edir = NIL;
    byte dir[MAXPATHLEN];
    erts_aint32_t sflgs = 0;
    Port *port;

    /* These are the defaults */
    opts.packet_bytes = 0;
    opts.use_stdio = 1;
    opts.redir_stderr = 0;
    opts.read_write = 0;
    opts.hide_window = 0;
    opts.wd = NULL;
    opts.exit_status = 0;
    opts.overlapped_io = 0;
    opts.spawn_type = ERTS_SPAWN_ANY;
    opts.argv = NULL;
    opts.parallelism = erts_port_parallelism;
    erts_osenv_init(&opts.envir);

    linebuf = 0;

    *err_nump = 0;

    if (is_not_list(settings) && is_not_nil(settings)) {
        goto badarg;
    }
    /*
     * Parse the settings.
     */
    if (is_not_nil(settings)) {
        nargs = list_val(settings);
        while (1) {
            if (is_tuple_arity(*nargs, 2)) {
                tp = tuple_val(*nargs);
                arity = *tp++;
                option = *tp++;
                if (option == am_packet) {
                    if (is_not_small(*tp)) {
                        goto badarg;
                    }
                    opts.packet_bytes = signed_val(*tp);
                    switch (opts.packet_bytes) {
                    case 1:
                    case 2:
                    case 4:
                        break;
                    default:
                        goto badarg;
                    }
                } else if (option == am_line) {
                    if (is_not_small(*tp)) {
                        goto badarg;
                    }
                    linebuf = signed_val(*tp);
                    if (linebuf <= 0) {
                        goto badarg;
                    }
                } else if (option == am_env) {
                    if (merged_environment) {
                        /* Ignore previous env option */
                        erts_osenv_clear(&opts.envir);
                    }
                    merged_environment = 1;
                    if (merge_global_environment(&opts.envir, *tp)) {
                        goto badarg;
                    }
                } else if (option == am_args) {
                    char **av;
                    char **oav = opts.argv;
                    if ((av = convert_args(*tp)) == NULL) {
                        goto badarg;
                    }
                    opts.argv = av;
                    if (oav) {
                        opts.argv[0] = oav[0];
                        oav[0] = erts_default_arg0;
                        free_args(oav);
                    }
                } else if (option == am_arg0) {
                    char *a0;
                    if ((a0 = erts_convert_filename_to_native(
                             *tp, NULL, 0, ERTS_ALC_T_TMP,
                             1, 1, NULL)) == NULL) {
                        goto badarg;
                    }
                    if (opts.argv == NULL) {
                        opts.argv = erts_alloc(ERTS_ALC_T_TMP,
                                               2 * sizeof(char **));
                        opts.argv[0] = a0;
                        opts.argv[1] = NULL;
                    } else {
                        if (opts.argv[0] != erts_default_arg0) {
                            erts_free(ERTS_ALC_T_TMP, opts.argv[0]);
                        }
                        opts.argv[0] = a0;
                    }
                } else if (option == am_cd) {
                    edir = *tp;
                } else if (option == am_parallelism) {
                    if (*tp == am_true)
                        opts.parallelism = 1;
                    else if (*tp == am_false)
                        opts.parallelism = 0;
                    else
                        goto badarg;
                } else {
                    goto badarg;
                }
            } else if (*nargs == am_stream) {
                opts.packet_bytes = 0;
            } else if (*nargs == am_use_stdio) {
                opts.use_stdio = 1;
            } else if (*nargs == am_stderr_to_stdout) {
                opts.redir_stderr = 1;
            } else if (*nargs == am_line) {
                linebuf = 512;
            } else if (*nargs == am_nouse_stdio) {
                opts.use_stdio = 0;
            } else if (*nargs == am_binary) {
                sflgs |= ERTS_PORT_SFLG_BINARY_IO;
            } else if (*nargs == am_in) {
                opts.read_write |= DO_READ;
            } else if (*nargs == am_out) {
                opts.read_write |= DO_WRITE;
            } else if (*nargs == am_eof) {
                sflgs |= ERTS_PORT_SFLG_SOFT_EOF;
            } else if (*nargs == am_hide) {
                opts.hide_window = 1;
            } else if (*nargs == am_exit_status) {
                opts.exit_status = 1;
            } else if (*nargs == am_overlapped_io) {
                opts.overlapped_io = 1;
            } else {
                goto badarg;
            }
            if (is_nil(*++nargs))
                break;
            if (is_not_list(*nargs)) {
                goto badarg;
            }
            nargs = list_val(*nargs);
        }
    }

    if (opts.read_write == 0) /* implement default */
        opts.read_write = DO_READ|DO_WRITE;

    /* Mutually exclusive arguments. */
    if ((linebuf && opts.packet_bytes) ||
        (opts.redir_stderr && !opts.use_stdio)) {
        goto badarg;
    }

    /* If we lacked an env option, fill in the global environment without
     * changes. */
    if (!merged_environment) {
        merge_global_environment(&opts.envir, NIL);
    }

    /*
     * Parse the first argument and start the appropriate driver.
     */
    if (is_atom(name) || (i = is_string(name))) {
        /* a vanilla port */
        if (is_atom(name)) {
            name_buf = (char *) erts_alloc(ERTS_ALC_T_TMP,
                                           atom_tab(atom_val(name))->len+1);
            sys_memcpy((void *) name_buf,
                       (void *) atom_tab(atom_val(name))->name,
                       atom_tab(atom_val(name))->len);
            name_buf[atom_tab(atom_val(name))->len] = '\0';
        } else {
            name_buf = (char *) erts_alloc(ERTS_ALC_T_TMP, i + 1);
            if (intlist_to_buf(name, name_buf, i) != i)
                erts_exit(ERTS_ERROR_EXIT, "%s:%d: Internal error\n",
                          __FILE__, __LINE__);
            name_buf[i] = '\0';
        }
        driver = &vanilla_driver;
    } else {
        if (is_not_tuple(name)) {
            goto badarg; /* Not a process or fd port */
        }
        tp = tuple_val(name);
        arity = *tp++;

        if (arity == make_arityval(0)) {
            goto badarg;
        }

        if (*tp == am_spawn || *tp == am_spawn_driver ||
            *tp == am_spawn_executable) { /* A process port */
            int encoding;
            if (arity != make_arityval(2)) {
                goto badarg;
            }
            name = tp[1];
            encoding = erts_get_native_filename_encoding();
            /* Do not convert the command to utf-16le yet, do that in win32
             * specific code, since the cmd is used for comparison with
             * driver names and copied to port info. */
            if (encoding == ERL_FILENAME_WIN_WCHAR) {
                encoding = ERL_FILENAME_UTF8;
            }
            if ((name_buf = erts_convert_filename_to_encoding(
                     name, NULL, 0, ERTS_ALC_T_TMP, 0, 1, encoding,
                     NULL, 0)) == NULL) {
                goto badarg;
            }

            if (*tp == am_spawn_driver) {
                opts.spawn_type = ERTS_SPAWN_DRIVER;
            } else if (*tp == am_spawn_executable) {
                opts.spawn_type = ERTS_SPAWN_EXECUTABLE;
            }

            driver = &spawn_driver;
        } else if (*tp == am_fd) { /* An fd port */
            int n;
            struct Sint_buf sbuf;
            char* p;

            if (arity != make_arityval(3)) {
                goto badarg;
            }
            if (is_not_small(tp[1]) || is_not_small(tp[2])) {
                goto badarg;
            }
            opts.ifd = unsigned_val(tp[1]);
            opts.ofd = unsigned_val(tp[2]);

            /* Synthesize name from input and output descriptors,
             * e.g. {fd, 0, 1} yields the port name "0/1". */
            name_buf = erts_alloc(ERTS_ALC_T_TMP,
                                  2*sizeof(struct Sint_buf) + 2);
            p = Sint_to_buf(opts.ifd, &sbuf);
            n = sys_strlen(p);
            sys_strncpy(name_buf, p, n);
            name_buf[n] = '/';
            p = Sint_to_buf(opts.ofd, &sbuf);
            sys_strcpy(name_buf+n+1, p);

            driver = &fd_driver;
        } else {
            goto badarg;
        }
    }

    if ((driver != &spawn_driver && opts.argv != NULL) ||
        (driver == &spawn_driver &&
         opts.spawn_type != ERTS_SPAWN_EXECUTABLE &&
         opts.argv != NULL)) {
        /* Argument vector only if explicit spawn_executable */
        goto badarg;
    }

    if (edir != NIL) {
        if ((opts.wd = erts_convert_filename_to_native(
                 edir, NULL, 0, ERTS_ALC_T_TMP, 0, 1, NULL)) == NULL) {
            goto badarg;
        }
    }

    if (driver != &spawn_driver && opts.exit_status) {
        goto badarg;
    }

    if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) {
        trace_sched(p, ERTS_PROC_LOCK_MAIN, am_out);
    }

    erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN);

    port = erts_open_driver(driver, p->common.id, name_buf, &opts,
                            err_typep, err_nump);
#ifdef USE_VM_PROBES
    if (port && DTRACE_ENABLED(port_open)) {
        DTRACE_CHARBUF(process_str, DTRACE_TERM_BUF_SIZE);
        DTRACE_CHARBUF(port_str, DTRACE_TERM_BUF_SIZE);

        dtrace_proc_str(p, process_str);
        erts_snprintf(port_str, sizeof(DTRACE_CHARBUF_NAME(port_str)),
                      "%T", port->common.id);
        DTRACE3(port_open, process_str, name_buf, port_str);
    }
#endif

    if (port && IS_TRACED_FL(port, F_TRACE_PORTS))
        trace_port(port, am_getting_linked, p->common.id);

    erts_proc_lock(p, ERTS_PROC_LOCK_MAIN);

    if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) {
        trace_sched(p, ERTS_PROC_LOCK_MAIN, am_in);
    }

    if (!port) {
        DEBUGF(("open_driver returned (%d:%d)\n",
                err_typep ? *err_typep : 4711,
                err_nump ? *err_nump : 4711));
        goto do_return;
    }

    if (linebuf && port->linebuf == NULL) {
        port->linebuf = allocate_linebuf(linebuf);
        sflgs |= ERTS_PORT_SFLG_LINEBUF_IO;
    }

    if (sflgs)
        erts_atomic32_read_bor_relb(&port->state, sflgs);

 do_return:
    erts_osenv_clear(&opts.envir);
    if (name_buf)
        erts_free(ERTS_ALC_T_TMP, (void *) name_buf);
    if (opts.argv) {
        free_args(opts.argv);
    }
    if (opts.wd && opts.wd != ((char *)dir)) {
        erts_free(ERTS_ALC_T_TMP, (void *) opts.wd);
    }
    return port;

 badarg:
    if (err_typep)
        *err_typep = -3;
    if (err_nump)
        *err_nump = BADARG;
    port = NULL;
    goto do_return;
}