static void
table_end_staging_ranges(ErtsAlcType_t alctr, struct ranges* r, int commit)
{
    ErtsCodeIndex dst = erts_staging_code_ix();

    if (commit && r[dst].modules == NULL) {
        Sint i;
        Sint n;

        /* No modules added, just clone src and remove purged code. */
        ErtsCodeIndex src = erts_active_code_ix();

        erts_smp_atomic_add_nob(&mem_used, r[src].n);
        r[dst].modules = erts_alloc(alctr, r[src].n * sizeof(Range));
        r[dst].allocated = r[src].n;
        n = 0;
        for (i = 0; i < r[src].n; i++) {
            Range* rp = r[src].modules+i;
            if (rp->start < RANGE_END(rp)) {
                /* Only insert a module that has not been purged. */
                r[dst].modules[n] = *rp;
                n++;
            }
        }
        r[dst].n = n;
        erts_smp_atomic_set_nob(&r[dst].mid,
                                (erts_aint_t) (r[dst].modules + n / 2));
    }
}
void
erts_cleanup_port_data(Port *prt)
{
    ASSERT(erts_atomic32_read_nob(&prt->state)
           & ERTS_PORT_SFLGS_INVALID_LOOKUP);
    cleanup_old_port_data(erts_smp_atomic_read_nob(&prt->data));
    erts_smp_atomic_set_nob(&prt->data, (erts_aint_t) THE_NON_VALUE);
}
static ERTS_INLINE void
reset_handle(ErtsPortTask *ptp)
{
    if (ptp->handle) {
        ASSERT(ptp == handle2task(ptp->handle));
        erts_smp_atomic_set_nob(ptp->handle, (erts_aint_t) NULL);
    }
}
static ERTS_INLINE void
set_handle(ErtsPortTask *ptp, ErtsPortTaskHandle *pthp)
{
    ptp->handle = pthp;
    if (pthp) {
        erts_smp_atomic_set_nob(pthp, (erts_aint_t) ptp);
        ASSERT(ptp == handle2task(ptp->handle));
    }
}
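/*
 * Illustrative sketch (not emulator code): set_handle()/reset_handle() above
 * implement a small publication protocol. The caller owns an atomic "handle"
 * slot; scheduling a task publishes the task pointer through that slot so the
 * caller can later see whether the task is still queued (and abort it), and
 * the executor withdraws the pointer before running or freeing the task.
 * A minimal stand-alone version using C11 atomics could look like the code
 * below; all names (task, task_handle, ...) are invented for the example.
 */
#if 0
#include <stdatomic.h>
#include <stddef.h>

struct task;
typedef _Atomic(struct task *) task_handle;

struct task {
    task_handle *handle;    /* back pointer to the caller's slot, or NULL */
    /* ... payload ... */
};

static void handle_init(task_handle *h)         { atomic_store(h, NULL); }
static int  handle_is_scheduled(task_handle *h) { return atomic_load(h) != NULL; }

/* Analogue of set_handle(): publish the task through the caller's handle. */
static void task_set_handle(struct task *t, task_handle *h)
{
    t->handle = h;
    if (h)
        atomic_store(h, t);
}

/* Analogue of reset_handle(): withdraw the task before executing/freeing it. */
static void task_reset_handle(struct task *t)
{
    if (t->handle)
        atomic_store(t->handle, NULL);
}
#endif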
void
erts_remove_from_ranges(BeamInstr* code)
{
    Range* rp = find_range(range_tables, code);
#ifdef ERTS_SLAVE_EMU_ENABLED
    if (slave_initialised && rp == 0)
        rp = find_range(slave_range_tables, code);
#endif
    erts_smp_atomic_set_nob(&rp->end, (erts_aint_t)rp->start);
}
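/*
 * Illustrative sketch (an assumption, not the actual find_range() referenced
 * above): the range table is kept sorted by start address with
 * non-overlapping [start, end) intervals, so a lookup can be a plain binary
 * search that returns the entry containing the code pointer, or NULL when no
 * loaded module covers it. A purged module has end == start and can never
 * match. The struct and function names below are invented for the example.
 */
#if 0
#include <stddef.h>

typedef struct {
    const char *start;    /* first byte of the module's code */
    const char *end;      /* one past the last byte; end == start => purged */
} code_range;

static code_range *
range_lookup(code_range *tab, size_t n, const char *addr)
{
    size_t lo = 0, hi = n;

    while (lo < hi) {
        size_t mid = lo + (hi - lo) / 2;
        if (addr < tab[mid].start)
            hi = mid;
        else if (addr >= tab[mid].end)
            lo = mid + 1;
        else
            return &tab[mid];     /* start <= addr < end */
    }
    return NULL;
}
#endif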
ErtsMigrateResult
erts_port_migrate(Port *prt, int *prt_locked,
                  ErtsRunQueue *from_rq, int *from_locked,
                  ErtsRunQueue *to_rq, int *to_locked)
{
    ERTS_SMP_LC_ASSERT(*from_locked);
    ERTS_SMP_LC_CHK_RUNQ_LOCK(from_rq, *from_locked);
    ERTS_SMP_LC_CHK_RUNQ_LOCK(to_rq, *to_locked);
    ASSERT(!erts_common_run_queue);

    /*
     * Take whichever run queue locks we do not already hold. Locks are
     * always acquired in address order (lowest address first); when we
     * already hold the higher-addressed lock we try the other and, on
     * EBUSY, release and retake both in the correct order.
     */
    if (!*from_locked || !*to_locked) {
        if (from_rq < to_rq) {
            if (!*to_locked) {
                if (!*from_locked)
                    erts_smp_runq_lock(from_rq);
                erts_smp_runq_lock(to_rq);
            }
            else if (erts_smp_runq_trylock(from_rq) == EBUSY) {
                erts_smp_runq_unlock(to_rq);
                erts_smp_runq_lock(from_rq);
                erts_smp_runq_lock(to_rq);
            }
        }
        else {
            if (!*from_locked) {
                if (!*to_locked)
                    erts_smp_runq_lock(to_rq);
                erts_smp_runq_lock(from_rq);
            }
            else if (erts_smp_runq_trylock(to_rq) == EBUSY) {
                erts_smp_runq_unlock(from_rq);
                erts_smp_runq_lock(to_rq);
                erts_smp_runq_lock(from_rq);
            }
        }
        *to_locked = *from_locked = 1;
    }
    ERTS_SMP_LC_CHK_RUNQ_LOCK(from_rq, *from_locked);
    ERTS_SMP_LC_CHK_RUNQ_LOCK(to_rq, *to_locked);

    /* Refuse to migrate to a suspended run queue */
    if (to_rq->flags & ERTS_RUNQ_FLG_SUSPENDED)
        return ERTS_MIGRATE_FAILED_RUNQ_SUSPENDED;
    if (from_rq != (ErtsRunQueue *) erts_smp_atomic_read_nob(&prt->run_queue))
        return ERTS_MIGRATE_FAILED_RUNQ_CHANGED;
    if (!ERTS_PORT_IS_IN_RUNQ(from_rq, prt))
        return ERTS_MIGRATE_FAILED_NOT_IN_RUNQ;
    dequeue_port(from_rq, prt);
    erts_smp_atomic_set_nob(&prt->run_queue, (erts_aint_t) to_rq);
    enqueue_port(to_rq, prt);
    return ERTS_MIGRATE_SUCCESS;
}
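/*
 * Illustrative sketch (not emulator code): the lock juggling at the top of
 * erts_port_migrate() is the classic "order by address, fall back to
 * trylock and back off" scheme for taking two locks without deadlocking.
 * A reduced stand-alone version with plain pthread mutexes, where 'a' may
 * already be held by the caller and 'b' is not:
 */
#if 0
#include <pthread.h>
#include <errno.h>

static void
lock_both(pthread_mutex_t *a, int a_held, pthread_mutex_t *b)
{
    if (a < b) {
        /* 'a' comes first in address order; take it if needed, then 'b'. */
        if (!a_held)
            pthread_mutex_lock(a);
        pthread_mutex_lock(b);
    }
    else {
        if (!a_held) {
            /* Neither held: take them in address order, 'b' first. */
            pthread_mutex_lock(b);
            pthread_mutex_lock(a);
        }
        else if (pthread_mutex_trylock(b) == EBUSY) {
            /* We hold the "wrong" (higher-addressed) lock: back off. */
            pthread_mutex_unlock(a);
            pthread_mutex_lock(b);
            pthread_mutex_lock(a);
        }
    }
}
#endif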
int
erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
{
    int port_was_enqueued = 0;
    Port *pp;
    ErtsPortTaskQueue *ptqp;
    ErtsPortTask *ptp;
    int res = 0;
    int reds = ERTS_PORT_REDS_EXECUTE;
    erts_aint_t io_tasks_executed = 0;
    int fpe_was_unmasked;
    ErtsPortTaskExeBlockData blk_data = {runq, NULL};

    ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));

    ERTS_PT_CHK_PORTQ(runq);

    pp = pop_port(runq);
    if (!pp) {
        res = 0;
        goto done;
    }

    ERTS_PORT_NOT_IN_RUNQ(pp);

    *curr_port_pp = pp;

    ASSERT(pp->sched.taskq);
    ASSERT(pp->sched.taskq->first);
    ptqp = pp->sched.taskq;
    pp->sched.taskq = NULL;

    ASSERT(!pp->sched.exe_taskq);
    pp->sched.exe_taskq = ptqp;

    if (erts_smp_port_trylock(pp) == EBUSY) {
        erts_smp_runq_unlock(runq);
        erts_smp_port_lock(pp);
        erts_smp_runq_lock(runq);
    }

    if (erts_sched_stat.enabled) {
        ErtsSchedulerData *esdp = erts_get_scheduler_data();
        Uint old = ERTS_PORT_SCHED_ID(pp, esdp->no);
        int migrated = old && old != esdp->no;

        erts_smp_spin_lock(&erts_sched_stat.lock);
        erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].total_executed++;
        erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].executed++;
        if (migrated) {
            erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].total_migrated++;
            erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].migrated++;
        }
        erts_smp_spin_unlock(&erts_sched_stat.lock);
    }

    /* trace port scheduling, in */
    if (IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS)) {
        trace_sched_ports(pp, am_in);
    }

    ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));

    ERTS_PT_CHK_PRES_PORTQ(runq, pp);
    ptp = pop_task(ptqp);

    fpe_was_unmasked = erts_block_fpe();

    while (ptp) {
        ASSERT(pp->sched.taskq != pp->sched.exe_taskq);

        reset_handle(ptp);
        erts_smp_runq_unlock(runq);

        ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
        ERTS_SMP_CHK_NO_PROC_LOCKS;
        ASSERT(pp->drv_ptr);

        switch (ptp->type) {
        case ERTS_PORT_TASK_FREE: /* May be pushed in q at any time */
            reds += ERTS_PORT_REDS_FREE;
            erts_smp_runq_lock(runq);

            erts_unblock_fpe(fpe_was_unmasked);
            ASSERT(pp->status & ERTS_PORT_SFLG_FREE_SCHEDULED);
            if (ptqp->first || (pp->sched.taskq && pp->sched.taskq->first))
                handle_remaining_tasks(runq, pp);
            ASSERT(!ptqp->first
                   && (!pp->sched.taskq || !pp->sched.taskq->first));
#ifdef ERTS_SMP
            erts_smp_atomic_dec_nob(&pp->refc); /* Not alive */
            ERTS_SMP_LC_ASSERT(erts_smp_atomic_read_nob(&pp->refc) > 0); /* Lock */
#else
            erts_port_status_bor_set(pp, ERTS_PORT_SFLG_FREE);
#endif

            port_task_free(ptp);
            if (pp->sched.taskq)
                port_taskq_free(pp->sched.taskq);
            pp->sched.taskq = NULL;

            goto tasks_done;
        case ERTS_PORT_TASK_TIMEOUT:
            reds += ERTS_PORT_REDS_TIMEOUT;
            if (!(pp->status & ERTS_PORT_SFLGS_DEAD))
                (*pp->drv_ptr->timeout)((ErlDrvData) pp->drv_data);
            break;
        case ERTS_PORT_TASK_INPUT:
            reds += ERTS_PORT_REDS_INPUT;
            ASSERT((pp->status & ERTS_PORT_SFLGS_DEAD) == 0);
            /* NOTE: some Windows drivers use ->ready_input
               for input and output */
            (*pp->drv_ptr->ready_input)((ErlDrvData) pp->drv_data,
                                        ptp->event);
            io_tasks_executed++;
            break;
        case ERTS_PORT_TASK_OUTPUT:
            reds += ERTS_PORT_REDS_OUTPUT;
            ASSERT((pp->status & ERTS_PORT_SFLGS_DEAD) == 0);
            (*pp->drv_ptr->ready_output)((ErlDrvData) pp->drv_data,
                                         ptp->event);
            io_tasks_executed++;
            break;
        case ERTS_PORT_TASK_EVENT:
            reds += ERTS_PORT_REDS_EVENT;
            ASSERT((pp->status & ERTS_PORT_SFLGS_DEAD) == 0);
            (*pp->drv_ptr->event)((ErlDrvData) pp->drv_data,
                                  ptp->event,
                                  ptp->event_data);
            io_tasks_executed++;
            break;
        case ERTS_PORT_TASK_DIST_CMD:
            reds += erts_dist_command(pp, CONTEXT_REDS-reds);
            break;
        default:
            erl_exit(ERTS_ABORT_EXIT,
                     "Invalid port task type: %d\n",
                     (int) ptp->type);
            break;
        }

        if ((pp->status & ERTS_PORT_SFLG_CLOSING)
            && erts_is_port_ioq_empty(pp)) {
            reds += ERTS_PORT_REDS_TERMINATE;
            erts_terminate_port(pp);
        }

        ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));

#ifdef ERTS_SMP
        if (pp->xports)
            erts_smp_xports_unlock(pp);
        ASSERT(!pp->xports);
#endif

        ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));

        port_task_free(ptp);

        erts_smp_runq_lock(runq);

        ptp = pop_task(ptqp);
    }

 tasks_done:

    erts_unblock_fpe(fpe_was_unmasked);

    if (io_tasks_executed) {
        ASSERT(erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
               >= io_tasks_executed);
        erts_smp_atomic_add_relb(&erts_port_task_outstanding_io_tasks,
                                 -1*io_tasks_executed);
    }

    *curr_port_pp = NULL;

#ifdef ERTS_SMP
    ASSERT(runq == (ErtsRunQueue *) erts_smp_atomic_read_nob(&pp->run_queue));
#endif

    if (!pp->sched.taskq) {
        ASSERT(pp->sched.exe_taskq);
        pp->sched.exe_taskq = NULL;
    }
    else {
#ifdef ERTS_SMP
        ErtsRunQueue *xrunq;
#endif

        ASSERT(!(pp->status & ERTS_PORT_SFLGS_DEAD));
        ASSERT(pp->sched.taskq->first);

#ifdef ERTS_SMP
        xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL);
        if (!xrunq) {
#endif
            enqueue_port(runq, pp);
            ASSERT(pp->sched.exe_taskq);
            pp->sched.exe_taskq = NULL;
            /* No need to notify ourselves about inc in runq. */
#ifdef ERTS_SMP
        }
        else {
            /* Port emigrated ... */
            erts_smp_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq);
            enqueue_port(xrunq, pp);
            ASSERT(pp->sched.exe_taskq);
            pp->sched.exe_taskq = NULL;
            erts_smp_runq_unlock(xrunq);
            erts_smp_notify_inc_runq(xrunq);
        }
#endif
        port_was_enqueued = 1;
    }

    res = (erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
           != (erts_aint_t) 0);

    ERTS_PT_CHK_PRES_PORTQ(runq, pp);

    port_taskq_free(ptqp);

    if (erts_system_profile_flags.runnable_ports && (port_was_enqueued != 1)) {
        profile_runnable_port(pp, am_inactive);
    }

    /* trace port scheduling, out */
    if (IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS)) {
        trace_sched_ports(pp, am_out);
    }

#ifndef ERTS_SMP
    erts_port_release(pp);
#else
    {
        erts_aint_t refc;
        erts_smp_mtx_unlock(pp->lock);
        refc = erts_smp_atomic_dec_read_nob(&pp->refc);
        ASSERT(refc >= 0);
        if (refc == 0) {
            erts_smp_runq_unlock(runq);
            erts_port_cleanup(pp); /* Might acquire runq lock */
            erts_smp_runq_lock(runq);
            res = (erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
                   != (erts_aint_t) 0);
        }
    }
#endif

 done:
    blk_data.resp = &res;

    ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));

    ERTS_PORT_REDUCTIONS_EXECUTED(runq, reds);

    return res;
}
int
erts_port_task_schedule(Eterm id,
                        ErtsPortTaskHandle *pthp,
                        ErtsPortTaskType type,
                        ErlDrvEvent event,
                        ErlDrvEventData event_data)
{
    ErtsRunQueue *runq;
    Port *pp;
    ErtsPortTask *ptp;
    int enq_port = 0;

    /*
     * NOTE: We might not have the port lock here. We are only
     *       allowed to access the 'sched', 'tab_status',
     *       and 'id' fields of the port struct while
     *       tasks_lock is held.
     */

    if (pthp && erts_port_task_is_scheduled(pthp)) {
        ASSERT(0);
        erts_port_task_abort(id, pthp);
    }

    ptp = port_task_alloc();

    ASSERT(is_internal_port(id));
    pp = &erts_port[internal_port_index(id)];
    runq = erts_port_runq(pp);

    if (!runq || ERTS_PORT_TASK_INVALID_PORT(pp, id)) {
        if (runq)
            erts_smp_runq_unlock(runq);
        return -1;
    }

    ASSERT(!erts_port_task_is_scheduled(pthp));

    ERTS_PT_CHK_PRES_PORTQ(runq, pp);

    if (!pp->sched.taskq) {
        pp->sched.taskq = port_taskq_init(port_taskq_alloc(), pp);
        enq_port = !pp->sched.exe_taskq;
    }

#ifdef ERTS_SMP
    if (enq_port) {
        ErtsRunQueue *xrunq = erts_check_emigration_need(runq,
                                                         ERTS_PORT_PRIO_LEVEL);
        if (xrunq) {
            /* Port emigrated ... */
            erts_smp_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq);
            erts_smp_runq_unlock(runq);
            runq = xrunq;
        }
    }
#endif

    ASSERT(!enq_port || !(runq->flags & ERTS_RUNQ_FLG_SUSPENDED));

    ASSERT(pp->sched.taskq);
    ASSERT(ptp);

    ptp->type = type;
    ptp->event = event;
    ptp->event_data = event_data;

    set_handle(ptp, pthp);

    switch (type) {
    case ERTS_PORT_TASK_FREE:
        erl_exit(ERTS_ABORT_EXIT,
                 "erts_port_task_schedule(): Cannot schedule free task\n");
        break;
    case ERTS_PORT_TASK_INPUT:
    case ERTS_PORT_TASK_OUTPUT:
    case ERTS_PORT_TASK_EVENT:
        erts_smp_atomic_inc_relb(&erts_port_task_outstanding_io_tasks);
        /* Fall through... */
    default:
        enqueue_task(pp->sched.taskq, ptp);
        break;
    }

#ifndef ERTS_SMP
    /*
     * When (!enq_port && !pp->sched.exe_taskq) is true in the smp case,
     * the port might not be in the run queue. If this is the case, another
     * thread is in the process of enqueueing the port. This happens very
     * seldom, but it does happen and is a valid scenario. Debug info showing
     * this enqueue in progress must be introduced before we can enable
     * (modified versions of) these assertions in the smp case again.
     */
#if defined(HARD_DEBUG)
    if (pp->sched.exe_taskq || enq_port)
        ERTS_PT_CHK_NOT_IN_PORTQ(runq, pp);
    else
        ERTS_PT_CHK_IN_PORTQ(runq, pp);
#elif defined(DEBUG)
    if (!enq_port && !pp->sched.exe_taskq) {
        /* We should be in port run q */
        ASSERT(pp->sched.prev || runq->ports.start == pp);
    }
#endif
#endif

    if (!enq_port) {
        ERTS_PT_CHK_PRES_PORTQ(runq, pp);
        erts_smp_runq_unlock(runq);
    }
    else {
        enqueue_port(runq, pp);
        ERTS_PT_CHK_PRES_PORTQ(runq, pp);

        if (erts_system_profile_flags.runnable_ports) {
            profile_runnable_port(pp, am_active);
        }

        erts_smp_runq_unlock(runq);

        erts_smp_notify_inc_runq(runq);
    }
    return 0;
}
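/*
 * Illustrative sketch (not emulator code): erts_port_task_schedule() and
 * erts_port_task_execute() keep a global count of outstanding I/O tasks so
 * schedulers can tell whether I/O work may still be pending. The pattern is
 * an atomic counter incremented (with release ordering) per scheduled I/O
 * task and decremented in bulk after a batch has executed. A stand-alone
 * version with C11 atomics and invented names:
 */
#if 0
#include <stdatomic.h>

static atomic_long outstanding_io_tasks = 0;

/* Called when an input/output/event task is enqueued. */
static void io_task_scheduled(void)
{
    atomic_fetch_add_explicit(&outstanding_io_tasks, 1, memory_order_release);
}

/* Called once per executed batch; returns nonzero if I/O work remains. */
static int io_tasks_still_outstanding(long executed)
{
    if (executed)
        atomic_fetch_sub_explicit(&outstanding_io_tasks, executed,
                                  memory_order_release);
    return atomic_load_explicit(&outstanding_io_tasks,
                                memory_order_relaxed) != 0;
}
#endif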
static void
table_update_ranges(ErtsAlcType_t alctr, struct ranges* r, BeamInstr* code,
                    Uint size)
{
    ErtsCodeIndex dst = erts_staging_code_ix();
    ErtsCodeIndex src = erts_active_code_ix();
    Sint i;
    Sint n;
    Sint need;

    if (src == dst) {
        ASSERT(!erts_initialized);

        /*
         * During start-up of system, the indices are the same.
         * Handle this by faking a source area.
         */
        src = (src+1) % ERTS_NUM_CODE_IX;
        if (r[src].modules) {
            erts_smp_atomic_add_nob(&mem_used, -r[src].allocated);
            erts_free(alctr, r[src].modules);
        }
        r[src] = r[dst];
        r[dst].modules = 0;
    }

    CHECK(&r[src]);

    ASSERT(r[dst].modules == NULL);
    need = r[dst].allocated = r[src].n + 1;
    erts_smp_atomic_add_nob(&mem_used, need);
    r[dst].modules = (Range *) erts_alloc(alctr, need * sizeof(Range));
    n = 0;
    for (i = 0; i < r[src].n; i++) {
        Range* rp = r[src].modules+i;
        if (code < rp->start) {
            r[dst].modules[n].start = code;
            erts_smp_atomic_init_nob(&r[dst].modules[n].end,
                                     (erts_aint_t)(((byte *)code) + size));
            ASSERT(!n || RANGE_END(&r[dst].modules[n-1]) < code);
            n++;
            break;
        }
        if (rp->start < RANGE_END(rp)) {
            /* Only insert a module that has not been purged. */
            r[dst].modules[n].start = rp->start;
            erts_smp_atomic_init_nob(&r[dst].modules[n].end,
                                     (erts_aint_t)(RANGE_END(rp)));
            ASSERT(!n || RANGE_END(&r[dst].modules[n-1]) < rp->start);
            n++;
        }
    }
    while (i < r[src].n) {
        Range* rp = r[src].modules+i;
        if (rp->start < RANGE_END(rp)) {
            /* Only insert a module that has not been purged. */
            r[dst].modules[n].start = rp->start;
            erts_smp_atomic_init_nob(&r[dst].modules[n].end,
                                     (erts_aint_t)(RANGE_END(rp)));
            ASSERT(!n || RANGE_END(&r[dst].modules[n-1]) < rp->start);
            n++;
        }
        i++;
    }
    if (n == 0 || code > r[dst].modules[n-1].start) {
        r[dst].modules[n].start = code;
        erts_smp_atomic_init_nob(&r[dst].modules[n].end,
                                 (erts_aint_t)(((byte *)code) + size));
        ASSERT(!n || RANGE_END(&r[dst].modules[n-1]) < code);
        n++;
    }
    ASSERT(n <= r[src].n+1);
    r[dst].n = n;
    erts_smp_atomic_set_nob(&r[dst].mid,
                            (erts_aint_t) (r[dst].modules + n / 2));

    CHECK(&r[dst]);
    CHECK(&r[src]);
}
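/*
 * Illustrative sketch (an assumption about the code-index machinery used
 * above, which lives elsewhere): erts_active_code_ix()/erts_staging_code_ix()
 * behave like a multi-buffering scheme. Writers rebuild the table under the
 * staging index while readers keep using the active one, and committing is a
 * single atomic flip of the index. In a reduced two-slot form with invented
 * names:
 */
#if 0
#include <stdatomic.h>

#define NUM_IX 2

typedef struct { int n; /* ... e.g. a sorted Range array ... */ } table;

static table tables[NUM_IX];
static _Atomic unsigned active_ix = 0;

/* Writers (serialized externally) prepare the copy that is NOT active. */
static unsigned staging_index(void)
{
    return (atomic_load(&active_ix) + 1) % NUM_IX;
}

/* Readers just pick the currently active copy; no locks needed. */
static const table *active_table(void)
{
    return &tables[atomic_load(&active_ix)];
}

/*
 * Commit: flip the index. Reusing the old slot for the next staging round
 * must wait until no reader can still be looking at it.
 */
static void commit_staging(void)
{
    atomic_store(&active_ix, staging_index());
}
#endif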
/*
 * Entry point called by the trace wrap functions in erl_bif_wrap.c
 *
 * The trace wrap functions are themselves called through the export
 * entries instead of the original BIF functions.
 */
Eterm
erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
{
    Eterm result;
    Eterm (*func)(Process*, Eterm*, BeamInstr*);
    Export* ep = bif_export[bif_index];
    Uint32 flags = 0, flags_meta = 0;
    Eterm meta_tracer_pid = NIL;
    int applying = (I == &(ep->code[3])); /* Yup, the apply code for a bif
                                           * is actually in the
                                           * export entry */
    BeamInstr *cp = p->cp;
    GenericBp* g;
    GenericBpData* bp = NULL;
    Uint bp_flags = 0;

    ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);

    g = (GenericBp *) ep->fake_op_func_info_for_hipe[1];
    if (g) {
        bp = &g->data[erts_active_bp_ix()];
        bp_flags = bp->flags;
    }

    /*
     * Make the continuation pointer OK. It is not correct during direct
     * BIF calls, but it is during apply of a BIF.
     */
    if (!applying) {
        p->cp = I;
    }
    if (bp_flags & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE) &&
        IS_TRACED_FL(p, F_TRACE_CALLS)) {
        int local = !!(bp_flags & ERTS_BPF_LOCAL_TRACE);
        flags = erts_call_trace(p, ep->code, bp->local_ms, args,
                                local, &ERTS_TRACER_PROC(p));
    }
    if (bp_flags & ERTS_BPF_META_TRACE) {
        Eterm tpid1, tpid2;

        tpid1 = tpid2 = (Eterm) erts_smp_atomic_read_nob(&bp->meta_pid->pid);
        flags_meta = erts_call_trace(p, ep->code, bp->meta_ms, args,
                                     0, &tpid2);
        meta_tracer_pid = tpid2;
        if (tpid1 != tpid2) {
            erts_smp_atomic_set_nob(&bp->meta_pid->pid, tpid2);
        }
    }
    if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE &&
        IS_TRACED_FL(p, F_TRACE_CALLS) &&
        erts_is_tracer_proc_valid(p)) {
        BeamInstr *pc = (BeamInstr *)ep->code+3;
        erts_trace_time_call(p, pc, bp->time);
    }

    /* Restore original continuation pointer (if changed). */
    p->cp = cp;

    func = bif_table[bif_index].f;

    result = func(p, args, I);

    if (applying && (flags & MATCH_SET_RETURN_TO_TRACE)) {
        BeamInstr i_return_trace      = beam_return_trace[0];
        BeamInstr i_return_to_trace   = beam_return_to_trace[0];
        BeamInstr i_return_time_trace = beam_return_time_trace[0];
        Eterm *cpp;

        /* Maybe advance cp to skip trace stack frames */
        for (cpp = p->stop; ; cp = cp_val(*cpp++)) {
            if (*cp == i_return_trace) {
                /* Skip stack frame variables */
                while (is_not_CP(*cpp)) cpp++;
                cpp += 2; /* Skip return_trace parameters */
            } else if (*cp == i_return_time_trace) {
                /* Skip stack frame variables */
                while (is_not_CP(*cpp)) cpp++;
                cpp += 1; /* Skip return_time_trace parameters */
            } else if (*cp == i_return_to_trace) {
                /* A return_to trace message is going to be generated
                 * by normal means, so we do not have to.
                 */
                cp = NULL;
                break;
            } else
                break;
        }
    }

    /* Try to get these in the order
     * they usually appear in normal code...
     */
    if (is_non_value(result)) {
        Uint reason = p->freason;

        if (reason != TRAP) {
            Eterm class;
            Eterm value = p->fvalue;
            DeclareTmpHeapNoproc(nocatch,3);
            UseTmpHeapNoproc(3);

            /* Expand error value like in handle_error() */
            if (reason & EXF_ARGLIST) {
                Eterm *tp;
                ASSERT(is_tuple(value));
                tp = tuple_val(value);
                value = tp[1];
            }
            if ((reason & EXF_THROWN) && (p->catches <= 0)) {
                value = TUPLE2(nocatch, am_nocatch, value);
                reason = EXC_ERROR;
            }
            /* Note: expand_error_value() could theoretically
             * allocate on the heap, but not for any error
             * returned by a BIF, and it would do no harm,
             * just be annoying.
             */
            value = expand_error_value(p, reason, value);
            class = exception_tag[GET_EXC_CLASS(reason)];

            if (flags_meta & MATCH_SET_EXCEPTION_TRACE) {
                erts_trace_exception(p, ep->code, class, value,
                                     &meta_tracer_pid);
            }
            if (flags & MATCH_SET_EXCEPTION_TRACE) {
                erts_trace_exception(p, ep->code, class, value,
                                     &ERTS_TRACER_PROC(p));
            }
BeamInstr
erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg)
{
    GenericBp* g;
    GenericBpData* bp;
    Uint bp_flags;
    ErtsBpIndex ix = erts_active_bp_ix();

    g = (GenericBp *) I[-4];
    bp = &g->data[ix];
    bp_flags = bp->flags;
    ASSERT((bp_flags & ~ERTS_BPF_ALL) == 0);
    if (bp_flags & (ERTS_BPF_LOCAL_TRACE|
                    ERTS_BPF_GLOBAL_TRACE|
                    ERTS_BPF_TIME_TRACE_ACTIVE) &&
        !IS_TRACED_FL(c_p, F_TRACE_CALLS)) {
        bp_flags &= ~(ERTS_BPF_LOCAL_TRACE|
                      ERTS_BPF_GLOBAL_TRACE|
                      ERTS_BPF_TIME_TRACE|
                      ERTS_BPF_TIME_TRACE_ACTIVE);
        if (bp_flags == 0) {    /* Quick exit */
            return g->orig_instr;
        }
    }

    if (bp_flags & ERTS_BPF_LOCAL_TRACE) {
        ASSERT((bp_flags & ERTS_BPF_GLOBAL_TRACE) == 0);
        (void) do_call_trace(c_p, I, reg, 1, bp->local_ms, am_true);
    } else if (bp_flags & ERTS_BPF_GLOBAL_TRACE) {
        (void) do_call_trace(c_p, I, reg, 0, bp->local_ms, am_true);
    }

    if (bp_flags & ERTS_BPF_META_TRACE) {
        Eterm old_pid;
        Eterm new_pid;

        old_pid = (Eterm) erts_smp_atomic_read_nob(&bp->meta_pid->pid);
        new_pid = do_call_trace(c_p, I, reg, 1, bp->meta_ms, old_pid);
        if (new_pid != old_pid) {
            erts_smp_atomic_set_nob(&bp->meta_pid->pid, new_pid);
        }
    }

    if (bp_flags & ERTS_BPF_COUNT_ACTIVE) {
        erts_smp_atomic_inc_nob(&bp->count->acount);
    }

    if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE && erts_is_tracer_proc_valid(c_p)) {
        Eterm w;
        erts_trace_time_call(c_p, I, bp->time);
        w = (BeamInstr) *c_p->cp;
        if (! (w == (BeamInstr) BeamOp(op_i_return_time_trace) ||
               w == (BeamInstr) BeamOp(op_return_trace) ||
               w == (BeamInstr) BeamOp(op_i_return_to_trace)) ) {
            Eterm* E = c_p->stop;
            ASSERT(c_p->htop <= E && E <= c_p->hend);
            if (E - 2 < c_p->htop) {
                (void) erts_garbage_collect(c_p, 2, reg, I[-1]);
                ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
            }
            E = c_p->stop;

            ASSERT(c_p->htop <= E && E <= c_p->hend);

            E -= 2;
            E[0] = make_cp(I);
            E[1] = make_cp(c_p->cp);     /* original return address */
            c_p->cp = beam_return_time_trace;
            c_p->stop = E;
        }
    }

    if (bp_flags & ERTS_BPF_DEBUG) {
        return (BeamInstr) BeamOp(op_i_debug_breakpoint);
    } else {
        return g->orig_instr;
    }
}