Example #1
void
erts_install_breakpoints(BpFunctions* f)
{
    Uint i;
    Uint n = f->matched;
    BeamInstr br = (BeamInstr) BeamOp(op_i_generic_breakpoint);

    for (i = 0; i < n; i++) {
	BeamInstr* pc = f->matching[i].pc;
	GenericBp* g = (GenericBp *) pc[-4];
	if (*pc != br && g) {
	    Module* modp = f->matching[i].mod;

	    /*
	     * The breakpoint must be disabled in the active data
	     * (it will be enabled later by switching bp indices),
	     * and enabled in the staging data.
	     */
	    ASSERT(g->data[erts_active_bp_ix()].flags == 0);
	    ASSERT(g->data[erts_staging_bp_ix()].flags != 0);

	    /*
	     * The following write is not protected by any lock. We
	     * assume that the hardware guarantees that a write of an
	     * aligned word-size (or half-word) value is atomic
	     * (i.e. that other processes executing this code will not
	     * see a half pointer).
	     */
	    *pc = br;
	    modp->curr.num_breakpoints++;
	}
    }
}
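The write `*pc = br` above patches a live instruction word and leans on the hardware making aligned word stores atomic (the same assumption is stated again in uninstall_breakpoint below). A minimal sketch that makes the assumption explicit with C11 atomics; InstrWord and patch_instr are illustrative names, not the ERTS ones:

#include <stdatomic.h>
#include <stdint.h>

typedef uintptr_t InstrWord;   /* illustrative stand-in for BeamInstr */

/* Patch one live instruction word.  A relaxed atomic store expresses
 * the comment's hardware assumption directly: concurrent readers see
 * either the old word or the new one, never a torn half-pointer. */
static void patch_instr(_Atomic InstrWord *pc, InstrWord new_word)
{
    atomic_store_explicit(pc, new_word, memory_order_relaxed);
}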
Example #2
void
erts_commit_staged_bp(void)
{
    ErtsBpIndex staging = erts_staging_bp_ix();
    ErtsBpIndex active = erts_active_bp_ix();

    erts_smp_atomic32_set_nob(&erts_active_bp_index, staging);
    erts_smp_atomic32_set_nob(&erts_staging_bp_index, active);
}
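The commit itself is just two index stores: whatever was prepared in the staging slot becomes the active generation, and the previous active slot is recycled as the next staging area. A hedged sketch of this double-buffering scheme with C11 atomics in place of the ERTS atomic wrappers, assuming a single committer at a time (slot layout and names are illustrative):

#include <stdatomic.h>
#include <stdint.h>

typedef struct { uint32_t flags; /* ... */ } BpData;

static BpData slots[2];             /* two generations of breakpoint data */
static atomic_uint active_ix  = 0;  /* readers index with this slot       */
static atomic_uint staging_ix = 1;  /* writers prepare this slot          */

/* Publish the staging slot by swapping the two indices; readers that
 * load active_ix after the first store see the new generation.  Readers
 * never block, and the old slot can be consolidated at leisure. */
static void commit_staged(void)
{
    unsigned s = atomic_load(&staging_ix);
    unsigned a = atomic_load(&active_ix);
    atomic_store(&active_ix, s);
    atomic_store(&staging_ix, a);
}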
Example #3
static void
uninstall_breakpoint(BeamInstr* pc)
{
    if (*pc == (BeamInstr) BeamOp(op_i_generic_breakpoint)) {
	GenericBp* g = (GenericBp *) pc[-4];
	if (g->data[erts_active_bp_ix()].flags == 0) {
	    /*
	     * The following write is not protected by any lock. We
	     * assume that the hardware guarantees that a write of an
	     * aligned word-size (or half-word) value is atomic
	     * (i.e. that other processes executing this code will not
	     * see a half pointer).
	     */
	    *pc = g->orig_instr;
	}
    }
}
Example #4
/* Add messages at the end of the message queue */
static Sint
queue_messages(Process* receiver,
               erts_aint32_t *receiver_state,
               ErtsProcLocks receiver_locks,
               ErtsMessage* first,
               ErtsMessage** last,
               Uint len,
               Eterm from)
{
    ErtsTracingEvent* te;
    Sint res;
    int locked_msgq = 0;
    erts_aint32_t state;

    ASSERT(is_value(ERL_MESSAGE_TERM(first)));
    ASSERT(ERL_MESSAGE_TOKEN(first) == am_undefined ||
           ERL_MESSAGE_TOKEN(first) == NIL ||
           is_tuple(ERL_MESSAGE_TOKEN(first)));

#ifdef ERTS_ENABLE_LOCK_CHECK
    ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(receiver) < ERTS_PROC_LOCK_MSGQ ||
                       receiver_locks == erts_proc_lc_my_proc_locks(receiver));
#endif

    if (!(receiver_locks & ERTS_PROC_LOCK_MSGQ)) {
	if (erts_proc_trylock(receiver, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
            ErtsProcLocks need_locks;

	    if (receiver_state)
		state = *receiver_state;
	    else
		state = erts_atomic32_read_nob(&receiver->state);
	    if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT))
		goto exiting;

            need_locks = receiver_locks & ERTS_PROC_LOCKS_HIGHER_THAN(ERTS_PROC_LOCK_MSGQ);
	    if (need_locks) {
		erts_proc_unlock(receiver, need_locks);
	    }
            need_locks |= ERTS_PROC_LOCK_MSGQ;
	    erts_proc_lock(receiver, need_locks);
	}
	locked_msgq = 1;
    }

    state = erts_atomic32_read_nob(&receiver->state);

    if (state & (ERTS_PSFLG_PENDING_EXIT|ERTS_PSFLG_EXITING)) {
    exiting:
	/* Drop message if receiver is exiting or has a pending exit... */
	if (locked_msgq)
	    erts_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);
	erts_cleanup_messages(first);
	return 0;
    }

    res = receiver->msg.len;
    if (receiver_locks & ERTS_PROC_LOCK_MAIN) {
	/*
	 * We move the 'in queue' to the 'private queue' and place
	 * the messages at the end of the 'private queue' in order
	 * to ensure that the 'in queue' doesn't contain
	 * references into the heap. By ensuring this,
	 * we don't need to include the 'in queue' in
	 * the root set when garbage collecting.
	 */
	res += receiver->msg_inq.len;
	ERTS_MSGQ_MV_INQ2PRIVQ(receiver);
        LINK_MESSAGE_PRIVQ(receiver, first, last, len);
    }
    else {
	LINK_MESSAGE(receiver, first, last, len);
    }

    if (IS_TRACED_FL(receiver, F_TRACE_RECEIVE)
        && (te = &erts_receive_tracing[erts_active_bp_ix()],
            te->on)) {

        ErtsMessage *msg = first;

#ifdef USE_VM_PROBES
        if (DTRACE_ENABLED(message_queued)) {
            DTRACE_CHARBUF(receiver_name, DTRACE_TERM_BUF_SIZE);
            Sint tok_label = 0;
            Sint tok_lastcnt = 0;
            Sint tok_serial = 0;
            Eterm seq_trace_token = ERL_MESSAGE_TOKEN(msg);

            dtrace_proc_str(receiver, receiver_name);
            if (seq_trace_token != NIL && is_tuple(seq_trace_token)) {
                tok_label = signed_val(SEQ_TRACE_T_LABEL(seq_trace_token));
                tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(seq_trace_token));
                tok_serial = signed_val(SEQ_TRACE_T_SERIAL(seq_trace_token));
            }
            DTRACE6(message_queued,
                    receiver_name, size_object(ERL_MESSAGE_TERM(msg)),
                    receiver->msg.len,
                    tok_label, tok_lastcnt, tok_serial);
        }
#endif
        while (msg) {
            trace_receive(receiver, from, ERL_MESSAGE_TERM(msg), te);
            msg = msg->next;
        }

    }
    if (locked_msgq) {
	erts_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);
    }

    erts_proc_notify_new_message(receiver, receiver_locks);
    return res;
}
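The preamble that takes ERTS_PROC_LOCK_MSGQ is a classic lock-ordering idiom: a blocking acquire is only safe if no lock ranked above it is held, so the code tries a non-blocking acquire first and, on EBUSY, releases the higher-ranked locks and reacquires everything in rank order (rechecking the exiting state, since it may have changed while unlocked). The same shape with two plain POSIX mutexes; the names are hypothetical:

#include <errno.h>
#include <pthread.h>

/* Two mutexes with a fixed rank: 'low' must always be acquired
 * before 'high'. */
static pthread_mutex_t low  = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t high = PTHREAD_MUTEX_INITIALIZER;

/* Called while holding 'high'.  Blocking on 'low' here could
 * deadlock against a thread taking the locks in rank order, so try
 * it first; on EBUSY back out of 'high' and retake both in order,
 * the same shape as the MSGQ code above. */
static void lock_low_while_holding_high(void)
{
    if (pthread_mutex_trylock(&low) == EBUSY) {
        pthread_mutex_unlock(&high);
        pthread_mutex_lock(&low);
        pthread_mutex_lock(&high);
    }
}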
Example #5
/*
 * Entry point called by the trace wrap functions in erl_bif_wrap.c
 *
 * The trace wrap functions are themselves called through the export
 * entries instead of the original BIF functions.
 */
Eterm
erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
{
    Eterm result;
    Eterm (*func)(Process*, Eterm*, BeamInstr*);
    Export* ep = bif_export[bif_index];
    Uint32 flags = 0, flags_meta = 0;
    ErtsTracer meta_tracer = erts_tracer_nil;
    int applying = (I == &(ep->code[3])); /* Yup, the apply code for a bif
					   * is actually in the
					   * export entry */
    BeamInstr *cp = p->cp;
    GenericBp* g;
    GenericBpData* bp = NULL;
    Uint bp_flags = 0;

    ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);

    g = (GenericBp *) ep->fake_op_func_info_for_hipe[1];
    if (g) {
	bp = &g->data[erts_active_bp_ix()];
	bp_flags = bp->flags;
    }

    /*
     * Make the continuation pointer valid; it is not set during direct
     * BIF calls, but it is correct during apply of a BIF.
     */
    if (!applying) {
	p->cp = I;
    }
    if (bp_flags & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE) &&
	IS_TRACED_FL(p, F_TRACE_CALLS)) {
	int local = !!(bp_flags & ERTS_BPF_LOCAL_TRACE);
	flags = erts_call_trace(p, ep->code, bp->local_ms, args,
				local, &ERTS_TRACER(p));
    }
    if (bp_flags & ERTS_BPF_META_TRACE) {
	ErtsTracer old_tracer;

        meta_tracer = erts_smp_atomic_read_nob(&bp->meta_tracer->tracer);
        old_tracer = meta_tracer;
	flags_meta = erts_call_trace(p, ep->code, bp->meta_ms, args,
				     0, &meta_tracer);

	if (!ERTS_TRACER_COMPARE(old_tracer, meta_tracer)) {
            ErtsTracer new_tracer = erts_tracer_nil;
            erts_tracer_update(&new_tracer, meta_tracer);
	    if (old_tracer == erts_smp_atomic_cmpxchg_acqb(
                    &bp->meta_tracer->tracer,
                    (erts_aint_t)new_tracer,
                    (erts_aint_t)old_tracer)) {
                ERTS_TRACER_CLEAR(&old_tracer);
            } else {
                ERTS_TRACER_CLEAR(&new_tracer);
            }
	}
    }
    if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE &&
	IS_TRACED_FL(p, F_TRACE_CALLS)) {
	BeamInstr *pc = (BeamInstr *)ep->code+3;
	erts_trace_time_call(p, pc, bp->time);
    }

    /* Restore original continuation pointer (if changed). */
    p->cp = cp;

    func = bif_table[bif_index].f;

    result = func(p, args, I);

    if (applying && (flags & MATCH_SET_RETURN_TO_TRACE)) {
	BeamInstr i_return_trace      = beam_return_trace[0];
	BeamInstr i_return_to_trace   = beam_return_to_trace[0];
	BeamInstr i_return_time_trace = beam_return_time_trace[0];
	Eterm *cpp;
	/* Maybe advance cp to skip trace stack frames */
	for (cpp = p->stop;  ;  cp = cp_val(*cpp++)) {
	    if (*cp == i_return_trace) {
		/* Skip stack frame variables */
		while (is_not_CP(*cpp)) cpp++;
		cpp += 2; /* Skip return_trace parameters */
	    } else if (*cp == i_return_time_trace) {
		/* Skip stack frame variables */
		while (is_not_CP(*cpp)) cpp++;
		cpp += 1; /* Skip return_time_trace parameters */
	    } else if (*cp == i_return_to_trace) {
		/* A return_to trace message is going to be generated
		 * by normal means, so we do not have to generate one here.
		 */
		cp = NULL;
		break;
	    } else break;
	}
    }

    /* Try to get these in the order
     * they usually appear in normal code... */
    if (is_non_value(result)) {
	Uint reason = p->freason;
	if (reason != TRAP) {
	    Eterm class;
	    Eterm value = p->fvalue;
	    /* Expand error value like in handle_error() */
	    if (reason & EXF_ARGLIST) {
		Eterm *tp;
		ASSERT(is_tuple(value));
		tp = tuple_val(value);
		value = tp[1];
	    }
	    if ((reason & EXF_THROWN) && (p->catches <= 0)) {
                Eterm *hp = HAlloc(p, 3);
		value = TUPLE2(hp, am_nocatch, value);
		reason = EXC_ERROR;
	    }
	    /* Note: expand_error_value() could theoretically
	     * allocate on the heap, but not for any error
	     * returned by a BIF, and it would do no harm,
	     * just be annoying.
	     */
	    value = expand_error_value(p, reason, value);
	    class = exception_tag[GET_EXC_CLASS(reason)];

	    if (flags_meta & MATCH_SET_EXCEPTION_TRACE) {
		erts_trace_exception(p, ep->code, class, value,
				     &meta_tracer);
	    }
	    if (flags & MATCH_SET_EXCEPTION_TRACE) {
		erts_trace_exception(p, ep->code, class, value,
				     &ERTS_TRACER(p));
	    }
Example #6
BeamInstr
erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg)
{
    GenericBp* g;
    GenericBpData* bp;
    Uint bp_flags;
    ErtsBpIndex ix = erts_active_bp_ix();

    g = (GenericBp *) I[-4];
    bp = &g->data[ix];
    bp_flags = bp->flags;
    ASSERT((bp_flags & ~ERTS_BPF_ALL) == 0);
    if (bp_flags & (ERTS_BPF_LOCAL_TRACE|
		    ERTS_BPF_GLOBAL_TRACE|
		    ERTS_BPF_TIME_TRACE_ACTIVE) &&
	!IS_TRACED_FL(c_p, F_TRACE_CALLS)) {
	bp_flags &= ~(ERTS_BPF_LOCAL_TRACE|
		      ERTS_BPF_GLOBAL_TRACE|
		      ERTS_BPF_TIME_TRACE|
		      ERTS_BPF_TIME_TRACE_ACTIVE);
	if (bp_flags == 0) {	/* Quick exit */
	    return g->orig_instr;
	}
    }

    if (bp_flags & ERTS_BPF_LOCAL_TRACE) {
	ASSERT((bp_flags & ERTS_BPF_GLOBAL_TRACE) == 0);
	(void) do_call_trace(c_p, I, reg, 1, bp->local_ms, erts_tracer_true);
    } else if (bp_flags & ERTS_BPF_GLOBAL_TRACE) {
	(void) do_call_trace(c_p, I, reg, 0, bp->local_ms, erts_tracer_true);
    }

    if (bp_flags & ERTS_BPF_META_TRACE) {
	ErtsTracer old_tracer, new_tracer;

	old_tracer = erts_smp_atomic_read_nob(&bp->meta_tracer->tracer);

	new_tracer = do_call_trace(c_p, I, reg, 1, bp->meta_ms, old_tracer);
	if (!ERTS_TRACER_COMPARE(new_tracer, old_tracer)) {
            if (old_tracer == erts_smp_atomic_cmpxchg_acqb(
                    &bp->meta_tracer->tracer,
                    (erts_aint_t)new_tracer,
                    (erts_aint_t)old_tracer)) {
                ERTS_TRACER_CLEAR(&old_tracer);
            } else {
                ERTS_TRACER_CLEAR(&new_tracer);
            }
	}
    }

    if (bp_flags & ERTS_BPF_COUNT_ACTIVE) {
	erts_smp_atomic_inc_nob(&bp->count->acount);
    }

    if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE) {
	Eterm w;
	erts_trace_time_call(c_p, I, bp->time);
	w = (BeamInstr) *c_p->cp;
	if (! (w == (BeamInstr) BeamOp(op_i_return_time_trace) ||
	       w == (BeamInstr) BeamOp(op_return_trace) ||
	       w == (BeamInstr) BeamOp(op_i_return_to_trace)) ) {
	    Eterm* E = c_p->stop;
	    ASSERT(c_p->htop <= E && E <= c_p->hend);
	    if (E - 2 < c_p->htop) {
		(void) erts_garbage_collect(c_p, 2, reg, I[-1]);
		ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
	    }
	    E = c_p->stop;

	    ASSERT(c_p->htop <= E && E <= c_p->hend);

	    E -= 2;
	    E[0] = make_cp(I);
	    E[1] = make_cp(c_p->cp);     /* original return address */
	    c_p->cp = beam_return_time_trace;
	    c_p->stop = E;
	}
    }

    if (bp_flags & ERTS_BPF_DEBUG) {
	return (BeamInstr) BeamOp(op_i_debug_breakpoint);
    } else {
	return g->orig_instr;
    }
}
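The meta-tracer update in the middle of this function (the same dance appears in erts_bif_trace) is a lose-cleans-up CAS: a single compare-and-swap publishes the new tracer, and exactly one reference is dropped no matter which side wins the race, so the count stays balanced. A minimal sketch in C11; the Tracer type and helper names are illustrative stand-ins, not the ERTS ones:

#include <stdatomic.h>
#include <stdlib.h>

/* Minimal refcounted stand-in for ErtsTracer. */
typedef struct {
    atomic_int refc;
} Tracer;

static void tracer_release(Tracer *t)
{
    if (atomic_fetch_sub(&t->refc, 1) == 1)
        free(t);
}

/* Publish new_t over old_t with one CAS.  Exactly one reference is
 * dropped either way: the winner releases the displaced old value,
 * a loser releases the copy it failed to install. */
static void update_tracer(_Atomic(Tracer *) *slot,
                          Tracer *old_t, Tracer *new_t)
{
    Tracer *expected = old_t;
    if (atomic_compare_exchange_strong(slot, &expected, new_t))
        tracer_release(old_t);
    else
        tracer_release(new_t);
}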
Example #7
static void
consolidate_bp_data(Module* modp, BeamInstr* pc, int local)
{
    GenericBp* g = (GenericBp *) pc[-4];
    GenericBpData* src;
    GenericBpData* dst;
    Uint flags;

    if (g == 0) {
	return;
    }

    src = &g->data[erts_active_bp_ix()];
    dst = &g->data[erts_staging_bp_ix()];

    /*
     * The contents of the staging area may be out of date.
     * Decrement all reference counts.
     */

    flags = dst->flags;
    if (flags & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE)) {
	MatchSetUnref(dst->local_ms);
    }
    if (flags & ERTS_BPF_META_TRACE) {
	bp_meta_unref(dst->meta_tracer);
	MatchSetUnref(dst->meta_ms);
    }
    if (flags & ERTS_BPF_COUNT) {
	bp_count_unref(dst->count);
    }
    if (flags & ERTS_BPF_TIME_TRACE) {
	bp_time_unref(dst->time);
    }

    /*
     * If all flags are zero, deallocate all breakpoint data.
     */

    flags = dst->flags = src->flags;
    if (flags == 0) {
	if (modp) {
	    if (local) {
		modp->curr.num_breakpoints--;
	    } else {
		modp->curr.num_traced_exports--;
	    }
	    ASSERT(modp->curr.num_breakpoints >= 0);
	    ASSERT(modp->curr.num_traced_exports >= 0);
	    ASSERT(*pc != (BeamInstr) BeamOp(op_i_generic_breakpoint));
	}
	pc[-4] = 0;
	Free(g);
	return;
    }

    /*
     * Copy the active data to the staging area (making it ready
     * for the next time it will be used).
     */

    if (flags & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE)) {
	dst->local_ms = src->local_ms;
	MatchSetRef(dst->local_ms);
    }
    if (flags & ERTS_BPF_META_TRACE) {
	dst->meta_tracer = src->meta_tracer;
	erts_refc_inc(&dst->meta_tracer->refc, 1);
	dst->meta_ms = src->meta_ms;
	MatchSetRef(dst->meta_ms);
    }
    if (flags & ERTS_BPF_COUNT) {
	dst->count = src->count;
	erts_refc_inc(&dst->count->refc, 1);
    }
    if (flags & ERTS_BPF_TIME_TRACE) {
	dst->time = src->time;
	erts_refc_inc(&dst->time->refc, 1);
	ASSERT(dst->time->hash);
    }
}
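Everything consolidate_bp_data does per flag is reference-count bookkeeping: release what the out-of-date staging slot still holds, then share the active slot's objects with one extra reference each, so both generations alias the same data until the next change. A hedged sketch of that per-flag move with a generic refcounted object (Ref, Slot and the helpers are illustrative):

#include <stdatomic.h>
#include <stdlib.h>

/* Generic refcounted object standing in for the match specs,
 * counters and time structures above. */
typedef struct {
    atomic_int refc;
} Ref;

static Ref *ref_take(Ref *r) { atomic_fetch_add(&r->refc, 1); return r; }
static void ref_drop(Ref *r) { if (atomic_fetch_sub(&r->refc, 1) == 1) free(r); }

typedef struct { Ref *ms; } Slot;

/* Make 'dst' (the stale staging slot) an alias of 'src' (the active
 * slot): drop what dst held, then share src's object with one more
 * reference taken on it. */
static void consolidate(Slot *dst, const Slot *src)
{
    if (dst->ms)
        ref_drop(dst->ms);
    dst->ms = src->ms ? ref_take(src->ms) : NULL;
}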
Example #8
/*
 * Entry point called by the trace wrap functions in erl_bif_wrap.c
 *
 * The trace wrap functions are themselves called through the export
 * entries instead of the original BIF functions.
 */
Eterm
erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
{
    Eterm result;
    Eterm (*func)(Process*, Eterm*, BeamInstr*);
    Export* ep = bif_export[bif_index];
    Uint32 flags = 0, flags_meta = 0;
    ErtsTracer meta_tracer = erts_tracer_nil;
    int applying = (I == ep->beam); /* Yup, the apply code for a bif
                                      * is actually in the
                                      * export entry */
    BeamInstr *cp = p->cp;
    GenericBp* g;
    GenericBpData* bp = NULL;
    Uint bp_flags = 0;

    ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);

    g = ep->info.u.gen_bp;
    if (g) {
	bp = &g->data[erts_active_bp_ix()];
	bp_flags = bp->flags;
    }

    /*
     * Make the continuation pointer valid; it is not set during direct
     * BIF calls, but it is correct during apply of a BIF.
     */
    if (!applying) {
	p->cp = I;
    }
    if (bp_flags & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE) &&
	IS_TRACED_FL(p, F_TRACE_CALLS)) {
	int local = !!(bp_flags & ERTS_BPF_LOCAL_TRACE);
	flags = erts_call_trace(p, &ep->info, bp->local_ms, args,
				local, &ERTS_TRACER(p));
    }
    if (bp_flags & ERTS_BPF_META_TRACE) {
	ErtsTracer old_tracer;

        meta_tracer = erts_smp_atomic_read_nob(&bp->meta_tracer->tracer);
        old_tracer = meta_tracer;
	flags_meta = erts_call_trace(p, &ep->info, bp->meta_ms, args,
				     0, &meta_tracer);

	if (!ERTS_TRACER_COMPARE(old_tracer, meta_tracer)) {
            ErtsTracer new_tracer = erts_tracer_nil;
            erts_tracer_update(&new_tracer, meta_tracer);
	    if (old_tracer == erts_smp_atomic_cmpxchg_acqb(
                    &bp->meta_tracer->tracer,
                    (erts_aint_t)new_tracer,
                    (erts_aint_t)old_tracer)) {
                ERTS_TRACER_CLEAR(&old_tracer);
            } else {
                ERTS_TRACER_CLEAR(&new_tracer);
            }
	}
    }
    if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE &&
	IS_TRACED_FL(p, F_TRACE_CALLS)) {
	erts_trace_time_call(p, &ep->info, bp->time);
    }

    /* Restore original continuation pointer (if changed). */
    p->cp = cp;

    func = bif_table[bif_index].f;

    result = func(p, args, I);

    if (erts_nif_export_check_save_trace(p, result,
					 applying, ep,
					 cp, flags,
					 flags_meta, I,
					 meta_tracer)) {
	/*
	 * erts_bif_trace_epilogue() will be called
	 * later when appropriate via the NIF export
	 * scheduling functionality...
	 */
	return result;
    }

    return erts_bif_trace_epilogue(p, result, applying, ep, cp,
				   flags, flags_meta, I,
				   meta_tracer);
}