Code Example #1
File: register.c Project: hawk/otp
BIF_RETTYPE registered_0(BIF_ALIST_0)
{
    int i;
    Eterm res;
    Uint need;
    Eterm* hp;
    HashBucket **bucket;
#ifdef ERTS_SMP
    ErtsProcLocks proc_locks = ERTS_PROC_LOCK_MAIN;

    ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(BIF_P);
    reg_safe_read_lock(BIF_P, &proc_locks);
    if (!proc_locks)
	erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
#endif

    bucket = process_reg.bucket;

    /* Work out how much heap we need (and maybe garbage collect) by
       scanning through the registered process table. */
    need = 0;
    for (i = 0; i < process_reg.size; i++) {
	HashBucket *b = bucket[i];
	while (b != NULL) {
	    need += 2;
	    b = b->next;
	}
    }

    if (need == 0) {
	reg_read_unlock();
	BIF_RET(NIL);
    }

    hp = HAlloc(BIF_P, need);

    /* Scan through again and make the list. */
    res = NIL;

    for (i = 0; i < process_reg.size; i++) {
	HashBucket *b = bucket[i];
	while (b != NULL) {
	    RegProc *reg = (RegProc *) b;

	    res = CONS(hp, reg->name, res);
	    hp += 2;
	    b = b->next;
	}
    }

    reg_read_unlock();

    BIF_RET(res);
}
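
A note on the pattern above: the BIF makes two passes over the registry while holding the read lock. The first pass only counts how many heap words the result needs (a cons cell is two words, hence need += 2); HAlloc then allocates once, and the second pass conses each registered name onto the front of the list. Holding the lock across both passes is what keeps the count from pass one valid in pass two. Below is a minimal, hypothetical sketch of the same count-then-build shape, with stand-in Node/Cell types instead of HashBucket/Eterm and malloc instead of HAlloc:

#include <stdlib.h>

typedef struct Node { struct Node *next; int value; } Node;  /* chain entry */
typedef struct Cell { int head; struct Cell *tail; } Cell;   /* cons cell  */

static Cell *collect(Node **bucket, size_t nbuckets)
{
    size_t need = 0, i;
    Node *n;
    Cell *cells, *res = NULL;

    /* Pass 1: count entries (mirrors the "need += 2" loop). */
    for (i = 0; i < nbuckets; i++)
        for (n = bucket[i]; n != NULL; n = n->next)
            need++;
    if (need == 0)
        return NULL;

    /* One allocation up front, like HAlloc(BIF_P, need). */
    cells = malloc(need * sizeof(Cell));
    if (cells == NULL)
        return NULL;

    /* Pass 2: cons each entry onto the front of the result. */
    for (i = 0; i < nbuckets; i++)
        for (n = bucket[i]; n != NULL; n = n->next) {
            cells->head = n->value;
            cells->tail = res;
            res = cells++;
        }
    return res;
}
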
Code Example #2
File: register.c Project: hawk/otp
Eterm
erts_whereis_name_to_id(Process *c_p, Eterm name)
{
    Eterm res = am_undefined;
    HashValue hval;
    int ix;
    HashBucket* b;
#ifdef ERTS_SMP
    ErtsProcLocks c_p_locks = c_p ? ERTS_PROC_LOCK_MAIN : 0;

#ifdef ERTS_ENABLE_LOCK_CHECK
    if (c_p) ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(c_p);
#endif

    reg_safe_read_lock(c_p, &c_p_locks);
    if (c_p && !c_p_locks)
        erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
#endif

    hval = REG_HASH(name);
    ix = hval % process_reg.size;
    b = process_reg.bucket[ix];

    /*
     * Note: We have inlined the code from hash.c for speed.
     */
	
    while (b) {
	RegProc* rp = (RegProc *) b;
	if (rp->name == name) {
	    /*
	     * SMP NOTE: No need to lock the registered entity, since it
	     * cannot be removed without acquiring the reg write lock, and
	     * the id on the entity is read-only.
	     */
	    if (rp->p)
		res = rp->p->common.id;
	    else if (rp->pt)
		res = rp->pt->common.id;
	    break;
	}
	b = b->next;
    }

    reg_read_unlock();

    ASSERT(is_internal_pid(res) || is_internal_port(res) || res==am_undefined);

    return res;
}
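
The lookup above is the standard chained-hash probe, inlined from hash.c: hash the name, reduce modulo the table size, then walk the bucket chain comparing keys. The cast from HashBucket* to RegProc* works because the bucket header is the first member of RegProc. A minimal sketch of the same probe, assuming a hypothetical Entry type and an identity hash:

#include <stddef.h>

typedef struct Entry {
    struct Entry *next;   /* bucket chain, like HashBucket.next */
    unsigned key;         /* like rp->name */
    int value;            /* like the registered pid/port id */
} Entry;

/* Return the value bound to key, or `missing` (playing the role of
 * am_undefined) when no entry matches. */
static int lookup(Entry **bucket, size_t nbuckets, unsigned key, int missing)
{
    Entry *e = bucket[key % nbuckets];   /* ix = hval % process_reg.size */
    while (e) {
        if (e->key == key)
            return e->value;
        e = e->next;
    }
    return missing;
}
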
Code Example #3
File: beam_bp.c Project: Duncaen/otp
/*
 * Entry point called by the trace wrap functions in erl_bif_wrap.c
 *
 * The trace wrap functions are themselves called through the export
 * entries instead of the original BIF functions.
 */
Eterm
erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
{
    Eterm result;
    Eterm (*func)(Process*, Eterm*, BeamInstr*);
    Export* ep = bif_export[bif_index];
    Uint32 flags = 0, flags_meta = 0;
    ErtsTracer meta_tracer = erts_tracer_nil;
    int applying = (I == &(ep->code[3])); /* Yup, the apply code for a bif
					   * is actually in the
					   * export entry */
    BeamInstr *cp = p->cp;
    GenericBp* g;
    GenericBpData* bp = NULL;
    Uint bp_flags = 0;

    ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);

    g = (GenericBp *) ep->fake_op_func_info_for_hipe[1];
    if (g) {
	bp = &g->data[erts_active_bp_ix()];
	bp_flags = bp->flags;
    }

    /*
     * Make the continuation pointer valid: it is not during direct
     * BIF calls, but it is already correct during apply of a BIF.
     */
    if (!applying) {
	p->cp = I;
    }
    if (bp_flags & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE) &&
	IS_TRACED_FL(p, F_TRACE_CALLS)) {
	int local = !!(bp_flags & ERTS_BPF_LOCAL_TRACE);
	flags = erts_call_trace(p, ep->code, bp->local_ms, args,
				local, &ERTS_TRACER(p));
    }
    if (bp_flags & ERTS_BPF_META_TRACE) {
	ErtsTracer old_tracer;

        meta_tracer = erts_smp_atomic_read_nob(&bp->meta_tracer->tracer);
        old_tracer = meta_tracer;
	flags_meta = erts_call_trace(p, ep->code, bp->meta_ms, args,
				     0, &meta_tracer);

	if (!ERTS_TRACER_COMPARE(old_tracer, meta_tracer)) {
            ErtsTracer new_tracer = erts_tracer_nil;
            erts_tracer_update(&new_tracer, meta_tracer);
	    if (old_tracer == erts_smp_atomic_cmpxchg_acqb(
                    &bp->meta_tracer->tracer,
                    (erts_aint_t)new_tracer,
                    (erts_aint_t)old_tracer)) {
                ERTS_TRACER_CLEAR(&old_tracer);
            } else {
                ERTS_TRACER_CLEAR(&new_tracer);
            }
	}
    }
    if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE &&
	IS_TRACED_FL(p, F_TRACE_CALLS)) {
	BeamInstr *pc = (BeamInstr *)ep->code+3;
	erts_trace_time_call(p, pc, bp->time);
    }

    /* Restore original continuation pointer (if changed). */
    p->cp = cp;

    func = bif_table[bif_index].f;

    result = func(p, args, I);

    if (applying && (flags & MATCH_SET_RETURN_TO_TRACE)) {
	BeamInstr i_return_trace      = beam_return_trace[0];
	BeamInstr i_return_to_trace   = beam_return_to_trace[0];
	BeamInstr i_return_time_trace = beam_return_time_trace[0];
	Eterm *cpp;
	/* Maybe advance cp to skip trace stack frames */
	for (cpp = p->stop;  ;  cp = cp_val(*cpp++)) {
	    if (*cp == i_return_trace) {
		/* Skip stack frame variables */
		while (is_not_CP(*cpp)) cpp++;
		cpp += 2; /* Skip return_trace parameters */
	    } else if (*cp == i_return_time_trace) {
		/* Skip stack frame variables */
		while (is_not_CP(*cpp)) cpp++;
		cpp += 1; /* Skip return_time_trace parameters */
	    } else if (*cp == i_return_to_trace) {
		/* A return_to trace message is going to be generated
		 * by normal means, so we do not have to.
		 */
		cp = NULL;
		break;
	    } else break;
	}
    }

    /* Try to get these in the order
     * they usually appear in normal code... */
    if (is_non_value(result)) {
	Uint reason = p->freason;
	if (reason != TRAP) {
	    Eterm class;
	    Eterm value = p->fvalue;
	    /* Expand error value like in handle_error() */
	    if (reason & EXF_ARGLIST) {
		Eterm *tp;
		ASSERT(is_tuple(value));
		tp = tuple_val(value);
		value = tp[1];
	    }
	    if ((reason & EXF_THROWN) && (p->catches <= 0)) {
                Eterm *hp = HAlloc(p, 3);
		value = TUPLE2(hp, am_nocatch, value);
		reason = EXC_ERROR;
	    }
	    /* Note: expand_error_value() could theoretically
	     * allocate on the heap, but not for any error
	     * returned by a BIF, and it would do no harm,
	     * just be annoying.
	     */
	    value = expand_error_value(p, reason, value);
	    class = exception_tag[GET_EXC_CLASS(reason)];

	    if (flags_meta & MATCH_SET_EXCEPTION_TRACE) {
		erts_trace_exception(p, ep->code, class, value,
				     &meta_tracer);
	    }
	    if (flags & MATCH_SET_EXCEPTION_TRACE) {
		erts_trace_exception(p, ep->code, class, value,
				     &ERTS_TRACER(p));
	    }
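
The listing above is truncated before the function's tail; Example #5 below shows a later revision of the same function in which that tail is factored out into erts_bif_trace_epilogue. One piece worth isolating is the meta-tracer update: when erts_call_trace changes the tracer, a fresh reference is installed with a compare-and-swap, and exactly one of the two references is released depending on who won the race. A minimal sketch of that pattern using C11 atomics, with a hypothetical refcounted Obj standing in for ErtsTracer:

#include <stdatomic.h>
#include <stdlib.h>

typedef struct Obj { atomic_int refc; } Obj;

static void obj_release(Obj *o)
{
    if (o && atomic_fetch_sub(&o->refc, 1) == 1)
        free(o);                /* last reference dropped */
}

/* Install new_obj in *slot only if it still holds old_obj. Exactly one
 * reference is released either way, so the slot always owns a single
 * count (compare ERTS_TRACER_CLEAR on old_tracer vs. new_tracer). */
static void install_if_unchanged(_Atomic(Obj *) *slot,
                                 Obj *old_obj, Obj *new_obj)
{
    Obj *expected = old_obj;
    if (atomic_compare_exchange_strong(slot, &expected, new_obj))
        obj_release(old_obj);   /* won the race: drop displaced ref */
    else
        obj_release(new_obj);   /* lost the race: drop unused copy  */
}
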
Code Example #4
File: register.c Project: hawk/otp
/*
 * Register a process or port (a name can't be registered twice).
 * Returns 0 if the name, process, or port is already registered.
 *
 * When SMP support is enabled:
 *   * Assumes that the main lock (and only the main lock)
 *     is held on c_p.
 */
int erts_register_name(Process *c_p, Eterm name, Eterm id)
{
    int res = 0;
    Process *proc = NULL;
    Port *port = NULL;
    RegProc r, *rp;
    ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(c_p);

    if (is_not_atom(name) || name == am_undefined)
	return res;

    if (c_p->common.id == id) /* A very common case I think... */
	proc = c_p;
    else {
	if (is_not_internal_pid(id) && is_not_internal_port(id))
	    return res;
	erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
	if (is_internal_port(id)) {
	    port = erts_id2port(id);
	    if (!port)
		goto done;
	}
    }

#ifdef ERTS_SMP
    {
	ErtsProcLocks proc_locks = proc ? ERTS_PROC_LOCK_MAIN : 0;
	reg_safe_write_lock(proc, &proc_locks);

	if (proc && !proc_locks)
	    erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
    }
#endif

    if (is_internal_pid(id)) {
	if (!proc)
	    proc = erts_pid2proc(NULL, 0, id, ERTS_PROC_LOCK_MAIN);
	r.p = proc;
	if (!proc)
	    goto done;
	if (proc->common.u.alive.reg)
	    goto done;
	r.pt = NULL;
    }
    else {
	ASSERT(!INVALID_PORT(port, id));
	ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(port));
	r.pt = port;
	if (r.pt->common.u.alive.reg)
	    goto done;
	r.p = NULL;
    }

    r.name = name;
    
    rp = (RegProc*) hash_put(&process_reg, (void*) &r);
    if (proc && rp->p == proc) {
	if (IS_TRACED_FL(proc, F_TRACE_PROCS)) {
	    trace_proc(proc, ERTS_PROC_LOCK_MAIN,
                       proc, am_register, name);
	}
	proc->common.u.alive.reg = rp;
    }
    else if (port && rp->pt == port) {
	if (IS_TRACED_FL(port, F_TRACE_PORTS)) {
	    trace_port(port, am_register, name);
	}
	port->common.u.alive.reg = rp;
    }

    if ((rp->p && rp->p->common.id == id)
	|| (rp->pt && rp->pt->common.id == id)) {
	res = 1;
    }

 done:
    reg_write_unlock();
    if (port)
	erts_port_release(port);
    if (c_p != proc) {
	if (proc)
	    erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
	erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
    }
    return res;
}
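
The #ifdef ERTS_SMP block in the middle is the interesting part: the caller already holds its own main process lock, but the registry write lock must not be blocked on while other locks are held, so reg_safe_write_lock drops the process lock on contention and reports (via proc_locks) that the caller has to reacquire it. A minimal sketch of that trylock-or-back-off discipline, with plain pthread mutexes standing in for the ERTS primitives (an assumption; the real registry lock is a specialized rwlock):

#include <pthread.h>

static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;

/* Take reg_lock while proc_lock is held, without risking deadlock:
 * try the fast path first; on contention, release proc_lock, block on
 * reg_lock, then reacquire proc_lock (mirroring reg_safe_write_lock
 * plus the caller's erts_smp_proc_lock fixup). */
static void lock_registry(pthread_mutex_t *proc_lock)
{
    if (pthread_mutex_trylock(&reg_lock) != 0) {
        pthread_mutex_unlock(proc_lock);
        pthread_mutex_lock(&reg_lock);
        pthread_mutex_lock(proc_lock);
    }
}

Blocking on reg_lock while still holding proc_lock could deadlock against a thread that already holds the registry lock and is waiting for the process lock, which is why the fallback releases first.
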
Code Example #5
File: beam_bp.c Project: josevalim/otp
/*
 * Entry point called by the trace wrap functions in erl_bif_wrap.c
 *
 * The trace wrap functions are themselves called through the export
 * entries instead of the original BIF functions.
 */
Eterm
erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
{
    Eterm result;
    Eterm (*func)(Process*, Eterm*, BeamInstr*);
    Export* ep = bif_export[bif_index];
    Uint32 flags = 0, flags_meta = 0;
    ErtsTracer meta_tracer = erts_tracer_nil;
    int applying = (I == ep->beam); /* Yup, the apply code for a bif
                                      * is actually in the
                                      * export entry */
    BeamInstr *cp = p->cp;
    GenericBp* g;
    GenericBpData* bp = NULL;
    Uint bp_flags = 0;

    ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);

    g = ep->info.u.gen_bp;
    if (g) {
	bp = &g->data[erts_active_bp_ix()];
	bp_flags = bp->flags;
    }

    /*
     * Make the continuation pointer valid: it is not during direct
     * BIF calls, but it is already correct during apply of a BIF.
     */
    if (!applying) {
	p->cp = I;
    }
    if (bp_flags & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE) &&
	IS_TRACED_FL(p, F_TRACE_CALLS)) {
	int local = !!(bp_flags & ERTS_BPF_LOCAL_TRACE);
	flags = erts_call_trace(p, &ep->info, bp->local_ms, args,
				local, &ERTS_TRACER(p));
    }
    if (bp_flags & ERTS_BPF_META_TRACE) {
	ErtsTracer old_tracer;

        meta_tracer = erts_smp_atomic_read_nob(&bp->meta_tracer->tracer);
        old_tracer = meta_tracer;
	flags_meta = erts_call_trace(p, &ep->info, bp->meta_ms, args,
				     0, &meta_tracer);

	if (!ERTS_TRACER_COMPARE(old_tracer, meta_tracer)) {
            ErtsTracer new_tracer = erts_tracer_nil;
            erts_tracer_update(&new_tracer, meta_tracer);
	    if (old_tracer == erts_smp_atomic_cmpxchg_acqb(
                    &bp->meta_tracer->tracer,
                    (erts_aint_t)new_tracer,
                    (erts_aint_t)old_tracer)) {
                ERTS_TRACER_CLEAR(&old_tracer);
            } else {
                ERTS_TRACER_CLEAR(&new_tracer);
            }
	}
    }
    if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE &&
	IS_TRACED_FL(p, F_TRACE_CALLS)) {
	erts_trace_time_call(p, &ep->info, bp->time);
    }

    /* Restore original continuation pointer (if changed). */
    p->cp = cp;

    func = bif_table[bif_index].f;

    result = func(p, args, I);

    if (erts_nif_export_check_save_trace(p, result,
					 applying, ep,
					 cp, flags,
					 flags_meta, I,
					 meta_tracer)) {
	/*
	 * erts_bif_trace_epilogue() will be called
	 * later when appropriate via the NIF export
	 * scheduling functionality...
	 */
	return result;
    }

    return erts_bif_trace_epilogue(p, result, applying, ep, cp,
				   flags, flags_meta, I,
				   meta_tracer);
}
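
This is the same entry point as Example #3 after a refactor: ep->code[3] became ep->beam, the breakpoint pointer moved from the fake_op_func_info_for_hipe slot to ep->info.u.gen_bp, and the formerly inline tail now lives in erts_bif_trace_epilogue so a rescheduled call (the erts_nif_export_check_save_trace case) can run it later. A minimal sketch of that run-now-or-defer shape, with hypothetical Ctx/Epilogue types:

#include <stdbool.h>

typedef struct Ctx Ctx;
typedef int (*Epilogue)(Ctx *ctx, int result);

struct Ctx {
    bool must_defer;          /* like the check-save-trace test */
    Epilogue saved_epilogue;  /* to run when the call resumes   */
};

/* Either run the epilogue immediately (the common path) or stash it
 * for later, as the NIF-export scheduling machinery does above. */
static int finish_call(Ctx *ctx, int result, Epilogue epilogue)
{
    if (ctx->must_defer) {
        ctx->saved_epilogue = epilogue;
        return result;
    }
    return epilogue(ctx, result);
}
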