Example #1
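Adds an async job to a worker queue using the lock-free thread queue API (`erts_thr_q_enqueue`). When the job belongs to an internal port, a reference is taken on the driver's DDLL handle so the driver stays loaded while the job is pending; with `USE_VM_PROBES`, a DTrace probe records the add (the queue length is unavailable at this point, hence `len = -1`).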
static ERTS_INLINE void async_add(ErtsAsync *a, ErtsAsyncQ* q)
{
#ifdef USE_VM_PROBES
    int len;
#endif

    if (is_internal_port(a->port)) {
#if ERTS_USE_ASYNC_READY_Q
	ErtsAsyncReadyQ *arq = async_ready_q(a->sched_id);
	a->q.prep_enq = erts_thr_q_prepare_enqueue(&arq->thr_q);
#endif
	/* make sure the driver will stay around */
	if (a->hndl)
	    erts_ddll_reference_referenced_driver(a->hndl);
    }

#if ERTS_ASYNC_PRINT_JOB
    erts_fprintf(stderr, "-> %ld\n", a->async_id);
#endif

    erts_thr_q_enqueue(&q->thr_q, a);
#ifdef USE_VM_PROBES
    if (DTRACE_ENABLED(aio_pool_add)) {
        DTRACE_CHARBUF(port_str, 16);

        erts_snprintf(port_str, sizeof(port_str), "%T", a->port);
        /* DTRACE TODO: Get the queue length from erts_thr_q_enqueue() ? */
        len = -1;
        DTRACE2(aio_pool_add, port_str, len);
    }
    gcc_optimizer_hack++;
#endif
}
Example #2
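An older, mutex-based variant of `async_add`: the job is linked onto the head of a doubly linked queue under `q->mtx`, and the worker's condition variable is signalled only when the queue was empty, since otherwise the worker is already awake. Here the driver is pinned with `driver_lock_driver()` rather than a DDLL reference.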
static void async_add(ErlAsync* a, AsyncQueue* q)
{
    /* XXX:PaN Is this still necessary when ports lock drivers? */
    if (is_internal_port(a->port)) {
        ERTS_LC_ASSERT(erts_drvportid2port(a->port));
        /* make sure the driver will stay around */
        driver_lock_driver(internal_port_index(a->port));
    }

    erts_mtx_lock(&q->mtx);

    if (q->len == 0) {
        q->head = a;
        q->tail = a;
        q->len = 1;
        erts_cnd_signal(&q->cv);
    }
    else { /* no need to signal (since the worker is working) */
        a->next = q->head;
        q->head->prev = a;
        q->head = a;
        q->len++;
    }
    erts_mtx_unlock(&q->mtx);
}
Example #3
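`erts_set_dist_entry_not_connected` unlinks a `DistEntry` from the visible or hidden list (selected by `DFLAG_PUBLISHED`), clears its connection state, and prepends it to the not-connected list, all under the dist table mutex. The assertions document the invariant that a connected entry's controlling id (`cid`) is an internal port.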
void
erts_set_dist_entry_not_connected(DistEntry *dep)
{
    ERTS_SMP_LC_ASSERT(erts_lc_is_dist_entry_locked(dep));
    erts_smp_mtx_lock(&erts_dist_table_mtx);

    ASSERT(dep != erts_this_dist_entry);
    ASSERT(is_internal_port(dep->cid));

    if(dep->flags & DFLAG_PUBLISHED) {
	if(dep->prev) {
	    ASSERT(is_in_de_list(dep, erts_visible_dist_entries));
	    dep->prev->next = dep->next;
	}
	else {
	    ASSERT(erts_visible_dist_entries == dep);
	    erts_visible_dist_entries = dep->next;
	}

	ASSERT(erts_no_of_visible_dist_entries > 0);
	erts_no_of_visible_dist_entries--;
    }
    else {
	if(dep->prev) {
	    ASSERT(is_in_de_list(dep, erts_hidden_dist_entries));
	    dep->prev->next = dep->next;
	}
	else {
	    ASSERT(erts_hidden_dist_entries == dep);
	    erts_hidden_dist_entries = dep->next;
	}

	ASSERT(erts_no_of_hidden_dist_entries > 0);
	erts_no_of_hidden_dist_entries--;
    }

    if(dep->next)
	dep->next->prev = dep->prev;

    dep->status &= ~ERTS_DE_SFLG_CONNECTED;
    dep->flags = 0;
    dep->prev = NULL;
    dep->cid = NIL;

    dep->next = erts_not_connected_dist_entries;
    if(erts_not_connected_dist_entries) {
	ASSERT(erts_not_connected_dist_entries->prev == NULL);
	erts_not_connected_dist_entries->prev = dep;
    }
    erts_not_connected_dist_entries = dep;
    erts_no_of_not_connected_dist_entries++;
    erts_smp_mtx_unlock(&erts_dist_table_mtx);
}
Example #4
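The inverse of Example #3: `erts_set_dist_entry_connected` removes the entry from the not-connected list, records the controlling internal port and the distribution flags, advances the masked connection id, and links the entry into the visible or hidden list depending on `DFLAG_PUBLISHED`.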
void
erts_set_dist_entry_connected(DistEntry *dep, Eterm cid, Uint flags)
{
    ERTS_SMP_LC_ASSERT(erts_lc_is_de_rwlocked(dep));
    erts_smp_rwmtx_rwlock(&erts_dist_table_rwmtx);

    ASSERT(dep != erts_this_dist_entry);
    ASSERT(is_nil(dep->cid));
    ASSERT(is_internal_port(cid));

    if(dep->prev) {
	ASSERT(is_in_de_list(dep, erts_not_connected_dist_entries));
	dep->prev->next = dep->next;
    }
    else {
	ASSERT(erts_not_connected_dist_entries == dep);
	erts_not_connected_dist_entries = dep->next;
    }

    if(dep->next)
	dep->next->prev = dep->prev;

    ASSERT(erts_no_of_not_connected_dist_entries > 0);
    erts_no_of_not_connected_dist_entries--;

    dep->status |= ERTS_DE_SFLG_CONNECTED;
    dep->flags = flags;
    dep->cid = cid;
    dep->connection_id++;
    dep->connection_id &= ERTS_DIST_EXT_CON_ID_MASK;
    dep->prev = NULL;

    if(flags & DFLAG_PUBLISHED) {
	dep->next = erts_visible_dist_entries;
	if(erts_visible_dist_entries) {
	    ASSERT(erts_visible_dist_entries->prev == NULL);
	    erts_visible_dist_entries->prev = dep;
	}
	erts_visible_dist_entries = dep;
	erts_no_of_visible_dist_entries++;
    }
    else {
	dep->next = erts_hidden_dist_entries;
	if(erts_hidden_dist_entries) {
	    ASSERT(erts_hidden_dist_entries->prev == NULL);
	    erts_hidden_dist_entries->prev = dep;
	}
	erts_hidden_dist_entries = dep;
	erts_no_of_hidden_dist_entries++;
    }
    erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx);
}
Example #5
File: register.c  Project: hawk/otp
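`erts_whereis_name_to_id` resolves a registered name to a process or port id. It takes the registry read lock (restoring the caller's main lock if it had to be released to avoid deadlock), walks the hash bucket chain inline for speed, and returns `am_undefined` if the name is not registered; the final assertion documents the three possible results.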
Eterm
erts_whereis_name_to_id(Process *c_p, Eterm name)
{
    Eterm res = am_undefined;
    HashValue hval;
    int ix;
    HashBucket* b;
#ifdef ERTS_SMP
    ErtsProcLocks c_p_locks = c_p ? ERTS_PROC_LOCK_MAIN : 0;

#ifdef ERTS_ENABLE_LOCK_CHECK
    if (c_p) ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(c_p);
#endif

    reg_safe_read_lock(c_p, &c_p_locks);
    if (c_p && !c_p_locks)
        erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
#endif

    hval = REG_HASH(name);
    ix = hval % process_reg.size;
    b = process_reg.bucket[ix];

    /*
     * Note: We have inlined the code from hash.c for speed.
     */
	
    while (b) {
	RegProc* rp = (RegProc *) b;
	if (rp->name == name) {
	    /*
	     * SMP NOTE: No need to lock registered entity since it cannot
	     * be removed without acquiring write reg lock and id on entity
	     * is read only.
	     */
	    if (rp->p)
		res = rp->p->common.id;
	    else if (rp->pt)
		res = rp->pt->common.id;
	    break;
	}
	b = b->next;
    }

    reg_read_unlock();

    ASSERT(is_internal_pid(res) || is_internal_port(res) || res==am_undefined);

    return res;
}
Example #6
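The BIF underlying `erlang:port_info/2`: an internal port or atom is looked up with `sig_lookup_port`, an external port identifier created on the local node yields `undefined`, and anything else is `badarg`. The result of `erts_port_info()` is then mapped to the BIF return value; a scheduled (asynchronous) operation returns a reference instead.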
BIF_RETTYPE erts_internal_port_info_2(BIF_ALIST_2)
{
    Eterm retval;
    Port* prt;

    if (is_internal_port(BIF_ARG_1) || is_atom(BIF_ARG_1)) {
	prt = sig_lookup_port(BIF_P, BIF_ARG_1);
	if (!prt)
	    BIF_RET(am_undefined);
    }
    else if (is_external_port(BIF_ARG_1)) {
	if (external_port_dist_entry(BIF_ARG_1) == erts_this_dist_entry)
	    BIF_RET(am_undefined);
	else
	    BIF_RET(am_badarg);
    }
    else {
	BIF_RET(am_badarg);
    }

    switch (erts_port_info(BIF_P, prt, BIF_ARG_2, &retval)) {
    case ERTS_PORT_OP_CALLER_EXIT:
    case ERTS_PORT_OP_BADARG:
	BIF_RET(am_badarg);
    case ERTS_PORT_OP_DROPPED:
	BIF_RET(am_undefined);
    case ERTS_PORT_OP_SCHEDULED:
	ASSERT(is_internal_ordinary_ref(retval));
	BIF_RET(retval);
    case ERTS_PORT_OP_DONE:
	ASSERT(is_not_internal_ref(retval));
	BIF_RET(retval);
    default:
	ERTS_INTERNAL_ERROR("Unexpected erts_port_info() result");
	BIF_RET(am_internal_error);
    }
}
Example #7
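`erts_port_task_schedule` enqueues a task of the given type on a port's task queue, creating the queue on demand and, if the port is neither queued nor executing, enqueueing the port itself on a run queue (possibly emigrating it to another run queue under SMP). Input, output, and event tasks are also counted as outstanding I/O tasks for the schedulers.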
int
erts_port_task_schedule(Eterm id,
			ErtsPortTaskHandle *pthp,
			ErtsPortTaskType type,
			ErlDrvEvent event,
			ErlDrvEventData event_data)
{
    ErtsRunQueue *runq;
    Port *pp;
    ErtsPortTask *ptp;
    int enq_port = 0;

    /*
     * NOTE:	We might not have the port lock here. We are only
     *		allowed to access the 'sched', 'tab_status',
     *          and 'id' fields of the port struct while
     *          tasks_lock is held.
     */

    if (pthp && erts_port_task_is_scheduled(pthp)) {
	ASSERT(0);
	erts_port_task_abort(id, pthp);
    }

    ptp = port_task_alloc();

    ASSERT(is_internal_port(id));
    pp = &erts_port[internal_port_index(id)];
    runq = erts_port_runq(pp);

    if (!runq || ERTS_PORT_TASK_INVALID_PORT(pp, id)) {
	if (runq)
	    erts_smp_runq_unlock(runq);
	return -1;
    }

    ASSERT(!erts_port_task_is_scheduled(pthp));

    ERTS_PT_CHK_PRES_PORTQ(runq, pp);

    if (!pp->sched.taskq) {
	pp->sched.taskq = port_taskq_init(port_taskq_alloc(), pp);
	enq_port = !pp->sched.exe_taskq;
    }

#ifdef ERTS_SMP
    if (enq_port) {
	ErtsRunQueue *xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL);
	if (xrunq) {
	    /* Port emigrated ... */
	    erts_smp_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq);
	    erts_smp_runq_unlock(runq);
	    runq = xrunq;
	}
    }
#endif

    ASSERT(!enq_port || !(runq->flags & ERTS_RUNQ_FLG_SUSPENDED));

    ASSERT(pp->sched.taskq);
    ASSERT(ptp);

    ptp->type = type;
    ptp->event = event;
    ptp->event_data = event_data;

    set_handle(ptp, pthp);

    switch (type) {
    case ERTS_PORT_TASK_FREE:
	erl_exit(ERTS_ABORT_EXIT,
		 "erts_port_task_schedule(): Cannot schedule free task\n");
	break;
    case ERTS_PORT_TASK_INPUT:
    case ERTS_PORT_TASK_OUTPUT:
    case ERTS_PORT_TASK_EVENT:
	erts_smp_atomic_inc_relb(&erts_port_task_outstanding_io_tasks);
	/* Fall through... */
    default:
	enqueue_task(pp->sched.taskq, ptp);
	break;
    }

#ifndef ERTS_SMP
    /*
     * When (!enq_port && !pp->sched.exe_taskq) is true in the smp case,
     * the port might not be in the run queue. If this is the case, another
     * thread is in the process of enqueueing the port. This very seldom
     * occurs, but it does occur and is a valid scenario. Debug info showing this
     * enqueue in progress must be introduced before we can enable (modified
     * versions of these) assertions in the smp case again.
     */
#if defined(HARD_DEBUG)
    if (pp->sched.exe_taskq || enq_port)
	ERTS_PT_CHK_NOT_IN_PORTQ(runq, pp);
    else
	ERTS_PT_CHK_IN_PORTQ(runq, pp);
#elif defined(DEBUG)
    if (!enq_port && !pp->sched.exe_taskq) {
	/* We should be in port run q */
	ASSERT(pp->sched.prev || runq->ports.start == pp);
    }
#endif
#endif

    if (!enq_port) {
	ERTS_PT_CHK_PRES_PORTQ(runq, pp);
	erts_smp_runq_unlock(runq);
    }
    else {
	enqueue_port(runq, pp);
	ERTS_PT_CHK_PRES_PORTQ(runq, pp);
	    
	if (erts_system_profile_flags.runnable_ports) {
	    profile_runnable_port(pp, am_active);
	}

	erts_smp_runq_unlock(runq);

	erts_smp_notify_inc_runq(runq);
    }
    return 0;
}
Example #8
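`reference_table_term` builds the nested term `{{node_references, NodeList}, {dist_references, DistList}}` describing which processes, ports, ETS tables, binaries, timers, and dist entries refer to each node. It follows the usual two-pass `erts_bld_*` convention: one call with `szp` set to compute the heap size, a second with `hpp` set to actually build the term.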
static Eterm
reference_table_term(Uint **hpp, Uint *szp)
{
#undef  MK_2TUP
#undef  MK_3TUP
#undef  MK_CONS
#undef  MK_UINT
#define MK_2TUP(E1, E2)		erts_bld_tuple(hpp, szp, 2, (E1), (E2))
#define MK_3TUP(E1, E2, E3)	erts_bld_tuple(hpp, szp, 3, (E1), (E2), (E3))
#define MK_CONS(CAR, CDR)	erts_bld_cons(hpp, szp, (CAR), (CDR))
#define MK_UINT(UI)		erts_bld_uint(hpp, szp, (UI))
    int i;
    Eterm tup;
    Eterm tup2;
    Eterm nl = NIL;
    Eterm dl = NIL;
    Eterm nrid;

    for(i = 0; i < no_referred_nodes; i++) {
	NodeReferrer *nrp;
	Eterm nril = NIL;

	for(nrp = referred_nodes[i].referrers; nrp; nrp = nrp->next) {
	    Eterm nrl = NIL;
	    /* NodeReferenceList = [{ReferenceType,References}] */
	    if(nrp->heap_ref) {
		tup = MK_2TUP(AM_heap, MK_UINT(nrp->heap_ref));
		nrl = MK_CONS(tup, nrl);
	    }
	    if(nrp->link_ref) {
		tup = MK_2TUP(AM_link, MK_UINT(nrp->link_ref));
		nrl = MK_CONS(tup, nrl);
	    }
	    if(nrp->monitor_ref) {
		tup = MK_2TUP(AM_monitor, MK_UINT(nrp->monitor_ref));
		nrl = MK_CONS(tup, nrl);
	    }
	    if(nrp->ets_ref) {
		tup = MK_2TUP(AM_ets, MK_UINT(nrp->ets_ref));
		nrl = MK_CONS(tup, nrl);
	    }
	    if(nrp->bin_ref) {
		tup = MK_2TUP(AM_binary, MK_UINT(nrp->bin_ref));
		nrl = MK_CONS(tup, nrl);
	    }
	    if(nrp->timer_ref) {
		tup = MK_2TUP(AM_timer, MK_UINT(nrp->timer_ref));
		nrl = MK_CONS(tup, nrl);
	    }
	    if(nrp->system_ref) {
		tup = MK_2TUP(AM_system, MK_UINT(nrp->system_ref));
		nrl = MK_CONS(tup, nrl);
	    }

	    nrid = nrp->id;
	    if (!IS_CONST(nrp->id)) {

		Uint nrid_sz = size_object(nrp->id);
		if (szp)
		    *szp += nrid_sz;
		if (hpp)
		    nrid = copy_struct(nrp->id, nrid_sz, hpp, NULL);
	    }

	    if (is_internal_pid(nrid) || nrid == am_error_logger) {
		ASSERT(!nrp->ets_ref && !nrp->bin_ref && !nrp->system_ref);
		tup = MK_2TUP(AM_process, nrid);
	    }
	    else if (is_tuple(nrid)) {
		Eterm *t;
		ASSERT(!nrp->ets_ref && !nrp->bin_ref);
		t = tuple_val(nrid);
		ASSERT(2 == arityval(t[0]));
		tup = MK_2TUP(t[1], t[2]);
	    }
	    else if(is_internal_port(nrid)) {
		ASSERT(!nrp->heap_ref && !nrp->ets_ref && !nrp->bin_ref
		       && !nrp->timer_ref && !nrp->system_ref);
		tup = MK_2TUP(AM_port, nrid);
	    }
	    else if(nrp->ets_ref) {
		ASSERT(!nrp->heap_ref && !nrp->link_ref && 
		       !nrp->monitor_ref && !nrp->bin_ref
		       && !nrp->timer_ref && !nrp->system_ref);
		tup = MK_2TUP(AM_ets, nrid);
	    }
	    else if(nrp->bin_ref) {
		ASSERT(is_small(nrid) || is_big(nrid));
		ASSERT(!nrp->heap_ref && !nrp->ets_ref && !nrp->link_ref && 
		       !nrp->monitor_ref && !nrp->timer_ref
		       && !nrp->system_ref);
		tup = MK_2TUP(AM_match_spec, nrid);
	    }
	    else  {
		ASSERT(!nrp->heap_ref && !nrp->ets_ref && !nrp->bin_ref);
		ASSERT(is_atom(nrid));
		tup = MK_2TUP(AM_dist, nrid);
	    }
	    tup = MK_2TUP(tup, nrl);
	    /* NodeReferenceIdList = [{{ReferrerType, ID}, NodeReferenceList}] */
	    nril = MK_CONS(tup, nril);
	}

	/* NodeList = [{{Node, Creation}, Refc, NodeReferenceIdList}] */

	tup = MK_2TUP(referred_nodes[i].node->sysname,
		      MK_UINT(referred_nodes[i].node->creation));
	tup = MK_3TUP(tup, MK_UINT(erts_refc_read(&referred_nodes[i].node->refc, 1)), nril);
	nl = MK_CONS(tup, nl);
    }

    for(i = 0; i < no_referred_dists; i++) {
	DistReferrer *drp;
	Eterm dril = NIL;
	for(drp = referred_dists[i].referrers; drp; drp = drp->next) {
	    Eterm drl = NIL;

	    /* DistReferenceList = [{ReferenceType,References}] */
	    if(drp->node_ref) {
		tup = MK_2TUP(AM_node, MK_UINT(drp->node_ref));
		drl = MK_CONS(tup, drl);
	    }
	    if(drp->ctrl_ref) {
		tup = MK_2TUP(AM_control, MK_UINT(drp->ctrl_ref));
		drl = MK_CONS(tup, drl);
	    }

	    if (is_internal_pid(drp->id)) {
		ASSERT(drp->ctrl_ref && !drp->node_ref);
		tup = MK_2TUP(AM_process, drp->id);
	    }
	    else if(is_internal_port(drp->id)) {
		ASSERT(drp->ctrl_ref && !drp->node_ref);
		tup = MK_2TUP(AM_port, drp->id);
	    }
	    else {
		ASSERT(!drp->ctrl_ref && drp->node_ref);
		ASSERT(is_atom(drp->id));
		tup = MK_2TUP(drp->id, MK_UINT(drp->creation));
		tup = MK_2TUP(AM_node, tup);
	    }

	    tup = MK_2TUP(tup, drl);

	    /* DistReferenceIdList =
	       [{{ReferrerType, ID}, DistReferenceList}] */
	    dril = MK_CONS(tup, dril);

	}

	/* DistList = [{Dist, Refc, ReferenceIdList}] */
	tup = MK_3TUP(referred_dists[i].dist->sysname,
		      MK_UINT(erts_refc_read(&referred_dists[i].dist->refc, 1)),
		      dril);
	dl = MK_CONS(tup, dl);
    }

    /* {{node_references, NodeList}, {dist_references, DistList}} */

    tup = MK_2TUP(AM_node_references, nl);
    tup2 = MK_2TUP(AM_dist_references, dl);
    tup = MK_2TUP(tup, tup2);

    return tup;
#undef  MK_2TUP
#undef  MK_3TUP
#undef  MK_CONS
#undef  MK_UINT

}
Example #9
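This appears to be an earlier revision of the `erts_port_task_schedule` shown in Example #7: the atomics lack memory-order suffixes (`erts_smp_atomic_set` rather than `_set_nob`), the run queue is unlocked once at the end, and the debug consistency checks are still compiled into the SMP build.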
int
erts_port_task_schedule(Eterm id,
			ErtsPortTaskHandle *pthp,
			ErtsPortTaskType type,
			ErlDrvEvent event,
			ErlDrvEventData event_data)
{
    ErtsRunQueue *runq;
    Port *pp;
    ErtsPortTask *ptp;
    int enq_port = 0;

    /*
     * NOTE:	We might not have the port lock here. We are only
     *		allowed to access the 'sched', 'tab_status',
     *          and 'id' fields of the port struct while
     *          tasks_lock is held.
     */

    if (pthp && erts_port_task_is_scheduled(pthp)) {
	ASSERT(0);
	erts_port_task_abort(id, pthp);
    }

    ptp = port_task_alloc();

    ASSERT(is_internal_port(id));
    pp = &erts_port[internal_port_index(id)];
    runq = erts_port_runq(pp);

    if (!runq || ERTS_PORT_TASK_INVALID_PORT(pp, id)) {
	if (runq)
	    erts_smp_runq_unlock(runq);
	return -1;
    }

    ASSERT(!erts_port_task_is_scheduled(pthp));

    ERTS_PT_CHK_PRES_PORTQ(runq, pp);
#ifdef DEBUG
    /* If we have a taskq and not executing, we should be in port run q */  
    if (pp->sched.taskq && !pp->sched.exe_taskq) {
	ASSERT(pp->sched.prev || runq->ports.start == pp);
    }
#endif

    if (!pp->sched.taskq) {
	pp->sched.taskq = port_taskq_init(port_taskq_alloc(), pp);
	enq_port = !pp->sched.exe_taskq;
    }

#ifdef ERTS_SMP
    if (enq_port) {
	ErtsRunQueue *xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL);
	if (xrunq) {
	    /* Port emigrated ... */
	    erts_smp_atomic_set(&pp->run_queue, (long) xrunq);
	    erts_smp_runq_unlock(runq);
	    runq = xrunq;
	}
    }
#endif

    ASSERT(!(runq->flags & ERTS_RUNQ_FLG_SUSPENDED));

    ASSERT(pp->sched.taskq);
    ASSERT(ptp);

    ptp->type = type;
    ptp->event = event;
    ptp->event_data = event_data;

    set_handle(ptp, pthp);

    switch (type) {
    case ERTS_PORT_TASK_FREE:
	erl_exit(ERTS_ABORT_EXIT,
		 "erts_port_task_schedule(): Cannot schedule free task\n");
	break;
    case ERTS_PORT_TASK_INPUT:
    case ERTS_PORT_TASK_OUTPUT:
    case ERTS_PORT_TASK_EVENT:
	erts_smp_atomic_inc(&erts_port_task_outstanding_io_tasks);
	/* Fall through... */
    default:
	enqueue_task(pp->sched.taskq, ptp);
	break;
    }

#if defined(HARD_DEBUG)
    if (pp->sched.exe_taskq || enq_port)
	ERTS_PT_CHK_NOT_IN_PORTQ(runq, pp);
    else
	ERTS_PT_CHK_IN_PORTQ(runq, pp);
#elif defined(DEBUG)
    if (!enq_port && !pp->sched.exe_taskq) {
	/* We should be in port run q */
	ASSERT(pp->sched.prev || runq->ports.start == pp);
    }
#endif

    if (!enq_port) {
	ERTS_PT_CHK_PRES_PORTQ(runq, pp);
    }
    else {
	enqueue_port(runq, pp);
	ERTS_PT_CHK_PRES_PORTQ(runq, pp);
	    
	if (erts_system_profile_flags.runnable_ports) {
	    profile_runnable_port(pp, am_active);
	}

	erts_smp_notify_inc_runq(runq);
    }
    erts_smp_runq_unlock(runq);
    return 0;
}
Example #10
File: register.c  Project: hawk/otp
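`erts_register_name` registers a process or port under an atom name, returning 1 on success and 0 on any conflict. When registering something other than the caller, the caller's main lock is released before the registry write lock is taken (to keep lock ordering safe) and re-acquired afterwards; successful registrations are traced when `F_TRACE_PROCS` or `F_TRACE_PORTS` is set.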
/*
 * Register a process or port (can't be registered twice).
 * Returns 0 if name, process or port is already registered.
 *
 * When smp support is enabled:
 *   * Assumes that main lock is locked (and only main lock)
 *     on c_p.
 *
 */
int erts_register_name(Process *c_p, Eterm name, Eterm id)
{
    int res = 0;
    Process *proc = NULL;
    Port *port = NULL;
    RegProc r, *rp;
    ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(c_p);

    if (is_not_atom(name) || name == am_undefined)
	return res;

    if (c_p->common.id == id) /* A very common case I think... */
	proc = c_p;
    else {
	if (is_not_internal_pid(id) && is_not_internal_port(id))
	    return res;
	erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
	if (is_internal_port(id)) {
	    port = erts_id2port(id);
	    if (!port)
		goto done;
	}
    }

#ifdef ERTS_SMP
    {
	ErtsProcLocks proc_locks = proc ? ERTS_PROC_LOCK_MAIN : 0;
	reg_safe_write_lock(proc, &proc_locks);

	if (proc && !proc_locks)
	    erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
    }
#endif

    if (is_internal_pid(id)) {
	if (!proc)
	    proc = erts_pid2proc(NULL, 0, id, ERTS_PROC_LOCK_MAIN);
	r.p = proc;
	if (!proc)
	    goto done;
	if (proc->common.u.alive.reg)
	    goto done;
	r.pt = NULL;
    }
    else {
	ASSERT(!INVALID_PORT(port, id));
	ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(port));
	r.pt = port;
	if (r.pt->common.u.alive.reg)
	    goto done;
	r.p = NULL;
    }

    r.name = name;
    
    rp = (RegProc*) hash_put(&process_reg, (void*) &r);
    if (proc && rp->p == proc) {
	if (IS_TRACED_FL(proc, F_TRACE_PROCS)) {
	    trace_proc(proc, ERTS_PROC_LOCK_MAIN,
                       proc, am_register, name);
	}
	proc->common.u.alive.reg = rp;
    }
    else if (port && rp->pt == port) {
    	if (IS_TRACED_FL(port, F_TRACE_PORTS)) {
		trace_port(port, am_register, name);
	}
	port->common.u.alive.reg = rp;
    }

    if ((rp->p && rp->p->common.id == id)
	|| (rp->pt && rp->pt->common.id == id)) {
	res = 1;
    }

 done:
    reg_write_unlock();
    if (port)
	erts_port_release(port);
    if (c_p != proc) {
	if (proc)
	    erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
	erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
    }
    return res;
}