Example #1
static void
release_update_permission(int release_updater)
{
    erts_mtx_lock(&update_table_permission_mtx);
    ASSERT(updater_process != NULL);

    if (release_updater) {
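        /* Resume the process that currently holds the update permission */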
        erts_proc_lock(updater_process, ERTS_PROC_LOCK_STATUS);
        if (!ERTS_PROC_IS_EXITING(updater_process)) {
            erts_resume(updater_process, ERTS_PROC_LOCK_STATUS);
        }
        erts_proc_unlock(updater_process, ERTS_PROC_LOCK_STATUS);
    }
    updater_process = NULL;

    while (update_queue != NULL) { /* Unleash the entire herd */
	struct update_queue_item* qitem = update_queue;
	erts_proc_lock(qitem->p, ERTS_PROC_LOCK_STATUS);
	if (!ERTS_PROC_IS_EXITING(qitem->p)) {
	    erts_resume(qitem->p, ERTS_PROC_LOCK_STATUS);
	}
	erts_proc_unlock(qitem->p, ERTS_PROC_LOCK_STATUS);
	update_queue = qitem->next;
	erts_proc_dec_refc(qitem->p);
	erts_free(ERTS_ALC_T_PERSISTENT_LOCK_Q, qitem);
    }
    erts_mtx_unlock(&update_table_permission_mtx);
}
Example #2
static void
reply_msacc(void *vmsaccrp)
{
    ErtsMsAcc *msacc = ERTS_MSACC_TSD_GET();
    ErtsMSAccReq *msaccrp = (ErtsMSAccReq *) vmsaccrp;

    ASSERT(!msacc || !msacc->unmanaged);

    if (msaccrp->action == ERTS_MSACC_ENABLE && !msacc) {
        msacc = get_msacc();

        msacc->perf_counter = erts_sys_perf_counter();

        msacc->state = ERTS_MSACC_STATE_OTHER;

        ERTS_MSACC_TSD_SET(msacc);

    } else if (msaccrp->action == ERTS_MSACC_DISABLE && msacc) {
        ERTS_MSACC_TSD_SET(NULL);
    } else if (msaccrp->action == ERTS_MSACC_RESET) {
        msacc = msacc ? msacc : get_msacc();
        erts_msacc_reset(msacc);
    } else if (msaccrp->action == ERTS_MSACC_GATHER && !msacc) {
        msacc = get_msacc();
    }

    ASSERT(!msacc || !msacc->unmanaged);

    send_reply(msacc, msaccrp);

    erts_proc_dec_refc(msaccrp->proc);

    if (erts_smp_atomic32_dec_read_nob(&msaccrp->refc) == 0)
      erts_free(ERTS_ALC_T_MSACC, vmsaccrp);
}
Example #3
File: break.c  Project: 3112517927/otp
static void
process_killer(void)
{
    int i, j, max = erts_ptab_max(&erts_proc);
    Process* rp;

    erts_printf("\n\nProcess Information\n\n");
    erts_printf("--------------------------------------------------\n");
    for (i = max-1; i >= 0; i--) {
	rp = erts_pix2proc(i);
	if (rp && rp->i != ENULL) {
	    int br;
	    print_process_info(ERTS_PRINT_STDOUT, NULL, rp);
	    erts_printf("(k)ill (n)ext (r)eturn:\n");
	    while(1) {
		if ((j = sys_get_key(0)) <= 0)
		    erts_exit(0, "");
		switch(j) {
		case 'k': {
		    ErtsProcLocks rp_locks = ERTS_PROC_LOCKS_XSIG_SEND;
		    erts_aint32_t state;
		    erts_proc_inc_refc(rp);
		    erts_smp_proc_lock(rp, rp_locks);
		    state = erts_smp_atomic32_read_acqb(&rp->state);
		    if (state & (ERTS_PSFLG_FREE
				 | ERTS_PSFLG_EXITING
				 | ERTS_PSFLG_ACTIVE
				 | ERTS_PSFLG_ACTIVE_SYS
				 | ERTS_PSFLG_IN_RUNQ
				 | ERTS_PSFLG_RUNNING
				 | ERTS_PSFLG_RUNNING_SYS
				 | ERTS_PSFLG_DIRTY_RUNNING
				 | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
			erts_printf("Can only kill WAITING processes this way\n");
		    }
		    else {
			(void) erts_send_exit_signal(NULL,
						     NIL,
						     rp,
						     &rp_locks,
						     am_kill,
						     NIL,
						     NULL,
						     0);
		    }
		    erts_smp_proc_unlock(rp, rp_locks);
		    erts_proc_dec_refc(rp);
		}
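		/* Fall through: treat as 'n' and advance to the next process */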
		case 'n': br = 1; break;
		case 'r': return;
		default: return;
		}
		if (br == 1) break;
	    }
	}
    }
}
Example #4
static void copy_literals_commit(void* null) {
    Process* p = committer_state.stager;
#ifdef DEBUG
    committer_state.stager = NULL;
#endif
    erts_release_code_write_permission();
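    /* Resume the staging process now that the code write permission has been released */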
    erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
    if (!ERTS_PROC_IS_EXITING(p)) {
	erts_resume(p, ERTS_PROC_LOCK_STATUS);
    }
    erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
    erts_proc_dec_refc(p);
}
Example #5
File: erl_flxctr.c  Project: bmk/otp
static void
thr_prg_wake_up_later(void* bin_p)
{
    Binary* bin = bin_p;
    ErtsFlxCtrWakeUpLaterInfo* info = ERTS_MAGIC_BIN_DATA(bin);
    Process* p = info->process;
    /* Resume the requesting process */
    erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
    if (!ERTS_PROC_IS_EXITING(p)) {
        erts_resume(p, ERTS_PROC_LOCK_STATUS);
    }
    erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
    /* Free data */
    erts_proc_dec_refc(p);
    erts_bin_release(bin);
}
Example #6
File: erl_flxctr.c  Project: bmk/otp
static void
thr_prg_wake_up_and_count(void* bin_p)
{
    Binary* bin = bin_p;
    DecentralizedReadSnapshotInfo* info = ERTS_MAGIC_BIN_DATA(bin);
    Process* p = info->process;
    ErtsFlxCtrDecentralizedCtrArray* array = info->array;
    ErtsFlxCtrDecentralizedCtrArray* next = info->next_array;
    int i, sched;
    /* Reset result array */
    for (i = 0; i < info->nr_of_counters; i++) {
        info->result[i] = 0;
    }
    /* Read result from snapshot */
    for (sched = 0; sched < ERTS_FLXCTR_DECENTRALIZED_NO_SLOTS; sched++) {
        for (i = 0; i < info->nr_of_counters; i++) {
            info->result[i] = info->result[i] +
                erts_atomic_read_nob(&array->array[sched].counters[i]);
        }
    }
    /* Update the next decentralized counter array */
    for (i = 0; i < info->nr_of_counters; i++) {
        erts_atomic_add_nob(&next->array[0].counters[i], info->result[i]);
    }
    /* Announce that the snapshot is done */
    {
        Sint expected = ERTS_FLXCTR_SNAPSHOT_ONGOING;
        if (expected != erts_atomic_cmpxchg_mb(&next->snapshot_status,
                                               ERTS_FLXCTR_SNAPSHOT_NOT_ONGOING,
                                               expected)) {
            /* The CAS failed, which means that this thread needs to free the next array. */
            erts_free(info->alloc_type, next->block_start);
        }
    }
    /* Resume the process that requested the snapshot */
    erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
    if (!ERTS_PROC_IS_EXITING(p)) {
        erts_resume(p, ERTS_PROC_LOCK_STATUS);
    }
    /* Free the memory that is no longer needed */
    erts_free(info->alloc_type, array->block_start);
    erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
    erts_proc_dec_refc(p);
    erts_bin_release(bin);
}
Example #7
File: sys.c  Project: weisslj/otp
static void signal_notify_requested(Eterm type) {
    Process* p = NULL;
    Eterm msg, *hp;
    ErtsProcLocks locks = 0;
    ErlOffHeap *ohp;

    Eterm id = erts_whereis_name_to_id(NULL, am_erl_signal_server);

    if ((p = (erts_pid2proc_opt(NULL, 0, id, 0, ERTS_P2P_FLG_INC_REFC))) != NULL) {
        ErtsMessage *msgp = erts_alloc_message_heap(p, &locks, 3, &hp, &ohp);

        /* erl_signal_server ! {notify, sighup} */
        msg = TUPLE2(hp, am_notify, type);
        erts_queue_message(p, locks, msgp, msg, am_system);

        if (locks)
            erts_smp_proc_unlock(p, locks);
        erts_proc_dec_refc(p);
    }
}
Example #8
File: beam_debug.c  Project: c-bik/otp
static int
dirty_send_message(Process *c_p, Eterm to, Eterm tag)
{
    ErtsProcLocks c_p_locks, rp_locks;
    Process *rp, *real_c_p;
    Eterm msg, *hp;
    ErlOffHeap *ohp;
    ErtsMessage *mp;

    ASSERT(is_immed(tag));

    real_c_p = erts_proc_shadow2real(c_p);
    if (real_c_p != c_p)
	c_p_locks = 0;
    else
	c_p_locks = ERTS_PROC_LOCK_MAIN;

    ASSERT(real_c_p->common.id == c_p->common.id);

    rp = erts_pid2proc_opt(real_c_p, c_p_locks,
			   to, 0,
			   ERTS_P2P_FLG_INC_REFC);

    if (!rp)
	return 0;

    rp_locks = 0;
    mp = erts_alloc_message_heap(rp, &rp_locks, 3, &hp, &ohp);

    msg = TUPLE2(hp, tag, c_p->common.id);
    erts_queue_message(rp, rp_locks, mp, msg, c_p->common.id);

    if (rp == real_c_p)
	rp_locks &= ~c_p_locks;
    if (rp_locks)
	erts_proc_unlock(rp, rp_locks);

    erts_proc_dec_refc(rp);

    return 1;
}
Example #9
File: code_ix.c  Project: AugHu/otp
void erts_release_code_write_permission(void)
{
    erts_smp_mtx_lock(&code_write_permission_mtx);
    ERTS_SMP_LC_ASSERT(erts_has_code_write_permission());
    while (code_write_queue != NULL) { /* unleash the entire herd */
	struct code_write_queue_item* qitem = code_write_queue;
	erts_smp_proc_lock(qitem->p, ERTS_PROC_LOCK_STATUS);
	if (!ERTS_PROC_IS_EXITING(qitem->p)) {
	    erts_resume(qitem->p, ERTS_PROC_LOCK_STATUS);
	}
	erts_smp_proc_unlock(qitem->p, ERTS_PROC_LOCK_STATUS);
	code_write_queue = qitem->next;
	erts_proc_dec_refc(qitem->p);
	erts_free(ERTS_ALC_T_CODE_IX_LOCK_Q, qitem);
    }
    code_writing_process = NULL;
#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_tsd_set(has_code_write_permission, (void *) 0);
#endif
    erts_smp_mtx_unlock(&code_write_permission_mtx);
}
Example #10
Process *
erts_pid2proc_opt(Process *c_p,
		  ErtsProcLocks c_p_have_locks,
		  Eterm pid,
		  ErtsProcLocks pid_need_locks,
		  int flags)
{
    Process *dec_refc_proc = NULL;
    ErtsThrPrgrDelayHandle dhndl;
    ErtsProcLocks need_locks;
    Uint pix;
    Process *proc;
#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
    ErtsProcLocks lcnt_locks;
#endif

#ifdef ERTS_ENABLE_LOCK_CHECK
    if (c_p) {
	ErtsProcLocks might_unlock = c_p_have_locks & pid_need_locks;
	if (might_unlock)
	    erts_proc_lc_might_unlock(c_p, might_unlock);
    }
#endif

    if (is_not_internal_pid(pid))
	return NULL;
    pix = internal_pid_index(pid);

    ERTS_LC_ASSERT((pid_need_locks & ERTS_PROC_LOCKS_ALL) == pid_need_locks);
    need_locks = pid_need_locks;

    if (c_p && c_p->common.id == pid) {
	ASSERT(c_p->common.id != ERTS_INVALID_PID);
	ASSERT(c_p == erts_pix2proc(pix));

	if (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
	    && ERTS_PROC_IS_EXITING(c_p))
	    return NULL;
	need_locks &= ~c_p_have_locks;
	if (!need_locks) {
	    if (flags & ERTS_P2P_FLG_INC_REFC)
		erts_proc_inc_refc(c_p);
	    return c_p;
	}
    }

    dhndl = erts_thr_progress_unmanaged_delay();

    proc = (Process *) erts_ptab_pix2intptr_ddrb(&erts_proc, pix);

    if (proc) {
	if (proc->common.id != pid)
	    proc = NULL;
	else if (!need_locks) {
	    if (flags & ERTS_P2P_FLG_INC_REFC)
		erts_proc_inc_refc(proc);
	}
	else {
	    int busy;

#if ERTS_PROC_LOCK_OWN_IMPL
#ifdef ERTS_ENABLE_LOCK_COUNT
	    lcnt_locks = need_locks;
	    if (!(flags & ERTS_P2P_FLG_TRY_LOCK)) {
	    	erts_lcnt_proc_lock(&proc->lock, need_locks);
	    }
#endif

#ifdef ERTS_ENABLE_LOCK_CHECK
	    /* Make sure erts_pid2proc_safelock() is enough to handle
	       a potential lock order violation situation... */
	    busy = erts_proc_lc_trylock_force_busy(proc, need_locks);
	    if (!busy)
#endif
#endif /* ERTS_PROC_LOCK_OWN_IMPL */
	    {
		/* Try a quick trylock to grab all the locks we need. */
		busy = (int) erts_smp_proc_raw_trylock__(proc, need_locks);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_CHECK)
		erts_proc_lc_trylock(proc, need_locks, !busy, __FILE__,__LINE__);
#endif
#ifdef ERTS_PROC_LOCK_DEBUG
		if (!busy)
		    erts_proc_lock_op_debug(proc, need_locks, 1);
#endif
	    }

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
	    if (flags & ERTS_P2P_FLG_TRY_LOCK)
		erts_lcnt_proc_trylock(&proc->lock, need_locks,
				       busy ? EBUSY : 0);
#endif

	    if (!busy) {
		if (flags & ERTS_P2P_FLG_INC_REFC)
		    erts_proc_inc_refc(proc);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
	    	/* all is great */
	    	if (!(flags & ERTS_P2P_FLG_TRY_LOCK))
		    erts_lcnt_proc_lock_post_x(&proc->lock, lcnt_locks,
					       __FILE__, __LINE__);
#endif

	    }
	    else {
		if (flags & ERTS_P2P_FLG_TRY_LOCK)
		    proc = ERTS_PROC_LOCK_BUSY;
		else {
		    int managed;
		    if (flags & ERTS_P2P_FLG_INC_REFC)
			erts_proc_inc_refc(proc);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
		    erts_lcnt_proc_lock_unaquire(&proc->lock, lcnt_locks);
#endif

		    managed = dhndl == ERTS_THR_PRGR_DHANDLE_MANAGED;
		    if (!managed) {
			erts_proc_inc_refc(proc);
			erts_thr_progress_unmanaged_continue(dhndl);
			dec_refc_proc = proc;

			/*
			 * We don't want to call
			 * erts_thr_progress_unmanaged_continue()
			 * again.
			 */
			dhndl = ERTS_THR_PRGR_DHANDLE_MANAGED;
		    }

		    proc_safelock(managed,
				  c_p,
				  c_p_have_locks,
				  c_p_have_locks,
				  proc,
				  0,
				  need_locks);
		}
	    }
        }
    }

    if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
	erts_thr_progress_unmanaged_continue(dhndl);

    if (need_locks
	&& proc
	&& proc != ERTS_PROC_LOCK_BUSY
	&& (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
	    ? ERTS_PROC_IS_EXITING(proc)
	    : (proc
	       != (Process *) erts_ptab_pix2intptr_nob(&erts_proc, pix)))) {

	erts_smp_proc_unlock(proc, need_locks);

	if (flags & ERTS_P2P_FLG_INC_REFC)
	    dec_refc_proc = proc;
	proc = NULL;

    }

    if (dec_refc_proc)
	erts_proc_dec_refc(dec_refc_proc);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_PROC_LOCK_DEBUG)
    ERTS_LC_ASSERT(!proc
		   || proc == ERTS_PROC_LOCK_BUSY
		   || (pid_need_locks ==
		       (ERTS_PROC_LOCK_FLGS_READ_(&proc->lock)
			& pid_need_locks)));
#endif

    return proc;
}
Example #11
static void
proc_safelock(int is_managed,
	      Process *a_proc,
	      ErtsProcLocks a_have_locks,
	      ErtsProcLocks a_need_locks,
	      Process *b_proc,
	      ErtsProcLocks b_have_locks,
	      ErtsProcLocks b_need_locks)
{
    Process *p1, *p2;
#ifdef ERTS_ENABLE_LOCK_CHECK
    Eterm pid1, pid2;
#endif
    ErtsProcLocks need_locks1, have_locks1, need_locks2, have_locks2;
    ErtsProcLocks unlock_mask;
    int lock_no, refc1 = 0, refc2 = 0;

    ERTS_LC_ASSERT(b_proc);


    /* Determine inter process lock order...
     * Locks with the same lock order should be locked on p1 before p2.
     */
    if (a_proc) {
	if (a_proc->common.id < b_proc->common.id) {
	    p1 = a_proc;
#ifdef ERTS_ENABLE_LOCK_CHECK
	    pid1 = a_proc->common.id;
#endif
	    need_locks1 = a_need_locks;
	    have_locks1 = a_have_locks;
	    p2 = b_proc;
#ifdef ERTS_ENABLE_LOCK_CHECK
	    pid2 = b_proc->common.id;
#endif
	    need_locks2 = b_need_locks;
	    have_locks2 = b_have_locks;
	}
	else if (a_proc->common.id > b_proc->common.id) {
	    p1 = b_proc;
#ifdef ERTS_ENABLE_LOCK_CHECK
	    pid1 = b_proc->common.id;
#endif
	    need_locks1 = b_need_locks;
	    have_locks1 = b_have_locks;
	    p2 = a_proc;
#ifdef ERTS_ENABLE_LOCK_CHECK
	    pid2 = a_proc->common.id;
#endif
	    need_locks2 = a_need_locks;
	    have_locks2 = a_have_locks;
	}
	else {
	    ERTS_LC_ASSERT(a_proc == b_proc);
	    ERTS_LC_ASSERT(a_proc->common.id == b_proc->common.id);
	    p1 = a_proc;
#ifdef ERTS_ENABLE_LOCK_CHECK
	    pid1 = a_proc->common.id;
#endif
	    need_locks1 = a_need_locks | b_need_locks;
	    have_locks1 = a_have_locks | b_have_locks;
	    p2 = NULL;
#ifdef ERTS_ENABLE_LOCK_CHECK
	    pid2 = 0;
#endif
	    need_locks2 = 0;
	    have_locks2 = 0;
	}
    }
    else {
	p1 = b_proc;
#ifdef ERTS_ENABLE_LOCK_CHECK
	pid1 = b_proc->common.id;
#endif
	need_locks1 = b_need_locks;
	have_locks1 = b_have_locks;
	p2 = NULL;
#ifdef ERTS_ENABLE_LOCK_CHECK
	pid2 = 0;
#endif
	need_locks2 = 0;
	have_locks2 = 0;
#ifdef ERTS_ENABLE_LOCK_CHECK
	a_need_locks = 0;
	a_have_locks = 0;
#endif
    }

#ifdef ERTS_ENABLE_LOCK_CHECK
    if (p1)
	erts_proc_lc_chk_proc_locks(p1, have_locks1);
    if (p2)
	erts_proc_lc_chk_proc_locks(p2, have_locks2);

    if ((need_locks1 & have_locks1) != have_locks1)
	erts_lc_fail("Thread tries to release process lock(s) "
		     "on %T via erts_proc_safelock().", pid1);
    if ((need_locks2 & have_locks2) != have_locks2)
	erts_lc_fail("Thread tries to release process lock(s) "
		     "on %T via erts_proc_safelock().",
		     pid2);
#endif


    need_locks1 &= ~have_locks1;
    need_locks2 &= ~have_locks2;

    /* Figure out the range of locks that needs to be unlocked... */
    unlock_mask = ERTS_PROC_LOCKS_ALL;
    for (lock_no = 0;
	 lock_no <= ERTS_PROC_LOCK_MAX_BIT;
	 lock_no++) {
	ErtsProcLocks lock = (1 << lock_no);
	if (lock & need_locks1)
	    break;
	unlock_mask &= ~lock;
	if (lock & need_locks2)
	    break;
    }

    /* ... and unlock locks in that range... */
    if (have_locks1 || have_locks2) {
	ErtsProcLocks unlock_locks;
	unlock_locks = unlock_mask & have_locks1;
	if (unlock_locks) {
	    have_locks1 &= ~unlock_locks;
	    need_locks1 |= unlock_locks;
	    if (!is_managed && !have_locks1) {
		refc1 = 1;
		erts_proc_inc_refc(p1);
	    }
	    erts_smp_proc_unlock(p1, unlock_locks);
	}
	unlock_locks = unlock_mask & have_locks2;
	if (unlock_locks) {
	    have_locks2 &= ~unlock_locks;
	    need_locks2 |= unlock_locks;
	    if (!is_managed && !have_locks2) {
		refc2 = 1;
		erts_proc_inc_refc(p2);
	    }
	    erts_smp_proc_unlock(p2, unlock_locks);
	}
    }

    /*
     * lock_no equals the number of the first lock to lock on
     * either p1 *or* p2.
     */


#ifdef ERTS_ENABLE_LOCK_CHECK
    if (p1)
	erts_proc_lc_chk_proc_locks(p1, have_locks1);
    if (p2)
	erts_proc_lc_chk_proc_locks(p2, have_locks2);
#endif

    /* Lock locks in lock order... */
    while (lock_no <= ERTS_PROC_LOCK_MAX_BIT) {
	ErtsProcLocks locks;
	ErtsProcLocks lock = (1 << lock_no);
	ErtsProcLocks lock_mask = 0;
	if (need_locks1 & lock) {
	    do {
		lock = (1 << lock_no++);
		lock_mask |= lock;
	    } while (lock_no <= ERTS_PROC_LOCK_MAX_BIT
		     && !(need_locks2 & lock));
	    if (need_locks2 & lock)
		lock_no--;
	    locks = need_locks1 & lock_mask;
	    erts_smp_proc_lock(p1, locks);
	    have_locks1 |= locks;
	    need_locks1 &= ~locks;
	}
	else if (need_locks2 & lock) {
	    while (lock_no <= ERTS_PROC_LOCK_MAX_BIT
		   && !(need_locks1 & lock)) {
		lock_mask |= lock;
		lock = (1 << ++lock_no);
	    }
	    locks = need_locks2 & lock_mask;
	    erts_smp_proc_lock(p2, locks);
	    have_locks2 |= locks;
	    need_locks2 &= ~locks;
	}
	else
	    lock_no++;
    }

#ifdef ERTS_ENABLE_LOCK_CHECK
    if (p1)
	erts_proc_lc_chk_proc_locks(p1, have_locks1);
    if (p2)
	erts_proc_lc_chk_proc_locks(p2, have_locks2);

    if (p1 && p2) {
	if (p1 == a_proc) {
	    ERTS_LC_ASSERT(a_need_locks == have_locks1);
	    ERTS_LC_ASSERT(b_need_locks == have_locks2);
	}
	else {
	    ERTS_LC_ASSERT(a_need_locks == have_locks2);
	    ERTS_LC_ASSERT(b_need_locks == have_locks1);
	}
    }
    else {
	ERTS_LC_ASSERT(p1);
	if (a_proc) {
	    ERTS_LC_ASSERT(have_locks1 == (a_need_locks | b_need_locks));
	}
	else {
	    ERTS_LC_ASSERT(have_locks1 == b_need_locks);
	}
    }
#endif

    if (!is_managed) {
	if (refc1)
	    erts_proc_dec_refc(p1);
	if (refc2)
	    erts_proc_dec_refc(p2);
    }
}