Code example #1
File: erl_process_lock.c Project: AugHu/otp
void erts_proc_lc_chk_only_proc_main(Process *p)
{
#if ERTS_PROC_LOCK_OWN_IMPL
    erts_lc_lock_t proc_main = ERTS_LC_LOCK_INIT(lc_id.proc_lock_main,
						 p->common.id,
						 ERTS_LC_FLG_LT_PROCLOCK);
    erts_lc_check_exact(&proc_main, 1);
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
    erts_lc_check_exact(&p->lock.main.lc, 1);
#endif
}
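A typical call site, modeled on example #6 below, compiles the check only into lock-checking builds and picks the assertion based on which locks the caller claims to hold; do_something_with_proc is a hypothetical name used only for this sketch, not an OTP function.
static void do_something_with_proc(Process *c_p, ErtsProcLocks c_p_locks)
{
#ifdef ERTS_ENABLE_LOCK_CHECK
    if (c_p && c_p_locks)
        erts_proc_lc_chk_only_proc_main(c_p); /* caller must hold only the main process lock */
    else
        erts_lc_check_exact(NULL, 0);         /* caller must hold no checked locks at all */
#endif
    /* ... actual work ... */
}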
Code example #2
File: erl_thr_progress.c Project: Bufias/otp
void
erts_thr_progress_active(ErtsSchedulerData *esdp, int on)
{
    ErtsThrPrgrData *tpd = thr_prgr_data(esdp);

#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_lc_check_exact(NULL, 0);
#endif

    ERTS_THR_PROGRESS_STATE_DEBUG_SET_ACTIVE(tpd->id, on);

    if (on) {
	ASSERT(!tpd->active);
	tpd->active = 1;
	erts_atomic32_inc_nob(&intrnl->misc.data.lflgs);
    }
    else {
	ASSERT(tpd->active);
	tpd->active = 0;
	erts_atomic32_dec_nob(&intrnl->misc.data.lflgs);
	if (update(tpd))
	    leader_update(tpd);
    }

#ifdef DEBUG
    {
	erts_aint32_t n = erts_atomic32_read_nob(&intrnl->misc.data.lflgs);
	n &= ERTS_THR_PRGR_LFLG_ACTIVE_MASK;
	ASSERT(tpd->active <= n && n <= intrnl->managed.no);
    }
#endif

}
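The (NULL, 0) form used here asserts that the calling thread holds no locks known to the lock checker, while the (&proc_main, 1) form in example #1 asserts that exactly that one lock is held. The recurring guard, as a stand-alone sketch (my_runtime_step is a hypothetical caller, not OTP code):
void my_runtime_step(void)
{
#ifdef ERTS_ENABLE_LOCK_CHECK
    /* Lock-checking builds only: fail hard if any checked lock is still held. */
    erts_lc_check_exact(NULL, 0);
#endif
    /* ... work that must run without holding any checked lock ... */
}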
Code example #3
File: erl_thr_progress.c Project: Bufias/otp
void
erts_thr_progress_finalize_wait(ErtsSchedulerData *esdp)
{
    ErtsThrPrgrData *tpd = thr_prgr_data(esdp);
    ErtsThrPrgrVal current, val;

#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_lc_check_exact(NULL, 0);
#endif

    /*
     * We aren't allowed to continue until our thread
     * progress is past global current.
     */
    val = current = read_acqb(&erts_thr_prgr__.current);
    while (1) {
	val++;
	if (val == ERTS_THR_PRGR_VAL_WAITING)
	    val = 0;
	tpd->confirmed = val;
	set_mb(&intrnl->thr[tpd->id].data.current, val);
	val = read_acqb(&erts_thr_prgr__.current);
	if (current == val)
	    break;
	current = val;
    }
    if (block_count_inc())
	block_thread(tpd);
    if (update(tpd))
	leader_update(tpd);
}
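The increment in the loop above always skips ERTS_THR_PRGR_VAL_WAITING, which is reserved as a sentinel; the same wrap-around step recurs twice in example #8. Pulled out as a helper, it would look roughly like this (next_progress_val is a hypothetical name, not an OTP function):
static ErtsThrPrgrVal next_progress_val(ErtsThrPrgrVal val)
{
    val++;
    if (val == ERTS_THR_PRGR_VAL_WAITING) /* WAITING is a sentinel, never a real value */
        val = 0;
    return val;
}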
Code example #4
File: erl_thr_progress.c Project: Bufias/otp
void
erts_thr_progress_prepare_wait(ErtsSchedulerData *esdp)
{
    erts_aint32_t lflgs;
    ErtsThrPrgrData *tpd = thr_prgr_data(esdp);

#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_lc_check_exact(NULL, 0);
#endif

    block_count_dec();

    tpd->confirmed = ERTS_THR_PRGR_VAL_WAITING;
    set_mb(&intrnl->thr[tpd->id].data.current, ERTS_THR_PRGR_VAL_WAITING);

    lflgs = erts_atomic32_read_nob(&intrnl->misc.data.lflgs);

    if ((lflgs & (ERTS_THR_PRGR_LFLG_NO_LEADER
		  | ERTS_THR_PRGR_LFLG_WAITING_UM
		  | ERTS_THR_PRGR_LFLG_ACTIVE_MASK))
	== ERTS_THR_PRGR_LFLG_NO_LEADER 
	&& got_sched_wakeups()) {
	/* Someone needs to make progress */
	wakeup_managed(0);
    }
}
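Examples #3 and #4 are the two halves of one protocol: a managed thread calls erts_thr_progress_prepare_wait before blocking, so the leader can treat it as waiting, and erts_thr_progress_finalize_wait after waking, so its confirmed value catches up past the global current. A minimal sketch of the pairing (managed_thread_sleep and wait_for_work are placeholders, not the real scheduler code):
static void managed_thread_sleep(ErtsSchedulerData *esdp)
{
    erts_thr_progress_prepare_wait(esdp);   /* publish ERTS_THR_PRGR_VAL_WAITING */

    wait_for_work();                        /* placeholder for the actual blocking wait */

    erts_thr_progress_finalize_wait(esdp);  /* confirm a value past the global current */
}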
Code example #5
File: erl_process_lock.c Project: adi2188/otp
void erts_proc_lc_chk_only_proc_main(Process *p)
{
    erts_lc_lock_t proc_main = ERTS_LC_LOCK_INIT(lc_id.proc_lock_main,
						 p->id,
						 ERTS_LC_FLG_LT_PROCLOCK);
    erts_lc_check_exact(&proc_main, 1);
}
Code example #6
File: beam_bif_load.c Project: system/erlang-otp
static void 
delete_code(Process *c_p, ErtsProcLocks c_p_locks, Module* modp)
{
#ifdef ERTS_ENABLE_LOCK_CHECK
#ifdef ERTS_SMP
    if (c_p && c_p_locks)
	erts_proc_lc_chk_only_proc_main(c_p);
    else
#endif
	erts_lc_check_exact(NULL, 0);
#endif

    /*
     * Clear breakpoints if any
     */
    if (modp->code != NULL && modp->code[MI_NUM_BREAKPOINTS] > 0) {
	if (c_p && c_p_locks)
	    erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
	erts_smp_block_system(0);
	erts_clear_module_break(modp);
	modp->code[MI_NUM_BREAKPOINTS] = 0;
	erts_smp_release_system();
	if (c_p && c_p_locks)
	    erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
    }
    modp->old_code = modp->code;
    modp->old_code_length = modp->code_length;
    modp->old_catches = modp->catches;
    modp->code = NULL;
    modp->code_length = 0;
    modp->catches = BEAM_CATCHES_NIL;
}
Code example #7
File: erl_init.c Project: DavidPajaro/otp
static void
system_cleanup(int flush_async)
{
    /*
     * Make sure only one thread exits the runtime system.
     */
    if (erts_atomic_inc_read_nob(&exiting) != 1) {
	/*
	 * Another thread is currently exiting the system;
	 * wait for it to do its job.
	 */
#ifdef ERTS_SMP
	if (erts_thr_progress_is_managed_thread()) {
	    /*
	     * The exiting thread might be waiting for
	     * us to block; need to update status...
	     */
	    erts_thr_progress_active(NULL, 0);
	    erts_thr_progress_prepare_wait(NULL);
	}
#endif
	/* Wait forever... */
	while (1)
	    erts_milli_sleep(10000000);
    }

    /* No cleanup wanted if ...
     * 1. we are about to do an abnormal exit,
     * 2. we haven't finished initializing, or
     * 3. a thread other than the main thread is performing the exit
     *    (in the threaded, non-SMP case).
     */

    if (!flush_async
	|| !erts_initialized
#if defined(USE_THREADS) && !defined(ERTS_SMP)
	|| !erts_equal_tids(main_thread, erts_thr_self())
#endif
	)
	return;

#ifdef ERTS_SMP
#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_lc_check_exact(NULL, 0);
#endif
#endif

    erts_exit_flush_async();
}
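The guard at the top of system_cleanup is a first-caller-wins latch: erts_atomic_inc_read_nob returns the incremented value, so only the thread that takes the counter from 0 to 1 proceeds, and every later caller parks forever. The same idea as a self-contained C11 sketch (the names here are illustrative, not OTP code):
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int exiting; /* zero-initialized; becomes nonzero once shutdown starts */

/* Returns true for exactly one caller; later callers should wait or bail out. */
static bool claim_shutdown(void)
{
    /* fetch_add returns the previous value, so only the first caller sees 0. */
    return atomic_fetch_add(&exiting, 1) == 0;
}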
Code example #8
File: erl_thr_progress.c Project: Bufias/otp
static ERTS_INLINE int
leader_update(ErtsThrPrgrData *tpd)
{
#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_lc_check_exact(NULL, 0);
#endif
    if (!tpd->leader) {
	/* Probably need to block... */
	block_thread(tpd);
    }
    else {
	ErtsThrPrgrVal current;
	int ix, chk_next_ix, umrefc_ix, my_ix, no_managed, waiting_unmanaged;
	erts_aint32_t lflgs;
	ErtsThrPrgrVal next;
	erts_aint_t refc;

	my_ix = tpd->id;

	if (tpd->leader_state.current == ERTS_THR_PRGR_VAL_WAITING) {
	    /* Took over as leader from another thread */
	    tpd->leader_state.current = read_nob(&erts_thr_prgr__.current);
	    tpd->leader_state.next = tpd->leader_state.current;
	    tpd->leader_state.next++;
	    if (tpd->leader_state.next == ERTS_THR_PRGR_VAL_WAITING)
		tpd->leader_state.next = 0;
	    tpd->leader_state.chk_next_ix = intrnl->misc.data.chk_next_ix;
	    tpd->leader_state.umrefc_ix.waiting = intrnl->misc.data.umrefc_ix.waiting;
	    tpd->leader_state.umrefc_ix.current =
		(int) erts_atomic32_read_nob(&intrnl->misc.data.umrefc_ix.current);

	    if (tpd->confirmed == tpd->leader_state.current) {
		ErtsThrPrgrVal val = tpd->leader_state.current + 1;
		if (val == ERTS_THR_PRGR_VAL_WAITING)
		    val = 0;
		tpd->confirmed = val;
		set_mb(&intrnl->thr[my_ix].data.current, val);
	    }
	}


	next = tpd->leader_state.next;

	waiting_unmanaged = 0;
	umrefc_ix = -1; /* Shut up annoying warning */

	chk_next_ix = tpd->leader_state.chk_next_ix;
	no_managed = intrnl->managed.no;
	ASSERT(0 <= chk_next_ix && chk_next_ix <= no_managed);
	/* Check managed threads */
	if (chk_next_ix < no_managed) {
	    for (ix = chk_next_ix; ix < no_managed; ix++) {
		ErtsThrPrgrVal tmp;
		if (ix == my_ix)
		    continue;
		tmp = read_nob(&intrnl->thr[ix].data.current);
		if (tmp != next && tmp != ERTS_THR_PRGR_VAL_WAITING) {
		    tpd->leader_state.chk_next_ix = ix;
		    ASSERT(erts_thr_progress_has_passed__(next, tmp));
		    goto done;
		}
	    }
	}

	/* Check unmanaged threads */
	waiting_unmanaged = tpd->leader_state.umrefc_ix.waiting != -1;
	umrefc_ix = (waiting_unmanaged
		     ? tpd->leader_state.umrefc_ix.waiting
		     : tpd->leader_state.umrefc_ix.current);
	refc = erts_atomic_read_nob(&intrnl->umrefc[umrefc_ix].refc);
	ASSERT(refc >= 0);
	if (refc != 0) {
	    int new_umrefc_ix;

	    if (waiting_unmanaged)
		goto done;

	    new_umrefc_ix = (umrefc_ix + 1) & 0x1;
	    tpd->leader_state.umrefc_ix.waiting = umrefc_ix;
	    tpd->leader_state.chk_next_ix = no_managed;
	    erts_atomic32_set_nob(&intrnl->misc.data.umrefc_ix.current,
				  (erts_aint32_t) new_umrefc_ix);
	    ETHR_MEMBAR(ETHR_StoreLoad);
	    refc = erts_atomic_read_nob(&intrnl->umrefc[umrefc_ix].refc);
	    ASSERT(refc >= 0);
	    waiting_unmanaged = 1;
	    if (refc != 0)
		goto done;
	}

	/* Make progress */
	current = next;

	next++;
	if (next == ERTS_THR_PRGR_VAL_WAITING)
	    next = 0;

	set_nob(&intrnl->thr[my_ix].data.current, next);
	set_mb(&erts_thr_prgr__.current, current);
	tpd->confirmed = next;
	tpd->leader_state.next = next;
	tpd->leader_state.current = current;

#if ERTS_THR_PRGR_PRINT_VAL
	if (current % 1000 == 0)
	    erts_fprintf(stderr, "%b64u\n", current);
#endif
	handle_wakeup_requests(current);

	if (waiting_unmanaged) {
	    waiting_unmanaged = 0;
	    tpd->leader_state.umrefc_ix.waiting = -1;
	    erts_atomic32_read_band_nob(&intrnl->misc.data.lflgs,
					~ERTS_THR_PRGR_LFLG_WAITING_UM);
	}
	tpd->leader_state.chk_next_ix = 0;

    done:

	if (tpd->active) {
	    lflgs = erts_atomic32_read_nob(&intrnl->misc.data.lflgs);
	    if (lflgs & ERTS_THR_PRGR_LFLG_BLOCK)
		(void) block_thread(tpd);
	}
	else {
	    int force_wakeup_check = 0;
	    erts_aint32_t set_flags = ERTS_THR_PRGR_LFLG_NO_LEADER;
	    tpd->leader = 0;
	    tpd->leader_state.current = ERTS_THR_PRGR_VAL_WAITING;
#if ERTS_THR_PRGR_PRINT_LEADER
	    erts_fprintf(stderr, "L <- %d\n", tpd->id);
#endif

	    ERTS_THR_PROGRESS_STATE_DEBUG_SET_LEADER(tpd->id, 0);

	    intrnl->misc.data.umrefc_ix.waiting
		= tpd->leader_state.umrefc_ix.waiting;
	    if (waiting_unmanaged)
		set_flags |= ERTS_THR_PRGR_LFLG_WAITING_UM;

	    lflgs = erts_atomic32_read_bor_relb(&intrnl->misc.data.lflgs,
						set_flags);
	    lflgs |= set_flags;
	    if (lflgs & ERTS_THR_PRGR_LFLG_BLOCK)
		lflgs = block_thread(tpd);

	    if (waiting_unmanaged) {
		/* Need to check umrefc again */
		ETHR_MEMBAR(ETHR_StoreLoad);
		refc = erts_atomic_read_nob(&intrnl->umrefc[umrefc_ix].refc);
		if (refc == 0) {
		    /* Need to force wakeup check */
		    force_wakeup_check = 1;
		}
	    }

	    if ((force_wakeup_check
		 || ((lflgs & (ERTS_THR_PRGR_LFLG_NO_LEADER
			       | ERTS_THR_PRGR_LFLG_WAITING_UM
			       | ERTS_THR_PRGR_LFLG_ACTIVE_MASK))
		     == ERTS_THR_PRGR_LFLG_NO_LEADER))
		&& got_sched_wakeups()) {
		/* Someone needs to make progress */
		wakeup_managed(0);
	    }
	}
    }

    return tpd->leader;
}
Code example #9
File: erl_init.c Project: Luter/otp
static void
system_cleanup(int flush_async)
{
    /*
     * Make sure only one thread exits the runtime system.
     */
    if (erts_atomic_inc_read_nob(&exiting) != 1) {
	/*
	 * Another thread is currently exiting the system;
	 * wait for it to do its job.
	 */
#ifdef ERTS_SMP
	if (erts_thr_progress_is_managed_thread()) {
	    /*
	     * The exiting thread might be waiting for
	     * us to block; need to update status...
	     */
	    erts_thr_progress_active(NULL, 0);
	    erts_thr_progress_prepare_wait(NULL);
	}
#endif
	/* Wait forever... */
	while (1)
	    erts_milli_sleep(10000000);
    }

    /* No cleanup wanted if ...
     * 1. we are about to do an abnormal exit,
     * 2. we haven't finished initializing, or
     * 3. a thread other than the main thread is performing the exit
     *    (in the threaded, non-SMP case).
     */

    if (!flush_async
	|| !erts_initialized
#if defined(USE_THREADS) && !defined(ERTS_SMP)
	|| !erts_equal_tids(main_thread, erts_thr_self())
#endif
	)
	return;

#ifdef ERTS_SMP
#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_lc_check_exact(NULL, 0);
#endif
#endif

#ifdef HYBRID
    if (ma_src_stack) erts_free(ERTS_ALC_T_OBJECT_STACK,
                                (void *)ma_src_stack);
    if (ma_dst_stack) erts_free(ERTS_ALC_T_OBJECT_STACK,
                                (void *)ma_dst_stack);
    if (ma_offset_stack) erts_free(ERTS_ALC_T_OBJECT_STACK,
                                   (void *)ma_offset_stack);
    ma_src_stack = NULL;
    ma_dst_stack = NULL;
    ma_offset_stack = NULL;
    erts_cleanup_offheap(&erts_global_offheap);
#endif

#if defined(HYBRID) && !defined(INCREMENTAL)
    if (global_heap) {
	ERTS_HEAP_FREE(ERTS_ALC_T_HEAP,
		       (void*) global_heap,
		       sizeof(Eterm) * global_heap_sz);
    }
    global_heap = NULL;
#endif

#ifdef INCREMENTAL
    erts_cleanup_incgc();
#endif

    erts_exit_flush_async();
}
Code example #10
File: erl_init.c Project: JingkunLiu/Erlang-OTP
static void
system_cleanup(int exit_code)
{
    /* No cleanup wanted if ...
     * 1. we are about to do an abnormal exit,
     * 2. we haven't finished initializing, or
     * 3. a thread other than the main thread is performing the exit
     *    (in the threaded, non-SMP case).
     */

    if (exit_code != 0
	|| !erts_initialized
#if defined(USE_THREADS) && !defined(ERTS_SMP)
	|| !erts_equal_tids(main_thread, erts_thr_self())
#endif
	)
	return;

#ifdef ERTS_SMP
#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_lc_check_exact(NULL, 0);
#endif
    erts_smp_block_system(ERTS_BS_FLG_ALLOW_GC); /* We never release it... */
#endif

#ifdef HYBRID
    if (ma_src_stack) erts_free(ERTS_ALC_T_OBJECT_STACK,
                                (void *)ma_src_stack);
    if (ma_dst_stack) erts_free(ERTS_ALC_T_OBJECT_STACK,
                                (void *)ma_dst_stack);
    if (ma_offset_stack) erts_free(ERTS_ALC_T_OBJECT_STACK,
                                   (void *)ma_offset_stack);
    ma_src_stack = NULL;
    ma_dst_stack = NULL;
    ma_offset_stack = NULL;
    erts_cleanup_offheap(&erts_global_offheap);
#endif

#if defined(HYBRID) && !defined(INCREMENTAL)
    if (global_heap) {
	ERTS_HEAP_FREE(ERTS_ALC_T_HEAP,
		       (void*) global_heap,
		       sizeof(Eterm) * global_heap_sz);
    }
    global_heap = NULL;
#endif

#ifdef INCREMENTAL
    erts_cleanup_incgc();
#endif

#if defined(USE_THREADS)
    exit_async();
#endif
#if HAVE_ERTS_MSEG
    erts_mseg_exit();
#endif

    /*
     * A lot more cleaning could/should have been done...
     */

}