Example #1
static void
release_update_permission(int release_updater)
{
    erts_mtx_lock(&update_table_permission_mtx);
    ASSERT(updater_process != NULL);

    if (release_updater) {
        erts_proc_lock(updater_process, ERTS_PROC_LOCK_STATUS);
        if (!ERTS_PROC_IS_EXITING(updater_process)) {
            erts_resume(updater_process, ERTS_PROC_LOCK_STATUS);
        }
        erts_proc_unlock(updater_process, ERTS_PROC_LOCK_STATUS);
    }
    updater_process = NULL;

    while (update_queue != NULL) { /* Unleash the entire herd */
	struct update_queue_item* qitem = update_queue;
	erts_proc_lock(qitem->p, ERTS_PROC_LOCK_STATUS);
	if (!ERTS_PROC_IS_EXITING(qitem->p)) {
	    erts_resume(qitem->p, ERTS_PROC_LOCK_STATUS);
	}
	erts_proc_unlock(qitem->p, ERTS_PROC_LOCK_STATUS);
	update_queue = qitem->next;
	erts_proc_dec_refc(qitem->p);
	erts_free(ERTS_ALC_T_PERSISTENT_LOCK_Q, qitem);
    }
    erts_mtx_unlock(&update_table_permission_mtx);
}
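
A note on the pattern: Examples #1, #6, and #8 below all release a global permission and then wake every queued waiter under the same mutex. The following is a minimal pthreads sketch of that shape, not ERTS code; every name in it is hypothetical. ERTS resumes suspended processes with erts_resume() and the releaser frees each queue item, whereas in this sketch each waiter sleeps on its own condition variable and owns its own queue node.

/* Sketch only: the "wake the entire herd" release pattern in plain pthreads. */
#include <pthread.h>

struct waiter {
    pthread_cond_t cond;
    int woken;                  /* guards against spurious wakeups */
    struct waiter *next;
};

static pthread_mutex_t permission_mtx = PTHREAD_MUTEX_INITIALIZER;
static struct waiter *wait_queue = NULL;

/* Called by a thread that failed to get the permission: enqueue and sleep. */
void wait_for_permission(void)
{
    struct waiter w;
    pthread_cond_init(&w.cond, NULL);
    w.woken = 0;
    pthread_mutex_lock(&permission_mtx);
    w.next = wait_queue;
    wait_queue = &w;
    while (!w.woken)
        pthread_cond_wait(&w.cond, &permission_mtx);
    pthread_mutex_unlock(&permission_mtx);
    pthread_cond_destroy(&w.cond);
}

/* Called by the permission holder: wake every queued waiter. */
void release_permission(void)
{
    pthread_mutex_lock(&permission_mtx);
    while (wait_queue != NULL) {        /* unleash the entire herd */
        struct waiter *w = wait_queue;
        wait_queue = w->next;
        w->woken = 1;
        pthread_cond_signal(&w->cond);  /* plays the role of erts_resume() */
    }
    pthread_mutex_unlock(&permission_mtx);
}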
Example #2
static void copy_literals_commit(void* null) {
    Process* p = committer_state.stager;
#ifdef DEBUG
    committer_state.stager = NULL;
#endif
    erts_release_code_write_permission();
    erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
    if (!ERTS_PROC_IS_EXITING(p)) {
	erts_resume(p, ERTS_PROC_LOCK_STATUS);
    }
    erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
    erts_proc_dec_refc(p);
}
Example #3
void
process_info(int to, void *to_arg)
{
    int i;
    for (i = 0; i < erts_max_processes; i++) {
	Process *p = erts_pix2proc(i);
	if (p && p->i != ENULL) {
	    if (!ERTS_PROC_IS_EXITING(p))
		print_process_info(to, to_arg, p);
	}
    }

    port_info(to, to_arg);
}
Example #4
static void
thr_prg_wake_up_later(void* bin_p)
{
    Binary* bin = bin_p;
    ErtsFlxCtrWakeUpLaterInfo* info = ERTS_MAGIC_BIN_DATA(bin);
    Process* p = info->process;
    /* Resume the requesting process */
    erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
    if (!ERTS_PROC_IS_EXITING(p)) {
        erts_resume(p, ERTS_PROC_LOCK_STATUS);
    }
    erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
    /* Free data */
    erts_proc_dec_refc(p);
    erts_bin_release(bin);
}
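
Examples #2 and #4 also illustrate a reference-count hand-off: the requester increments the process refc before scheduling the deferred wake-up, and the callback drops that reference only after resuming the process, so the Process structure cannot be reclaimed while the callback is in flight. Below is a minimal C11 sketch of the same discipline, with hypothetical names rather than the ERTS API.

#include <stdatomic.h>
#include <stdlib.h>

struct proc {
    _Atomic long refc;
    /* ... process state ... */
};

void proc_inc_refc(struct proc *p)
{
    atomic_fetch_add_explicit(&p->refc, 1, memory_order_relaxed);
}

void proc_dec_refc(struct proc *p)
{
    /* acq_rel so the thread that frees sees all prior writes */
    if (atomic_fetch_sub_explicit(&p->refc, 1, memory_order_acq_rel) == 1)
        free(p);                    /* last reference is gone */
}

/* Deferred callback; owns exactly one reference to p. */
void wake_up_later(struct proc *p)
{
    /* ... lock p, resume it unless it is exiting, unlock p ... */
    proc_dec_refc(p);               /* balances the requester's inc */
}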
Example #5
void
process_info(int to, void *to_arg)
{
    int i, max = erts_ptab_max(&erts_proc);
    for (i = 0; i < max; i++) {
	Process *p = erts_pix2proc(i);
	if (p && p->i != ENULL) {
	    /* Do not include processes with no heap;
	     * they were most likely just created and have invalid data.
	     */
	    if (!ERTS_PROC_IS_EXITING(p) && p->heap != NULL)
		print_process_info(to, to_arg, p);
	}
    }

    port_info(to, to_arg);
}
Example #6
void erts_release_code_write_permission(void)
{
    erts_smp_mtx_lock(&the_code_ix_queue_lock);
    while (the_code_ix_queue != NULL) { /* unleash the entire herd */
	struct code_ix_queue_item* qitem = the_code_ix_queue;
	erts_smp_proc_lock(qitem->p, ERTS_PROC_LOCK_STATUS);
	if (!ERTS_PROC_IS_EXITING(qitem->p)) {
	    erts_resume(qitem->p, ERTS_PROC_LOCK_STATUS);
	}
	erts_smp_proc_unlock(qitem->p, ERTS_PROC_LOCK_STATUS);
	the_code_ix_queue = qitem->next;
	erts_smp_proc_dec_refc(qitem->p);
	erts_free(ERTS_ALC_T_CODE_IX_LOCK_Q, qitem);
    }
    the_code_ix_lock = 0;
    erts_smp_mtx_unlock(&the_code_ix_queue_lock);
}
Example #7
static void
thr_prg_wake_up_and_count(void* bin_p)
{
    Binary* bin = bin_p;
    DecentralizedReadSnapshotInfo* info = ERTS_MAGIC_BIN_DATA(bin);
    Process* p = info->process;
    ErtsFlxCtrDecentralizedCtrArray* array = info->array;
    ErtsFlxCtrDecentralizedCtrArray* next = info->next_array;
    int i, sched;
    /* Reset result array */
    for (i = 0; i < info->nr_of_counters; i++) {
        info->result[i] = 0;
    }
    /* Read result from snapshot */
    for (sched = 0; sched < ERTS_FLXCTR_DECENTRALIZED_NO_SLOTS; sched++) {
        for (i = 0; i < info->nr_of_counters; i++) {
            info->result[i] = info->result[i] +
                erts_atomic_read_nob(&array->array[sched].counters[i]);
        }
    }
    /* Update the next decentralized counter array */
    for (i = 0; i < info->nr_of_counters; i++) {
        erts_atomic_add_nob(&next->array[0].counters[i], info->result[i]);
    }
    /* Announce that the snapshot is done */
    {
        Sint expected = ERTS_FLXCTR_SNAPSHOT_ONGOING;
        if (expected != erts_atomic_cmpxchg_mb(&next->snapshot_status,
                                               ERTS_FLXCTR_SNAPSHOT_NOT_ONGOING,
                                               expected)) {
            /* The CAS failed, which means this thread needs to free
               the next array. */
            erts_free(info->alloc_type, next->block_start);
        }
    }
    /* Resume the process that requested the snapshot */
    erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
    if (!ERTS_PROC_IS_EXITING(p)) {
        erts_resume(p, ERTS_PROC_LOCK_STATUS);
    }
    /* Free the memory that is no longer needed */
    erts_free(info->alloc_type, array->block_start);
    erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
    erts_proc_dec_refc(p);
    erts_bin_release(bin);
}
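
The "announce that the snapshot is done" step above is a compare-and-swap hand-off: whichever party loses the race on snapshot_status knows the other has already moved on, so the loser takes responsibility for freeing the array. A minimal C11 sketch of that hand-off, with hypothetical names rather than the ERTS API:

#include <stdatomic.h>
#include <stdlib.h>

enum { SNAPSHOT_NOT_ONGOING = 0, SNAPSHOT_ONGOING = 1 };

struct ctr_array {
    _Atomic int snapshot_status;
    void *block_start;
};

void announce_snapshot_done(struct ctr_array *next)
{
    int expected = SNAPSHOT_ONGOING;
    if (!atomic_compare_exchange_strong(&next->snapshot_status,
                                        &expected,
                                        SNAPSHOT_NOT_ONGOING)) {
        /* The CAS failed: the status was already changed elsewhere, so
           freeing the block falls to this thread, mirroring the
           erts_free() on CAS failure above. */
        free(next->block_start);
    }
}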
Example #8
void erts_release_code_write_permission(void)
{
    erts_smp_mtx_lock(&code_write_permission_mtx);
    ERTS_SMP_LC_ASSERT(erts_has_code_write_permission());
    while (code_write_queue != NULL) { /* unleash the entire herd */
	struct code_write_queue_item* qitem = code_write_queue;
	erts_smp_proc_lock(qitem->p, ERTS_PROC_LOCK_STATUS);
	if (!ERTS_PROC_IS_EXITING(qitem->p)) {
	    erts_resume(qitem->p, ERTS_PROC_LOCK_STATUS);
	}
	erts_smp_proc_unlock(qitem->p, ERTS_PROC_LOCK_STATUS);
	code_write_queue = qitem->next;
	erts_proc_dec_refc(qitem->p);
	erts_free(ERTS_ALC_T_CODE_IX_LOCK_Q, qitem);
    }
    code_writing_process = NULL;
#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_tsd_set(has_code_write_permission, (void *) 0);
#endif
    erts_smp_mtx_unlock(&code_write_permission_mtx);
}
Example #9
static BIF_RETTYPE
dirty_test(Process *c_p, Eterm type, Eterm arg1, Eterm arg2, UWord *I)
{
    BIF_RETTYPE ret;
    if (am_scheduler == arg1) {
	ErtsSchedulerData *esdp;
	if (arg2 != am_type) 
	    goto badarg;
	esdp = erts_proc_sched_data(c_p);
	if (!esdp)
            goto scheduler_type_error;
      
        switch (esdp->type) {
        case ERTS_SCHED_NORMAL:
	    ERTS_BIF_PREP_RET(ret, am_normal);
            break;
        case ERTS_SCHED_DIRTY_CPU:
	    ERTS_BIF_PREP_RET(ret, am_dirty_cpu);
            break;
        case ERTS_SCHED_DIRTY_IO:
	    ERTS_BIF_PREP_RET(ret, am_dirty_io);
            break;
        default:
        scheduler_type_error:
	    ERTS_BIF_PREP_RET(ret, am_error);
            break;
        }
    }
    else if (am_error == arg1) {
	switch (arg2) {
	case am_notsup:
	    ERTS_BIF_PREP_ERROR(ret, c_p, EXC_NOTSUP);
	    break;
	case am_undef:
	    ERTS_BIF_PREP_ERROR(ret, c_p, EXC_UNDEF);
	    break;
	case am_badarith:
	    ERTS_BIF_PREP_ERROR(ret, c_p, EXC_BADARITH);
	    break;
	case am_noproc:
	    ERTS_BIF_PREP_ERROR(ret, c_p, EXC_NOPROC);
	    break;
	case am_system_limit:
	    ERTS_BIF_PREP_ERROR(ret, c_p, SYSTEM_LIMIT);
	    break;
	case am_badarg:
	default:
	    goto badarg;
	}
    }
    else if (am_copy == arg1) {
	int i;
	Eterm res;

	for (res = NIL, i = 0; i < 1000; i++) {
	    Eterm *hp, sz;
	    Eterm cpy;
	    /* We do not want this to be optimized,
	       but rather the opposite... */
	    sz = size_object(arg2);
	    hp = HAlloc(c_p, sz);
	    cpy = copy_struct(arg2, sz, &hp, &c_p->off_heap);
	    hp = HAlloc(c_p, 2);
	    res = CONS(hp, cpy, res);
	}

	ERTS_BIF_PREP_RET(ret, res);
    }
    else if (am_send == arg1) {
	dirty_send_message(c_p, arg2, am_ok);
	ERTS_BIF_PREP_RET(ret, am_ok);
    }
    else if (ERTS_IS_ATOM_STR("wait", arg1)) {
	if (!ms_wait(c_p, arg2, type == am_dirty_cpu))
	    goto badarg;
	ERTS_BIF_PREP_RET(ret, am_ok);
    }
    else if (ERTS_IS_ATOM_STR("reschedule", arg1)) {
	/*
	 * Reschedule the operation, decrementing by two each time, until
	 * we reach zero. Switch scheduler type whenever 'n' is evenly
	 * divisible by 4. If the initial value is not evenly divisible
	 * by 2, throw a badarg exception.
	 */
	Eterm next_type;
	Sint n;
	if (!term_to_Sint(arg2, &n) || n < 0)
	    goto badarg;
	if (n == 0)
	    ERTS_BIF_PREP_RET(ret, am_ok);
	else {
	    Eterm argv[3];
	    Eterm eint = erts_make_integer((Uint) (n - 2), c_p);
	    if (n % 4 != 0)
		next_type = type;
	    else {
		switch (type) {
		case am_dirty_cpu: next_type = am_dirty_io; break;
		case am_dirty_io: next_type = am_normal; break;
		case am_normal: next_type = am_dirty_cpu; break;
		default: goto badarg;
		}
	    }
	    switch (next_type) {
	    case am_dirty_io:
		argv[0] = arg1;
		argv[1] = eint;
		ret = erts_schedule_bif(c_p,
					argv,
					I,
					erts_debug_dirty_io_2,
					ERTS_SCHED_DIRTY_IO,
					am_erts_debug,
					am_dirty_io,
					2);
		break;
	    case am_dirty_cpu:
		argv[0] = arg1;
		argv[1] = eint;
		ret = erts_schedule_bif(c_p,
					argv,
					I,
					erts_debug_dirty_cpu_2,
					ERTS_SCHED_DIRTY_CPU,
					am_erts_debug,
					am_dirty_cpu,
					2);
		break;
	    case am_normal:
		argv[0] = am_normal;
		argv[1] = arg1;
		argv[2] = eint;
		ret = erts_schedule_bif(c_p,
					argv,
					I,
					erts_debug_dirty_3,
					ERTS_SCHED_NORMAL,
					am_erts_debug,
					am_dirty,
					3);
		break;
	    default:
		goto badarg;
	    }
	}
    }
    else if (ERTS_IS_ATOM_STR("ready_wait6_done", arg1)) {
	ERTS_DECL_AM(ready);
	ERTS_DECL_AM(done);
	dirty_send_message(c_p, arg2, AM_ready);
	ms_wait(c_p, make_small(6000), 0);
	dirty_send_message(c_p, arg2, AM_done);
	ERTS_BIF_PREP_RET(ret, am_ok);
    }
    else if (ERTS_IS_ATOM_STR("alive_waitexiting", arg1)) {
	Process *real_c_p = erts_proc_shadow2real(c_p);
	Eterm *hp, *hp2;
	Uint sz;
	int i;
	ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
        int dirty_io = esdp->type == ERTS_SCHED_DIRTY_IO;

	if (ERTS_PROC_IS_EXITING(real_c_p))
	    goto badarg;
	dirty_send_message(c_p, arg2, am_alive);

	/* Wait until dead */
	while (!ERTS_PROC_IS_EXITING(real_c_p)) {
            if (dirty_io)
                ms_wait(c_p, make_small(100), 0);
            else
                erts_thr_yield();
        }

	ms_wait(c_p, make_small(1000), 0);

	/* Should still be able to allocate memory */
	hp = HAlloc(c_p, 3); /* Likely on heap */
	sz = 10000;
	hp2 = HAlloc(c_p, sz); /* Likely in heap fragment */
	*hp2 = make_pos_bignum_header(sz);
	for (i = 1; i < sz; i++)
	    hp2[i] = (Eterm) 4711;
	ERTS_BIF_PREP_RET(ret, TUPLE2(hp, am_ok, make_big(hp2)));
    }
    else {
    badarg:
	ERTS_BIF_PREP_ERROR(ret, c_p, BADARG);
    }
    return ret;
}
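
A concrete trace of the "reschedule" branch may help: a call with n = 6 on a dirty CPU scheduler reschedules with n = 4 (6 is not divisible by 4, so the type is kept); at n = 4 the type rotates to dirty I/O and it reschedules with n = 2; at n = 2 it stays on dirty I/O and reschedules with n = 0, which finally returns ok. An odd initial n skips zero: once n = 1, the next argument (Uint)(n - 2) wraps to a huge value that term_to_Sint() rejects, so the call ends in badarg.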
Example #10
Process *
erts_pid2proc_opt(Process *c_p,
		  ErtsProcLocks c_p_have_locks,
		  Eterm pid,
		  ErtsProcLocks pid_need_locks,
		  int flags)
{
    Process *dec_refc_proc = NULL;
    ErtsThrPrgrDelayHandle dhndl;
    ErtsProcLocks need_locks;
    Uint pix;
    Process *proc;
#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
    ErtsProcLocks lcnt_locks;
#endif

#ifdef ERTS_ENABLE_LOCK_CHECK
    if (c_p) {
	ErtsProcLocks might_unlock = c_p_have_locks & pid_need_locks;
	if (might_unlock)
	    erts_proc_lc_might_unlock(c_p, might_unlock);
    }
#endif

    if (is_not_internal_pid(pid))
	return NULL;
    pix = internal_pid_index(pid);

    ERTS_LC_ASSERT((pid_need_locks & ERTS_PROC_LOCKS_ALL) == pid_need_locks);
    need_locks = pid_need_locks;

    if (c_p && c_p->common.id == pid) {
	ASSERT(c_p->common.id != ERTS_INVALID_PID);
	ASSERT(c_p == erts_pix2proc(pix));

	if (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
	    && ERTS_PROC_IS_EXITING(c_p))
	    return NULL;
	need_locks &= ~c_p_have_locks;
	if (!need_locks) {
	    if (flags & ERTS_P2P_FLG_INC_REFC)
		erts_proc_inc_refc(c_p);
	    return c_p;
	}
    }

    dhndl = erts_thr_progress_unmanaged_delay();

    proc = (Process *) erts_ptab_pix2intptr_ddrb(&erts_proc, pix);

    if (proc) {
	if (proc->common.id != pid)
	    proc = NULL;
	else if (!need_locks) {
	    if (flags & ERTS_P2P_FLG_INC_REFC)
		erts_proc_inc_refc(proc);
	}
	else {
	    int busy;

#if ERTS_PROC_LOCK_OWN_IMPL
#ifdef ERTS_ENABLE_LOCK_COUNT
	    lcnt_locks = need_locks;
	    if (!(flags & ERTS_P2P_FLG_TRY_LOCK)) {
	    	erts_lcnt_proc_lock(&proc->lock, need_locks);
	    }
#endif

#ifdef ERTS_ENABLE_LOCK_CHECK
	    /* Make sure erts_pid2proc_safelock() is enough to handle
	       a potential lock order violation situation... */
	    busy = erts_proc_lc_trylock_force_busy(proc, need_locks);
	    if (!busy)
#endif
#endif /* ERTS_PROC_LOCK_OWN_IMPL */
	    {
		/* Try a quick trylock to grab all the locks we need. */
		busy = (int) erts_smp_proc_raw_trylock__(proc, need_locks);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_CHECK)
		erts_proc_lc_trylock(proc, need_locks, !busy, __FILE__,__LINE__);
#endif
#ifdef ERTS_PROC_LOCK_DEBUG
		if (!busy)
		    erts_proc_lock_op_debug(proc, need_locks, 1);
#endif
	    }

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
	    if (flags & ERTS_P2P_FLG_TRY_LOCK)
		erts_lcnt_proc_trylock(&proc->lock, need_locks,
				       busy ? EBUSY : 0);
#endif

	    if (!busy) {
		if (flags & ERTS_P2P_FLG_INC_REFC)
		    erts_proc_inc_refc(proc);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
	    	/* all is great */
	    	if (!(flags & ERTS_P2P_FLG_TRY_LOCK))
		    erts_lcnt_proc_lock_post_x(&proc->lock, lcnt_locks,
					       __FILE__, __LINE__);
#endif

	    }
	    else {
		if (flags & ERTS_P2P_FLG_TRY_LOCK)
		    proc = ERTS_PROC_LOCK_BUSY;
		else {
		    int managed;
		    if (flags & ERTS_P2P_FLG_INC_REFC)
			erts_proc_inc_refc(proc);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
		    erts_lcnt_proc_lock_unaquire(&proc->lock, lcnt_locks);
#endif

		    managed = dhndl == ERTS_THR_PRGR_DHANDLE_MANAGED;
		    if (!managed) {
			erts_proc_inc_refc(proc);
			erts_thr_progress_unmanaged_continue(dhndl);
			dec_refc_proc = proc;

			/*
			 * We don't want to call
			 * erts_thr_progress_unmanaged_continue()
			 * again.
			 */
			dhndl = ERTS_THR_PRGR_DHANDLE_MANAGED;
		    }

		    proc_safelock(managed,
				  c_p,
				  c_p_have_locks,
				  c_p_have_locks,
				  proc,
				  0,
				  need_locks);
		}
	    }
        }
    }

    if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
	erts_thr_progress_unmanaged_continue(dhndl);

    if (need_locks
	&& proc
	&& proc != ERTS_PROC_LOCK_BUSY
	&& (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
	    ? ERTS_PROC_IS_EXITING(proc)
	    : (proc
	       != (Process *) erts_ptab_pix2intptr_nob(&erts_proc, pix)))) {

	erts_smp_proc_unlock(proc, need_locks);

	if (flags & ERTS_P2P_FLG_INC_REFC)
	    dec_refc_proc = proc;
	proc = NULL;

    }

    if (dec_refc_proc)
	erts_proc_dec_refc(dec_refc_proc);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_PROC_LOCK_DEBUG)
    ERTS_LC_ASSERT(!proc
		   || proc == ERTS_PROC_LOCK_BUSY
		   || (pid_need_locks ==
		       (ERTS_PROC_LOCK_FLGS_READ_(&proc->lock)
			& pid_need_locks)));
#endif

    return proc;
}
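
The locking strategy above (shared with the older variant in Example #14) is two-phase: try to grab all needed locks without blocking, and only on failure fall back to proc_safelock(), which acquires locks in a deadlock-free order and may require revalidating the process afterwards. A minimal pthreads sketch of the same idea, with hypothetical names rather than the ERTS API:

#include <pthread.h>

/* Slow path: take both mutexes in address order to avoid deadlock. */
static void lock_both_in_order(pthread_mutex_t *a, pthread_mutex_t *b)
{
    if (a < b) {
        pthread_mutex_lock(a);
        pthread_mutex_lock(b);
    } else {
        pthread_mutex_lock(b);
        pthread_mutex_lock(a);
    }
}

/* Caller already holds 'held' and wants 'target' too.
   Returns 0 on the fast path, 1 if locks were dropped (revalidate!). */
int acquire_second_lock(pthread_mutex_t *held, pthread_mutex_t *target)
{
    if (pthread_mutex_trylock(target) == 0)
        return 0;                   /* quick trylock succeeded */
    pthread_mutex_unlock(held);     /* drop and reacquire in safe order */
    lock_both_in_order(held, target);
    return 1;
}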
Example #11
BIF_RETTYPE erts_internal_port_command_3(BIF_ALIST_3)
{
    BIF_RETTYPE res;
    Port *prt;
    int flags = 0;
    Eterm ref;

    if (is_not_nil(BIF_ARG_3)) {
	Eterm l = BIF_ARG_3;
	while (is_list(l)) {
	    Eterm* cons = list_val(l);
	    Eterm car = CAR(cons);
	    if (car == am_force)
		flags |= ERTS_PORT_SIG_FLG_FORCE;
	    else if (car == am_nosuspend)
		flags |= ERTS_PORT_SIG_FLG_NOSUSPEND;
	    else
		BIF_RET(am_badarg);
	    l = CDR(cons);
	}
	if (!is_nil(l))
	    BIF_RET(am_badarg);
    }

    prt = sig_lookup_port(BIF_P, BIF_ARG_1);
    if (!prt)
	BIF_RET(am_badarg);

    if (flags & ERTS_PORT_SIG_FLG_FORCE) {
	if (!(prt->drv_ptr->flags & ERL_DRV_FLAG_SOFT_BUSY))
	    BIF_RET(am_notsup);
    }

#ifdef DEBUG
    ref = NIL;
#endif

    switch (erts_port_output(BIF_P, flags, prt, prt->common.id, BIF_ARG_2, &ref)) {
    case ERTS_PORT_OP_CALLER_EXIT:
    case ERTS_PORT_OP_BADARG:
    case ERTS_PORT_OP_DROPPED:
 	ERTS_BIF_PREP_RET(res, am_badarg);
	break;
    case ERTS_PORT_OP_BUSY:
	ASSERT(!(flags & ERTS_PORT_SIG_FLG_FORCE));
	if (flags & ERTS_PORT_SIG_FLG_NOSUSPEND)
	    ERTS_BIF_PREP_RET(res, am_false);
	else {
	    erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, prt);
	    ERTS_BIF_PREP_YIELD3(res, bif_export[BIF_erts_internal_port_command_3],
				 BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
	}
	break;
    case ERTS_PORT_OP_BUSY_SCHEDULED:
	ASSERT(!(flags & ERTS_PORT_SIG_FLG_FORCE));
	/* Fall through... */
    case ERTS_PORT_OP_SCHEDULED:
	ASSERT(is_internal_ordinary_ref(ref));
	ERTS_BIF_PREP_RET(res, ref);
	break;
    case ERTS_PORT_OP_DONE:
	ERTS_BIF_PREP_RET(res, am_true);
	break;
    default:
	ERTS_INTERNAL_ERROR("Unexpected erts_port_output() result");
	break;
    }

    if (ERTS_PROC_IS_EXITING(BIF_P)) {
	KILL_CATCHES(BIF_P);	/* Must exit */
	ERTS_BIF_PREP_ERROR(res, BIF_P, EXC_ERROR);
    }

    return res;
}
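
Worth noting in the ERTS_PORT_OP_BUSY branch: when suspension is allowed, the caller is suspended on the busy port and the BIF yields with its original three arguments, so the whole command is transparently retried once the port stops being busy; with nosuspend the caller instead gets false immediately.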
Example #12
static ERTS_INLINE int
is_proc_alive(Process *p)
{
    return !ERTS_PROC_IS_EXITING(p);
}
Example #13
static BIF_RETTYPE
do_port_command(Process *BIF_P, Eterm arg1, Eterm arg2, Eterm arg3,
		Uint32 flags)
{
    BIF_RETTYPE res;
    Port *p;

    /* Trace sched out before lock check wait */    
    if (IS_TRACED_FL(BIF_P, F_TRACE_SCHED_PROCS)) {
	trace_virtual_sched(BIF_P, am_out);
    }
    
    if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
    	profile_runnable_proc(BIF_P, am_inactive);
    }

    p = id_or_name2port(BIF_P, arg1);
    if (!p) {
    	if (IS_TRACED_FL(BIF_P, F_TRACE_SCHED_PROCS)) {
	    trace_virtual_sched(BIF_P, am_in);
	}
    	if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
    	    profile_runnable_proc(BIF_P, am_active);
    	}
	BIF_ERROR(BIF_P, BADARG);
    }
    
    /* Trace port in, id_or_name2port causes wait */

    if (IS_TRACED_FL(p, F_TRACE_SCHED_PORTS)) {
	trace_sched_ports_where(p, am_in, am_command);
    }
    if (erts_system_profile_flags.runnable_ports && !erts_port_is_scheduled(p)) {
    	profile_runnable_port(p, am_active);
    }

    ERTS_BIF_PREP_RET(res, am_true);

    if ((flags & ERTS_PORT_COMMAND_FLAG_FORCE)
	&& !(p->drv_ptr->flags & ERL_DRV_FLAG_SOFT_BUSY)) {
	ERTS_BIF_PREP_ERROR(res, BIF_P, EXC_NOTSUP);
    }
    else if (!(flags & ERTS_PORT_COMMAND_FLAG_FORCE)
	     && p->status & ERTS_PORT_SFLG_PORT_BUSY) {
	if (flags & ERTS_PORT_COMMAND_FLAG_NOSUSPEND) {
	    ERTS_BIF_PREP_RET(res, am_false);
	}
	else {
	    erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, p);
	    if (erts_system_monitor_flags.busy_port) {
		monitor_generic(BIF_P, am_busy_port, p->id);
	    }
	    ERTS_BIF_PREP_YIELD3(res, bif_export[BIF_port_command_3], BIF_P,
				 arg1, arg2, arg3);
	}
    } else {
	int wres;
	erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
	ERTS_SMP_CHK_NO_PROC_LOCKS;
	wres = erts_write_to_port(BIF_P->id, p, arg2);
	erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
	if (wres != 0) {
	    ERTS_BIF_PREP_ERROR(res, BIF_P, BADARG);
	}
    }
    
    if (IS_TRACED_FL(p, F_TRACE_SCHED_PORTS)) {
	trace_sched_ports_where(p, am_out, am_command);
    }
    if (erts_system_profile_flags.runnable_ports && !erts_port_is_scheduled(p)) {
    	profile_runnable_port(p, am_inactive);
    }

    erts_port_release(p);
    /* Trace sched in after port release */
    if (IS_TRACED_FL(BIF_P, F_TRACE_SCHED_PROCS)) {
	trace_virtual_sched(BIF_P, am_in);
    }
    if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
    	profile_runnable_proc(BIF_P, am_active);
    }
    
    if (ERTS_PROC_IS_EXITING(BIF_P)) {
	KILL_CATCHES(BIF_P);	/* Must exit */
	ERTS_BIF_PREP_ERROR(res, BIF_P, EXC_ERROR);
    }
    return res;
}
Example #14
Process *
erts_pid2proc_opt(Process *c_p,
		  ErtsProcLocks c_p_have_locks,
		  Eterm pid,
		  ErtsProcLocks pid_need_locks,
		  int flags)
{
    Process *dec_refc_proc = NULL;
    int need_ptl;
    ErtsProcLocks need_locks;
    Uint pix;
    Process *proc;
#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
    ErtsProcLocks lcnt_locks;
#endif

#ifdef ERTS_ENABLE_LOCK_CHECK
    if (c_p) {
	ErtsProcLocks might_unlock = c_p_have_locks & pid_need_locks;
	if (might_unlock)
	    erts_proc_lc_might_unlock(c_p, might_unlock);
    }
#endif

    if (is_not_internal_pid(pid))
	return NULL;
    pix = internal_pid_index(pid);

    ERTS_LC_ASSERT((pid_need_locks & ERTS_PROC_LOCKS_ALL) == pid_need_locks);
    need_locks = pid_need_locks;

    if (c_p && c_p->id == pid) {
	ASSERT(c_p->id != ERTS_INVALID_PID);
	ASSERT(c_p == erts_pix2proc(pix));

	if (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
	    && ERTS_PROC_IS_EXITING(c_p))
	    return NULL;
	need_locks &= ~c_p_have_locks;
	if (!need_locks) {
	    if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
		erts_smp_proc_inc_refc(c_p);
	    return c_p;
	}
    }

    need_ptl = !erts_get_scheduler_id();

    if (need_ptl)
	erts_smp_rwmtx_rwlock(&erts_proc_tab_rwmtx);

    proc = (Process *) erts_smp_atomic_read_ddrb(&erts_proc.tab[pix]);

    if (proc) {
	if (proc->id != pid)
	    proc = NULL;
	else if (!need_locks) {
	    if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
		erts_smp_proc_inc_refc(proc);
	}
	else {
	    int busy;

#if ERTS_PROC_LOCK_OWN_IMPL
#ifdef ERTS_ENABLE_LOCK_COUNT
	    lcnt_locks = need_locks;
	    if (!(flags & ERTS_P2P_FLG_TRY_LOCK)) {
	    	erts_lcnt_proc_lock(&proc->lock, need_locks);
	    }
#endif

#ifdef ERTS_ENABLE_LOCK_CHECK
	    /* Make sure erts_pid2proc_safelock() is enough to handle
	       a potential lock order violation situation... */
	    busy = erts_proc_lc_trylock_force_busy(proc, need_locks);
	    if (!busy)
#endif
#endif /* ERTS_PROC_LOCK_OWN_IMPL */
	    {
		/* Try a quick trylock to grab all the locks we need. */
		busy = (int) erts_smp_proc_raw_trylock__(proc, need_locks);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_CHECK)
		erts_proc_lc_trylock(proc, need_locks, !busy);
#endif
#ifdef ERTS_PROC_LOCK_DEBUG
		if (!busy)
		    erts_proc_lock_op_debug(proc, need_locks, 1);
#endif
	    }

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
	    if (flags & ERTS_P2P_FLG_TRY_LOCK)
		erts_lcnt_proc_trylock(&proc->lock, need_locks,
				       busy ? EBUSY : 0);
#endif

	    if (!busy) {
		if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
		    erts_smp_proc_inc_refc(proc);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
	    	/* all is great */
	    	if (!(flags & ERTS_P2P_FLG_TRY_LOCK))
		    erts_lcnt_proc_lock_post_x(&proc->lock, lcnt_locks,
					       __FILE__, __LINE__);
#endif

	    }
	    else {
		if (flags & ERTS_P2P_FLG_TRY_LOCK)
		    proc = ERTS_PROC_LOCK_BUSY;
		else {
		    if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
			erts_smp_proc_inc_refc(proc);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
		    erts_lcnt_proc_lock_unaquire(&proc->lock, lcnt_locks);
#endif

		    if (need_ptl) {
			erts_smp_proc_inc_refc(proc);
			dec_refc_proc = proc;
			erts_smp_rwmtx_rwunlock(&erts_proc_tab_rwmtx);
			need_ptl = 0;
		    }

		    proc_safelock(!need_ptl,
				  c_p,
				  c_p_have_locks,
				  c_p_have_locks,
				  proc,
				  0,
				  need_locks);
		}
	    }
        }
    }

    if (need_ptl)
	erts_smp_rwmtx_rwunlock(&erts_proc_tab_rwmtx);

    if (need_locks
	&& proc
	&& proc != ERTS_PROC_LOCK_BUSY
	&& (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
	    ? ERTS_PROC_IS_EXITING(proc)
	    : (proc
	       != (Process *) erts_smp_atomic_read_nob(&erts_proc.tab[pix])))) {

	erts_smp_proc_unlock(proc, need_locks);

	if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
	    dec_refc_proc = proc;
	proc = NULL;

    }

    if (dec_refc_proc)
	erts_smp_proc_dec_refc(dec_refc_proc);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_PROC_LOCK_DEBUG)
    ERTS_LC_ASSERT(!proc
		   || proc == ERTS_PROC_LOCK_BUSY
		   || (pid_need_locks ==
		       (ERTS_PROC_LOCK_FLGS_READ_(&proc->lock)
			& pid_need_locks)));
#endif

    return proc;
}