Example #1
static void
wakeup_unmanaged_threads(ErtsThrPrgrUnmanagedWakeupData *umwd)
{
    int hix;
    for (hix = 0; hix < umwd->high_sz; hix++) {
	erts_aint32_t hmask = erts_atomic32_read_nob(&umwd->high[hix]);
	if (hmask) {
	    int hbase = hix << ERTS_THR_PRGR_BM_SHIFT;
	    int hbit;
	    for (hbit = 0; hbit < ERTS_THR_PRGR_BM_BITS; hbit++) {
		if (hmask & (1 << hbit)) {
		    erts_aint_t lmask;
		    int lix = hbase + hbit;
		    ASSERT(0 <= lix && lix < umwd->low_sz);
		    lmask = erts_atomic32_read_nob(&umwd->low[lix]);
		    if (lmask) {
			int lbase = lix << ERTS_THR_PRGR_BM_SHIFT;
			int lbit;
			for (lbit = 0; lbit < ERTS_THR_PRGR_BM_BITS; lbit++) {
			    if (lmask & (1 << lbit)) {
				int id = lbase + lbit;
				wakeup_unmanaged(id);
			    }
			}
			erts_atomic32_set_nob(&umwd->low[lix], 0);
		    }
		}
	    }
	    erts_atomic32_set_nob(&umwd->high[hix], 0);
	}
    }
}
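
The function above walks a two-level bitmap: each bit in a high[] word says whether the corresponding low[] word may contain set bits, so a mostly empty map is scanned in O(high words) rather than O(low words). Below is a minimal, self-contained sketch of the same idea using C11 <stdatomic.h> instead of the ERTS atomics; all names and sizes (request_wakeup, wake_all, BM_BITS, LOW_SZ) are invented for illustration.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define BM_BITS 32                  /* bits per bitmap word */
#define LOW_SZ  4                   /* low-level words: ids 0..127 */
#define HIGH_SZ 1                   /* ceil(LOW_SZ / BM_BITS) */

static _Atomic uint32_t high[HIGH_SZ];
static _Atomic uint32_t low[LOW_SZ];

/* Mark `id` as waiting: set its bit in the low-level word first, then
 * the summary bit for that word, so a scanner that sees the summary
 * bit is guaranteed to see the low-level bit as well. */
static void request_wakeup(int id)
{
    int lix = id / BM_BITS;
    atomic_fetch_or(&low[lix], UINT32_C(1) << (id % BM_BITS));
    atomic_fetch_or(&high[lix / BM_BITS], UINT32_C(1) << (lix % BM_BITS));
}

/* Same scan shape as wakeup_unmanaged_threads(): only low-level words
 * whose summary bit is set are ever read, so sparse maps stay cheap. */
static void wake_all(void)
{
    for (int hix = 0; hix < HIGH_SZ; hix++) {
        uint32_t hmask = atomic_load(&high[hix]);
        if (!hmask)
            continue;
        for (int hbit = 0; hbit < BM_BITS; hbit++) {
            if (hmask & (UINT32_C(1) << hbit)) {
                int lix = hix * BM_BITS + hbit;
                uint32_t lmask = atomic_load(&low[lix]);
                for (int lbit = 0; lbit < BM_BITS; lbit++)
                    if (lmask & (UINT32_C(1) << lbit))
                        printf("wake id %d\n", lix * BM_BITS + lbit);
                atomic_store(&low[lix], 0);
            }
        }
        atomic_store(&high[hix], 0);
    }
}

int main(void)
{
    request_wakeup(3);
    request_wakeup(70);
    wake_all();                     /* prints ids 3 and 70 */
    return 0;
}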
Example #2
static void
handle_wakeup_requests(ErtsThrPrgrVal current)
{
    ErtsThrPrgrManagedWakeupData *mwd;
    ErtsThrPrgrUnmanagedWakeupData *umwd;
    int wix, len, i;

    wix = ERTS_THR_PRGR_WAKEUP_IX(current);

    mwd = intrnl->managed.data[wix];
    len = erts_atomic32_read_nob(&mwd->len);
    ASSERT(len >= 0);
    if (len) {
	for (i = 0; i < len; i++)
	    wakeup_managed(mwd->id[i]);
	erts_atomic32_set_nob(&mwd->len, 0);
    }

    umwd = intrnl->unmanaged.data[wix];
    len = erts_atomic32_read_nob(&umwd->len);
    ASSERT(len >= 0);
    if (len) {
	wakeup_unmanaged_threads(umwd);
	erts_atomic32_set_nob(&umwd->len, 0);
    }

}
Example #3
static void
kill_ports_driver_unloaded(DE_Handle *dh)
{
    int ix, max = erts_ptab_max(&erts_port);

    for (ix = 0; ix < max; ix++) {
	erts_aint32_t state;
	Port* prt = erts_pix2port(ix);
	if (!prt)
	    continue;

	ERTS_THR_DATA_DEPENDENCY_READ_MEMORY_BARRIER;

	state = erts_atomic32_read_nob(&prt->state);
	if (state & FREE_PORT_FLAGS)
	    continue;

	erts_port_lock(prt);

	state = erts_atomic32_read_nob(&prt->state);
	if (!(state & ERTS_PORT_SFLGS_DEAD) && prt->drv_ptr->handle == dh)
	    driver_failure_atom(ERTS_Port2ErlDrvPort(prt), "driver_unloaded");

	erts_port_release(prt);
    }
}
Example #4
void
erts_cleanup_port_data(Port *prt)
{
    ASSERT(erts_atomic32_read_nob(&prt->state) & ERTS_PORT_SFLGS_INVALID_LOOKUP);
    cleanup_old_port_data(erts_smp_atomic_xchg_nob(&prt->data,
						   (erts_aint_t) NULL));
}
Example #5
void
erts_cleanup_port_data(Port *prt)
{
    ASSERT(erts_atomic32_read_nob(&prt->state) & ERTS_PORT_SFLGS_INVALID_LOOKUP);
    cleanup_old_port_data(erts_smp_atomic_read_nob(&prt->data));
    erts_smp_atomic_set_nob(&prt->data, (erts_aint_t) THE_NON_VALUE);
}
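
Examples #4 and #5 are two variants of the same cleanup. The first detaches the data pointer with a single atomic exchange, while the second reads and then overwrites it in two steps, which is only safe if no other thread can race for the same pointer. A small sketch of the difference in C11 atomics (names invented, not the ERTS API):

#include <stdatomic.h>
#include <stddef.h>

static _Atomic(void *) slot;        /* stands in for prt->data */

/* Exactly one caller observes the old pointer; later callers get NULL,
 * so the pointed-to data is cleaned up exactly once. */
static void *claim_xchg(void)
{
    return atomic_exchange(&slot, NULL);
}

/* Racy two-step variant: two threads can both load the same old
 * pointer before either store lands, and then both clean it up. */
static void *claim_racy(void)
{
    void *old = atomic_load(&slot);
    atomic_store(&slot, NULL);
    return old;
}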
Example #6
Sint
erts_complete_off_heap_message_queue_change(Process *c_p)
{
    int reds = 1;

    ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p));
    ASSERT(c_p->flags & F_OFF_HEAP_MSGQ_CHNG);
    ASSERT(erts_atomic32_read_nob(&c_p->state) & ERTS_PSFLG_OFF_HEAP_MSGQ);

    /*
     * This job was first initiated when the process changed to off heap
     * message queue management. Since then ERTS_PSFLG_OFF_HEAP_MSGQ
     * has been set. However, the management state might have been changed
     * again (multiple times) since then. Check the user's last requested
     * state (the flags F_OFF_HEAP_MSGQ and F_ON_HEAP_MSGQ), and make the state
     * consistent with that.
     */

    if (!(c_p->flags & F_OFF_HEAP_MSGQ))
	erts_atomic32_read_band_nob(&c_p->state,
					~ERTS_PSFLG_OFF_HEAP_MSGQ);
    else {
	reds += 2;
	erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ);
	ERTS_MSGQ_MV_INQ2PRIVQ(c_p);
	erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ);
	reds += erts_move_messages_off_heap(c_p);
    }
    c_p->flags &= ~F_OFF_HEAP_MSGQ_CHNG;
    return reds;
}
Example #7
void
erts_thr_progress_prepare_wait(ErtsSchedulerData *esdp)
{
    erts_aint32_t lflgs;
    ErtsThrPrgrData *tpd = thr_prgr_data(esdp);

#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_lc_check_exact(NULL, 0);
#endif

    block_count_dec();

    tpd->confirmed = ERTS_THR_PRGR_VAL_WAITING;
    set_mb(&intrnl->thr[tpd->id].data.current, ERTS_THR_PRGR_VAL_WAITING);

    lflgs = erts_atomic32_read_nob(&intrnl->misc.data.lflgs);

    if ((lflgs & (ERTS_THR_PRGR_LFLG_NO_LEADER
		  | ERTS_THR_PRGR_LFLG_WAITING_UM
		  | ERTS_THR_PRGR_LFLG_ACTIVE_MASK))
	== ERTS_THR_PRGR_LFLG_NO_LEADER 
	&& got_sched_wakeups()) {
	/* Someone needs to make progress */
	wakeup_managed(0);
    }
}
Example #8
void
erts_thr_progress_active(ErtsSchedulerData *esdp, int on)
{
    ErtsThrPrgrData *tpd = thr_prgr_data(esdp);

#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_lc_check_exact(NULL, 0);
#endif

    ERTS_THR_PROGRESS_STATE_DEBUG_SET_ACTIVE(tpd->id, on);

    if (on) {
	ASSERT(!tpd->active);
	tpd->active = 1;
	erts_atomic32_inc_nob(&intrnl->misc.data.lflgs);
    }
    else {
	ASSERT(tpd->active);
	tpd->active = 0;
	erts_atomic32_dec_nob(&intrnl->misc.data.lflgs);
	if (update(tpd))
	    leader_update(tpd);
    }

#ifdef DEBUG
    {
	erts_aint32_t n = erts_atomic32_read_nob(&intrnl->misc.data.lflgs);
	n &= ERTS_THR_PRGR_LFLG_ACTIVE_MASK;
	ASSERT(tpd->active <= n && n <= intrnl->managed.no);
    }
#endif

}
Example #9
Uint erts_process_memory(Process *p, int include_sigs_in_transit)
{
    Uint size = 0;
    struct saved_calls *scb;

    size += sizeof(Process);

    if ((erts_atomic32_read_nob(&p->state) & ERTS_PSFLG_EXITING) == 0) {
        erts_link_tree_foreach(ERTS_P_LINKS(p),
                               link_size, (void *) &size);
        erts_monitor_tree_foreach(ERTS_P_MONITORS(p),
                                  monitor_size, (void *) &size);
        erts_monitor_list_foreach(ERTS_P_LT_MONITORS(p),
                                  monitor_size, (void *) &size);
    }
    size += (p->heap_sz + p->mbuf_sz) * sizeof(Eterm);
    if (p->abandoned_heap)
        size += (p->hend - p->heap) * sizeof(Eterm);
    if (p->old_hend && p->old_heap)
        size += (p->old_hend - p->old_heap) * sizeof(Eterm);

    if (!include_sigs_in_transit) {
        /*
         * Size of message queue!
         *
         * Note that this assumes that any part of the message
         * queue located in the middle queue has been moved
         * into the inner queue prior to this call.
         * process_info() management ensures this is done.
         */
        ErtsMessage *mp;
        for (mp = p->sig_qs.first; mp; mp = mp->next) {
            ASSERT(ERTS_SIG_IS_MSG((ErtsSignal *) mp));
            size += sizeof(ErtsMessage);
            if (mp->data.attached)
                size += erts_msg_attached_data_size(mp) * sizeof(Eterm);
        }
    }
    else {
        /*
         * Size of message queue plus size of all signals
         * in transit to the process!
         */
        erts_proc_lock(p, ERTS_PROC_LOCK_MSGQ);
        erts_proc_sig_fetch(p);
        erts_proc_unlock(p, ERTS_PROC_LOCK_MSGQ);

        ERTS_FOREACH_SIG_PRIVQS(
            p, mp,
            {
                size += sizeof(ErtsMessage);
                if (ERTS_SIG_IS_NON_MSG((ErtsSignal *) mp))
                    size += erts_proc_sig_signal_size((ErtsSignal *) mp);
                else if (mp->data.attached)
                    size += erts_msg_attached_data_size(mp) * sizeof(Eterm);
            });
    }

    /* Add the size of the saved-calls buffer, if call saving is enabled */
    scb = ERTS_PROC_GET_SAVED_CALLS_BUF(p);
    if (scb) {
        size += sizeof(struct saved_calls)
                + (scb->len - 1) * sizeof(scb->ct[0]);
    }

    return size;
}
Example #10
static int
got_sched_wakeups(void)
{
    int wix;

    ERTS_THR_MEMORY_BARRIER;

    for (wix = 0; wix < ERTS_THR_PRGR_WAKEUP_DATA_SIZE; wix++) {
 	ErtsThrPrgrManagedWakeupData **mwd = intrnl->managed.data;
	if (erts_atomic32_read_nob(&mwd[wix]->len))
	    return 1;
    }
    for (wix = 0; wix < ERTS_THR_PRGR_WAKEUP_DATA_SIZE; wix++) {
 	ErtsThrPrgrUnmanagedWakeupData **umwd = intrnl->unmanaged.data;
	if (erts_atomic32_read_nob(&umwd[wix]->len))
	    return 1;
    }
    return 0;
}
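
got_sched_wakeups() issues a full memory barrier before scanning the wakeup queues. That is the classic store/load (Dekker-style) pattern: one side publishes its state, then only after a full barrier reads the other side's state, so a request posted concurrently with the store cannot be missed by both parties. A minimal sketch with C11 atomics (names invented):

#include <stdatomic.h>

static _Atomic int sleeping;        /* "I am about to wait" */
static _Atomic int pending;         /* "there is a wakeup request" */

/* Waiter side: publish intent to sleep, then re-check for requests.
 * Without the full fence, the load of `pending` could be reordered
 * before the store of `sleeping`, and a request could be lost. */
static int may_sleep(void)
{
    atomic_store_explicit(&sleeping, 1, memory_order_relaxed);
    atomic_thread_fence(memory_order_seq_cst);   /* StoreLoad barrier */
    return atomic_load_explicit(&pending, memory_order_relaxed) == 0;
}

/* Requester side: post the request, fence, then check whether the
 * waiter already announced it is sleeping and needs an explicit wake. */
static int must_wake(void)
{
    atomic_store_explicit(&pending, 1, memory_order_relaxed);
    atomic_thread_fence(memory_order_seq_cst);
    return atomic_load_explicit(&sleeping, memory_order_relaxed) != 0;
}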
Example #11
void erts_thr_progress_dbg_print_state(void)
{
    int id;
    int sz = intrnl->managed.no;

    erts_fprintf(stderr, "--- thread progress ---\n");
    erts_fprintf(stderr,"current=%b64u\n", erts_thr_progress_current());
    for (id = 0; id < sz; id++) {
	ErtsThrPrgrVal current = read_nob(&intrnl->thr[id].data.current);
#ifdef ERTS_THR_PROGRESS_STATE_DEBUG
	erts_aint32_t state_debug;
	char *active, *leader;

	state_debug = erts_atomic32_read_nob(&intrnl->thr[id].data.state_debug);
	active = (state_debug & ERTS_THR_PROGRESS_STATE_DEBUG_ACTIVE
		  ? "true"
		  : "false");
	leader = (state_debug & ERTS_THR_PROGRESS_STATE_DEBUG_LEADER
		  ? "true"
		  : "false");
#endif
	if (current == ERTS_THR_PRGR_VAL_WAITING)
	    erts_fprintf(stderr,
			 "  id=%d, current=WAITING"
#ifdef ERTS_THR_PROGRESS_STATE_DEBUG
			 ", active=%s, leader=%s"
#endif
			 "\n", id
#ifdef ERTS_THR_PROGRESS_STATE_DEBUG
			 , active, leader
#endif
		);
	else
	    erts_fprintf(stderr,
			 "  id=%d, current=%b64u"
#ifdef ERTS_THR_PROGRESS_STATE_DEBUG
			 ", active=%s, leader=%s"
#endif
			 "\n", id, current
#ifdef ERTS_THR_PROGRESS_STATE_DEBUG
			 , active, leader
#endif
		);
    }
    erts_fprintf(stderr, "-----------------------\n");
}
Example #12
int efile_close(efile_data_t *d) {
    efile_win_t *w = (efile_win_t*)d;
    HANDLE handle;

    ASSERT(erts_atomic32_read_nob(&d->state) == EFILE_STATE_CLOSED);
    ASSERT(w->handle != INVALID_HANDLE_VALUE);

    handle = w->handle;
    w->handle = INVALID_HANDLE_VALUE;

    if(!CloseHandle(handle)) {
        w->common.posix_errno = windows_to_posix_errno(GetLastError());
        return 0;
    }

    return 1;
}
Example #13
static int
update(ErtsThrPrgrData *tpd)
{
    int res;
    ErtsThrPrgrVal val;

    if (tpd->leader)
	res = 1;
    else {
	erts_aint32_t lflgs;
	res = 0;
	val = read_acqb(&erts_thr_prgr__.current);
	if (tpd->confirmed == val) {
	    val++;
	    if (val == ERTS_THR_PRGR_VAL_WAITING)
		val = 0;
	    tpd->confirmed = val;
	    set_mb(&intrnl->thr[tpd->id].data.current, val);
	}

	lflgs = erts_atomic32_read_nob(&intrnl->misc.data.lflgs);
	if (lflgs & ERTS_THR_PRGR_LFLG_BLOCK)
	    res = 1; /* Need to block in leader_update() */

	if ((lflgs & ERTS_THR_PRGR_LFLG_NO_LEADER)
	    && (tpd->active || ERTS_THR_PRGR_LFLGS_ACTIVE(lflgs) == 0)) {
	    /* Try to take over leadership... */
	    erts_aint32_t olflgs;
	    olflgs = erts_atomic32_read_band_acqb(
		&intrnl->misc.data.lflgs,
		~ERTS_THR_PRGR_LFLG_NO_LEADER);
	    if (olflgs & ERTS_THR_PRGR_LFLG_NO_LEADER) {
		tpd->leader = 1;
#if ERTS_THR_PRGR_PRINT_LEADER
		erts_fprintf(stderr, "L -> %d\n", tpd->id);
#endif
		ERTS_THR_PROGRESS_STATE_DEBUG_SET_LEADER(tpd->id, 1);
	    }
	}
	res |= tpd->leader;
    }
    return res;
}
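
The leadership takeover in update() relies on the return value of the atomic read-band: clearing ERTS_THR_PRGR_LFLG_NO_LEADER returns the previous flag word, and only the thread that observed the bit still set in that old value has actually taken leadership. A stripped-down sketch of the same claim-by-fetch-and idiom with C11 atomics (the flag layout is invented):

#include <stdatomic.h>
#include <stdint.h>

#define FLG_NO_LEADER (UINT32_C(1) << 31)   /* invented flag layout */

static _Atomic uint32_t lflgs = FLG_NO_LEADER;

/* Atomically clear the "no leader" bit and inspect the *previous*
 * value. Only the thread that flipped the bit from set to clear wins;
 * any later caller sees it already clear and returns 0. */
static int try_take_leadership(void)
{
    uint32_t old = atomic_fetch_and_explicit(&lflgs, ~FLG_NO_LEADER,
                                             memory_order_acquire);
    return (old & FLG_NO_LEADER) != 0;
}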
Example #14
int efile_close(efile_data_t *d, posix_errno_t *error) {
    efile_win_t *w = (efile_win_t*)d;
    HANDLE handle;

    ASSERT(enif_thread_type() == ERL_NIF_THR_DIRTY_IO_SCHEDULER);
    ASSERT(erts_atomic32_read_nob(&d->state) == EFILE_STATE_CLOSED);
    ASSERT(w->handle != INVALID_HANDLE_VALUE);

    handle = w->handle;
    w->handle = INVALID_HANDLE_VALUE;

    enif_release_resource(d);

    if(!CloseHandle(handle)) {
        *error = windows_to_posix_errno(GetLastError());
        return 0;
    }

    return 1;
}
Example #15
int efile_close(efile_data_t *d) {
    efile_unix_t *u = (efile_unix_t*)d;
    int fd;

    ASSERT(erts_atomic32_read_nob(&d->state) == EFILE_STATE_CLOSED);
    ASSERT(u->fd != -1);

    fd = u->fd;
    u->fd = -1;

    /* close(2) either always closes (*BSD, Linux) or leaves the fd in an
     * undefined state (POSIX 2008, Solaris), so we must not retry on EINTR. */

    if(close(fd) < 0) {
        u->common.posix_errno = errno;
        return 0;
    }

    return 1;
}
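
The comment above is worth restating: since close(2) may leave the descriptor in an unspecified state on EINTR (and on *BSD/Linux always closes it), retrying can close a descriptor that another thread has just been handed by a fresh open(). A sketch of a wrapper that follows this rule (the helper name is invented):

#include <errno.h>
#include <unistd.h>

/* Close `fd` exactly once. Whatever close() reports (EINTR included),
 * the descriptor must be treated as gone: retrying could close an
 * unrelated descriptor that open() just handed out elsewhere. */
static int close_once(int fd, int *err_out)
{
    if (close(fd) < 0) {
        *err_out = errno;
        return 0;       /* failed, but do NOT call close(fd) again */
    }
    return 1;
}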
Example #16
static ERTS_INLINE void
unmanaged_continue(ErtsThrPrgrDelayHandle handle)
{
    int umrefc_ix = (int) handle;
    erts_aint_t refc;

    ASSERT(umrefc_ix == 0 || umrefc_ix == 1);
    refc = erts_atomic_dec_read_relb(&intrnl->umrefc[umrefc_ix].refc);
    ASSERT(refc >= 0);
    if (refc == 0) {
	erts_aint_t lflgs;
	ERTS_THR_READ_MEMORY_BARRIER;
	lflgs = erts_atomic32_read_nob(&intrnl->misc.data.lflgs);
	if ((lflgs & (ERTS_THR_PRGR_LFLG_NO_LEADER
		      | ERTS_THR_PRGR_LFLG_WAITING_UM
		      | ERTS_THR_PRGR_LFLG_ACTIVE_MASK))
	    == (ERTS_THR_PRGR_LFLG_NO_LEADER|ERTS_THR_PRGR_LFLG_WAITING_UM)
	    && got_sched_wakeups()) {
	    /* Others waiting for us... */
	    wakeup_managed(0);
	}
    }
}
Example #17
static erts_aint32_t
block_thread(ErtsThrPrgrData *tpd)
{
    erts_aint32_t lflgs;
    ErtsThrPrgrCallbacks *cbp = &intrnl->managed.callbacks[tpd->id];

    do {
	block_count_dec();

	while (1) {
	    cbp->prepare_wait(cbp->arg);
	    lflgs = erts_atomic32_read_nob(&intrnl->misc.data.lflgs);
	    if (lflgs & ERTS_THR_PRGR_LFLG_BLOCK)
		cbp->wait(cbp->arg);
	    else
		break;
	}

    } while (block_count_inc());

    cbp->finalize_wait(cbp->arg);

    return lflgs;
}
Example #18
/* 
 * Called from erl_process.c.
 */
void erts_ddll_proc_dead(Process *p, ErtsProcLocks plocks) 
{
    erts_driver_t *drv;
    erts_proc_unlock(p, plocks);
    lock_drv_list();
    drv = driver_list;
    while (drv != NULL) {
	if (drv->handle != NULL && drv->handle->status != ERL_DE_PERMANENT) {
	    DE_ProcEntry **pe = &(drv->handle->procs);
	    int kill_ports = (drv->handle->flags & ERL_DE_FL_KILL_PORTS);
	    int left = 0;
	    while ((*pe) != NULL) {
		if ((*pe)->proc == p) {
		    DE_ProcEntry *r = *pe;
		    *pe = r->next;
		    if (!(r->flags & ERL_DE_FL_DEREFERENCED) && 
			r->awaiting_status == ERL_DE_PROC_LOADED) {
			erts_ddll_dereference_driver(drv->handle);
		    }
		    erts_free(ERTS_ALC_T_DDLL_PROCESS, (void *) r);
		} else {
		    if ((*pe)->awaiting_status == ERL_DE_PROC_LOADED) {
			++left;
		    }
		    pe = &((*pe)->next);
		}
	    }
	    if (!left) {
		DE_Handle *dh = drv->handle;
		if (dh->status == ERL_DE_RELOAD ||
		    dh->status == ERL_DE_FORCE_RELOAD) {
		    notify_all(dh, drv->name, 
			       ERL_DE_PROC_AWAIT_LOAD, am_DOWN, am_load_cancelled);
		    erts_free(ERTS_ALC_T_DDLL_HANDLE,dh->reload_full_path);
		    erts_free(ERTS_ALC_T_DDLL_HANDLE,dh->reload_driver_name);
		    dh->reload_full_path = dh->reload_driver_name = NULL; 
		    dh->reload_flags = 0;
		} 
		dh->status = ERL_DE_UNLOAD;
	    }
	    if (!left
		&& erts_atomic32_read_nob(&drv->handle->port_count) > 0) {
		if (kill_ports) {
		    DE_Handle *dh = drv->handle;
		    erts_ddll_reference_driver(dh);
		    dh->status = ERL_DE_FORCE_UNLOAD;
		    unlock_drv_list();
		    kill_ports_driver_unloaded(dh);
		    lock_drv_list(); /* Needed for future list operations */
		    drv = drv->next; /* before allowing destruction */
		    erts_ddll_dereference_driver(dh);
		} else {
		    drv = drv->next;
		}
	    } else {
		drv = drv->next;
	    }
	} else {
	    drv = drv->next;
	}
    }
    unlock_drv_list();
    erts_proc_lock(p, plocks);
}
Example #19
static ERTS_INLINE int
leader_update(ErtsThrPrgrData *tpd)
{
#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_lc_check_exact(NULL, 0);
#endif
    if (!tpd->leader) {
	/* Probably need to block... */
	block_thread(tpd);
    }
    else {
	ErtsThrPrgrVal current;
	int ix, chk_next_ix, umrefc_ix, my_ix, no_managed, waiting_unmanaged;
	erts_aint32_t lflgs;
	ErtsThrPrgrVal next;
	erts_aint_t refc;

	my_ix = tpd->id;

	if (tpd->leader_state.current == ERTS_THR_PRGR_VAL_WAITING) {
	    /* Took over as leader from another thread */
	    tpd->leader_state.current = read_nob(&erts_thr_prgr__.current);
	    tpd->leader_state.next = tpd->leader_state.current;
	    tpd->leader_state.next++;
	    if (tpd->leader_state.next == ERTS_THR_PRGR_VAL_WAITING)
		tpd->leader_state.next = 0;
	    tpd->leader_state.chk_next_ix = intrnl->misc.data.chk_next_ix;
	    tpd->leader_state.umrefc_ix.waiting = intrnl->misc.data.umrefc_ix.waiting;
	    tpd->leader_state.umrefc_ix.current =
		(int) erts_atomic32_read_nob(&intrnl->misc.data.umrefc_ix.current);

	    if (tpd->confirmed == tpd->leader_state.current) {
		ErtsThrPrgrVal val = tpd->leader_state.current + 1;
		if (val == ERTS_THR_PRGR_VAL_WAITING)
		    val = 0;
		tpd->confirmed = val;
		set_mb(&intrnl->thr[my_ix].data.current, val);
	    }
	}

	next = tpd->leader_state.next;

	waiting_unmanaged = 0;
	umrefc_ix = -1; /* Shut up annoying warning */

	chk_next_ix = tpd->leader_state.chk_next_ix;
	no_managed = intrnl->managed.no;
	ASSERT(0 <= chk_next_ix && chk_next_ix <= no_managed);
	/* Check managed threads */
	if (chk_next_ix < no_managed) {
	    for (ix = chk_next_ix; ix < no_managed; ix++) {
		ErtsThrPrgrVal tmp;
		if (ix == my_ix)
		    continue;
		tmp = read_nob(&intrnl->thr[ix].data.current);
		if (tmp != next && tmp != ERTS_THR_PRGR_VAL_WAITING) {
		    tpd->leader_state.chk_next_ix = ix;
		    ASSERT(erts_thr_progress_has_passed__(next, tmp));
		    goto done;
		}
	    }
	}

	/* Check unmanaged threads */
	waiting_unmanaged = tpd->leader_state.umrefc_ix.waiting != -1;
	umrefc_ix = (waiting_unmanaged
		     ? tpd->leader_state.umrefc_ix.waiting
		     : tpd->leader_state.umrefc_ix.current);
	refc = erts_atomic_read_nob(&intrnl->umrefc[umrefc_ix].refc);
	ASSERT(refc >= 0);
	if (refc != 0) {
	    int new_umrefc_ix;

	    if (waiting_unmanaged)
		goto done;

	    new_umrefc_ix = (umrefc_ix + 1) & 0x1;
	    tpd->leader_state.umrefc_ix.waiting = umrefc_ix;
	    tpd->leader_state.chk_next_ix = no_managed;
	    erts_atomic32_set_nob(&intrnl->misc.data.umrefc_ix.current,
				  (erts_aint32_t) new_umrefc_ix);
	    ETHR_MEMBAR(ETHR_StoreLoad);
	    refc = erts_atomic_read_nob(&intrnl->umrefc[umrefc_ix].refc);
	    ASSERT(refc >= 0);
	    waiting_unmanaged = 1;
	    if (refc != 0)
		goto done;
	}

	/* Make progress */
	current = next;

	next++;
	if (next == ERTS_THR_PRGR_VAL_WAITING)
	    next = 0;

	set_nob(&intrnl->thr[my_ix].data.current, next);
	set_mb(&erts_thr_prgr__.current, current);
	tpd->confirmed = next;
	tpd->leader_state.next = next;
	tpd->leader_state.current = current;

#if ERTS_THR_PRGR_PRINT_VAL
	if (current % 1000 == 0)
	    erts_fprintf(stderr, "%b64u\n", current);
#endif
	handle_wakeup_requests(current);

	if (waiting_unmanaged) {
	    waiting_unmanaged = 0;
	    tpd->leader_state.umrefc_ix.waiting = -1;
	    erts_atomic32_read_band_nob(&intrnl->misc.data.lflgs,
					~ERTS_THR_PRGR_LFLG_WAITING_UM);
	}
	tpd->leader_state.chk_next_ix = 0;

    done:

	if (tpd->active) {
	    lflgs = erts_atomic32_read_nob(&intrnl->misc.data.lflgs);
	    if (lflgs & ERTS_THR_PRGR_LFLG_BLOCK)
		(void) block_thread(tpd);
	}
	else {
	    int force_wakeup_check = 0;
	    erts_aint32_t set_flags = ERTS_THR_PRGR_LFLG_NO_LEADER;
	    tpd->leader = 0;
	    tpd->leader_state.current = ERTS_THR_PRGR_VAL_WAITING;
#if ERTS_THR_PRGR_PRINT_LEADER
	    erts_fprintf(stderr, "L <- %d\n", tpd->id);
#endif

	    ERTS_THR_PROGRESS_STATE_DEBUG_SET_LEADER(tpd->id, 0);

	    intrnl->misc.data.umrefc_ix.waiting
		= tpd->leader_state.umrefc_ix.waiting;
	    if (waiting_unmanaged)
		set_flags |= ERTS_THR_PRGR_LFLG_WAITING_UM;

	    lflgs = erts_atomic32_read_bor_relb(&intrnl->misc.data.lflgs,
						set_flags);
	    lflgs |= set_flags;
	    if (lflgs & ERTS_THR_PRGR_LFLG_BLOCK)
		lflgs = block_thread(tpd);

	    if (waiting_unmanaged) {
		/* Need to check umrefc again */
		ETHR_MEMBAR(ETHR_StoreLoad);
		refc = erts_atomic_read_nob(&intrnl->umrefc[umrefc_ix].refc);
		if (refc == 0) {
		    /* Need to force wakeup check */
		    force_wakeup_check = 1;
		}
	    }

	    if ((force_wakeup_check
		 || ((lflgs & (ERTS_THR_PRGR_LFLG_NO_LEADER
			       | ERTS_THR_PRGR_LFLG_WAITING_UM
			       | ERTS_THR_PRGR_LFLG_ACTIVE_MASK))
		     == ERTS_THR_PRGR_LFLG_NO_LEADER))
		&& got_sched_wakeups()) {
		/* Someone needs to make progress */
		wakeup_managed(0);
	    }
	}
    }

    return tpd->leader;
}
Example #20
ErtsMessage *
erts_try_alloc_message_on_heap(Process *pp,
			       erts_aint32_t *psp,
			       ErtsProcLocks *plp,
			       Uint sz,
			       Eterm **hpp,
			       ErlOffHeap **ohpp,
			       int *on_heap_p)
{
    int locked_main = 0;
    ErtsMessage *mp;

    ASSERT(!(*psp & ERTS_PSFLG_OFF_HEAP_MSGQ));

    if ((*psp) & ERTS_PSFLGS_VOLATILE_HEAP)
	goto in_message_fragment;
    else if (
	*plp & ERTS_PROC_LOCK_MAIN
	) {
    try_on_heap:
	if (((*psp) & ERTS_PSFLGS_VOLATILE_HEAP)
	    || (pp->flags & F_DISABLE_GC)
	    || HEAP_LIMIT(pp) - HEAP_TOP(pp) <= sz) {
	    /*
	     * The heap is either potentially in an inconsistent
	     * state, or not large enough.
	     */
	    if (locked_main) {
		*plp &= ~ERTS_PROC_LOCK_MAIN;
		erts_proc_unlock(pp, ERTS_PROC_LOCK_MAIN);
	    }
	    goto in_message_fragment;
	}

	*hpp = HEAP_TOP(pp);
	HEAP_TOP(pp) = *hpp + sz;
	*ohpp = &MSO(pp);
	mp = erts_alloc_message(0, NULL);
	mp->data.attached = NULL;
	*on_heap_p = !0;
    }
    else if (pp && erts_proc_trylock(pp, ERTS_PROC_LOCK_MAIN) == 0) {
	locked_main = 1;
	*psp = erts_atomic32_read_nob(&pp->state);
	*plp |= ERTS_PROC_LOCK_MAIN;
	goto try_on_heap;
    }
    else {
    in_message_fragment:
	if (!((*psp) & ERTS_PSFLG_ON_HEAP_MSGQ)) {
	    mp = erts_alloc_message(sz, hpp);
	    *ohpp = sz == 0 ? NULL : &mp->hfrag.off_heap;
	}
	else {
	    mp = erts_alloc_message(0, NULL);
	    if (!sz) {
		*hpp = NULL;
		*ohpp = NULL;
	    }
	    else {
		ErlHeapFragment *bp;
		bp = new_message_buffer(sz);
		*hpp = &bp->mem[0];
		mp->data.heap_frag = bp;
		*ohpp = &bp->off_heap;
	    }
	}
	*on_heap_p = 0;
    }

    return mp;
}
Example #21
Sint
erts_send_message(Process* sender,
		  Process* receiver,
		  ErtsProcLocks *receiver_locks,
		  Eterm message,
		  unsigned flags)
{
    Uint msize;
    ErtsMessage* mp;
    ErlOffHeap *ohp;
    Eterm token = NIL;
    Sint res = 0;
#ifdef USE_VM_PROBES
    DTRACE_CHARBUF(sender_name, 64);
    DTRACE_CHARBUF(receiver_name, 64);
    Sint tok_label = 0;
    Sint tok_lastcnt = 0;
    Sint tok_serial = 0;
    Eterm utag = NIL;
#endif
    erts_aint32_t receiver_state;
#ifdef SHCOPY_SEND
    erts_shcopy_t info;
#else
    erts_literal_area_t litarea;
    INITIALIZE_LITERAL_PURGE_AREA(litarea);
#endif

#ifdef USE_VM_PROBES
    *sender_name = *receiver_name = '\0';
    if (DTRACE_ENABLED(message_send)) {
        erts_snprintf(sender_name, sizeof(DTRACE_CHARBUF_NAME(sender_name)),
		      "%T", sender->common.id);
        erts_snprintf(receiver_name, sizeof(DTRACE_CHARBUF_NAME(receiver_name)),
		      "%T", receiver->common.id);
    }
#endif

    receiver_state = erts_atomic32_read_nob(&receiver->state);

    if (SEQ_TRACE_TOKEN(sender) != NIL && !(flags & ERTS_SND_FLG_NO_SEQ_TRACE)) {
        Eterm* hp;
	Eterm stoken = SEQ_TRACE_TOKEN(sender);
	Uint seq_trace_size = 0;
#ifdef USE_VM_PROBES
	Uint dt_utag_size = 0;
#endif

        /* SHCOPY corrupts the heap between copy_shared_calculate
         * and copy_shared_perform (it inserts move markers, like
         * the GC does). Make sure we don't use the heap between
         * those two calls.
         */
        if (have_seqtrace(stoken)) {
	    seq_trace_update_send(sender);
	    seq_trace_output(stoken, message, SEQ_TRACE_SEND,
			     receiver->common.id, sender);
	    seq_trace_size = 6; /* TUPLE5 */
	}
#ifdef USE_VM_PROBES
        if (DT_UTAG_FLAGS(sender) & DT_UTAG_SPREADING) {
            dt_utag_size = size_object(DT_UTAG(sender));
        } else if (stoken == am_have_dt_utag ) {
            stoken = NIL;
        }
#endif

#ifdef SHCOPY_SEND
        INITIALIZE_SHCOPY(info);
        msize = copy_shared_calculate(message, &info);
#else
        msize = size_object_litopt(message, &litarea);
#endif
        mp = erts_alloc_message_heap_state(receiver,
                                           &receiver_state,
                                           receiver_locks,
                                           (msize
#ifdef USE_VM_PROBES
                                            + dt_utag_size
#endif
                                            + seq_trace_size),
                                           &hp,
                                           &ohp);

#ifdef SHCOPY_SEND
	if (is_not_immed(message))
            message = copy_shared_perform(message, msize, &info, &hp, ohp);
        DESTROY_SHCOPY(info);
#else
	if (is_not_immed(message))
            message = copy_struct_litopt(message, msize, &hp, ohp, &litarea);
#endif
	if (is_immed(stoken))
	    token = stoken;
	else
	    token = copy_struct(stoken, seq_trace_size, &hp, ohp);

#ifdef USE_VM_PROBES
	if (DT_UTAG_FLAGS(sender) & DT_UTAG_SPREADING) {
	    if (is_immed(DT_UTAG(sender)))
		utag = DT_UTAG(sender);
	    else
		utag = copy_struct(DT_UTAG(sender), dt_utag_size, &hp, ohp);
	}
        if (DTRACE_ENABLED(message_send)) {
            if (have_seqtrace(stoken)) {
		tok_label = signed_val(SEQ_TRACE_T_LABEL(stoken));
		tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(stoken));
		tok_serial = signed_val(SEQ_TRACE_T_SERIAL(stoken));
	    }
	    DTRACE6(message_send, sender_name, receiver_name,
		    msize, tok_label, tok_lastcnt, tok_serial);
        }
#endif
    } else {
        Eterm *hp;

	if (receiver == sender && !(receiver_state & ERTS_PSFLG_OFF_HEAP_MSGQ)) {
	    mp = erts_alloc_message(0, NULL);
	    msize = 0;
	}
	else {
#ifdef SHCOPY_SEND
            INITIALIZE_SHCOPY(info);
            msize = copy_shared_calculate(message, &info);
#else
            msize = size_object_litopt(message, &litarea);
#endif
	    mp = erts_alloc_message_heap_state(receiver,
					       &receiver_state,
					       receiver_locks,
					       msize,
					       &hp,
					       &ohp);
#ifdef SHCOPY_SEND
            if (is_not_immed(message))
                message = copy_shared_perform(message, msize, &info, &hp, ohp);
            DESTROY_SHCOPY(info);
#else
            if (is_not_immed(message))
                message = copy_struct_litopt(message, msize, &hp, ohp, &litarea);
#endif
	}
#ifdef USE_VM_PROBES
        DTRACE6(message_send, sender_name, receiver_name,
                msize, tok_label, tok_lastcnt, tok_serial);
#endif
    }

    ERL_MESSAGE_TOKEN(mp) = token;
#ifdef USE_VM_PROBES
    ERL_MESSAGE_DT_UTAG(mp) = utag;
#endif
    res = queue_message(receiver,
			&receiver_state,
			*receiver_locks,
			mp, message,
                        sender->common.id);

    return res;
}
Example #22
static void
setup_reference_table(void)
{
    ErlHeapFragment *hfp;
    DistEntry *dep;
    HashInfo hi;
    int i, max;
    DeclareTmpHeapNoproc(heap,3);

    inserted_bins = NULL;

    hash_get_info(&hi, &erts_node_table);
    referred_nodes = erts_alloc(ERTS_ALC_T_NC_TMP,
				hi.objs*sizeof(ReferredNode));
    no_referred_nodes = 0;
    hash_foreach(&erts_node_table, init_referred_node, NULL);
    ASSERT(no_referred_nodes == hi.objs);

    hash_get_info(&hi, &erts_dist_table);
    referred_dists = erts_alloc(ERTS_ALC_T_NC_TMP,
				hi.objs*sizeof(ReferredDist));
    no_referred_dists = 0;
    hash_foreach(&erts_dist_table, init_referred_dist, NULL);
    ASSERT(no_referred_dists == hi.objs);

    /* Go through the whole system, and build a table of all references
       to ErlNode and DistEntry structures */

    erts_debug_callback_timer_foreach(try_delete_node,
				      insert_delayed_delete_node,
				      NULL);
    erts_debug_callback_timer_foreach(try_delete_dist_entry,
				      insert_delayed_delete_dist_entry,
				      NULL);

    UseTmpHeapNoproc(3);
    insert_node(erts_this_node,
		SYSTEM_REF,
		TUPLE2(&heap[0], AM_system, am_undefined));

    UnUseTmpHeapNoproc(3);

    max = erts_ptab_max(&erts_proc);
    /* Insert all processes */
    for (i = 0; i < max; i++) {
	Process *proc = erts_pix2proc(i);
	if (proc) {
	    int mli;
	    ErtsMessage *msg_list[] = {
		proc->msg.first,
#ifdef ERTS_SMP
		proc->msg_inq.first,
#endif
		proc->msg_frag};

	    /* Insert Heap */
	    insert_offheap(&(proc->off_heap),
			   HEAP_REF,
			   proc->common.id);
	    /* Insert heap fragment buffers */
	    for(hfp = proc->mbuf; hfp; hfp = hfp->next)
		insert_offheap(&(hfp->off_heap),
			       HEAP_REF,
			       proc->common.id);

	    /* Insert msg buffers */
	    for (mli = 0; mli < sizeof(msg_list)/sizeof(msg_list[0]); mli++) {
		ErtsMessage *msg;
		for (msg = msg_list[mli]; msg; msg = msg->next) {
		    ErlHeapFragment *heap_frag = NULL;
		    if (msg->data.attached) {
			if (msg->data.attached == ERTS_MSG_COMBINED_HFRAG)
			    heap_frag = &msg->hfrag;
			else if (is_value(ERL_MESSAGE_TERM(msg)))
			    heap_frag = msg->data.heap_frag;
			else {
			    if (msg->data.dist_ext->dep)
				insert_dist_entry(msg->data.dist_ext->dep,
						  HEAP_REF, proc->common.id, 0);
			    if (is_not_nil(ERL_MESSAGE_TOKEN(msg)))
				heap_frag = erts_dist_ext_trailer(msg->data.dist_ext);
			}
		    }
		    while (heap_frag) {
			insert_offheap(&(heap_frag->off_heap),
				       HEAP_REF,
				       proc->common.id);
			heap_frag = heap_frag->next;
		    }
		}
	    }
	    /* Insert links */
	    if (ERTS_P_LINKS(proc))
		insert_links(ERTS_P_LINKS(proc), proc->common.id);
	    if (ERTS_P_MONITORS(proc))
		insert_monitors(ERTS_P_MONITORS(proc), proc->common.id);
	    /* Insert controller */
	    {
		DistEntry *dep = ERTS_PROC_GET_DIST_ENTRY(proc);
		if (dep)
		    insert_dist_entry(dep, CTRL_REF, proc->common.id, 0);
	    }
	}
    }
    
#ifdef ERTS_SMP
    erts_foreach_sys_msg_in_q(insert_sys_msg);
#endif

    /* Insert all ports */
    max = erts_ptab_max(&erts_port);
    for (i = 0; i < max; i++) {
	ErlOffHeap *ohp;
	erts_aint32_t state;
	Port *prt;

	prt = erts_pix2port(i);
	if (!prt)
	    continue;

	state = erts_atomic32_read_nob(&prt->state);
	if (state & ERTS_PORT_SFLGS_DEAD)
	    continue;

	/* Insert links */
	if (ERTS_P_LINKS(prt))
	    insert_links(ERTS_P_LINKS(prt), prt->common.id);
	/* Insert monitors */
	if (ERTS_P_MONITORS(prt))
	    insert_monitors(ERTS_P_MONITORS(prt), prt->common.id);
	/* Insert port data */
	ohp = erts_port_data_offheap(prt);
	if (ohp)
	    insert_offheap(ohp, HEAP_REF, prt->common.id);
	/* Insert controller */
	if (prt->dist_entry)
	    insert_dist_entry(prt->dist_entry,
			      CTRL_REF,
			      prt->common.id,
			      0);
    }

    { /* Add binaries stored elsewhere ... */
	ErlOffHeap oh;
	ProcBin pb[2];
	int i = 0;
	Binary *default_match_spec;
	Binary *default_meta_match_spec;

	oh.first = NULL;
	/* Only the ProcBin members thing_word, val and next will be inspected
	   (by insert_offheap()) */
#undef  ADD_BINARY
#define ADD_BINARY(Bin)				 	     \
	if ((Bin)) {					     \
	    pb[i].thing_word = REFC_BINARY_SUBTAG;           \
	    pb[i].val = (Bin);				     \
	    pb[i].next = oh.first;		             \
	    oh.first = (struct erl_off_heap_header*) &pb[i]; \
	    i++;				             \
	}

	erts_get_default_trace_pattern(NULL,
				       &default_match_spec,
				       &default_meta_match_spec,
				       NULL,
				       NULL);

	ADD_BINARY(default_match_spec);
	ADD_BINARY(default_meta_match_spec);

	insert_offheap(&oh, BIN_REF, AM_match_spec);
#undef  ADD_BINARY
    }

    /* Insert all dist links */

    for(dep = erts_visible_dist_entries; dep; dep = dep->next) {
	if(dep->nlinks)
	    insert_links2(dep->nlinks, dep->sysname);
	if(dep->node_links)
	    insert_links(dep->node_links, dep->sysname);
	if(dep->monitors)
	    insert_monitors(dep->monitors, dep->sysname);
    }

    for(dep = erts_hidden_dist_entries; dep; dep = dep->next) {
	if(dep->nlinks)
	    insert_links2(dep->nlinks, dep->sysname);
	if(dep->node_links)
	    insert_links(dep->node_links, dep->sysname);
	if(dep->monitors)
	    insert_monitors(dep->monitors, dep->sysname);
    }

    /* Dist entries that are not connected should not have any links,
       but inspect them anyway */
    for(dep = erts_not_connected_dist_entries; dep; dep = dep->next) {
	if(dep->nlinks)
	    insert_links2(dep->nlinks, dep->sysname);
	if(dep->node_links)
	    insert_links(dep->node_links, dep->sysname);
	if(dep->monitors)
	    insert_monitors(dep->monitors, dep->sysname);
    }

    /* Insert all ets tables */
    erts_db_foreach_table(insert_ets_table, NULL);

    /* Insert all bif timers */
    erts_debug_bif_timer_foreach(insert_bif_timer, NULL);

    /* Insert node table (references to dist) */
    hash_foreach(&erts_node_table, insert_erl_node, NULL);
}
Example #23
/* Add messages at the end of the message queue */
static Sint
queue_messages(Process* receiver,
               erts_aint32_t *receiver_state,
               ErtsProcLocks receiver_locks,
               ErtsMessage* first,
               ErtsMessage** last,
               Uint len,
               Eterm from)
{
    ErtsTracingEvent* te;
    Sint res;
    int locked_msgq = 0;
    erts_aint32_t state;

    ASSERT(is_value(ERL_MESSAGE_TERM(first)));
    ASSERT(ERL_MESSAGE_TOKEN(first) == am_undefined ||
           ERL_MESSAGE_TOKEN(first) == NIL ||
           is_tuple(ERL_MESSAGE_TOKEN(first)));

#ifdef ERTS_ENABLE_LOCK_CHECK
    ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(receiver) < ERTS_PROC_LOCK_MSGQ ||
                       receiver_locks == erts_proc_lc_my_proc_locks(receiver));
#endif

    if (!(receiver_locks & ERTS_PROC_LOCK_MSGQ)) {
	if (erts_proc_trylock(receiver, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
            ErtsProcLocks need_locks;

	    if (receiver_state)
		state = *receiver_state;
	    else
		state = erts_atomic32_read_nob(&receiver->state);
	    if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT))
		goto exiting;

            need_locks = receiver_locks & ERTS_PROC_LOCKS_HIGHER_THAN(ERTS_PROC_LOCK_MSGQ);
	    if (need_locks) {
		erts_proc_unlock(receiver, need_locks);
	    }
            need_locks |= ERTS_PROC_LOCK_MSGQ;
	    erts_proc_lock(receiver, need_locks);
	}
	locked_msgq = 1;
    }


    state = erts_atomic32_read_nob(&receiver->state);

    if (state & (ERTS_PSFLG_PENDING_EXIT|ERTS_PSFLG_EXITING)) {
    exiting:
	/* Drop message if receiver is exiting or has a pending exit... */
	if (locked_msgq)
	    erts_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);
	erts_cleanup_messages(first);
	return 0;
    }

    res = receiver->msg.len;
    if (receiver_locks & ERTS_PROC_LOCK_MAIN) {
	/*
	 * We move 'in queue' to 'private queue' and place
	 * message at the end of 'private queue' in order
	 * to ensure that the 'in queue' doesn't contain
	 * references into the heap. By ensuring this,
	 * we don't need to include the 'in queue' in
	 * the root set when garbage collecting.
	 */
	res += receiver->msg_inq.len;
	ERTS_MSGQ_MV_INQ2PRIVQ(receiver);
        LINK_MESSAGE_PRIVQ(receiver, first, last, len);
    }
    else
    {
	LINK_MESSAGE(receiver, first, last, len);
    }

    if (IS_TRACED_FL(receiver, F_TRACE_RECEIVE)
        && (te = &erts_receive_tracing[erts_active_bp_ix()],
            te->on)) {

        ErtsMessage *msg = first;

#ifdef USE_VM_PROBES
        if (DTRACE_ENABLED(message_queued)) {
            DTRACE_CHARBUF(receiver_name, DTRACE_TERM_BUF_SIZE);
            Sint tok_label = 0;
            Sint tok_lastcnt = 0;
            Sint tok_serial = 0;
            Eterm seq_trace_token = ERL_MESSAGE_TOKEN(msg);

            dtrace_proc_str(receiver, receiver_name);
            if (seq_trace_token != NIL && is_tuple(seq_trace_token)) {
                tok_label = signed_val(SEQ_TRACE_T_LABEL(seq_trace_token));
                tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(seq_trace_token));
                tok_serial = signed_val(SEQ_TRACE_T_SERIAL(seq_trace_token));
            }
            DTRACE6(message_queued,
                    receiver_name, size_object(ERL_MESSAGE_TERM(msg)),
                    receiver->msg.len,
                    tok_label, tok_lastcnt, tok_serial);
        }
#endif
        while (msg) {
            trace_receive(receiver, from, ERL_MESSAGE_TERM(msg), te);
            msg = msg->next;
        }

    }
    if (locked_msgq) {
	erts_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);
    }

    erts_proc_notify_new_message(receiver, receiver_locks);
    return res;
}
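
The trylock dance at the top of queue_messages() is a general lock-ordering idiom: MSGQ ranks below the other per-process locks, so a thread already holding higher-ranked locks may only *try* MSGQ; on EBUSY it must release the higher locks and reacquire everything in rank order. A minimal sketch with two POSIX mutexes (names invented; this is not the ERTS lock implementation):

#include <pthread.h>

/* Rank order: msgq_lock must be acquired before status_lock. */
static pthread_mutex_t msgq_lock   = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t status_lock = PTHREAD_MUTEX_INITIALIZER;

/* Caller already holds status_lock and wants msgq_lock too. Taking it
 * outright would invert the rank order and can deadlock against a
 * thread acquiring in order, so we only try; on failure we back out
 * of the higher-ranked lock and retake both in rank order. */
static void lock_msgq_holding_status(void)
{
    if (pthread_mutex_trylock(&msgq_lock) != 0) {
        pthread_mutex_unlock(&status_lock);
        pthread_mutex_lock(&msgq_lock);
        pthread_mutex_lock(&status_lock);
    }
}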
Example #24
/*
 * Try to grab locks one at a time in lock order and wait on the lowest
 * lock we fail to grab, if any.
 *
 * When this function returns, all locks in 'need_locks' are held.
 *
 * On entry, the pix lock is held iff !ERTS_PROC_LOCK_ATOMIC_IMPL.
 * On exit it is not held.
 */
static void
wait_for_locks(Process *p,
               erts_pix_lock_t *pixlck,
	       ErtsProcLocks locks,
               ErtsProcLocks need_locks,
               ErtsProcLocks olflgs)
{
    erts_pix_lock_t *pix_lock = pixlck ? pixlck : ERTS_PID2PIXLOCK(p->id);
    erts_tse_t *wtr;
    erts_proc_lock_queues_t *qs;

    /* Acquire a waiter object on which this thread can wait. */
    wtr = tse_fetch(pix_lock);
    
    /* Record which locks this waiter needs. */
    wtr->uflgs = need_locks;

    ASSERT((wtr->uflgs & ~ERTS_PROC_LOCKS_ALL) == 0);

#if ERTS_PROC_LOCK_ATOMIC_IMPL
    erts_pix_lock(pix_lock);
#endif

    ERTS_LC_ASSERT(erts_lc_pix_lock_is_locked(pix_lock));

    qs = wtr->udata;
    ASSERT(qs);
    /* Provide the process with waiter queues, if it doesn't have any. */
    if (!p->lock.queues) {
	qs->next = NULL;
	p->lock.queues = qs;
    }
    else {
	qs->next = p->lock.queues->next;
	p->lock.queues->next = qs;
    }

#ifdef ERTS_PROC_LOCK_HARD_DEBUG
    check_queue(&p->lock);
#endif

    /* Try to acquire locks one at a time in lock order and set wait flag */
    try_aquire(&p->lock, wtr);

    ASSERT((wtr->uflgs & ~ERTS_PROC_LOCKS_ALL) == 0);

#ifdef ERTS_PROC_LOCK_HARD_DEBUG
    check_queue(&p->lock);
#endif

    if (wtr->uflgs) {
	/* We didn't get them all; need to wait... */

	ASSERT((wtr->uflgs & ~ERTS_PROC_LOCKS_ALL) == 0);

	erts_atomic32_set_nob(&wtr->uaflgs, 1);
	erts_pix_unlock(pix_lock);

	while (1) {
	    int res;
	    erts_tse_reset(wtr);

	    if (erts_atomic32_read_nob(&wtr->uaflgs) == 0)
		break;

	    /*
	     * Wait for needed locks. When we are woken, all needed locks
	     * have been acquired by other threads and transferred to us.
	     * However, we need to be prepared for spurious wakeups.
	     */
	    do {
		res = erts_tse_wait(wtr); /* might return EINTR */
	    } while (res != 0);
	}

	erts_pix_lock(pix_lock);

	ASSERT(wtr->uflgs == 0);
    }

    /* Recover some queues to store in the waiter. */
    ERTS_LC_ASSERT(p->lock.queues);
    if (p->lock.queues->next) {
	qs = p->lock.queues->next;
	p->lock.queues->next = qs->next;
    }
    else {
	qs = p->lock.queues;
	p->lock.queues = NULL;
    }
    wtr->udata = qs;

    erts_pix_unlock(pix_lock);

    ERTS_LC_ASSERT(locks == (ERTS_PROC_LOCK_FLGS_READ_(&p->lock) & locks));

    tse_return(wtr, 0);
}
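
The inner loop above tolerates both spurious wakeups and interrupted waits by re-checking wtr->uaflgs after every return from erts_tse_wait(). The same discipline with a plain condition variable looks like this (a sketch; all names are invented):

#include <pthread.h>

static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cnd = PTHREAD_COND_INITIALIZER;
static int transferred;             /* set by the thread handing over locks */

/* The predicate is re-tested after every wakeup, so spurious returns
 * from pthread_cond_wait() are harmless, just as the loop above
 * re-reads wtr->uaflgs after every erts_tse_wait(). */
static void wait_until_transferred(void)
{
    pthread_mutex_lock(&mtx);
    while (!transferred)
        pthread_cond_wait(&cnd, &mtx);
    pthread_mutex_unlock(&mtx);
}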
Example #25
Sint
erts_move_messages_off_heap(Process *c_p)
{
    int reds = 1;
    /*
     * Move all messages off heap. This *only* occurs when the
     * process had off heap messages disabled and has just
     * enabled them...
     */
    ErtsMessage *mp;

    reds += c_p->msg.len / 10;

    ASSERT(erts_atomic32_read_nob(&c_p->state)
	   & ERTS_PSFLG_OFF_HEAP_MSGQ);
    ASSERT(c_p->flags & F_OFF_HEAP_MSGQ_CHNG);

    for (mp = c_p->msg.first; mp; mp = mp->next) {
	Uint msg_sz, token_sz;
#ifdef USE_VM_PROBES
	Uint utag_sz;
#endif
	Eterm *hp;
	ErlHeapFragment *hfrag;

	if (mp->data.attached)
	    continue;

	if (is_immed(ERL_MESSAGE_TERM(mp))
#ifdef USE_VM_PROBES
	    && is_immed(ERL_MESSAGE_DT_UTAG(mp))
#endif
	    && is_not_immed(ERL_MESSAGE_TOKEN(mp)))
	    continue;

	/*
	 * The message refers into the heap. Copy the message
	 * from the heap into a heap fragment and attach
	 * it to the message...
	 */
	msg_sz = size_object(ERL_MESSAGE_TERM(mp));
#ifdef USE_VM_PROBES
	utag_sz = size_object(ERL_MESSAGE_DT_UTAG(mp));
#endif
	token_sz = size_object(ERL_MESSAGE_TOKEN(mp));

	hfrag = new_message_buffer(msg_sz
#ifdef USE_VM_PROBES
				   + utag_sz
#endif
				   + token_sz);
	hp = hfrag->mem;
	if (is_not_immed(ERL_MESSAGE_TERM(mp)))
	    ERL_MESSAGE_TERM(mp) = copy_struct(ERL_MESSAGE_TERM(mp),
                                               msg_sz, &hp,
                                               &hfrag->off_heap);
	if (is_not_immed(ERL_MESSAGE_TOKEN(mp)))
	    ERL_MESSAGE_TOKEN(mp) = copy_struct(ERL_MESSAGE_TOKEN(mp),
						token_sz, &hp,
						&hfrag->off_heap);
#ifdef USE_VM_PROBES
	if (is_not_immed(ERL_MESSAGE_DT_UTAG(mp)))
	    ERL_MESSAGE_DT_UTAG(mp) = copy_struct(ERL_MESSAGE_DT_UTAG(mp),
						  utag_sz, &hp,
						  &hfrag->off_heap);
#endif
	mp->data.heap_frag = hfrag;
	reds += 1;
    }

    return reds;
}
Example #26
Eterm
erts_change_message_queue_management(Process *c_p, Eterm new_state)
{
    Eterm res;

#ifdef DEBUG
    if (c_p->flags & F_OFF_HEAP_MSGQ) {
	ASSERT(erts_atomic32_read_nob(&c_p->state)
	       & ERTS_PSFLG_OFF_HEAP_MSGQ);
    }
    else {
	if (c_p->flags & F_OFF_HEAP_MSGQ_CHNG) {
	    ASSERT(erts_atomic32_read_nob(&c_p->state)
		   & ERTS_PSFLG_OFF_HEAP_MSGQ);
	}
	else {
	    ASSERT(!(erts_atomic32_read_nob(&c_p->state)
		     & ERTS_PSFLG_OFF_HEAP_MSGQ));
	}
    }
#endif

    switch (c_p->flags & (F_OFF_HEAP_MSGQ|F_ON_HEAP_MSGQ)) {

    case F_OFF_HEAP_MSGQ:
	res = am_off_heap;

	switch (new_state) {
	case am_off_heap:
	    break;
	case am_on_heap:
	    c_p->flags |= F_ON_HEAP_MSGQ;
	    c_p->flags &= ~F_OFF_HEAP_MSGQ;
	    erts_atomic32_read_bor_nob(&c_p->state,
					   ERTS_PSFLG_ON_HEAP_MSGQ);
	    /*
	     * We are not allowed to clear ERTS_PSFLG_OFF_HEAP_MSGQ
	     * if an off heap change is ongoing. It will be adjusted
	     * when the change completes...
	     */
	    if (!(c_p->flags & F_OFF_HEAP_MSGQ_CHNG)) {
		/* Safe to clear ERTS_PSFLG_OFF_HEAP_MSGQ... */
		erts_atomic32_read_band_nob(&c_p->state,
						~ERTS_PSFLG_OFF_HEAP_MSGQ);
	    }
	    break;
	default:
	    res = THE_NON_VALUE; /* badarg */
	    break;
	}
	break;

    case F_ON_HEAP_MSGQ:
	res = am_on_heap;

	switch (new_state) {
	case am_on_heap:
	    break;
	case am_off_heap:
	    c_p->flags &= ~F_ON_HEAP_MSGQ;
	    erts_atomic32_read_band_nob(&c_p->state,
					    ~ERTS_PSFLG_ON_HEAP_MSGQ);
	    goto change_to_off_heap;
	default:
	    res = THE_NON_VALUE; /* badarg */
	    break;
	}
	break;

    default:
	res = am_error;
	ERTS_INTERNAL_ERROR("Inconsistent message queue management state");
	break;
    }

    return res;

change_to_off_heap:

    c_p->flags |= F_OFF_HEAP_MSGQ;

    /*
     * We do not have to schedule a change if
     * we have an ongoing off heap change...
     */
    if (!(c_p->flags & F_OFF_HEAP_MSGQ_CHNG)) {
	ErtsChangeOffHeapMessageQueue *cohmq;
	/*
	 * Need to set ERTS_PSFLG_OFF_HEAP_MSGQ and wait for
	 * thread progress before completing the change in
	 * order to ensure that all senders observe that
	 * messages should be passed off heap. When the
	 * change has completed, GC does not need to inspect
	 * the message queue at all.
	 */
	erts_atomic32_read_bor_nob(&c_p->state,
				       ERTS_PSFLG_OFF_HEAP_MSGQ);
	c_p->flags |= F_OFF_HEAP_MSGQ_CHNG;
	cohmq = erts_alloc(ERTS_ALC_T_MSGQ_CHNG,
			   sizeof(ErtsChangeOffHeapMessageQueue));
	cohmq->pid = c_p->common.id;
	erts_schedule_thr_prgr_later_op(change_off_heap_msgq,
					(void *) cohmq,
					&cohmq->lop);
    }

    return res;
}
Example #27
/*
   You must have loaded the driver, and the pid state must be
   LOADED or AWAIT_LOAD. You will be removed from the list
   regardless of driver state.
   If the driver is also loaded by someone else, the return is
   {ok, pending_process}.
   If the driver is loaded but locked by a port, the return is
   {ok, pending_driver}.
   If the driver is loaded and free to unload (you're the last one
   holding it), {ok, unloaded}.
   If it's not loaded, or not loaded by you,
   {error, not_loaded} or {error, not_loaded_by_you}.

   Internally, if it's in state UNLOADING, just return {ok, pending_driver}
   and remove/decrement this pid (which should be a LOADED-tagged one).
   If the state is RELOADING, this pid should be in the list as LOADED
   tagged; only AWAIT_LOAD would be possible, but that is not allowed for
   unloading. Remove it and, if it was the last LOADED-tagged entry, change
   the state from RELOAD to UNLOAD and notify any AWAIT_LOAD waiters with
   {'DOWN', ref(), driver, name(), load_cancelled}. If the driver made
   itself permanent, {'UP', ref(), driver, name(), permanent}.
*/
Eterm erl_ddll_try_unload_2(BIF_ALIST_2)
{
    Eterm name_term = BIF_ARG_1;
    Eterm options = BIF_ARG_2;
    char *name = NULL;
    Eterm ok_term = NIL;
    Eterm soft_error_term = NIL;
    erts_driver_t *drv;
    DE_Handle *dh;
    DE_ProcEntry *pe;
    Eterm *hp;
    Eterm t;
    int monitor = 0;
    Eterm l;
    int kill_ports = 0;

    erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);

    for(l = options; is_list(l); l =  CDR(list_val(l))) {
	Eterm opt = CAR(list_val(l));
	Eterm *tp;
	if (is_not_tuple(opt)) {
	    if (opt == am_kill_ports) {
		kill_ports = 1;
		continue;
	    } else {
		goto error;
	    }
	}
	tp = tuple_val(opt);
	if (*tp != make_arityval(2) || tp[1] != am_monitor) {
	    goto error;
	}
	if (tp[2] == am_pending_driver) { 
	    monitor = 1;
	} else if (tp[2] == am_pending) {
	    monitor = 2;
	} else {
	    goto error;
	}
    }
    if (is_not_nil(l)) {
	goto error;
    }

    if ((name = pick_list_or_atom(name_term)) == NULL) {
	goto error;
    }

    lock_drv_list();

    if ((drv = lookup_driver(name)) == NULL) {
	soft_error_term = am_not_loaded;
	goto soft_error;
    }

    if (drv->handle == NULL) {
	soft_error_term = am_linked_in_driver;
	goto soft_error;
    } else if (drv->handle->status == ERL_DE_PERMANENT) {
	soft_error_term = am_permanent;
	goto soft_error;
    }	
    dh = drv->handle;
    if (dh->flags & ERL_DE_FL_KILL_PORTS) {
	kill_ports = 1;
    }
    if ((pe = find_proc_entry(dh, BIF_P, ERL_DE_PROC_LOADED)) == NULL) {
	if (num_procs(dh, ERL_DE_PROC_LOADED) > 0) {
	    soft_error_term = am_not_loaded_by_this_process;
	    goto soft_error;
	}
    } else {
	remove_proc_entry(dh, pe);
	if (!(pe->flags & ERL_DE_FL_DEREFERENCED)) {
	    erts_ddll_dereference_driver(dh);
	}
	erts_free(ERTS_ALC_T_DDLL_PROCESS, pe);
    }
    if (num_procs(dh, ERL_DE_PROC_LOADED) > 0) {
	ok_term = am_pending_process;
	--monitor;
	goto done;
    }
    if (dh->status == ERL_DE_RELOAD ||
	dh->status == ERL_DE_FORCE_RELOAD) {
	notify_all(dh, drv->name, 
		   ERL_DE_PROC_AWAIT_LOAD, am_DOWN, am_load_cancelled);
	erts_free(ERTS_ALC_T_DDLL_HANDLE,dh->reload_full_path);
	erts_free(ERTS_ALC_T_DDLL_HANDLE,dh->reload_driver_name);
	dh->reload_full_path = dh->reload_driver_name = NULL; 
	dh->reload_flags = 0;
    } 
    if (erts_atomic32_read_nob(&dh->port_count) > 0) {
	++kill_ports;
    }
    dh->status = ERL_DE_UNLOAD;
    ok_term = am_pending_driver;
done:
    assert_drv_list_rwlocked();
    if (kill_ports > 1) {
	/* Avoid closing the driver by referencing it */
	erts_ddll_reference_driver(dh);
	dh->status = ERL_DE_FORCE_UNLOAD;
	unlock_drv_list();
	kill_ports_driver_unloaded(dh);
	lock_drv_list(); 
	erts_ddll_dereference_driver(dh);
    } 

    erts_ddll_reference_driver(dh);
    unlock_drv_list();
    erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
    lock_drv_list();
    erts_ddll_dereference_driver(dh);
    erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
    BIF_P->flags |= F_USING_DDLL;
    if (monitor > 0) {
	Eterm mref = add_monitor(BIF_P, dh, ERL_DE_PROC_AWAIT_UNLOAD);
	hp = HAlloc(BIF_P, 4);
	t = TUPLE3(hp, am_ok, ok_term, mref);
    } else {
	hp = HAlloc(BIF_P, 3);
	t = TUPLE2(hp, am_ok, ok_term);
    }
    if (kill_ports > 1) {
	ERTS_BIF_CHK_EXITED(BIF_P); /* May be exited by port killing */
    }
    unlock_drv_list();
    BIF_RET(t);
 
soft_error:
    unlock_drv_list();
    erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
    erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
    hp = HAlloc(BIF_P, 3);
    t = TUPLE2(hp, am_error, soft_error_term);
    BIF_RET(t);
 
 error: /* No lock fiddling before going here */
    assert_drv_list_not_locked();
    if (name != NULL) {
	erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
    }
    erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
    BIF_ERROR(BIF_P, BADARG);
}
Example #28
ErtsMessage *
erts_factory_message_create(ErtsHeapFactory* factory,
			    Process *proc,
			    ErtsProcLocks *proc_locksp,
			    Uint sz)
{
    Eterm *hp;
    ErlOffHeap *ohp;
    ErtsMessage *msgp;
    int on_heap;
    erts_aint32_t state;

    state = proc ? erts_atomic32_read_nob(&proc->state) : 0;

    if (state & ERTS_PSFLG_OFF_HEAP_MSGQ) {
	msgp = erts_alloc_message(sz, &hp);
	ohp = sz == 0 ? NULL : &msgp->hfrag.off_heap;
	on_heap = 0;
    }
    else {
	msgp = erts_try_alloc_message_on_heap(proc, &state,
					      proc_locksp,
					      sz, &hp, &ohp,
					      &on_heap);
    }

    if (on_heap) {
	ERTS_ASSERT(*proc_locksp & ERTS_PROC_LOCK_MAIN);
	ASSERT(ohp == &proc->off_heap);
	factory->mode = FACTORY_HALLOC;
	factory->p = proc;
	factory->heap_frags_saved = proc->mbuf;
	factory->heap_frags_saved_used = proc->mbuf ? proc->mbuf->used_size : 0;
    }
    else {
	factory->mode = FACTORY_MESSAGE;
	factory->p = NULL;
	factory->heap_frags_saved = NULL;
	factory->heap_frags_saved_used = 0;

	if (msgp->data.attached == ERTS_MSG_COMBINED_HFRAG) {
	    ASSERT(!msgp->hfrag.next);
	    factory->heap_frags = NULL;
	}
	else {
	    ASSERT(!msgp->data.heap_frag
		   || !msgp->data.heap_frag->next);
	    factory->heap_frags = msgp->data.heap_frag;
	}
    }
    factory->hp_start = hp;
    factory->hp       = hp;
    factory->hp_end   = hp + sz;
    factory->message  = msgp;
    factory->off_heap = ohp;
    factory->alloc_type = ERTS_ALC_T_HEAP_FRAG;
    if (ohp) {
	factory->off_heap_saved.first    = ohp->first;
	factory->off_heap_saved.overhead = ohp->overhead;
    }
    else {
	factory->off_heap_saved.first    = NULL;
	factory->off_heap_saved.overhead = 0;
    }

    ASSERT(factory->hp >= factory->hp_start && factory->hp <= factory->hp_end);

    return msgp;
}
Example #29
/* 
 * More detailed info about loaded drivers: 
 * item is processes, driver_options, port_count, linked_in_driver, 
 * permanent, awaiting_load, awaiting_unload 
 */
BIF_RETTYPE erl_ddll_info_2(BIF_ALIST_2)
{
    Process *p = BIF_P;
    Eterm name_term = BIF_ARG_1;
    Eterm item = BIF_ARG_2;
    char *name = NULL;
    Eterm res = NIL;
    erts_driver_t *drv;
    ProcEntryInfo *pei = NULL;
    int num_pei;
    Eterm *hp;
    int i;
    Uint filter;
    int have_lock = 0;

    if ((name = pick_list_or_atom(name_term)) == NULL) {
	goto error;
    }

    if (!is_atom(item)) {
	goto error;
    }

    lock_drv_list();
    have_lock = 1;
    if ((drv = lookup_driver(name)) == NULL) {
	goto error;
    }
    
    switch (item) {
    case am_processes:
	filter = ERL_DE_PROC_LOADED;
	break;
    case am_driver_options:
	if (drv->handle == NULL) {
	    res = am_linked_in_driver;
	} else {
	    Uint start_flags = drv->handle->flags & ERL_FL_CONSISTENT_MASK;
	    /* Cheating, only one flag for now... */
	    if (start_flags & ERL_DE_FL_KILL_PORTS) {
		Eterm *myhp;
		myhp = HAlloc(p,2);
		res = CONS(myhp,am_kill_ports,NIL);
	    } else {
		res = NIL;
	    }
	}
	goto done;
    case am_port_count:
	if (drv->handle == NULL) {
	    res = am_linked_in_driver;
	} else if (drv->handle->status == ERL_DE_PERMANENT) {
	    res = am_permanent;
	} else {
	    res = make_small(erts_atomic32_read_nob(&drv->handle->port_count));
	}
	goto done;
    case am_linked_in_driver:
	if (drv->handle == NULL){
	    res = am_true;
	} else {
	    res = am_false;
	}
	goto done;
    case am_permanent:
	if (drv->handle != NULL && drv->handle->status == ERL_DE_PERMANENT) {
	    res = am_true;
	} else {
	    res = am_false;
	}
	goto done;
    case am_awaiting_load:
	filter = ERL_DE_PROC_AWAIT_LOAD;
	break;
    case am_awaiting_unload:
	filter = ERL_DE_PROC_AWAIT_UNLOAD;
	break;
    default:
	goto error;
    }

    if (drv->handle == NULL) {
	res = am_linked_in_driver;
	goto done;
    } else if (drv->handle->status == ERL_DE_PERMANENT) {
	res = am_permanent;
	goto done;
    }
    num_pei = build_proc_info(drv->handle, &pei, filter);
    if (!num_pei) {
	goto done;
    }
    hp = HAlloc(p,num_pei * (2+3));
    for (i = 0; i < num_pei; ++ i) {
	Eterm tpl = TUPLE2(hp,pei[i].pid,make_small(pei[i].count));
	hp += 3;
	res = CONS(hp,tpl,res);
	hp += 2;
    }    
 done:    
    unlock_drv_list();
    if (pei)
	erts_free(ERTS_ALC_T_DDLL_TMP_BUF, pei);
    erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
    BIF_RET(res);
 error:
    if (name != NULL) {
	erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
    }
    if (have_lock) {
	unlock_drv_list();
    }
    BIF_ERROR(p,BADARG);
}
Example #30
/*
 * Try to grab locks one at a time in lock order and wait on the lowest
 * lock we fail to grab, if any.
 *
 * When this function returns, all locks in 'need_locks' are held.
 *
 * On entry, the pix lock is held iff !ERTS_PROC_LOCK_ATOMIC_IMPL.
 * On exit it is not held.
 */
static void
wait_for_locks(Process *p,
               erts_pix_lock_t *pixlck,
	       ErtsProcLocks locks,
               ErtsProcLocks need_locks,
               ErtsProcLocks olflgs)
{
    erts_pix_lock_t *pix_lock = pixlck ? pixlck : ERTS_PID2PIXLOCK(p->common.id);
    erts_tse_t *wtr;

    /* Acquire a waiter object on which this thread can wait. */
    wtr = tse_fetch(pix_lock);
    
    /* Record which locks this waiter needs. */
    wtr->uflgs = need_locks;

    ASSERT((wtr->uflgs & ~ERTS_PROC_LOCKS_ALL) == 0);

#if ERTS_PROC_LOCK_ATOMIC_IMPL
    erts_pix_lock(pix_lock);
#endif

    ERTS_LC_ASSERT(erts_lc_pix_lock_is_locked(pix_lock));

#ifdef ERTS_PROC_LOCK_HARD_DEBUG
    check_queue(&p->lock);
#endif

    /* Try to acquire locks one at a time in lock order and set wait flag */
    try_aquire(&p->lock, wtr);

    ASSERT((wtr->uflgs & ~ERTS_PROC_LOCKS_ALL) == 0);

#ifdef ERTS_PROC_LOCK_HARD_DEBUG
    check_queue(&p->lock);
#endif

    if (wtr->uflgs == 0)
	erts_pix_unlock(pix_lock);
    else {
	/* We didn't get them all; need to wait... */

	ASSERT((wtr->uflgs & ~ERTS_PROC_LOCKS_ALL) == 0);

	erts_atomic32_set_nob(&wtr->uaflgs, 1);
	erts_pix_unlock(pix_lock);

	while (1) {
	    int res;
	    erts_tse_reset(wtr);

	    if (erts_atomic32_read_nob(&wtr->uaflgs) == 0)
		break;

	    /*
	     * Wait for needed locks. When we are woken, all needed locks
	     * have been acquired by other threads and transferred to us.
	     * However, we need to be prepared for spurious wakeups.
	     */
	    do {
		res = erts_tse_wait(wtr); /* might return EINTR */
	    } while (res != 0);
	}

	ASSERT(wtr->uflgs == 0);
    }

    ERTS_LC_ASSERT(locks == (ERTS_PROC_LOCK_FLGS_READ_(&p->lock) & locks));

    tse_return(wtr);
}