Example #1
void
get_sys_now(Uint* megasec, Uint* sec, Uint* microsec)
{
    SysTimeval now;
    
    erts_smp_mtx_lock(&erts_timeofday_mtx);
    
    sys_gettimeofday(&now);
    
    erts_smp_mtx_unlock(&erts_timeofday_mtx);
    
    *megasec = (Uint) (now.tv_sec / 1000000);
    *sec = (Uint) (now.tv_sec % 1000000);
    *microsec = (Uint) (now.tv_usec);
}
Example #2
/*
** Find an object in the hash table
*/
void* safe_hash_get(SafeHash* h, void* tmpl)
{
    SafeHashValue hval = h->fun.hash(tmpl);
    SafeHashBucket* b;
    erts_smp_mtx_t* lock = &h->lock_vec[hval % SAFE_HASH_LOCK_CNT].mtx;
    erts_smp_mtx_lock(lock);
    b = h->tab[hval & h->size_mask];
	
    while(b != NULL) {
	if ((b->hvalue == hval) && (h->fun.cmp(tmpl, (void*)b) == 0))
	    break;
	b = b->next;
    }
    erts_smp_mtx_unlock(lock);
    return (void*) b;
}
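
A note on the pattern: the lookup above takes only the one stripe mutex selected by the hash value, so lookups that land in different stripes do not contend. As a rough, self-contained analogue of this lock striping, using plain pthreads and hypothetical names rather than the ERTS types:

/* Hypothetical sketch of lock striping with pthreads; not the ERTS code. */
#include <pthread.h>
#include <stddef.h>

#define LOCK_CNT 16                     /* analogue of SAFE_HASH_LOCK_CNT */

struct bucket { struct bucket *next; unsigned hval; /* key/value ... */ };

struct striped_hash {
    pthread_mutex_t lock_vec[LOCK_CNT];
    struct bucket **tab;
    unsigned size_mask;                 /* table size is a power of two */
};

static struct bucket *striped_get(struct striped_hash *h, unsigned hval)
{
    /* The stripe is picked by the hash value, so two lookups hit the same
     * mutex only when their hash values map to the same stripe. */
    pthread_mutex_t *lock = &h->lock_vec[hval % LOCK_CNT];
    struct bucket *b;

    pthread_mutex_lock(lock);
    for (b = h->tab[hval & h->size_mask]; b != NULL; b = b->next)
        if (b->hval == hval /* && keys compare equal */)
            break;
    pthread_mutex_unlock(lock);
    return b;
}

Any operation that replaces the whole table, such as rehash() in Example #11 below, must instead acquire every stripe mutex before it may swap h->tab; see the sketch after that example.
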
Example #3
static ErtsMonotonicTime clock_gettime_monotonic_verified(void)
{
    ErtsMonotonicTime mtime;

    mtime = (ErtsMonotonicTime) posix_clock_gettime(MONOTONIC_CLOCK_ID,
						    MONOTONIC_CLOCK_ID_STR);

    erts_smp_mtx_lock(&internal_state.w.f.mtx);
    if (mtime < internal_state.w.f.last_delivered)
	mtime = internal_state.w.f.last_delivered;
    else
	internal_state.w.f.last_delivered = mtime;
    erts_smp_mtx_unlock(&internal_state.w.f.mtx);

    return mtime;
}
Example #4
static void clock_gettime_times_verified(ErtsMonotonicTime *mtimep,
					 ErtsSystemTime *stimep)
{
    posix_clock_gettime_times(MONOTONIC_CLOCK_ID,
			      MONOTONIC_CLOCK_ID_STR,
			      mtimep,
			      WALL_CLOCK_ID,
			      WALL_CLOCK_ID_STR,
			      stimep);

    erts_smp_mtx_lock(&internal_state.w.f.mtx);
    if (*mtimep < internal_state.w.f.last_delivered)
	*mtimep = internal_state.w.f.last_delivered;
    else
	internal_state.w.f.last_delivered = *mtimep;
    erts_smp_mtx_unlock(&internal_state.w.f.mtx);
}
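
Examples #3 and #4 apply the same guard: the raw OS clock reading is clamped, under a mutex, against the largest value already handed out (last_delivered), so callers never observe the "monotonic" time stepping backwards. A stripped-down sketch of that clamp, assuming CLOCK_MONOTONIC and pthreads instead of the ERTS wrappers (hypothetical names):

/* Hypothetical sketch of a "verified" monotonic read; not the ERTS code. */
#include <pthread.h>
#include <stdint.h>
#include <time.h>

static pthread_mutex_t mono_mtx = PTHREAD_MUTEX_INITIALIZER;
static int64_t last_delivered;          /* largest value handed out so far */

static int64_t monotonic_ns_verified(void)
{
    struct timespec ts;
    int64_t now;

    clock_gettime(CLOCK_MONOTONIC, &ts);
    now = (int64_t) ts.tv_sec * 1000000000 + ts.tv_nsec;

    pthread_mutex_lock(&mono_mtx);
    if (now < last_delivered)
        now = last_delivered;           /* never step backwards */
    else
        last_delivered = now;
    pthread_mutex_unlock(&mono_mtx);

    return now;
}
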
Example #5
void erts_release_code_write_permission(void)
{
    erts_smp_mtx_lock(&the_code_ix_queue_lock);
    while (the_code_ix_queue != NULL) { /* unleash the entire herd */
	struct code_ix_queue_item* qitem = the_code_ix_queue;
	erts_smp_proc_lock(qitem->p, ERTS_PROC_LOCK_STATUS);
	if (!ERTS_PROC_IS_EXITING(qitem->p)) {
	    erts_resume(qitem->p, ERTS_PROC_LOCK_STATUS);
	}
	erts_smp_proc_unlock(qitem->p, ERTS_PROC_LOCK_STATUS);
	the_code_ix_queue = qitem->next;
	erts_smp_proc_dec_refc(qitem->p);
	erts_free(ERTS_ALC_T_CODE_IX_LOCK_Q, qitem);
    }
    the_code_ix_lock = 0;
    erts_smp_mtx_unlock(&the_code_ix_queue_lock);
}
Example #6
ErlNode *erts_find_or_insert_node(Eterm sysname, Uint creation)
{
    ErlNode *res;
    ErlNode ne;
    ne.sysname = sysname;
    ne.creation = creation;
    erts_smp_mtx_lock(&erts_node_table_mtx);
    res = hash_put(&erts_node_table, (void *) &ne);
    ASSERT(res);
    if (res != erts_this_node) {
	long refc = erts_refc_inctest(&res->refc, 0);
	if (refc < 2) /* New or pending delete */
	    erts_refc_inc(&res->refc, 1);
    }
    erts_smp_mtx_unlock(&erts_node_table_mtx);
    return res;
}
Example #7
void erts_delete_node(ErlNode *enp)
{
    ASSERT(enp != erts_this_node);
    if(enp != erts_this_node) {
	erts_smp_mtx_lock(&erts_node_table_mtx);
	/*
	 * Another thread might have looked up this node after we
	 * decided to delete it (refc became zero). If so, the other
	 * thread incremented refc twice. Once for the new reference
	 * and once for this thread. Therefore, delete node if refc
	 * is 0 or -1 after a decrement.
	 */
	if (erts_refc_dectest(&enp->refc, -1) <= 0)
	    (void) hash_erase(&erts_node_table, (void *) enp);
	erts_smp_mtx_unlock(&erts_node_table_mtx);
    }
}
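
Examples #6 and #7 are the two sides of the reference-count protocol described in the comment above: the lookup in Example #6 increments refc a second time when it finds the entry "new or pending delete" (refc < 2), precisely so that the pending deleter's decrement in Example #7 leaves refc above zero and the entry survives. A minimal sketch of that protocol using C11 atomics and a pthread mutex instead of the erts_refc API (hypothetical names):

/* Hypothetical sketch of the lookup/delete refcount protocol; not ERTS code. */
#include <pthread.h>
#include <stdatomic.h>

struct entry { atomic_long refc; /* ... key and hash linkage ... */ };

static pthread_mutex_t table_mtx = PTHREAD_MUTEX_INITIALIZER;

static void erase_and_free(struct entry *e) { (void) e; /* stub for the sketch */ }

/* Lookup side (compare Example #6); runs with table_mtx held. */
static void take_reference(struct entry *e)
{
    /* +1 for the new reference ... */
    long refc = atomic_fetch_add(&e->refc, 1) + 1;
    /* ... and, if the entry is new or a delete is pending (refc was 0),
     * +1 more to compensate for the decrement the deleter will do. */
    if (refc < 2)
        atomic_fetch_add(&e->refc, 1);
}

/* Delete side (compare Example #7); called once refc has reached zero. */
static void maybe_delete(struct entry *e)
{
    pthread_mutex_lock(&table_mtx);
    /* A concurrent lookup may have revived the entry after its count hit
     * zero; if so it incremented refc twice, the decrement below leaves
     * refc > 0, and the entry stays in the table. */
    if (atomic_fetch_sub(&e->refc, 1) - 1 <= 0)
        erase_and_free(e);
    pthread_mutex_unlock(&table_mtx);
}
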
Example #8
void erts_delete_dist_entry(DistEntry *dep)
{
    ASSERT(dep != erts_this_dist_entry);
    if(dep != erts_this_dist_entry) {
	erts_smp_mtx_lock(&erts_dist_table_mtx);
	/*
	 * Another thread might have looked up this dist entry after
	 * we decided to delete it (refc became zero). If so, the other
	 * thread incremented refc twice. Once for the new reference
	 * and once for this thread. Therefore, delete dist entry if
	 * refc is 0 or -1 after a decrement.
	 */
	if (erts_refc_dectest(&dep->refc, -1) <= 0)
	    (void) hash_erase(&erts_dist_table, (void *) dep);
	erts_smp_mtx_unlock(&erts_dist_table_mtx);
    }
}
Example #9
Uint
erts_dist_table_size(void)
{
    Uint res;
#ifdef DEBUG
    HashInfo hi;
    DistEntry *dep;
    int i;
#endif
    int lock = !ERTS_IS_CRASH_DUMPING;

    if (lock)
	erts_smp_mtx_lock(&erts_dist_table_mtx);
#ifdef DEBUG
    hash_get_info(&hi, &erts_dist_table);
    ASSERT(dist_entries == hi.objs);

    i = 0;
    for(dep = erts_visible_dist_entries; dep; dep = dep->next)
	i++;
    ASSERT(i == erts_no_of_visible_dist_entries);
    i = 0;
    for(dep = erts_hidden_dist_entries; dep; dep = dep->next)
	i++;
    ASSERT(i == erts_no_of_hidden_dist_entries);
    i = 0;
    for(dep = erts_not_connected_dist_entries; dep; dep = dep->next)
	i++;
    ASSERT(i == erts_no_of_not_connected_dist_entries);

    ASSERT(dist_entries == (erts_no_of_visible_dist_entries
			    + erts_no_of_hidden_dist_entries
			    + erts_no_of_not_connected_dist_entries
			    + 1 /* erts_this_dist_entry */));
#endif

    res = (hash_table_sz(&erts_dist_table)
	   + dist_entries*sizeof(DistEntry)
	   + erts_dist_cache_size());
    if (lock)
	erts_smp_mtx_unlock(&erts_dist_table_mtx);
    return res;
}
Example #10
Uint
erts_node_table_size(void)
{
    Uint res;
#ifdef DEBUG
    HashInfo hi;
#endif
    int lock = !ERTS_IS_CRASH_DUMPING;
    if (lock)
	erts_smp_mtx_lock(&erts_node_table_mtx);
#ifdef DEBUG
    hash_get_info(&hi, &erts_node_table);
    ASSERT(node_entries == hi.objs);
#endif
    res = hash_table_sz(&erts_node_table) + node_entries*sizeof(ErlNode);
    if (lock)
	erts_smp_mtx_unlock(&erts_node_table_mtx);
    return res;
}
Example #11
/*
** Rehash all objects
*/
static void rehash(SafeHash* h, int grow_limit)
{
    if (erts_smp_atomic_xchg(&h->is_rehashing, 1) != 0) {        
	return; /* already in progress */
    }
    if (h->grow_limit == grow_limit) {
	int i, size, bytes;
	SafeHashBucket** new_tab;
	SafeHashBucket** old_tab = h->tab;
	int old_size = h->size_mask + 1;

	size = old_size * 2; /* double table size */
	bytes = size * sizeof(SafeHashBucket*);
	new_tab = (SafeHashBucket **) erts_alloc(h->type, bytes);
	sys_memzero(new_tab, bytes);

	for (i=0; i<SAFE_HASH_LOCK_CNT; i++) { /* stop all traffic */
	    erts_smp_mtx_lock(&h->lock_vec[i].mtx);
	}

	h->tab = new_tab;
	set_size(h, size);

	for (i = 0; i < old_size; i++) {
	    SafeHashBucket* b = old_tab[i];
	    while (b != NULL) {
		SafeHashBucket* b_next = b->next;
		int ix = b->hvalue & h->size_mask;
		b->next = new_tab[ix];
		new_tab[ix] = b;
		b = b_next;
	    }
	}

	for (i=0; i<SAFE_HASH_LOCK_CNT; i++) {
	    erts_smp_mtx_unlock(&h->lock_vec[i].mtx);
	}
	erts_free(h->type, (void *) old_tab);
    }
    /*else already done */
    erts_smp_atomic_set(&h->is_rehashing, 0);
}
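
Because every lookup (Example #2) holds exactly one of the SAFE_HASH_LOCK_CNT stripe mutexes, rehash() can "stop all traffic" simply by acquiring all of them, always in ascending index order, before touching h->tab. Continuing the hypothetical pthread striping sketch given after Example #2:

/* Continues the hypothetical striped_hash sketch; not the ERTS code. */
static void striped_swap_table(struct striped_hash *h,
                               struct bucket **new_tab,
                               unsigned new_size_mask)
{
    int i;

    /* Take every stripe lock in the same (ascending) order everywhere,
     * so two concurrent resizers cannot deadlock against each other. */
    for (i = 0; i < LOCK_CNT; i++)
        pthread_mutex_lock(&h->lock_vec[i]);

    /* No lookup can be in flight now: each one needs a lock we hold. */
    h->tab = new_tab;
    h->size_mask = new_size_mask;

    for (i = 0; i < LOCK_CNT; i++)
        pthread_mutex_unlock(&h->lock_vec[i]);
}

In rehash() above, the old buckets are additionally relinked into the new table before the stripe locks are released.
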
Example #12
void erts_time_remaining(SysTimeval *rem_time)
{
    int ticks;
#if !defined(ERTS_TIMER_THREAD)
    SysTimeval cur_time;
#endif
    long elapsed;

    /* next_time() returns no of ticks to next timeout or -1 if none */

    if ((ticks = next_time()) == -1) {
	/* timer queue empty */
	/* this will cause at most 100000000 ticks */
	rem_time->tv_sec = 100000;
	rem_time->tv_usec = 0;
    } else {
	/* next timeout after ticks ticks */
	ticks *= CLOCK_RESOLUTION;
	
#if defined(ERTS_TIMER_THREAD)
	elapsed = 0;
#else
	erts_smp_mtx_lock(&erts_timeofday_mtx);
	
	get_tolerant_timeofday(&cur_time);
	cur_time.tv_usec = 1000 * 
	    (cur_time.tv_usec / 1000);/* ms resolution*/
	elapsed = 1000 * (cur_time.tv_sec - last_delivered.tv_sec) +
	    (cur_time.tv_usec - last_delivered.tv_usec) / 1000;
	
	erts_smp_mtx_unlock(&erts_timeofday_mtx);
	
	if (ticks <= elapsed) { /* Ooops, better hurry */
	    rem_time->tv_sec = rem_time->tv_usec = 0;
	    return;
	}
#endif
	rem_time->tv_sec = (ticks - elapsed) / 1000;
	rem_time->tv_usec = 1000 * ((ticks - elapsed) % 1000);
    }
}
Example #13
File: code_ix.c Project: AugHu/otp
void erts_release_code_write_permission(void)
{
    erts_smp_mtx_lock(&code_write_permission_mtx);
    ERTS_SMP_LC_ASSERT(erts_has_code_write_permission());
    while (code_write_queue != NULL) { /* unleash the entire herd */
	struct code_write_queue_item* qitem = code_write_queue;
	erts_smp_proc_lock(qitem->p, ERTS_PROC_LOCK_STATUS);
	if (!ERTS_PROC_IS_EXITING(qitem->p)) {
	    erts_resume(qitem->p, ERTS_PROC_LOCK_STATUS);
	}
	erts_smp_proc_unlock(qitem->p, ERTS_PROC_LOCK_STATUS);
	code_write_queue = qitem->next;
	erts_proc_dec_refc(qitem->p);
	erts_free(ERTS_ALC_T_CODE_IX_LOCK_Q, qitem);
    }
    code_writing_process = NULL;
#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_tsd_set(has_code_write_permission, (void *) 0);
#endif
    erts_smp_mtx_unlock(&code_write_permission_mtx);
}
Example #14
void 
wall_clock_elapsed_time_both(UWord *ms_total, UWord *ms_diff)
{
    UWord prev_total;
    SysTimeval tv;

    erts_smp_mtx_lock(&erts_timeofday_mtx);

    get_tolerant_timeofday(&tv);

    *ms_total = 1000 * (tv.tv_sec - inittv.tv_sec) +
	(tv.tv_usec - inittv.tv_usec) / 1000;

    prev_total = 1000 * (gtv.tv_sec - inittv.tv_sec) +
	(gtv.tv_usec - inittv.tv_usec) / 1000;
    *ms_diff = *ms_total - prev_total;
    gtv = tv;

    /* must sync the machine's idea of time here */
    do_erts_deliver_time(&tv);

    erts_smp_mtx_unlock(&erts_timeofday_mtx);
}
Example #15
void
erts_get_emu_time(SysTimeval *this_emu_time_p)
{
    erts_smp_mtx_lock(&erts_timeofday_mtx);
    
    get_tolerant_timeofday(this_emu_time_p);

    /* Make sure time is later than last */
    if (last_emu_time.tv_sec > this_emu_time_p->tv_sec ||
	(last_emu_time.tv_sec == this_emu_time_p->tv_sec
	 && last_emu_time.tv_usec >= this_emu_time_p->tv_usec)) {
	*this_emu_time_p = last_emu_time;
	this_emu_time_p->tv_usec++;
    }
    /* Check for carry from above + general reasonability */
    if (this_emu_time_p->tv_usec >= 1000000) {
	this_emu_time_p->tv_usec = 0;
	this_emu_time_p->tv_sec++;
    }

    last_emu_time = *this_emu_time_p;
    
    erts_smp_mtx_unlock(&erts_timeofday_mtx);
}
Example #16
void erts_time_remaining(SysTimeval *rem_time)
{
    erts_time_t ticks;
    SysTimeval cur_time;
    erts_time_t elapsed;

    /* erts_next_time() returns no of ticks to next timeout or -1 if none */

    ticks = (erts_time_t) erts_next_time();
    if (ticks == (erts_time_t) -1) {
	/* timer queue empty */
	/* this will cause at most 100000000 ticks */
	rem_time->tv_sec = 100000;
	rem_time->tv_usec = 0;
    } else {
	/* next timeout after ticks ticks */
	ticks *= CLOCK_RESOLUTION;
	
	erts_smp_mtx_lock(&erts_timeofday_mtx);
	
	get_tolerant_timeofday(&cur_time);
	cur_time.tv_usec = 1000 * 
	    (cur_time.tv_usec / 1000);/* ms resolution*/
	elapsed = 1000 * (cur_time.tv_sec - last_delivered.tv_sec) +
	    (cur_time.tv_usec - last_delivered.tv_usec) / 1000;
	
	erts_smp_mtx_unlock(&erts_timeofday_mtx);
	
	if (ticks <= elapsed) { /* Ooops, better hurry */
	    rem_time->tv_sec = rem_time->tv_usec = 0;
	    return;
	}
	rem_time->tv_sec = (ticks - elapsed) / 1000;
	rem_time->tv_usec = 1000 * ((ticks - elapsed) % 1000);
    }
}
Example #17
/*
 * Caller _must_ yield if we return 0
 */
int erts_try_seize_code_write_permission(Process* c_p)
{
    int success;
#ifdef ERTS_SMP
    ASSERT(!erts_smp_thr_progress_is_blocking()); /* to avoid deadlock */
#endif

    erts_smp_mtx_lock(&the_code_ix_queue_lock);
    success = !the_code_ix_lock;
    if (success) {
	the_code_ix_lock = 1;
    }
    else { /* Already locked */
	struct code_ix_queue_item* qitem;
	qitem = erts_alloc(ERTS_ALC_T_CODE_IX_LOCK_Q, sizeof(*qitem));
	qitem->p = c_p;
	erts_smp_proc_inc_refc(c_p);
	qitem->next = the_code_ix_queue;
	the_code_ix_queue = qitem;
	erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
    }
    erts_smp_mtx_unlock(&the_code_ix_queue_lock);
    return success;
}
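
Examples #5, #13 and #17 together implement a "code write permission" lock: erts_try_seize_code_write_permission() either takes the flag or queues the calling process and suspends it, and the release function resumes every queued process at once ("unleash the entire herd"). An Erlang process cannot simply block inside a BIF, which is why the real code suspends and returns 0 so the caller yields. A plain-thread analogue that blocks on a condition variable instead (hypothetical names, deliberately not the ERTS calling convention) would look roughly like this:

/* Hypothetical thread analogue of seize/release; ERTS instead suspends the
 * calling Erlang process and resumes it from the release side. */
#include <pthread.h>

static pthread_mutex_t perm_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  perm_cnd = PTHREAD_COND_INITIALIZER;
static int code_write_locked;

static void seize_code_write_permission(void)
{
    pthread_mutex_lock(&perm_mtx);
    while (code_write_locked)           /* queued waiters simply block here */
        pthread_cond_wait(&perm_cnd, &perm_mtx);
    code_write_locked = 1;
    pthread_mutex_unlock(&perm_mtx);
}

static void release_code_write_permission(void)
{
    pthread_mutex_lock(&perm_mtx);
    code_write_locked = 0;
    pthread_cond_broadcast(&perm_cnd);  /* wake the entire herd */
    pthread_mutex_unlock(&perm_mtx);
}
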
Example #18
int
erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
{
    int port_was_enqueued = 0;
    Port *pp;
    ErtsPortTaskQueue *ptqp;
    ErtsPortTask *ptp;
    int res = 0;
    int reds = ERTS_PORT_REDS_EXECUTE;
    erts_aint_t io_tasks_executed = 0;
    int fpe_was_unmasked;
    ErtsPortTaskExeBlockData blk_data = {runq, NULL};

    ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));

    ERTS_PT_CHK_PORTQ(runq);

    pp = pop_port(runq);
    if (!pp) {
	res = 0;
	goto done;
    }

    ERTS_PORT_NOT_IN_RUNQ(pp);

    *curr_port_pp = pp;

    ASSERT(pp->sched.taskq);
    ASSERT(pp->sched.taskq->first);
    ptqp = pp->sched.taskq;
    pp->sched.taskq = NULL;

    ASSERT(!pp->sched.exe_taskq);
    pp->sched.exe_taskq = ptqp;

    if (erts_smp_port_trylock(pp) == EBUSY) {
	erts_smp_runq_unlock(runq);
	erts_smp_port_lock(pp);
	erts_smp_runq_lock(runq);
    }
    
    if (erts_sched_stat.enabled) {
	ErtsSchedulerData *esdp = erts_get_scheduler_data();
	Uint old = ERTS_PORT_SCHED_ID(pp, esdp->no);
	int migrated = old && old != esdp->no;

	erts_smp_spin_lock(&erts_sched_stat.lock);
	erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].total_executed++;
	erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].executed++;
	if (migrated) {
	    erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].total_migrated++;
	    erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].migrated++;
	}
	erts_smp_spin_unlock(&erts_sched_stat.lock);
    }

    /* trace port scheduling, in */
    if (IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS)) {
	trace_sched_ports(pp, am_in);
    }

    ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));

    ERTS_PT_CHK_PRES_PORTQ(runq, pp);
    ptp = pop_task(ptqp);

    fpe_was_unmasked = erts_block_fpe();

    while (ptp) {
	ASSERT(pp->sched.taskq != pp->sched.exe_taskq);

	reset_handle(ptp);
	erts_smp_runq_unlock(runq);

	ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
	ERTS_SMP_CHK_NO_PROC_LOCKS;
	ASSERT(pp->drv_ptr);

	switch (ptp->type) {
	case ERTS_PORT_TASK_FREE: /* May be pushed in q at any time */
	    reds += ERTS_PORT_REDS_FREE;
	    erts_smp_runq_lock(runq);

	    erts_unblock_fpe(fpe_was_unmasked);
	    ASSERT(pp->status & ERTS_PORT_SFLG_FREE_SCHEDULED);
	    if (ptqp->first || (pp->sched.taskq && pp->sched.taskq->first))
		handle_remaining_tasks(runq, pp);
	    ASSERT(!ptqp->first
		   && (!pp->sched.taskq || !pp->sched.taskq->first));
#ifdef ERTS_SMP
	    erts_smp_atomic_dec_nob(&pp->refc); /* Not alive */
	    ERTS_SMP_LC_ASSERT(erts_smp_atomic_read_nob(&pp->refc) > 0); /* Lock */
#else
	    erts_port_status_bor_set(pp, ERTS_PORT_SFLG_FREE);	    
#endif

	    port_task_free(ptp);
	    if (pp->sched.taskq)
		port_taskq_free(pp->sched.taskq);
	    pp->sched.taskq = NULL;

	    goto tasks_done;
	case ERTS_PORT_TASK_TIMEOUT:
	    reds += ERTS_PORT_REDS_TIMEOUT;
	    if (!(pp->status & ERTS_PORT_SFLGS_DEAD))
		(*pp->drv_ptr->timeout)((ErlDrvData) pp->drv_data);
	    break;
	case ERTS_PORT_TASK_INPUT:
	    reds += ERTS_PORT_REDS_INPUT;
	    ASSERT((pp->status & ERTS_PORT_SFLGS_DEAD) == 0);
	    /* NOTE some windows drivers use ->ready_input for input and output */
	    (*pp->drv_ptr->ready_input)((ErlDrvData) pp->drv_data, ptp->event);
	    io_tasks_executed++;
	    break;
	case ERTS_PORT_TASK_OUTPUT:
	    reds += ERTS_PORT_REDS_OUTPUT;
	    ASSERT((pp->status & ERTS_PORT_SFLGS_DEAD) == 0);
	    (*pp->drv_ptr->ready_output)((ErlDrvData) pp->drv_data, ptp->event);
	    io_tasks_executed++;
	    break;
	case ERTS_PORT_TASK_EVENT:
	    reds += ERTS_PORT_REDS_EVENT;
	    ASSERT((pp->status & ERTS_PORT_SFLGS_DEAD) == 0);
	    (*pp->drv_ptr->event)((ErlDrvData) pp->drv_data, ptp->event, ptp->event_data);
	    io_tasks_executed++;
	    break;
	case ERTS_PORT_TASK_DIST_CMD:
	    reds += erts_dist_command(pp, CONTEXT_REDS-reds);
	    break;
	default:
	    erl_exit(ERTS_ABORT_EXIT,
		     "Invalid port task type: %d\n",
		     (int) ptp->type);
	    break;
	}

	if ((pp->status & ERTS_PORT_SFLG_CLOSING)
	    && erts_is_port_ioq_empty(pp)) {
	    reds += ERTS_PORT_REDS_TERMINATE;
	    erts_terminate_port(pp);
	}

	ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));

#ifdef ERTS_SMP
	if (pp->xports)
	    erts_smp_xports_unlock(pp);
	ASSERT(!pp->xports);
#endif

	ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));

	port_task_free(ptp);

	erts_smp_runq_lock(runq);

	ptp = pop_task(ptqp);
    }

 tasks_done:

    erts_unblock_fpe(fpe_was_unmasked);

    if (io_tasks_executed) {
	ASSERT(erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
	       >= io_tasks_executed);
	erts_smp_atomic_add_relb(&erts_port_task_outstanding_io_tasks,
				 -1*io_tasks_executed);
    }

    *curr_port_pp = NULL;

#ifdef ERTS_SMP
    ASSERT(runq == (ErtsRunQueue *) erts_smp_atomic_read_nob(&pp->run_queue));
#endif

    if (!pp->sched.taskq) {
	ASSERT(pp->sched.exe_taskq);
	pp->sched.exe_taskq = NULL;
    }
    else {
#ifdef ERTS_SMP
	ErtsRunQueue *xrunq;
#endif

	ASSERT(!(pp->status & ERTS_PORT_SFLGS_DEAD));
	ASSERT(pp->sched.taskq->first);

#ifdef ERTS_SMP
	xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL);
	if (!xrunq) {
#endif
	    enqueue_port(runq, pp);
	    ASSERT(pp->sched.exe_taskq);
	    pp->sched.exe_taskq = NULL;
	    /* No need to notify ourselves about inc in runq. */
#ifdef ERTS_SMP
	}
	else {
	    /* Port emigrated ... */
	    erts_smp_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq);
	    enqueue_port(xrunq, pp);
	    ASSERT(pp->sched.exe_taskq);
	    pp->sched.exe_taskq = NULL;
	    erts_smp_runq_unlock(xrunq);
	    erts_smp_notify_inc_runq(xrunq);
	}
#endif
	port_was_enqueued = 1;
    }

    res = (erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
	   != (erts_aint_t) 0);

    ERTS_PT_CHK_PRES_PORTQ(runq, pp);

    port_taskq_free(ptqp);

    if (erts_system_profile_flags.runnable_ports && (port_was_enqueued != 1)) {
    	profile_runnable_port(pp, am_inactive);
    }

    /* trace port scheduling, out */
    if (IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS)) {
    	trace_sched_ports(pp, am_out);
    }
#ifndef ERTS_SMP
    erts_port_release(pp);
#else
    {
	erts_aint_t refc;
	erts_smp_mtx_unlock(pp->lock);
	refc = erts_smp_atomic_dec_read_nob(&pp->refc);
	ASSERT(refc >= 0);
	if (refc == 0) {
	    erts_smp_runq_unlock(runq);
	    erts_port_cleanup(pp); /* Might acquire runq lock */
	    erts_smp_runq_lock(runq);
	    res = (erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
		   != (erts_aint_t) 0);
	}
    }
#endif

 done:
    blk_data.resp = &res;

    ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));

    ERTS_PORT_REDUCTIONS_EXECUTED(runq, reds);

    return res;
}
Example #19
void erts_get_timeval(SysTimeval *tv)
{
    erts_smp_mtx_lock(&erts_timeofday_mtx);
    get_tolerant_timeofday(tv);
    erts_smp_mtx_unlock(&erts_timeofday_mtx);
}
Example #20
File: time.c Project: 1153/otp
static ERTS_INLINE void bump_timer_internal(erts_short_time_t dt) /* PRE: tiw_lock is write-locked */
{
    Uint keep_pos;
    Uint count;
    ErlTimer *p, **prev, *timeout_head, **timeout_tail;
    Uint dtime = (Uint) dt;  

    /* no need to bump the position if there aren't any timeouts */
    if (tiw_nto == 0) {
	erts_smp_mtx_unlock(&tiw_lock);
	return;
    }

    /* if do_time > TIW_SIZE we want to go around just once */
    count = (Uint)(dtime / TIW_SIZE) + 1;
    keep_pos = (tiw_pos + dtime) % TIW_SIZE;
    if (dtime > TIW_SIZE) dtime = TIW_SIZE;
  
    timeout_head = NULL;
    timeout_tail = &timeout_head;
    while (dtime > 0) {
	/* this is to decrease the counters with the right amount */
	/* when dtime >= TIW_SIZE */
	if (tiw_pos == keep_pos) count--;
	prev = &tiw[tiw_pos];
	while ((p = *prev) != NULL) {
	    ASSERT( p != p->next);
	    if (p->count < count) {     /* we have a timeout */
		/* remove min time */
		if (tiw_min_ptr == p) {
		    tiw_min_ptr = NULL;
		    tiw_min = 0;
		}

		/* Remove from list */
		remove_timer(p);
		*timeout_tail = p;	/* Insert in timeout queue */
		timeout_tail = &p->next;
	    }
	    else {
		/* no timeout, just decrease counter */
		p->count -= count;
		prev = &p->next;
	    }
	}
	tiw_pos = (tiw_pos + 1) % TIW_SIZE;
	dtime--;
    }
    tiw_pos = keep_pos;
    if (tiw_min_ptr)
	tiw_min -= dt;
    
    erts_smp_mtx_unlock(&tiw_lock);
    
    /* Call the timed-out timers' callbacks */
    while (timeout_head) {
	p = timeout_head;
	timeout_head = p->next;
	/* Here comes hairy use of the timer fields!
	 * They are reset without having the lock.
	 * It is assumed that no code but this will
	 * access any field until the ->timeout
	 * callback is called.
	 */
	p->next = NULL;
	p->prev = NULL;
	p->slot = 0;
	(*p->timeout)(p->arg);
    }
}
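
Note the shape of bump_timer_internal(): every expired ErlTimer is unlinked into the private timeout_head list while tiw_lock is still held, and the ->timeout callbacks run only after erts_smp_mtx_unlock(). A callback that re-arms a timer would otherwise try to take the wheel lock again and deadlock. A self-contained sketch of that "collect under the lock, call back after unlock" pattern (hypothetical names, pthreads):

/* Hypothetical sketch; not the ERTS timer wheel. */
#include <pthread.h>
#include <stddef.h>

struct timer {
    struct timer *next;
    void (*timeout)(void *arg);
    void *arg;
};

static pthread_mutex_t wheel_mtx = PTHREAD_MUTEX_INITIALIZER;
static struct timer *expired_list;      /* filled elsewhere, under wheel_mtx */

static void fire_expired(void)
{
    struct timer *head, *t;

    /* Detach the whole expired list while holding the lock ... */
    pthread_mutex_lock(&wheel_mtx);
    head = expired_list;
    expired_list = NULL;
    pthread_mutex_unlock(&wheel_mtx);

    /* ... and run the callbacks without it, so a callback that re-arms a
     * timer (and therefore takes wheel_mtx again) cannot deadlock. */
    while (head) {
        t = head;
        head = t->next;
        t->next = NULL;
        (*t->timeout)(t->arg);
    }
}
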
Example #21
BIF_RETTYPE erts_internal_release_literal_area_switch_0(BIF_ALIST_0)
{
    ErtsLiteralArea *unused_la;
    ErtsLiteralAreaRef *la_ref;

    if (BIF_P != erts_literal_area_collector)
	BIF_ERROR(BIF_P, EXC_NOTSUP);

    erts_smp_mtx_lock(&release_literal_areas.mtx);

    la_ref = release_literal_areas.first;
    if (la_ref) {
	release_literal_areas.first = la_ref->next;
	if (!release_literal_areas.first)
	    release_literal_areas.last = NULL;
    }

    erts_smp_mtx_unlock(&release_literal_areas.mtx);

    unused_la = ERTS_COPY_LITERAL_AREA();

    if (!la_ref) {
	ERTS_SET_COPY_LITERAL_AREA(NULL);
	if (unused_la) {
#ifdef ERTS_SMP
	    ErtsLaterReleasLiteralArea *lrlap;
	    lrlap = erts_alloc(ERTS_ALC_T_RELEASE_LAREA,
			       sizeof(ErtsLaterReleasLiteralArea));
	    lrlap->la = unused_la;
	    erts_schedule_thr_prgr_later_cleanup_op(
		later_release_literal_area,
		(void *) lrlap,
		&lrlap->lop,
		(sizeof(ErtsLaterReleasLiteralArea)
		 + sizeof(ErtsLiteralArea)
		 + ((unused_la->end
		     - &unused_la->start[0])
		    - 1)*(sizeof(Eterm))));
#else
	    erts_release_literal_area(unused_la);
#endif
	}
	BIF_RET(am_false);
    }

    ERTS_SET_COPY_LITERAL_AREA(la_ref->literal_area);

    erts_free(ERTS_ALC_T_LITERAL_REF, la_ref);

#ifdef ERTS_SMP
    erts_schedule_thr_prgr_later_op(complete_literal_area_switch,
				    unused_la,
				    &later_literal_area_switch);
    erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
    ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
#else
    erts_release_literal_area(unused_la);
    BIF_RET(am_true);
#endif

}
Example #22
BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2)
{
    if (BIF_P != erts_code_purger)
	BIF_ERROR(BIF_P, EXC_NOTSUP);

    if (is_not_atom(BIF_ARG_1))
	BIF_ERROR(BIF_P, BADARG);

    switch (BIF_ARG_2) {

    case am_prepare:
    case am_prepare_on_load: {
	/*
	 * Prepare for purge by marking all fun
	 * entries referring to the code to purge
	 * with "pending purge" markers.
	 */
	ErtsCodeIndex code_ix;
	Module* modp;
	Eterm res;

	if (is_value(purge_state.module))
	    BIF_ERROR(BIF_P, BADARG);

	code_ix = erts_active_code_ix();

	/*
	 * Correct module?
	 */
	modp = erts_get_module(BIF_ARG_1, code_ix);
	if (!modp)
	    res = am_false;
	else {
	    /*
	     * Any code to purge?
	     */

	    if (BIF_ARG_2 == am_prepare_on_load) {
                erts_rwlock_old_code(code_ix);
	    } else {
		erts_rlock_old_code(code_ix);
	    }

	    if (BIF_ARG_2 == am_prepare_on_load) {
		ASSERT(modp->on_load);
		ASSERT(modp->on_load->code_hdr);
		purge_state.saved_old = modp->old;
		modp->old = *modp->on_load;
		erts_free(ERTS_ALC_T_PREPARED_CODE, (void *) modp->on_load);
		modp->on_load = 0;
	    }

	    if (!modp->old.code_hdr)
		res = am_false;
	    else {
		BeamInstr* code;
		BeamInstr* end;
		erts_smp_mtx_lock(&purge_state.mtx);
		purge_state.module = BIF_ARG_1;
		erts_smp_mtx_unlock(&purge_state.mtx);
		res = am_true;
		code = (BeamInstr*) modp->old.code_hdr;
		end = (BeamInstr *)((char *)code + modp->old.code_length);
		erts_fun_purge_prepare(code, end);
	    }

            if (BIF_ARG_2 == am_prepare_on_load) {
                erts_rwunlock_old_code(code_ix);
	    } else {
                erts_runlock_old_code(code_ix);
	    }
	}
	
#ifndef ERTS_SMP
	BIF_RET(res);
#else
	if (res != am_true)
	    BIF_RET(res);
	else {
	    /*
	     * We'll be resumed when all schedulers are guaranteed
	     * to see the "pending purge" markers that we've made on
	     * all fun entries of the code that we are about to purge.
	     * Processes trying to call these funs will be suspended
	     * before calling the funs. That is we are guaranteed not
	     * to get any more direct references into the code while
	     * checking for such references...
	     */
	    erts_schedule_thr_prgr_later_op(resume_purger,
					    NULL,
					    &purger_lop_data);
	    erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
	    ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
	}
#endif
    }

    case am_abort: {
	/*
	 * Soft purge that detected direct references into the code
	 * we set out to purge. Abort the purge.
	 */

	if (purge_state.module != BIF_ARG_1)
	    BIF_ERROR(BIF_P, BADARG);

	erts_fun_purge_abort_prepare(purge_state.funs, purge_state.fe_ix);

#ifndef ERTS_SMP
	erts_fun_purge_abort_finalize(purge_state.funs, purge_state.fe_ix);
	finalize_purge_operation(BIF_P, 0);
	BIF_RET(am_false);
#else
	/*
	 * We need to restore the code addresses of the funs in
	 * two stages in order to ensure that we do not get any
	 * stale suspended processes due to the purge abort.
	 * Restore address pointer (erts_fun_purge_abort_prepare);
	 * wait for thread progress; clear pending purge address
	 * pointer (erts_fun_purge_abort_finalize), and then
	 * resume processes that got suspended
	 * (finalize_purge_operation).
	 */
	erts_schedule_thr_prgr_later_op(finalize_purge_abort,
					NULL,
					&purger_lop_data);
	erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
	ERTS_BIF_YIELD_RETURN(BIF_P, am_false);
#endif
    }

    case am_complete: {
	ErtsCodeIndex code_ix;
	BeamInstr* code;
	Module* modp;
	int is_blocking = 0;
	Eterm ret;
	ErtsLiteralArea *literals = NULL;


	/*
	 * We have no direct references into the code.
	 * Complete the purge.
	 */

	if (purge_state.module != BIF_ARG_1)
	    BIF_ERROR(BIF_P, BADARG);

	if (!erts_try_seize_code_write_permission(BIF_P)) {
	    ERTS_BIF_YIELD2(bif_export[BIF_erts_internal_purge_module_2],
			    BIF_P, BIF_ARG_1, BIF_ARG_2);
	}

	code_ix = erts_active_code_ix();

	/*
	 * Correct module?
	 */

	if ((modp = erts_get_module(BIF_ARG_1, code_ix)) == NULL) {
	    ERTS_BIF_PREP_RET(ret, am_false);
	}
	else {

	    erts_rwlock_old_code(code_ix);

	    /*
	     * Any code to purge?
	     */
	    if (!modp->old.code_hdr) {
		ERTS_BIF_PREP_RET(ret, am_false);
	    }
	    else {
		/*
		 * Unload any NIF library
		 */
		if (modp->old.nif != NULL
		    || IF_HIPE(hipe_purge_need_blocking(modp))) {
		    /* ToDo: Do unload nif without blocking */
		    erts_rwunlock_old_code(code_ix);
		    erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
		    erts_smp_thr_progress_block();
		    is_blocking = 1;
		    erts_rwlock_old_code(code_ix);
		    if (modp->old.nif) {
		      erts_unload_nif(modp->old.nif);
		      modp->old.nif = NULL;
		    }
		}

		/*
		 * Remove the old code.
		 */
		ASSERT(erts_total_code_size >= modp->old.code_length);
		erts_total_code_size -= modp->old.code_length;
		code = (BeamInstr*) modp->old.code_hdr;
		erts_fun_purge_complete(purge_state.funs, purge_state.fe_ix);
		beam_catches_delmod(modp->old.catches, code, modp->old.code_length,
				    code_ix);
		literals = modp->old.code_hdr->literal_area;
		modp->old.code_hdr->literal_area = NULL;
		erts_free(ERTS_ALC_T_CODE, (void *) code);
		modp->old.code_hdr = NULL;
		modp->old.code_length = 0;
		modp->old.catches = BEAM_CATCHES_NIL;
		erts_remove_from_ranges(code);
#ifdef HIPE
		hipe_purge_module(modp, is_blocking);
#endif
		ERTS_BIF_PREP_RET(ret, am_true);
	    }

	    if (purge_state.saved_old.code_hdr) {
		modp->old = purge_state.saved_old;
		purge_state.saved_old.code_hdr = 0;
	    }
	    erts_rwunlock_old_code(code_ix);
	}
	if (is_blocking) {
	    erts_smp_thr_progress_unblock();
	    erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
	}

	erts_release_code_write_permission();

	finalize_purge_operation(BIF_P, ret == am_true);

	if (literals) {
	    ErtsLiteralAreaRef *ref;
	    ref = erts_alloc(ERTS_ALC_T_LITERAL_REF,
			     sizeof(ErtsLiteralAreaRef));
	    ref->literal_area = literals;
	    ref->next = NULL;
	    erts_smp_mtx_lock(&release_literal_areas.mtx);
	    if (release_literal_areas.last) {
		release_literal_areas.last->next = ref;
		release_literal_areas.last = ref;
	    }
	    else {
		release_literal_areas.first = ref;
		release_literal_areas.last = ref;
	    }
	    erts_smp_mtx_unlock(&release_literal_areas.mtx);
	    erts_queue_message(erts_literal_area_collector,
			       0,
			       erts_alloc_message(0, NULL),
			       am_copy_literals,
			       BIF_P->common.id);
	}

	return ret;
    }

    default:
	BIF_ERROR(BIF_P, BADARG);

    }
}
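
The literal-area handover at the end of Example #22 is the producer side of a small mutex-protected FIFO (release_literal_areas.first/last); Example #21 is the consumer that pops one ErtsLiteralAreaRef per call. The queue discipline itself reduces to the following sketch (hypothetical names, pthreads):

/* Hypothetical sketch of the two-pointer FIFO under a mutex; not ERTS code. */
#include <pthread.h>
#include <stddef.h>

struct ref { struct ref *next; /* payload, e.g. a literal area pointer */ };

static struct {
    pthread_mutex_t mtx;
    struct ref *first;
    struct ref *last;
} queue = { PTHREAD_MUTEX_INITIALIZER, NULL, NULL };

/* Producer side (compare the tail append in Example #22). */
static void queue_push(struct ref *r)
{
    r->next = NULL;
    pthread_mutex_lock(&queue.mtx);
    if (queue.last)
        queue.last->next = r;
    else
        queue.first = r;
    queue.last = r;
    pthread_mutex_unlock(&queue.mtx);
}

/* Consumer side (compare the head removal in Example #21). */
static struct ref *queue_pop(void)
{
    struct ref *r;

    pthread_mutex_lock(&queue.mtx);
    r = queue.first;
    if (r) {
        queue.first = r->next;
        if (queue.first == NULL)
            queue.last = NULL;
    }
    pthread_mutex_unlock(&queue.mtx);
    return r;
}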