Example #1
static int
ms_wait(Process *c_p, Eterm etimeout, int busy)
{
    ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
    ErtsMonotonicTime time, timeout_time;
    Sint64 ms;

    if (!term_to_Sint64(etimeout, &ms))
	return 0;

    time = erts_get_monotonic_time(esdp);

    if (ms < 0)
	timeout_time = time;
    else
	timeout_time = time + ERTS_MSEC_TO_MONOTONIC(ms);

    while (time < timeout_time) {
	if (busy)
	    erts_thr_yield();
	else {
	    ErtsMonotonicTime timeout = timeout_time - time;

#ifdef __WIN32__
	    Sleep((DWORD) ERTS_MONOTONIC_TO_MSEC(timeout));
#else
	    {
		ErtsMonotonicTime to = ERTS_MONOTONIC_TO_USEC(timeout);
		struct timeval tv;

		tv.tv_sec = (long) (to / (1000*1000));
		tv.tv_usec = (long) (to % (1000*1000));

		select(0, NULL, NULL, NULL, &tv);
	    }
#endif
	}

	time = erts_get_monotonic_time(esdp);
    }
    return 1;
}
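
A minimal standalone sketch of the same spin-or-sleep pattern as ms_wait(), written against plain POSIX calls instead of the ERTS time API. The names now_ms() and wait_ms() are hypothetical; clock_gettime(), sched_yield() and select() are standard POSIX. This is an illustration of the technique, not ERTS code.

/* Hypothetical sketch: wait until a monotonic deadline, either by
 * burning CPU (yielding between checks) or by sleeping the remainder. */
#include <sys/select.h>
#include <sched.h>
#include <time.h>

static long long now_ms(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (long long) ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

static void wait_ms(long long ms, int busy)
{
    long long deadline = now_ms() + (ms < 0 ? 0 : ms);

    while (now_ms() < deadline) {
        if (busy) {
            sched_yield();                      /* like erts_thr_yield() */
        } else {
            long long left = deadline - now_ms();
            struct timeval tv;
            tv.tv_sec = (long) (left / 1000);
            tv.tv_usec = (long) ((left % 1000) * 1000);
            select(0, NULL, NULL, NULL, &tv);   /* portable sub-second sleep */
        }
    }
}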
Example #2
/*
 * erts_proc_lock_failed() is called when erts_smp_proc_lock()
 * wasn't able to lock all locks. We may need to transfer locks
 * to waiters and wait for our turn on locks.
 *
 * Iff !ERTS_PROC_LOCK_ATOMIC_IMPL, the pix lock is locked on entry.
 *
 * This always returns with the pix lock unlocked.
 */
void
erts_proc_lock_failed(Process *p,
		      erts_pix_lock_t *pixlck,
		      ErtsProcLocks locks,
		      ErtsProcLocks old_lflgs)
{
    int until_yield = ERTS_PROC_LOCK_SPIN_UNTIL_YIELD;
    int thr_spin_count;
    int spin_count;
    ErtsProcLocks need_locks = locks;
    ErtsProcLocks olflgs = old_lflgs;

    if (erts_thr_get_main_status())
	thr_spin_count = proc_lock_spin_count;
    else
	thr_spin_count = aux_thr_proc_lock_spin_count;

    spin_count = thr_spin_count;

    while (need_locks != 0) {
        ErtsProcLocks can_grab;

	can_grab = in_order_locks(olflgs, need_locks);

        if (can_grab == 0) {
            /* Someone already has the lowest-numbered lock we want. */

            if (spin_count-- <= 0) {
                /* Too many retries, give up and sleep for the lock. */
                wait_for_locks(p, pixlck, locks, need_locks, olflgs);
                return;
            }

	    ERTS_SPIN_BODY;

	    if (--until_yield == 0) {
		until_yield = ERTS_PROC_LOCK_SPIN_UNTIL_YIELD;
		erts_thr_yield();
	    }

            olflgs = ERTS_PROC_LOCK_FLGS_READ_(&p->lock);
        }
        else {
            /* Try to grab all of the grabbable locks at once with cmpxchg. */
            ErtsProcLocks grabbed = olflgs | can_grab;
            ErtsProcLocks nflgs =
                ERTS_PROC_LOCK_FLGS_CMPXCHG_ACQB_(&p->lock, grabbed, olflgs);

            if (nflgs == olflgs) {
                /* Success! We grabbed the 'can_grab' locks. */
                olflgs = grabbed;
                need_locks &= ~can_grab;

                /* Since we made progress, reset the spin count. */
                spin_count = thr_spin_count;
            }
            else {
                /* Compare-and-exchange failed, try again. */
                olflgs = nflgs;
            }
        }
    }

   /* Now we have all of the locks we wanted. */

#if !ERTS_PROC_LOCK_ATOMIC_IMPL
    erts_pix_unlock(pixlck);
#endif
}
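
A hedged sketch of the same spin-then-block shape as erts_proc_lock_failed(), using C11 atomics: grab whatever requested lock bits are currently free with a compare-and-exchange, yield now and then, and fall back to blocking once the spin budget runs out. lock_failed(), block_on_lock(), SPIN_LIMIT and YIELD_EVERY are made-up stand-ins, and the sketch skips the lock-ordering constraint the real code enforces through in_order_locks().

/* Hypothetical sketch of the spin-then-block acquisition loop above.
 * One atomic word holds the lock bits; 'want' is the set we need. */
#include <stdatomic.h>
#include <sched.h>

#define SPIN_LIMIT   1000
#define YIELD_EVERY  100

typedef unsigned int locks_t;

static void block_on_lock(atomic_uint *lock, locks_t want)
{
    /* Stand-in for wait_for_locks(): the real code enqueues as a waiter
     * and sleeps; this stub just yields until it can CAS the bits in. */
    locks_t seen = atomic_load_explicit(lock, memory_order_relaxed);
    while ((seen & want) != 0 ||
           !atomic_compare_exchange_weak_explicit(lock, &seen, seen | want,
                                                  memory_order_acquire,
                                                  memory_order_relaxed)) {
        sched_yield();
        seen = atomic_load_explicit(lock, memory_order_relaxed);
    }
}

static void lock_failed(atomic_uint *lock, locks_t want)
{
    int spins = SPIN_LIMIT, until_yield = YIELD_EVERY;
    locks_t seen = atomic_load_explicit(lock, memory_order_relaxed);
    locks_t need = want;

    while (need != 0) {
        locks_t can_grab = need & ~seen;        /* needed bits currently free */

        if (can_grab == 0) {
            if (spins-- <= 0) {                 /* spun long enough; block */
                block_on_lock(lock, need);
                return;
            }
            if (--until_yield == 0) {           /* be polite to other threads */
                until_yield = YIELD_EVERY;
                sched_yield();
            }
            seen = atomic_load_explicit(lock, memory_order_relaxed);
        }
        else if (atomic_compare_exchange_weak_explicit(lock, &seen,
                                                       seen | can_grab,
                                                       memory_order_acquire,
                                                       memory_order_relaxed)) {
            seen |= can_grab;                   /* word now includes our bits */
            need &= ~can_grab;                  /* progress: keep the rest */
            spins = SPIN_LIMIT;                 /* and reset the spin budget */
        }
        /* on CAS failure 'seen' was refreshed by the call itself */
    }
}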
Example #3
static BIF_RETTYPE
dirty_test(Process *c_p, Eterm type, Eterm arg1, Eterm arg2, UWord *I)
{
    BIF_RETTYPE ret;
    if (am_scheduler == arg1) {
	ErtsSchedulerData *esdp;
	if (arg2 != am_type) 
	    goto badarg;
	esdp = erts_proc_sched_data(c_p);
	if (!esdp)
            goto scheduler_type_error;
      
        switch (esdp->type) {
        case ERTS_SCHED_NORMAL:
	    ERTS_BIF_PREP_RET(ret, am_normal);
            break;
        case ERTS_SCHED_DIRTY_CPU:
	    ERTS_BIF_PREP_RET(ret, am_dirty_cpu);
            break;
        case ERTS_SCHED_DIRTY_IO:
	    ERTS_BIF_PREP_RET(ret, am_dirty_io);
            break;
        default:
        scheduler_type_error:
	    ERTS_BIF_PREP_RET(ret, am_error);
            break;
        }
    }
    else if (am_error == arg1) {
	switch (arg2) {
	case am_notsup:
	    ERTS_BIF_PREP_ERROR(ret, c_p, EXC_NOTSUP);
	    break;
	case am_undef:
	    ERTS_BIF_PREP_ERROR(ret, c_p, EXC_UNDEF);
	    break;
	case am_badarith:
	    ERTS_BIF_PREP_ERROR(ret, c_p, EXC_BADARITH);
	    break;
	case am_noproc:
	    ERTS_BIF_PREP_ERROR(ret, c_p, EXC_NOPROC);
	    break;
	case am_system_limit:
	    ERTS_BIF_PREP_ERROR(ret, c_p, SYSTEM_LIMIT);
	    break;
	case am_badarg:
	default:
	    goto badarg;
	}
    }
    else if (am_copy == arg1) {
	int i;
	Eterm res;

	for (res = NIL, i = 0; i < 1000; i++) {
	    Eterm *hp, sz;
	    Eterm cpy;
	    /* We do not want this to be optimized,
	       but rather the opposite... */
	    sz = size_object(arg2);
	    hp = HAlloc(c_p, sz);
	    cpy = copy_struct(arg2, sz, &hp, &c_p->off_heap);
	    hp = HAlloc(c_p, 2);
	    res = CONS(hp, cpy, res);
	}

	ERTS_BIF_PREP_RET(ret, res);
    }
    else if (am_send == arg1) {
	dirty_send_message(c_p, arg2, am_ok);
	ERTS_BIF_PREP_RET(ret, am_ok);
    }
    else if (ERTS_IS_ATOM_STR("wait", arg1)) {
	if (!ms_wait(c_p, arg2, type == am_dirty_cpu))
	    goto badarg;
	ERTS_BIF_PREP_RET(ret, am_ok);
    }
    else if (ERTS_IS_ATOM_STR("reschedule", arg1)) {
	/*
	 * Reschedule the operation, decrementing by two each time, until
	 * we reach zero. Switch between scheduler types whenever 'n' is
	 * evenly divisible by 4. If the initial value isn't evenly
	 * divisible by 2, throw a badarg exception. (A standalone sketch
	 * of this walk follows the function.)
	 */
	Eterm next_type;
	Sint n;
	if (!term_to_Sint(arg2, &n) || n < 0)
	    goto badarg;
	if (n == 0)
	    ERTS_BIF_PREP_RET(ret, am_ok);
	else {
	    Eterm argv[3];
	    Eterm eint = erts_make_integer((Uint) (n - 2), c_p);
	    if (n % 4 != 0)
		next_type = type;
	    else {
		switch (type) {
		case am_dirty_cpu: next_type = am_dirty_io; break;
		case am_dirty_io: next_type = am_normal; break;
		case am_normal: next_type = am_dirty_cpu; break;
		default: goto badarg;
		}
	    }
	    switch (next_type) {
	    case am_dirty_io:
		argv[0] = arg1;
		argv[1] = eint;
		ret = erts_schedule_bif(c_p,
					argv,
					I,
					erts_debug_dirty_io_2,
					ERTS_SCHED_DIRTY_IO,
					am_erts_debug,
					am_dirty_io,
					2);
		break;
	    case am_dirty_cpu:
		argv[0] = arg1;
		argv[1] = eint;
		ret = erts_schedule_bif(c_p,
					argv,
					I,
					erts_debug_dirty_cpu_2,
					ERTS_SCHED_DIRTY_CPU,
					am_erts_debug,
					am_dirty_cpu,
					2);
		break;
	    case am_normal:
		argv[0] = am_normal;
		argv[1] = arg1;
		argv[2] = eint;
		ret = erts_schedule_bif(c_p,
					argv,
					I,
					erts_debug_dirty_3,
					ERTS_SCHED_NORMAL,
					am_erts_debug,
					am_dirty,
					3);
		break;
	    default:
		goto badarg;
	    }
	}
    }
    else if (ERTS_IS_ATOM_STR("ready_wait6_done", arg1)) {
	ERTS_DECL_AM(ready);
	ERTS_DECL_AM(done);
	dirty_send_message(c_p, arg2, AM_ready);
	ms_wait(c_p, make_small(6000), 0);
	dirty_send_message(c_p, arg2, AM_done);
	ERTS_BIF_PREP_RET(ret, am_ok);
    }
    else if (ERTS_IS_ATOM_STR("alive_waitexiting", arg1)) {
	Process *real_c_p = erts_proc_shadow2real(c_p);
	Eterm *hp, *hp2;
	Uint sz;
	int i;
	ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
        int dirty_io = esdp->type == ERTS_SCHED_DIRTY_IO;

	if (ERTS_PROC_IS_EXITING(real_c_p))
	    goto badarg;
	dirty_send_message(c_p, arg2, am_alive);

	/* Wait until dead */
	while (!ERTS_PROC_IS_EXITING(real_c_p)) {
            if (dirty_io)
                ms_wait(c_p, make_small(100), 0);
            else
                erts_thr_yield();
        }

	ms_wait(c_p, make_small(1000), 0);

	/* Should still be able to allocate memory */
	hp = HAlloc(c_p, 3); /* Likely on heap */
	sz = 10000;
	hp2 = HAlloc(c_p, sz); /* Likely in heap fragment */
	*hp2 = make_pos_bignum_header(sz);
	for (i = 1; i < sz; i++)
	    hp2[i] = (Eterm) 4711;
	ERTS_BIF_PREP_RET(ret, TUPLE2(hp, am_ok, make_big(hp2)));
    }
    else {
    badarg:
	ERTS_BIF_PREP_ERROR(ret, c_p, BADARG);
    }
    return ret;
}