Example #1
BIF_RETTYPE make_ref_0(BIF_ALIST_0)
{
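    /* Allocate heap for a new reference and let the calling scheduler
     * build it from its pre-generated reference buffer. */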
    BIF_RETTYPE res;
    Eterm* hp;

    ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(BIF_P));

    hp = HAlloc(BIF_P, REF_THING_SIZE);

    res = erts_sched_make_ref_in_buffer(erts_proc_sched_data(BIF_P), hp);

    BIF_RET(res);
}
Example #2
File: beam_bp.c  Project: josevalim/otp
static ERTS_INLINE Uint32
acquire_bp_sched_ix(Process *c_p)
{
    ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
    ASSERT(esdp);
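    /* Dirty schedulers share one extra counter slot guarded by a mutex;
     * normal schedulers index by their own scheduler number. */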
#ifdef ERTS_DIRTY_SCHEDULERS
    if (ERTS_SCHEDULER_IS_DIRTY(esdp)) {
	erts_smp_mtx_lock(&erts_dirty_bp_ix_mtx);
        return (Uint32) erts_no_schedulers;
    }
#endif
    return (Uint32) esdp->no - 1;
}
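The mutex taken above for dirty schedulers has to be released once the breakpoint counters have been updated. A minimal sketch of the matching release step (an assumption about the counterpart in beam_bp.c, not copied verbatim from the file):

static ERTS_INLINE void
release_bp_sched_ix(Uint32 ix)
{
#ifdef ERTS_DIRTY_SCHEDULERS
    /* Only the shared dirty-scheduler slot (index erts_no_schedulers)
     * is protected by the mutex, so only that index unlocks it. */
    if (ix == (Uint32) erts_no_schedulers)
        erts_smp_mtx_unlock(&erts_dirty_bp_ix_mtx);
#endif
}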
Example #3
static ERTS_INLINE Eterm unique_integer_bif(Process *c_p, int positive)
{
    ErtsSchedulerData *esdp;
    Uint64 thr_id, unique;
    Uint hsz;
    Eterm *hp;

    esdp = erts_proc_sched_data(c_p);
    thr_id = (Uint64) esdp->thr_id;
    unique = esdp->unique++;
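    /* First pass (NULL heap) only computes the heap size needed;
       the second pass actually builds the integer term. */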
    bld_unique_integer_term(NULL, &hsz, thr_id, unique, positive);
    hp = hsz ? HAlloc(c_p, hsz) : NULL;
    return bld_unique_integer_term(&hp, NULL, thr_id, unique, positive);
}
Example #4
File: beam_debug.c  Project: c-bik/otp
static int
ms_wait(Process *c_p, Eterm etimeout, int busy)
{
    ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
    ErtsMonotonicTime time, timeout_time;
    Sint64 ms;

    if (!term_to_Sint64(etimeout, &ms))
	return 0;

    time = erts_get_monotonic_time(esdp);

    if (ms < 0)
	timeout_time = time;
    else
	timeout_time = time + ERTS_MSEC_TO_MONOTONIC(ms);

    while (time < timeout_time) {
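	/* Either spin with thread yields (busy) or sleep in the OS
	   until the timeout time is reached. */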
	if (busy)
	    erts_thr_yield();
	else {
	    ErtsMonotonicTime timeout = timeout_time - time;

#ifdef __WIN32__
	    Sleep((DWORD) ERTS_MONOTONIC_TO_MSEC(timeout));
#else
	    {
		ErtsMonotonicTime to = ERTS_MONOTONIC_TO_USEC(timeout);
		struct timeval tv;

		tv.tv_sec = (long) to / (1000*1000);
		tv.tv_usec = (long) to % (1000*1000);

		select(0, NULL, NULL, NULL, &tv);
	    }
#endif
	}

	time = erts_get_monotonic_time(esdp);
    }
    return 1;
}
Example #5
File: beam_debug.c  Project: c-bik/otp
static BIF_RETTYPE
dirty_test(Process *c_p, Eterm type, Eterm arg1, Eterm arg2, UWord *I)
{
    BIF_RETTYPE ret;
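    /* Dispatch on arg1 to exercise scheduler-type queries, error classes,
       copying, message sending, waiting, and rescheduling between
       scheduler types. */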
    if (am_scheduler == arg1) {
	ErtsSchedulerData *esdp;
	if (arg2 != am_type) 
	    goto badarg;
	esdp = erts_proc_sched_data(c_p);
	if (!esdp)
            goto scheduler_type_error;
      
        switch (esdp->type) {
        case ERTS_SCHED_NORMAL:
	    ERTS_BIF_PREP_RET(ret, am_normal);
            break;
        case ERTS_SCHED_DIRTY_CPU:
	    ERTS_BIF_PREP_RET(ret, am_dirty_cpu);
            break;
        case ERTS_SCHED_DIRTY_IO:
	    ERTS_BIF_PREP_RET(ret, am_dirty_io);
            break;
        default:
        scheduler_type_error:
	    ERTS_BIF_PREP_RET(ret, am_error);
            break;
        }
    }
    else if (am_error == arg1) {
	switch (arg2) {
	case am_notsup:
	    ERTS_BIF_PREP_ERROR(ret, c_p, EXC_NOTSUP);
	    break;
	case am_undef:
	    ERTS_BIF_PREP_ERROR(ret, c_p, EXC_UNDEF);
	    break;
	case am_badarith:
	    ERTS_BIF_PREP_ERROR(ret, c_p, EXC_BADARITH);
	    break;
	case am_noproc:
	    ERTS_BIF_PREP_ERROR(ret, c_p, EXC_NOPROC);
	    break;
	case am_system_limit:
	    ERTS_BIF_PREP_ERROR(ret, c_p, SYSTEM_LIMIT);
	    break;
	case am_badarg:
	default:
	    goto badarg;
	}
    }
    else if (am_copy == arg1) {
	int i;
	Eterm res;

	for (res = NIL, i = 0; i < 1000; i++) {
	    Eterm *hp, sz;
	    Eterm cpy;
	    /* We do not want this to be optimized,
	       but rather the opposite... */
	    sz = size_object(arg2);
	    hp = HAlloc(c_p, sz);
	    cpy = copy_struct(arg2, sz, &hp, &c_p->off_heap);
	    hp = HAlloc(c_p, 2);
	    res = CONS(hp, cpy, res);
	}

	ERTS_BIF_PREP_RET(ret, res);
    }
    else if (am_send == arg1) {
	dirty_send_message(c_p, arg2, am_ok);
	ERTS_BIF_PREP_RET(ret, am_ok);
    }
    else if (ERTS_IS_ATOM_STR("wait", arg1)) {
	if (!ms_wait(c_p, arg2, type == am_dirty_cpu))
	    goto badarg;
	ERTS_BIF_PREP_RET(ret, am_ok);
    }
    else if (ERTS_IS_ATOM_STR("reschedule", arg1)) {
	/*
	 * Reschedule the operation, decrementing by two each time, until we
	 * reach zero. Switch scheduler type when 'n' is evenly divisible
	 * by 4. If the initial value wasn't evenly divisible by 2, a badarg
	 * exception is thrown.
	 */
	Eterm next_type;
	Sint n;
	if (!term_to_Sint(arg2, &n) || n < 0)
	    goto badarg;
	if (n == 0)
	    ERTS_BIF_PREP_RET(ret, am_ok);
	else {
	    Eterm argv[3];
	    Eterm eint = erts_make_integer((Uint) (n - 2), c_p);
	    if (n % 4 != 0)
		next_type = type;
	    else {
		switch (type) {
		case am_dirty_cpu: next_type = am_dirty_io; break;
		case am_dirty_io: next_type = am_normal; break;
		case am_normal: next_type = am_dirty_cpu; break;
		default: goto badarg;
		}
	    }
	    switch (next_type) {
	    case am_dirty_io:
		argv[0] = arg1;
		argv[1] = eint;
		ret = erts_schedule_bif(c_p,
					argv,
					I,
					erts_debug_dirty_io_2,
					ERTS_SCHED_DIRTY_IO,
					am_erts_debug,
					am_dirty_io,
					2);
		break;
	    case am_dirty_cpu:
		argv[0] = arg1;
		argv[1] = eint;
		ret = erts_schedule_bif(c_p,
					argv,
					I,
					erts_debug_dirty_cpu_2,
					ERTS_SCHED_DIRTY_CPU,
					am_erts_debug,
					am_dirty_cpu,
					2);
		break;
	    case am_normal:
		argv[0] = am_normal;
		argv[1] = arg1;
		argv[2] = eint;
		ret = erts_schedule_bif(c_p,
					argv,
					I,
					erts_debug_dirty_3,
					ERTS_SCHED_NORMAL,
					am_erts_debug,
					am_dirty,
					3);
		break;
	    default:
		goto badarg;
	    }
	}
    }
    else if (ERTS_IS_ATOM_STR("ready_wait6_done", arg1)) {
	ERTS_DECL_AM(ready);
	ERTS_DECL_AM(done);
	dirty_send_message(c_p, arg2, AM_ready);
	ms_wait(c_p, make_small(6000), 0);
	dirty_send_message(c_p, arg2, AM_done);
	ERTS_BIF_PREP_RET(ret, am_ok);
    }
    else if (ERTS_IS_ATOM_STR("alive_waitexiting", arg1)) {
	Process *real_c_p = erts_proc_shadow2real(c_p);
	Eterm *hp, *hp2;
	Uint sz;
	int i;
	ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
        int dirty_io = esdp->type == ERTS_SCHED_DIRTY_IO;

	if (ERTS_PROC_IS_EXITING(real_c_p))
	    goto badarg;
	dirty_send_message(c_p, arg2, am_alive);

	/* Wait until dead */
	while (!ERTS_PROC_IS_EXITING(real_c_p)) {
            if (dirty_io)
                ms_wait(c_p, make_small(100), 0);
            else
                erts_thr_yield();
        }

	ms_wait(c_p, make_small(1000), 0);

	/* Should still be able to allocate memory */
	hp = HAlloc(c_p, 3); /* Likely on heap */
	sz = 10000;
	hp2 = HAlloc(c_p, sz); /* Likely in heap fragment */
	*hp2 = make_pos_bignum_header(sz);
	for (i = 1; i < sz; i++)
	    hp2[i] = (Eterm) 4711;
	ERTS_BIF_PREP_RET(ret, TUPLE2(hp, am_ok, make_big(hp2)));
    }
    else {
    badarg:
	ERTS_BIF_PREP_ERROR(ret, c_p, BADARG);
    }
    return ret;
}
Example #6
NifExport *
erts_nif_export_schedule(Process *c_p, Process *dirty_shadow_proc,
			 ErtsCodeMFA *mfa, BeamInstr *pc,
			 BeamInstr instr,
			 void *dfunc, void *ifunc,
			 Eterm mod, Eterm func,
			 int argc, const Eterm *argv)
{
    Process *used_proc;
    ErtsSchedulerData *esdp;
    Eterm* reg;
    NifExport* nep;
    int i;

    ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p)
		       & ERTS_PROC_LOCK_MAIN);
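    /* On the first schedule the argument registers, pc, cp and MFA are saved
       in the NifExport so the call can be restored later; subsequent
       schedules only update the arguments and the export entry. */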

    if (dirty_shadow_proc) {
	esdp = erts_get_scheduler_data();
	ASSERT(esdp && ERTS_SCHEDULER_IS_DIRTY(esdp));

	used_proc = dirty_shadow_proc;
    }
    else {
	esdp = erts_proc_sched_data(c_p);
	ASSERT(esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp));

	used_proc = c_p;
	ERTS_VBUMP_ALL_REDS(c_p);
    }

    reg = esdp->x_reg_array;

    if (mfa)
	nep = erts_get_proc_nif_export(c_p, (int) mfa->arity);
    else {
	/* If no mfa, this is not the first schedule... */
	nep = ERTS_PROC_GET_NIF_TRAP_EXPORT(c_p);
	ASSERT(nep && nep->argc >= 0);
    }

    if (nep->argc < 0) {
	/*
	 * First schedule; save things that might
	 * need to be restored...
	 */
	for (i = 0; i < (int) mfa->arity; i++)
	    nep->argv[i] = reg[i];
	nep->pc = pc;
	nep->cp = c_p->cp;
	nep->mfa = mfa;
	nep->current = c_p->current;
	ASSERT(argc >= 0);
	nep->argc = (int) mfa->arity;
	nep->m = NULL;

	ASSERT(!erts_check_nif_export_in_area(c_p,
					      (char *) nep,
					      (sizeof(NifExport)
					       + (sizeof(Eterm)
						  *(nep->argc-1)))));
    }
    /* Copy new arguments into register array if necessary... */
    if (reg != argv) {
	for (i = 0; i < argc; i++)
	    reg[i] = argv[i];
    }
    ASSERT(is_atom(mod) && is_atom(func));
    nep->exp.info.mfa.module = mod;
    nep->exp.info.mfa.function = func;
    nep->exp.info.mfa.arity = (Uint) argc;
    nep->exp.beam[0] = (BeamInstr) instr; /* call_nif || apply_bif */
    nep->exp.beam[1] = (BeamInstr) dfunc;
    nep->func = ifunc;
    used_proc->arity = argc;
    used_proc->freason = TRAP;
    used_proc->i = (BeamInstr*) nep->exp.addressv[0];
    return nep;
}
Example #7
File: beam_bp.c  Project: Duncaen/otp
static ERTS_INLINE ErtsMonotonicTime
get_mtime(Process *c_p)
{
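    /* Monotonic time as seen from the scheduler currently running c_p. */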
    return erts_get_monotonic_time(erts_proc_sched_data(c_p));
}
Example #8
/*
 * This function is responsible for enabling, disabling, resetting and
 * gathering data related to microstate accounting.
 *
 * Managed threads and unmanaged threads are handled differently.
 *   - managed threads get a misc_aux job telling them to switch on msacc
 *   - unmanaged have some fields protected by a mutex that has to be taken
 *     before any values can be updated
 *
 * For performance reasons there is also a global value erts_msacc_enabled
 * that controls the state of all threads. Statistics gathering is only on
 * if erts_msacc_enabled && msacc is true.
 */
Eterm
erts_msacc_request(Process *c_p, int action, Eterm *threads)
{
#ifdef ERTS_ENABLE_MSACC
    ErtsMsAcc *msacc =  ERTS_MSACC_TSD_GET();
    ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
    Eterm ref;
    ErtsMSAccReq *msaccrp;
    Eterm *hp;


#ifdef ERTS_MSACC_ALWAYS_ON
    if (action == ERTS_MSACC_ENABLE || action == ERTS_MSACC_DISABLE)
        return THE_NON_VALUE;
#else
    /* take care of double enable, and double disable here */
    if (msacc && action == ERTS_MSACC_ENABLE) {
        return THE_NON_VALUE;
    } else if (!msacc && action == ERTS_MSACC_DISABLE) {
        return THE_NON_VALUE;
    }
#endif

    ref = erts_make_ref(c_p);

    msaccrp = erts_alloc(ERTS_ALC_T_MSACC, sizeof(ErtsMSAccReq));
    hp = &msaccrp->ref_heap[0];

    msaccrp->action = action;
    msaccrp->proc = c_p;
    msaccrp->ref = STORE_NC(&hp, NULL, ref);
    msaccrp->req_sched = esdp->no;

#ifdef ERTS_SMP
    *threads = erts_no_schedulers;
    *threads += 1; /* aux thread */
#else
    *threads = 1;
#endif

    erts_smp_atomic32_init_nob(&msaccrp->refc,(erts_aint32_t)*threads);

    erts_proc_add_refc(c_p, *threads);

    if (erts_no_schedulers > 1)
	erts_schedule_multi_misc_aux_work(1,
                                          erts_no_schedulers,
                                          reply_msacc,
                                          (void *) msaccrp);
#ifdef ERTS_SMP
    /* aux thread */
    erts_schedule_misc_aux_work(0, reply_msacc, (void *) msaccrp);
#endif

#ifdef USE_THREADS
    /* Manage unmanaged threads */
    switch (action) {
    case ERTS_MSACC_GATHER: {
        Uint unmanaged_count;
        ErtsMsAcc *msacc, **unmanaged;
        int i = 0;

        /* we copy a list of pointers here so that we do not have to hold
           the msacc_mutex while sending messages */
        erts_rwmtx_rlock(&msacc_mutex);
        unmanaged_count = msacc_unmanaged_count;
        unmanaged = erts_alloc(ERTS_ALC_T_MSACC,
                               sizeof(ErtsMsAcc*)*unmanaged_count);

        for (i = 0, msacc = msacc_unmanaged;
             i < unmanaged_count;
             i++, msacc = msacc->next) {
            unmanaged[i] = msacc;
        }
        erts_rwmtx_runlock(&msacc_mutex);

        for (i = 0; i < unmanaged_count; i++) {
            erts_mtx_lock(&unmanaged[i]->mtx);
            if (unmanaged[i]->perf_counter) {
                ErtsSysPerfCounter perf_counter;
                /* if enabled update stats */
                perf_counter = erts_sys_perf_counter();
                unmanaged[i]->perf_counters[unmanaged[i]->state] +=
                    perf_counter - unmanaged[i]->perf_counter;
                unmanaged[i]->perf_counter = perf_counter;
            }
            erts_mtx_unlock(&unmanaged[i]->mtx);
            send_reply(unmanaged[i],msaccrp);
        }
        erts_free(ERTS_ALC_T_MSACC,unmanaged);
        /* We have just sent unmanaged_count messages, so bump no of threads */
        *threads += unmanaged_count;
        break;
    }
    case ERTS_MSACC_RESET: {
        ErtsMsAcc *msacc;
        erts_rwmtx_rlock(&msacc_mutex);
        for (msacc = msacc_unmanaged; msacc != NULL; msacc = msacc->next)
            erts_msacc_reset(msacc);
        erts_rwmtx_runlock(&msacc_mutex);
        break;
    }
    case ERTS_MSACC_ENABLE: {
        erts_rwmtx_rlock(&msacc_mutex);
        for (msacc = msacc_unmanaged; msacc != NULL; msacc = msacc->next) {
            erts_mtx_lock(&msacc->mtx);
            msacc->perf_counter = erts_sys_perf_counter();
            /* we assume the unmanaged thread is sleeping */
            msacc->state = ERTS_MSACC_STATE_SLEEP;
            erts_mtx_unlock(&msacc->mtx);
        }
        erts_rwmtx_runlock(&msacc_mutex);
        break;
    }
    case ERTS_MSACC_DISABLE: {
        ErtsSysPerfCounter perf_counter;
        erts_rwmtx_rlock(&msacc_mutex);
        /* make sure to update stats with latest results */
        for (msacc = msacc_unmanaged; msacc != NULL; msacc = msacc->next) {
            erts_mtx_lock(&msacc->mtx);
            perf_counter = erts_sys_perf_counter();
            msacc->perf_counters[msacc->state] += perf_counter - msacc->perf_counter;
            msacc->perf_counter = 0;
            erts_mtx_unlock(&msacc->mtx);
        }
        erts_rwmtx_runlock(&msacc_mutex);
        break;
    }
    default: { ASSERT(0); }
    }

#endif

    *threads = make_small(*threads);

    reply_msacc((void *) msaccrp);

#ifndef ERTS_MSACC_ALWAYS_ON
    /* enable/disable the global value */
    if (action == ERTS_MSACC_ENABLE) {
        erts_msacc_enabled = 1;
    } else if (action == ERTS_MSACC_DISABLE) {
        erts_msacc_enabled = 0;
    }
#endif

    return ref;
#else
    return THE_NON_VALUE;
#endif
}