/*
 * Test BIF used by the emulator test suites to exercise dirty
 * schedulers.
 *
 * 'type' is the atom naming the scheduler type this BIF was scheduled
 * on (am_normal | am_dirty_cpu | am_dirty_io), 'arg1' selects the
 * sub-test to run, 'arg2' is the sub-test's argument, and 'I' is the
 * instruction pointer needed when rescheduling the BIF.  Returns a
 * prepared BIF result; unknown sub-tests or bad arguments yield a
 * badarg error.
 */
static BIF_RETTYPE dirty_test(Process *c_p, Eterm type, Eterm arg1, Eterm arg2, UWord *I)
{
    BIF_RETTYPE ret;
    if (am_scheduler == arg1) {
        /* Report which scheduler type the process is currently
         * executing on. */
        ErtsSchedulerData *esdp;
        if (arg2 != am_type)
            goto badarg;
        esdp = erts_proc_sched_data(c_p);
        if (!esdp)
            goto scheduler_type_error;
        switch (esdp->type) {
        case ERTS_SCHED_NORMAL:
            ERTS_BIF_PREP_RET(ret, am_normal);
            break;
        case ERTS_SCHED_DIRTY_CPU:
            ERTS_BIF_PREP_RET(ret, am_dirty_cpu);
            break;
        case ERTS_SCHED_DIRTY_IO:
            ERTS_BIF_PREP_RET(ret, am_dirty_io);
            break;
        default:
            /* Fallen through to for an unknown scheduler type, or
             * jumped to when no scheduler data is available. */
        scheduler_type_error:
            ERTS_BIF_PREP_RET(ret, am_error);
            break;
        }
    }
    else if (am_error == arg1) {
        /* Raise the exception class named by arg2. */
        switch (arg2) {
        case am_notsup:
            ERTS_BIF_PREP_ERROR(ret, c_p, EXC_NOTSUP);
            break;
        case am_undef:
            ERTS_BIF_PREP_ERROR(ret, c_p, EXC_UNDEF);
            break;
        case am_badarith:
            ERTS_BIF_PREP_ERROR(ret, c_p, EXC_BADARITH);
            break;
        case am_noproc:
            ERTS_BIF_PREP_ERROR(ret, c_p, EXC_NOPROC);
            break;
        case am_system_limit:
            ERTS_BIF_PREP_ERROR(ret, c_p, SYSTEM_LIMIT);
            break;
        case am_badarg:
        default:
            goto badarg;
        }
    }
    else if (am_copy == arg1) {
        /* Stress the copy machinery: build a 1000-element list where
         * each element is a fresh deep copy of arg2. */
        int i;
        Eterm res;

        for (res = NIL, i = 0; i < 1000; i++) {
            Eterm *hp, sz;
            Eterm cpy;
            /* We do not want this to be optimized,
               but rather the opposite... */
            sz = size_object(arg2);
            hp = HAlloc(c_p, sz);
            cpy = copy_struct(arg2, sz, &hp, &c_p->off_heap);
            hp = HAlloc(c_p, 2);
            res = CONS(hp, cpy, res);
        }

        ERTS_BIF_PREP_RET(ret, res);
    }
    else if (am_send == arg1) {
        /* Send 'ok' to the process identified by arg2 from a dirty
         * context. */
        dirty_send_message(c_p, arg2, am_ok);
        ERTS_BIF_PREP_RET(ret, am_ok);
    }
    else if (ERTS_IS_ATOM_STR("wait", arg1)) {
        /* Busy/sleep wait arg2 milliseconds (busy-wait when scheduled
         * on a dirty CPU scheduler). */
        if (!ms_wait(c_p, arg2, type == am_dirty_cpu))
            goto badarg;
        ERTS_BIF_PREP_RET(ret, am_ok);
    }
    else if (ERTS_IS_ATOM_STR("reschedule", arg1)) {
        /*
         * Reschedule operation after decrement of two until we reach
         * zero. Switch between dirty scheduler types when 'n' is
         * evenly divided by 4. If the initial value wasn't evenly
         * dividable by 2, throw badarg exception.
         */
        Eterm next_type;
        Sint n;
        if (!term_to_Sint(arg2, &n) || n < 0)
            goto badarg;

        if (n == 0)
            ERTS_BIF_PREP_RET(ret, am_ok);
        else {
            Eterm argv[3];
            Eterm eint = erts_make_integer((Uint) (n - 2), c_p);
            if (n % 4 != 0)
                next_type = type;
            else {
                /* Rotate to the next scheduler type:
                 * dirty_cpu -> dirty_io -> normal -> dirty_cpu. */
                switch (type) {
                case am_dirty_cpu:
                    next_type = am_dirty_io;
                    break;
                case am_dirty_io:
                    next_type = am_normal;
                    break;
                case am_normal:
                    next_type = am_dirty_cpu;
                    break;
                default:
                    goto badarg;
                }
            }
            /* Reschedule onto the chosen scheduler type; the normal
             * variant takes an extra leading 'normal' argument. */
            switch (next_type) {
            case am_dirty_io:
                argv[0] = arg1;
                argv[1] = eint;
                ret = erts_schedule_bif(c_p,
                                        argv,
                                        I,
                                        erts_debug_dirty_io_2,
                                        ERTS_SCHED_DIRTY_IO,
                                        am_erts_debug,
                                        am_dirty_io,
                                        2);
                break;
            case am_dirty_cpu:
                argv[0] = arg1;
                argv[1] = eint;
                ret = erts_schedule_bif(c_p,
                                        argv,
                                        I,
                                        erts_debug_dirty_cpu_2,
                                        ERTS_SCHED_DIRTY_CPU,
                                        am_erts_debug,
                                        am_dirty_cpu,
                                        2);
                break;
            case am_normal:
                argv[0] = am_normal;
                argv[1] = arg1;
                argv[2] = eint;
                ret = erts_schedule_bif(c_p,
                                        argv,
                                        I,
                                        erts_debug_dirty_3,
                                        ERTS_SCHED_NORMAL,
                                        am_erts_debug,
                                        am_dirty,
                                        3);
                break;
            default:
                goto badarg;
            }
        }
    }
    else if (ERTS_IS_ATOM_STR("ready_wait6_done", arg1)) {
        /* Signal 'ready' to arg2, sleep 6 seconds, then signal
         * 'done'. */
        ERTS_DECL_AM(ready);
        ERTS_DECL_AM(done);

        dirty_send_message(c_p, arg2, AM_ready);
        ms_wait(c_p, make_small(6000), 0);
        dirty_send_message(c_p, arg2, AM_done);
        ERTS_BIF_PREP_RET(ret, am_ok);
    }
    else if (ERTS_IS_ATOM_STR("alive_waitexiting", arg1)) {
        /* Verify that a dirty BIF keeps running, and can still
         * allocate heap memory, after its process has been killed. */
        Process *real_c_p = erts_proc_shadow2real(c_p);
        Eterm *hp, *hp2;
        Uint sz;
        int i;
        ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
        int dirty_io = esdp->type == ERTS_SCHED_DIRTY_IO;

        if (ERTS_PROC_IS_EXITING(real_c_p))
            goto badarg;
        dirty_send_message(c_p, arg2, am_alive);

        /* Wait until dead */
        while (!ERTS_PROC_IS_EXITING(real_c_p)) {
            if (dirty_io)
                ms_wait(c_p, make_small(100), 0);
            else
                erts_thr_yield();
        }

        ms_wait(c_p, make_small(1000), 0);

        /* Should still be able to allocate memory */
        hp = HAlloc(c_p, 3); /* Likely on heap */
        sz = 10000;
        hp2 = HAlloc(c_p, sz); /* Likely in heap fragment */
        *hp2 = make_pos_bignum_header(sz);
        for (i = 1; i < sz; i++)
            hp2[i] = (Eterm) 4711;

        ERTS_BIF_PREP_RET(ret, TUPLE2(hp, am_ok, make_big(hp2)));
    }
    else {
        /* Shared badarg exit used by all branches above. */
    badarg:
        ERTS_BIF_PREP_ERROR(ret, c_p, BADARG);
    }

    return ret;
}
/*
 * erlang:load_module/2 BIF.
 *
 * Loads the object code contained in binary BIF_ARG_2 as module
 * BIF_ARG_1.  Returns {module, Module} on success or {error, Reason}
 * where Reason is one of badfile, nofile, not_purged, native_code or
 * on_load.  Raises badarg if the module name is not an atom or the
 * binary's bytes cannot be accessed.
 */
Eterm
load_module_2(BIF_ALIST_2)
{
    Eterm  reason;
    Eterm* hp;
    int    i;
    int    sz;
    byte*  code;
    Eterm res;
    byte* temp_alloc = NULL;

    if (is_not_atom(BIF_ARG_1)) {
        /* Shared badarg exit; frees the aligned-bytes copy (a no-op
         * when temp_alloc is still NULL). */
    error:
        erts_free_aligned_binary_bytes(temp_alloc);
        BIF_ERROR(BIF_P, BADARG);
    }
    if ((code = erts_get_aligned_binary_bytes(BIF_ARG_2, &temp_alloc)) == NULL) {
        goto error;
    }
    /* Code loading is done with all other threads blocked from making
     * progress: release our main lock, then block thread progress.
     * The matching unblock/relock is done at 'done' below. */
    erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
    erts_smp_thr_progress_block();

    /* Result tuple is allocated up front so both the success and
     * error paths can fill it in.
     * NOTE(review): HAlloc here runs without the main process lock;
     * presumably safe because thread progress is blocked — confirm. */
    hp = HAlloc(BIF_P, 3);
    sz = binary_size(BIF_ARG_2);

    if ((i = erts_load_module(BIF_P, 0, BIF_P->group_leader, &BIF_ARG_1, code, sz)) < 0) {
        /* Map the loader's negative error codes onto reason atoms. */
        switch (i) {
        case -1: reason = am_badfile; break;
        case -2: reason = am_nofile; break;
        case -3: reason = am_not_purged; break;
        case -4:
            reason = am_atom_put("native_code", sizeof("native_code")-1);
            break;
        case -5:
            {
                /*
                 * The module contains an on_load function. The loader
                 * has loaded the module as usual, except that the
                 * export entries does not point into the module, so it
                 * is not possible to call any code in the module.
                 */
                ERTS_DECL_AM(on_load);
                reason = AM_on_load;
                break;
            }
        default: reason = am_badfile; break;
        }
        res = TUPLE2(hp, am_error, reason);
        goto done;
    }

    set_default_trace_pattern(BIF_ARG_1);
    res = TUPLE2(hp, am_module, BIF_ARG_1);

 done:
    /* Release the aligned-bytes copy, let other threads make progress
     * again and retake our main lock before returning. */
    erts_free_aligned_binary_bytes(temp_alloc);
    erts_smp_thr_progress_unblock();
    erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);

    BIF_RET(res);
}