/* BIF spawn0/1: spawn a new process from fun F.
 * The fun's frozen (free) variables, stored in its "fridge" tuple, are
 * unrolled into the argument list of the spawned call to the fun's
 * module:function.  On success the new pid is delivered via result()
 * and AI_OK is returned; any failure yields A_BADARG.
 */
term_t bif_spawn0_1(term_t F, process_t *ctx)
{
	process_t *proc;
	term_t mod, fun, args = nil;
	term_t cons = nil;	/* running tail cell for lst_add() list building */
	term_t fridge;
	int i, nfree;

	if (!is_fun(F))
		return A_BADARG;

	/* the fridge is the tuple of values frozen into the fun */
	fridge = fun_fridge(F);
	nfree = int_value2(tup_size(fridge));

	/* NOTE(review): the fun's arity must equal the number of frozen
	 * values -- presumably fun_arity() here counts the frozen values,
	 * i.e. the fun takes no additional explicit arguments; confirm
	 * against the fun representation. */
	if (int_value2(fun_arity(F)) != nfree)
		return A_BADARG;

	/* turn the fridge tuple into a proper argument list */
	for (i = 0; i < nfree; i++)
		lst_add(args, cons, tup_elts(fridge)[i], proc_gc_pool(ctx));

	mod = fun_amod(F);
	fun = fun_afun(F);

	proc = proc_spawn(proc_code_base(ctx), proc_atoms(ctx), mod, fun, args);
	if (proc == 0)
		return A_BADARG;

	result(proc_pid(proc, proc_gc_pool(ctx)));
	return AI_OK;
}
/*
 * is_function/2 guard BIF: returns am_true when arg1 is a fun or an
 * export entry of exactly the given arity, am_false otherwise.
 * A badarg error is raised when arg2 is not a non-negative integer.
 */
Eterm erl_is_function(Process* p, Eterm arg1, Eterm arg2)
{
    Sint arity;

    /*
     * Validate argument 2 (the arity); it must be an integer >= 0.
     */
    if (is_small(arg2)) {
	arity = signed_val(arg2);
	if (arity < 0) {
	    BIF_ERROR(p, BADARG);
	}
    } else if (is_big(arg2) && !bignum_header_is_neg(*big_val(arg2))) {
	/* A positive bignum is a legal arity but can never match. */
	arity = -1;
    } else {
	/* Negative bignums and all non-integers are errors. */
	BIF_ERROR(p, BADARG);
    }

    if (is_fun(arg1)) {
	ErlFunThing* funp = (ErlFunThing *) fun_val(arg1);

	BIF_RET(funp->arity == (Uint) arity ? am_true : am_false);
    }
    if (is_export(arg1)) {
	Export* exp = (Export *) (export_val(arg1)[1]);

	BIF_RET(exp->info.mfa.arity == (Uint) arity ? am_true : am_false);
    }
    BIF_RET(am_false);
}
/* NIF API predicate: non-zero iff 'term' is a fun.  This is a purely
 * syntactic tag test, so the environment is not consulted. */
int enif_is_fun(ErlNifEnv* env, ERL_NIF_TERM term)
{
    int res;

    (void) env;	/* unused; kept for the uniform enif_is_* signature */
    res = is_fun(term);
    return res;
}
// Generational copying-GC scavenger.  Examines the term at *tp and,
// depending on which memory node its box lives in, does one of three
// things:
//  - newer generation (a node between 'newest' and hp->gc_spot):
//    recurse into the box to patch any references it holds into the
//    collected node;
//  - the collected node (hp->gc_spot): recreate the term on the heap,
//    update *tp, and leave a "grave" (forwarding record) in the old box;
//  - anything else (older generation / module literal pool): ignore.
void seek_live(term_t *tp, apr_memnode_t *newest, heap_t *hp)
{
	term_t t = *tp;
	apr_memnode_t *node;
	term_box_t *ptr;

	// newest node - the node last generation the term may belong to
	// the node chain starts with the newest and goes to hp->gc_spot

	if (is_immed(t))
		return;		// immediates carry no heap references

	ptr = peel(t);
	node = newest;
	while (node != hp->gc_spot)
	{
		if (node_contains(node, ptr))
		{
			// the term belongs to the newer generation
			// of terms; recurse to find possible references
			// to live terms in hp->gc_spot

			// only tuples, conses, funs (frozen)
			// and binaries (data, parent) contain references

			// order of popularity:
			// cons - tuple - binary - fun
			if (is_cons(t))
			{
				seek_live(&ptr->cons.head, node, hp);
				seek_live(&ptr->cons.tail, node, hp);
			}
			else if (is_tuple(t))
			{
				int i;
				int n = ptr->tuple.size;
				for (i = 0; i < n; i++)
					seek_live(&ptr->tuple.elts[i], node, hp);
			}
			else if (is_binary(t))
			{
				if (ptr->binary.parent != noval)
				{
					term_box_t *parent;
					seek_live(&ptr->binary.parent, node, hp);
					// the parent may have moved: recompute our
					// data pointer from its (possibly new) data
					parent = peel(ptr->binary.parent);
					ptr->binary.data = parent->binary.data + ptr->binary.offset;
				}
			}
			else if (is_fun(t))
			{
				seek_live(&ptr->fun.frozen, node, hp);
			}
			return;
		}
		node = node->next;
	}

	if (node_contains(hp->gc_spot, ptr))
	{
		// the term should be recreated

		// the term may have already been moved
		// and the term value has been replaced with
		// the buried reference to the new location
		if (is_grave(t))
		{
			*tp = ptr->grave.skeleton;
			return;
		}

		// list - tuple - binary - fun - bignum - pid - float
		if (is_list(t))
		{
			term_t cons = heap_cons2(hp, ptr->cons.head, ptr->cons.tail);
			term_box_t *box = peel(cons);
			seek_live(&box->cons.head, hp->gc_spot, hp);
			seek_live(&box->cons.tail, hp->gc_spot, hp);
			*tp = cons;
		}
		else if (is_tuple(t))
		{
			term_t tuple = heap_tuple(hp, ptr->tuple.size);
			term_box_t *box = peel(tuple);
			int i;
			for (i = 0; i < ptr->tuple.size; i++)
			{
				// copy first, then scavenge the copied slot
				box->tuple.elts[i] = ptr->tuple.elts[i];
				seek_live(&box->tuple.elts[i], hp->gc_spot, hp);
			}
			*tp = tuple;
		}
		else if (is_binary(t))
		{
			term_t parent = ptr->binary.parent;
			term_t b;
			if (parent == noval)
				b = heap_binary(hp, ptr->binary.bit_size, ptr->binary.data);
			else
			{
				// shared (sub-)binary: relocate the parent first,
				// then rebase our data pointer on its new data
				apr_byte_t *data;
				seek_live(&parent, hp->gc_spot, hp);
				data = peel(parent)->binary.data + ptr->binary.offset;
				b = heap_binary_shared(hp, ptr->binary.bit_size, data, parent);
			}
			*tp = b;
		}
		else if (is_fun(t))
		{
			term_t f = heap_fun(hp,
				ptr->fun.module,
				ptr->fun.function,
				ptr->fun.arity,
				ptr->fun.uniq,
				ptr->fun.index,
				ptr->fun.frozen);
			seek_live(&peel(f)->fun.frozen, hp->gc_spot, hp);
			*tp = f;
		}
		else if (is_bignum(t))
		{
			mp_int ma = bignum_to_mp(t);
			*tp = heap_bignum(hp, SIGN(&ma), USED(&ma), DIGITS(&ma));
		}
		else if (is_long_id(t))
		{
			*tp = heap_long_id(hp,
				ptr->long_id.node,
				ptr->long_id.serial,
				ptr->long_id.tag_creation);
		}
		else // if (is_float(t))
		{
			assert(is_float(t));
			*tp = heap_float(hp, float_value(t));
		}

		// bury the term: overwrite the old box with a forwarding
		// record so later visitors take the is_grave() fast path
		ptr->grave.cross = MAGIC_CROSS;
		ptr->grave.skeleton = *tp;
		return;
	}
	else
	{
		// the term belong to the older generation or
		// to the literal pool of the module -- ignore
		return;
	}
}
/*
 * Transfer control between BEAM-interpreted and HiPE-compiled native
 * code for process p.  The low byte of cmd encodes the switch reason
 * (call, closure call, throw, return, resume); for calls the high bits
 * carry the arity.  reg[] is the X-register array.  Returns the
 * (possibly rescheduled) process, with the mode-switch result code in
 * p->def_arg_reg[3].
 *
 * NOTE(review): a second, newer definition of hipe_mode_switch appears
 * later in this file -- presumably only one variant is compiled in per
 * OTP release; confirm which one this build uses.
 */
Process *hipe_mode_switch(Process *p, unsigned cmd, Eterm reg[])
{
    unsigned result;
#if NR_ARG_REGS > 5
    /* When NR_ARG_REGS > 5, we need to protect the process' input
       reduction count (which BEAM stores in def_arg_reg[5]) from
       being clobbered by the arch glue code. */
    Eterm reds_in = p->def_arg_reg[5];
#endif
#if NR_ARG_REGS > 4
    Eterm o_reds = p->def_arg_reg[4];
#endif

    p->i = NULL;
    DPRINTF("cmd == %#x (%s)", cmd, code_str(cmd));
    HIPE_CHECK_PCB(p);
    p->arity = 0;
    switch (cmd & 0xFF) {
      case HIPE_MODE_SWITCH_CMD_CALL: {
	  /* BEAM calls a native code function */
	  unsigned arity = cmd >> 8;

	  /* p->hipe.ncallee set in beam_emu */
	  if (p->cp == hipe_beam_pc_return) {
	      /* Native called BEAM, which now tailcalls native. */
	      hipe_pop_beam_trap_frame(p);
	      result = hipe_tailcall_to_native(p, arity, reg);
	      break;
	  }
	  DPRINTF("calling %#lx/%u", (long)p->hipe.ncallee, arity);
	  result = hipe_call_to_native(p, arity, reg);
	  break;
      }
      case HIPE_MODE_SWITCH_CMD_CALL_CLOSURE: {
	  /* BEAM calls a native code closure */
	  unsigned arity = cmd >> 8; /* #formals + #fvs (closure not counted) */
	  Eterm fun;
	  ErlFunThing *funp;

	  /* drop the fvs, move the closure, correct arity */
	  fun = reg[arity];
	  HIPE_ASSERT(is_fun(fun));
	  funp = (ErlFunThing*)fun_val(fun);
	  HIPE_ASSERT(funp->num_free <= arity);
	  arity -= funp->num_free;	/* arity == #formals */
	  reg[arity] = fun;
	  ++arity;	/* correct for having added the closure */
	  /* HIPE_ASSERT(p->hipe.ncallee == (void(*)(void))funp->native_address); */

	  /* just like a normal call from now on */

	  /* p->hipe.ncallee set in beam_emu */
	  if (p->cp == hipe_beam_pc_return) {
	      /* Native called BEAM, which now tailcalls native. */
	      hipe_pop_beam_trap_frame(p);
	      result = hipe_tailcall_to_native(p, arity, reg);
	      break;
	  }
	  DPRINTF("calling %#lx/%u", (long)p->hipe.ncallee, arity);
	  result = hipe_call_to_native(p, arity, reg);
	  break;
      }
      case HIPE_MODE_SWITCH_CMD_THROW: {
	  /* BEAM just executed hipe_beam_pc_throw[] */
	  /* Native called BEAM, which now throws an
	     exception back to native. */
	  DPRINTF("beam throws freason %#lx fvalue %#lx", p->freason, p->fvalue);
	  hipe_pop_beam_trap_frame(p);
      do_throw_to_native:
	  p->def_arg_reg[0] = exception_tag[GET_EXC_CLASS(p->freason)];
	  hipe_find_handler(p);
	  result = hipe_throw_to_native(p);
	  break;
      }
      case HIPE_MODE_SWITCH_CMD_RETURN: {
	  /* BEAM just executed hipe_beam_pc_return[] */
	  /* Native called BEAM, which now returns back to native. */
	  /* pop trap frame off estack */
	  hipe_pop_beam_trap_frame(p);
	  p->def_arg_reg[0] = reg[0];
	  result = hipe_return_to_native(p);
	  break;
      }
    do_resume:
      case HIPE_MODE_SWITCH_CMD_RESUME: {
	  /* BEAM just executed hipe_beam_pc_resume[] */
	  /* BEAM called native, which suspended. */
	  if (p->flags & F_TIMO) {
	      /* XXX: The process will immediately execute 'clear_timeout',
		 repeating these two statements. Remove them? */
	      p->flags &= ~F_TIMO;
	      JOIN_MESSAGE(p);
	      p->def_arg_reg[0] = 0;	/* make_small(0)? */
	  } else
	      p->def_arg_reg[0] = 1;	/* make_small(1)? */
	  result = hipe_return_to_native(p);
	  break;
      }
      default:
	erl_exit(1, "hipe_mode_switch: cmd %#x\r\n", cmd);
    }
 do_return_from_native:
    DPRINTF("result == %#x (%s)", result, code_str(result));
    HIPE_CHECK_PCB(p);
    switch (result) {
      case HIPE_MODE_SWITCH_RES_RETURN: {
	  hipe_return_from_native(p);
	  reg[0] = p->def_arg_reg[0];
	  DPRINTF("returning with r(0) == %#lx", reg[0]);
	  break;
      }
      case HIPE_MODE_SWITCH_RES_THROW: {
	  DPRINTF("native throws freason %#lx fvalue %#lx", p->freason, p->fvalue);
	  hipe_throw_from_native(p);
	  break;
      }
      case HIPE_MODE_SWITCH_RES_TRAP: {
	  /*
	   * Native code called a BIF, which "failed" with a TRAP to BEAM.
	   * Prior to returning, the BIF stored (see BIF_TRAP<N>):
	   * the callee's address in p->def_arg_reg[3]
	   * the callee's parameters in p->def_arg_reg[0..2]
	   * the callee's arity in p->arity (for BEAM gc purposes)
	   *
	   * We need to remove the BIF's parameters from the native
	   * stack: to this end hipe_${ARCH}_glue.S stores the BIF's
	   * arity in p->hipe.narity.
	   */
	  unsigned int i, is_recursive, callee_arity;

	  /* Save p->arity, then update it with the original BIF's arity.
	     Get rid of any stacked parameters in that call. */
	  /* XXX: hipe_call_from_native_is_recursive() copies data to
	     reg[], which is useless in the TRAP case. Maybe write a
	     specialised hipe_trap_from_native_is_recursive() later. */
	  callee_arity = p->arity;
	  p->arity = p->hipe.narity; /* caller's arity */
	  is_recursive = hipe_call_from_native_is_recursive(p, reg);

	  p->i = (Eterm *)(p->def_arg_reg[3]);
	  p->arity = callee_arity;

	  /* move the callee's parameters from def_arg_reg[] to reg[] */
	  for (i = 0; i < p->arity; ++i)
	      reg[i] = p->def_arg_reg[i];

	  if (is_recursive)
	      hipe_push_beam_trap_frame(p, reg, p->arity);

	  result = HIPE_MODE_SWITCH_RES_CALL;
	  break;
      }
      case HIPE_MODE_SWITCH_RES_CALL: {
	  /* Native code calls or tailcalls BEAM.
	   *
	   * p->i is the callee's BEAM code
	   * p->arity is the callee's arity
	   * p->def_arg_reg[] contains the register parameters
	   * p->hipe.nsp[] contains the stacked parameters
	   */
	  if (hipe_call_from_native_is_recursive(p, reg)) {
	      /* BEAM called native, which now calls BEAM */
	      hipe_push_beam_trap_frame(p, reg, p->arity);
	  }
	  break;
      }
      case HIPE_MODE_SWITCH_RES_CALL_CLOSURE: {
	  /* Native code calls or tailcalls a closure in BEAM
	   *
	   * In native code a call to a closure of arity n looks like
	   * F(A1, ..., AN, Closure),
	   * The BEAM code for a closure expects to get:
	   * F(A1, ..., AN, FV1, ..., FVM, Closure)
	   * (Where Ai is argument i and FVj is free variable j)
	   *
	   * p->hipe.closure contains the closure
	   * p->def_arg_reg[] contains the register parameters
	   * p->hipe.nsp[] contains the stacked parameters
	   */
	  ErlFunThing *closure;
	  unsigned num_free, arity, i, is_recursive;

	  HIPE_ASSERT(is_fun(p->hipe.closure));
	  closure = (ErlFunThing*)fun_val(p->hipe.closure);
	  num_free = closure->num_free;
	  arity = closure->fe->arity;

	  /* Store the arity in p->arity for the stack popping. */
	  /* Note: we already have the closure so only need to move arity
	     values to reg[]. However, there are arity+1 parameters in the
	     native code state that need to be removed. */
	  p->arity = arity+1; /* +1 for the closure */

	  /* Get parameters, don't do GC just yet. */
	  is_recursive = hipe_call_from_native_is_recursive(p, reg);

	  if ((Sint)closure->fe->address[-1] < 0) {
	      /* Unloaded. Let beam_emu.c:call_fun() deal with it. */
	      result = HIPE_MODE_SWITCH_RES_CALL_CLOSURE;
	  } else {
	      /* The BEAM code is present. Prepare to call it. */

	      /* Append the free vars after the actual parameters. */
	      for (i = 0; i < num_free; ++i)
		  reg[arity+i] = closure->env[i];

	      /* Update arity to reflect the new parameters. */
	      arity += i;

	      /* Make a call to the closure's BEAM code. */
	      p->i = closure->fe->address;

	      /* Change result code to the faster plain CALL type. */
	      result = HIPE_MODE_SWITCH_RES_CALL;
	  }
	  /* Append the closure as the last parameter. Don't increment arity. */
	  reg[arity] = p->hipe.closure;

	  if (is_recursive) {
	      /* BEAM called native, which now calls BEAM.
		 Need to put a trap-frame on the beam stack.
		 This may cause GC, which is safe now that
		 the arguments, free vars, and most importantly
		 the closure, all are in reg[]. */
	      hipe_push_beam_trap_frame(p, reg, arity+1);
	  }
	  break;
      }
      case HIPE_MODE_SWITCH_RES_SUSPEND: {
	  p->i = hipe_beam_pc_resume;
	  p->arity = 0;
	  erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
	  if (p->status != P_SUSPENDED)
	      erts_add_to_runq(p);
	  erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
	  goto do_schedule;
      }
      case HIPE_MODE_SWITCH_RES_WAIT:
      case HIPE_MODE_SWITCH_RES_WAIT_TIMEOUT: {
	  /* same semantics, different debug trace messages */
#ifdef ERTS_SMP
	  /* XXX: BEAM has different entries for the locked and unlocked
	     cases. HiPE doesn't, so we must check dynamically. */
	  if (p->hipe_smp.have_receive_locks)
	      p->hipe_smp.have_receive_locks = 0;
	  else
	      erts_smp_proc_lock(p, ERTS_PROC_LOCKS_MSG_RECEIVE);
#endif
	  p->i = hipe_beam_pc_resume;
	  p->arity = 0;
	  p->status = P_WAITING;
	  erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_MSG_RECEIVE);
      do_schedule:
	  {
#if !(NR_ARG_REGS > 5)
	      int reds_in = p->def_arg_reg[5];
#endif
	      p = schedule(p, reds_in - p->fcalls);
#ifdef ERTS_SMP
	      p->hipe_smp.have_receive_locks = 0;
	      reg = p->scheduler_data->save_reg;
#endif
	  }
	  {
	      /* copy the argument registers saved at suspension back
		 into the live X-register array */
	      Eterm *argp;
	      int i;

	      argp = p->arg_reg;
	      for (i = p->arity; --i >= 0;)
		  reg[i] = argp[i];
	  }
	  {
#if !(NR_ARG_REGS > 5)
	      Eterm reds_in;
#endif
#if !(NR_ARG_REGS > 4)
	      Eterm o_reds;
#endif
	      reds_in = p->fcalls;
	      o_reds = 0;
	      if (ERTS_PROC_GET_SAVED_CALLS_BUF(p)) {
		  o_reds = reds_in;
		  reds_in = 0;
		  p->fcalls = 0;
	      }
	      p->def_arg_reg[4] = o_reds;
	      p->def_arg_reg[5] = reds_in;
	      if (p->i == hipe_beam_pc_resume) {
		  p->i = NULL;
		  p->arity = 0;
		  goto do_resume;
	      }
	  }
	  HIPE_CHECK_PCB(p);
	  result = HIPE_MODE_SWITCH_RES_CALL;
	  p->def_arg_reg[3] = result;
	  return p;
      }
      case HIPE_MODE_SWITCH_RES_APPLY: {
	  Eterm mfa[3], args;
	  unsigned int arity;
	  void *address;

	  hipe_pop_params(p, 3, &mfa[0]);

	  /* Unroll the arglist onto reg[]. */
	  args = mfa[2];
	  arity = 0;
	  while (is_list(args)) {
	      if (arity < 255) {
		  reg[arity++] = CAR(list_val(args));
		  args = CDR(list_val(args));
	      } else
		  goto do_apply_fail;
	  }
	  if (is_not_nil(args))
	      goto do_apply_fail;

	  /* find a native code entry point for {M,F,A} for a remote call */
	  address = hipe_get_remote_na(mfa[0], mfa[1], arity);
	  if (!address)
	      goto do_apply_fail;
	  p->hipe.ncallee = (void(*)(void)) address;
	  result = hipe_tailcall_to_native(p, arity, reg);
	  goto do_return_from_native;
      do_apply_fail:
	  p->freason = BADARG;
	  goto do_throw_to_native;
      }
      default:
	erl_exit(1, "hipe_mode_switch: result %#x\r\n", result);
    }
    HIPE_CHECK_PCB(p);
    p->def_arg_reg[3] = result;
#if NR_ARG_REGS > 4
    p->def_arg_reg[4] = o_reds;
#endif
#if NR_ARG_REGS > 5
    p->def_arg_reg[5] = reds_in;
#endif
    return p;
}
/*
 * Transfer control between BEAM-interpreted and HiPE-compiled native
 * code for process p (newer variant: per-process F_HIPE_MODE flag,
 * erts_atomic process-state words, "save calls" suspension).  The low
 * byte of cmd encodes the switch reason; for calls the high bits carry
 * the arity.  reg[] is the X-register array.  Returns the (possibly
 * rescheduled) process with the result code in p->def_arg_reg[3].
 *
 * NOTE(review): an older definition with the same name appears earlier
 * in this file -- presumably only one variant is compiled in; confirm.
 */
Process *hipe_mode_switch(Process *p, unsigned cmd, Eterm reg[])
{
    unsigned result;
    Eterm reds_in = p->def_arg_reg[5];
    /*
     * Process is in the normal case scheduled out when reduction
     * count reach zero. When "save calls" is enabled reduction
     * count is subtracted with CONTEXT_REDS, i.e. initial reduction
     * count will be zero or less and process is scheduled out
     * when -CONTEXT_REDS is reached.
     *
     * HiPE does not support the "save calls" feature, so we switch
     * to using a positive reduction counter when executing in
     * hipe mode, but need to restore the "save calls" when
     * returning to beam. We also need to hide the save calls buffer
     * from BIFs. We do that by moving the saved calls buf to
     * suspended saved calls buf.
     *
     * Beam has initial reduction count in stored in p->def_arg_reg[5].
     *
     * Beam expects -neg_o_reds to be found in p->def_arg_reg[4]
     * on return to beam.
     */
    {
	struct saved_calls *scb = ERTS_PROC_SET_SAVED_CALLS_BUF(p, NULL);
	if (scb) {
	    reds_in += CONTEXT_REDS;
	    p->fcalls += CONTEXT_REDS;
	    ERTS_PROC_SET_SUSPENDED_SAVED_CALLS_BUF(p, scb);
	}
    }

    p->flags |= F_HIPE_MODE; /* inform bifs where we are comming from... */

    p->i = NULL;
    /* Set current_function to undefined. stdlib hibernate tests rely on it. */
    p->current = NULL;

    DPRINTF("cmd == %#x (%s)", cmd, code_str(cmd));
    HIPE_CHECK_PCB(p);
    p->arity = 0;
    switch (cmd & 0xFF) {
      case HIPE_MODE_SWITCH_CMD_CALL: {
	  /* BEAM calls a native code function */
	  unsigned arity = cmd >> 8;

	  /* p->hipe.u.ncallee set in beam_emu */
	  if (p->cp == hipe_beam_pc_return) {
	      /* Native called BEAM, which now tailcalls native. */
	      hipe_pop_beam_trap_frame(p);
	      result = hipe_tailcall_to_native(p, arity, reg);
	      break;
	  }
	  DPRINTF("calling %#lx/%u", (long)p->hipe.u.ncallee, arity);
	  result = hipe_call_to_native(p, arity, reg);
	  break;
      }
      case HIPE_MODE_SWITCH_CMD_CALL_CLOSURE: {
	  /* BEAM calls a native code closure */
	  unsigned arity = cmd >> 8; /* #formals + #fvs (closure not counted) */
	  Eterm fun;
	  ErlFunThing *funp;

	  /* drop the fvs, move the closure, correct arity */
	  fun = reg[arity];
	  HIPE_ASSERT(is_fun(fun));
	  funp = (ErlFunThing*)fun_val(fun);
	  HIPE_ASSERT(funp->num_free <= arity);
	  arity -= funp->num_free;	/* arity == #formals */
	  reg[arity] = fun;
	  ++arity;	/* correct for having added the closure */
	  /* HIPE_ASSERT(p->hipe.u.ncallee == (void(*)(void))funp->native_address); */

	  /* just like a normal call from now on */

	  /* p->hipe.u.ncallee set in beam_emu */
	  if (p->cp == hipe_beam_pc_return) {
	      /* Native called BEAM, which now tailcalls native. */
	      hipe_pop_beam_trap_frame(p);
	      result = hipe_tailcall_to_native(p, arity, reg);
	      break;
	  }
	  DPRINTF("calling %#lx/%u", (long)p->hipe.u.ncallee, arity);
	  result = hipe_call_to_native(p, arity, reg);
	  break;
      }
      case HIPE_MODE_SWITCH_CMD_THROW: {
	  /* BEAM just executed hipe_beam_pc_throw[] */
	  /* Native called BEAM, which now throws an
	     exception back to native. */
	  DPRINTF("beam throws freason %#lx fvalue %#lx", p->freason, p->fvalue);
	  hipe_pop_beam_trap_frame(p);
      do_throw_to_native:
	  p->def_arg_reg[0] = exception_tag[GET_EXC_CLASS(p->freason)];
	  hipe_find_handler(p);
	  result = hipe_throw_to_native(p);
	  break;
      }
      case HIPE_MODE_SWITCH_CMD_RETURN: {
	  /* BEAM just executed hipe_beam_pc_return[] */
	  /* Native called BEAM, which now returns back to native. */
	  /* pop trap frame off estack */
	  hipe_pop_beam_trap_frame(p);
	  p->def_arg_reg[0] = reg[0];
	  result = hipe_return_to_native(p);
	  break;
      }
    do_resume:
      case HIPE_MODE_SWITCH_CMD_RESUME: {
	  /* BEAM just executed hipe_beam_pc_resume[] */
	  /* BEAM called native, which suspended. */
	  if (p->flags & F_TIMO) {
	      /* XXX: The process will immediately execute 'clear_timeout',
		 repeating these two statements. Remove them? */
	      p->flags &= ~F_TIMO;
	      JOIN_MESSAGE(p);
	      p->def_arg_reg[0] = 0;	/* make_small(0)? */
	  } else
	      p->def_arg_reg[0] = 1;	/* make_small(1)? */
	  result = hipe_return_to_native(p);
	  break;
      }
      default:
	erts_exit(ERTS_ERROR_EXIT, "hipe_mode_switch: cmd %#x\r\n", cmd);
    }
 do_return_from_native:
    DPRINTF("result == %#x (%s)", result, code_str(result));
    HIPE_CHECK_PCB(p);
    switch (result) {
      case HIPE_MODE_SWITCH_RES_RETURN: {
	  hipe_return_from_native(p);
	  reg[0] = p->def_arg_reg[0];
	  DPRINTF("returning with r(0) == %#lx", reg[0]);
	  break;
      }
      case HIPE_MODE_SWITCH_RES_THROW: {
	  DPRINTF("native throws freason %#lx fvalue %#lx", p->freason, p->fvalue);
	  hipe_throw_from_native(p);
	  break;
      }
      case HIPE_MODE_SWITCH_RES_TRAP: {
	  /*
	   * Native code called a BIF, which "failed" with a TRAP to BEAM.
	   * Prior to returning, the BIF stored (see BIF_TRAP<N>):
	   * the callee's address in p->i
	   * the callee's parameters in reg[0..2]
	   * the callee's arity in p->arity (for BEAM gc purposes)
	   *
	   * We need to remove the BIF's parameters from the native
	   * stack: to this end hipe_${ARCH}_glue.S stores the BIF's
	   * arity in p->hipe.narity.
	   *
	   * If the BIF emptied the stack (typically hibernate), p->hipe.nstack
	   * is NULL and there is no need to get rid of stacked parameters.
	   */
	  unsigned int i, is_recursive = 0;

	  if (p->hipe.nstack != NULL) {
	      ASSERT(p->hipe.nsp != NULL);
	      is_recursive = hipe_trap_from_native_is_recursive(p);
	  }
	  else {
	      /* Some architectures (risc) need this re-reset of nsp as the
	       * BIF wrapper do not detect stack change and causes an obsolete
	       * stack pointer to be saved in p->hipe.nsp before return to us. */
	      p->hipe.nsp = NULL;
	  }

	  /* Schedule next process if current process was hibernated or is
	     waiting for messages */
	  if (p->flags & F_HIBERNATE_SCHED) {
	      p->flags &= ~F_HIBERNATE_SCHED;
	      goto do_schedule;
	  }

	  if (!(erts_atomic32_read_acqb(&p->state) & ERTS_PSFLG_ACTIVE)) {
	      /* process is no longer active: park its arguments and let
		 the scheduler take over */
	      for (i = 0; i < p->arity; ++i)
		  p->arg_reg[i] = reg[i];
	      goto do_schedule;
	  }

	  if (is_recursive)
	      hipe_push_beam_trap_frame(p, reg, p->arity);

	  result = HIPE_MODE_SWITCH_RES_CALL_BEAM;
	  break;
      }
      case HIPE_MODE_SWITCH_RES_CALL_EXPORTED: {
	  /* Native code calls or tailcalls BEAM.
	   *
	   * p->hipe.u.callee_exp is the callee's export entry
	   * p->arity is the callee's arity
	   * p->def_arg_reg[] contains the register parameters
	   * p->hipe.nsp[] contains the stacked parameters
	   */
	  if (hipe_call_from_native_is_recursive(p, reg)) {
	      /* BEAM called native, which now calls BEAM */
	      hipe_push_beam_trap_frame(p, reg, p->arity);
	  }
	  break;
      }
      case HIPE_MODE_SWITCH_RES_CALL_CLOSURE: {
	  /* Native code calls or tailcalls a closure in BEAM
	   *
	   * In native code a call to a closure of arity n looks like
	   * F(A1, ..., AN, Closure),
	   * The BEAM code for a closure expects to get:
	   * F(A1, ..., AN, FV1, ..., FVM, Closure)
	   * (Where Ai is argument i and FVj is free variable j)
	   *
	   * p->hipe.u.closure contains the closure
	   * p->def_arg_reg[] contains the register parameters
	   * p->hipe.nsp[] contains the stacked parameters
	   */
	  ErlFunThing *closure;
	  unsigned num_free, arity, i, is_recursive;

	  HIPE_ASSERT(is_fun(p->hipe.u.closure));
	  closure = (ErlFunThing*)fun_val(p->hipe.u.closure);
	  num_free = closure->num_free;
	  arity = closure->fe->arity;

	  /* Store the arity in p->arity for the stack popping. */
	  /* Note: we already have the closure so only need to move arity
	     values to reg[]. However, there are arity+1 parameters in the
	     native code state that need to be removed. */
	  p->arity = arity+1; /* +1 for the closure */

	  /* Get parameters, don't do GC just yet. */
	  is_recursive = hipe_call_from_native_is_recursive(p, reg);

	  if ((Sint)closure->fe->address[-1] < 0) {
	      /* Unloaded. Let beam_emu.c:call_fun() deal with it. */
	      result = HIPE_MODE_SWITCH_RES_CALL_CLOSURE;
	  } else {
	      /* The BEAM code is present. Prepare to call it. */

	      /* Append the free vars after the actual parameters. */
	      for (i = 0; i < num_free; ++i)
		  reg[arity+i] = closure->env[i];

	      /* Update arity to reflect the new parameters. */
	      arity += i;

	      /* Make a call to the closure's BEAM code. */
	      p->i = closure->fe->address;

	      /* Change result code to the faster plain CALL type. */
	      result = HIPE_MODE_SWITCH_RES_CALL_BEAM;
	  }
	  /* Append the closure as the last parameter. Don't increment arity. */
	  reg[arity] = p->hipe.u.closure;

	  if (is_recursive) {
	      /* BEAM called native, which now calls BEAM.
		 Need to put a trap-frame on the beam stack.
		 This may cause GC, which is safe now that
		 the arguments, free vars, and most importantly
		 the closure, all are in reg[]. */
	      hipe_push_beam_trap_frame(p, reg, arity+1);
	  }
	  break;
      }
      case HIPE_MODE_SWITCH_RES_SUSPEND: {
	  p->i = hipe_beam_pc_resume;
	  p->arity = 0;
	  goto do_schedule;
      }
      case HIPE_MODE_SWITCH_RES_WAIT:
      case HIPE_MODE_SWITCH_RES_WAIT_TIMEOUT: {
	  /* same semantics, different debug trace messages */

	  /* XXX: BEAM has different entries for the locked and unlocked
	     cases. HiPE doesn't, so we must check dynamically. */
	  if (p->flags & F_HIPE_RECV_LOCKED)
	      p->flags &= ~F_HIPE_RECV_LOCKED;
	  else
	      erts_proc_lock(p, ERTS_PROC_LOCKS_MSG_RECEIVE);
	  p->i = hipe_beam_pc_resume;
	  p->arity = 0;
	  if (erts_atomic32_read_nob(&p->state) & ERTS_PSFLG_EXITING)
	      ASSERT(erts_atomic32_read_nob(&p->state) & ERTS_PSFLG_ACTIVE);
	  else if (!(p->flags & F_HIPE_RECV_YIELD))
	      erts_atomic32_read_band_relb(&p->state, ~ERTS_PSFLG_ACTIVE);
	  else {
	      /* Yielded from receive */
	      ERTS_VBUMP_ALL_REDS(p);
	      p->flags &= ~F_HIPE_RECV_YIELD;
	  }
	  erts_proc_unlock(p, ERTS_PROC_LOCKS_MSG_RECEIVE);
      do_schedule:
	  {
	      struct saved_calls *scb;

	      /* restore the "save calls" buffer before handing the
		 process back to the scheduler */
	      scb = ERTS_PROC_SET_SUSPENDED_SAVED_CALLS_BUF(p, NULL);
	      if (scb)
		  ERTS_PROC_SET_SAVED_CALLS_BUF(p, scb);

	      /* The process may have died while it was executing,
		 if so we return out from native code to the interpreter */
	      if (erts_atomic32_read_nob(&p->state) & ERTS_PSFLG_EXITING)
		  p->i = beam_exit;
#ifdef DEBUG
	      ASSERT(p->debug_reds_in == reds_in);
#endif
	      p->flags &= ~F_HIPE_MODE;

	      ERTS_UNREQ_PROC_MAIN_LOCK(p);
	      p = erts_schedule(NULL, p, reds_in - p->fcalls);
	      ERTS_REQ_PROC_MAIN_LOCK(p);
	      ASSERT(!(p->flags & F_HIPE_MODE));
	      p->flags &= ~F_HIPE_RECV_LOCKED;
	      reg = p->scheduler_data->x_reg_array;
	  }
	  {
	      /* copy the argument registers saved at suspension back
		 into the live X-register array */
	      Eterm *argp;
	      int i;

	      argp = p->arg_reg;
	      for (i = p->arity; --i >= 0;)
		  reg[i] = argp[i];
	  }
	  {
	      struct saved_calls *scb;

	      reds_in = p->fcalls;
	      p->def_arg_reg[5] = reds_in;
#ifdef DEBUG
	      p->debug_reds_in = reds_in;
#endif
	      if (p->i == hipe_beam_pc_resume) {
		  p->flags |= F_HIPE_MODE; /* inform bifs where we are comming from... */
		  scb = ERTS_PROC_SET_SAVED_CALLS_BUF(p, NULL);
		  if (scb)
		      ERTS_PROC_SET_SUSPENDED_SAVED_CALLS_BUF(p, scb);
		  p->i = NULL;
		  p->arity = 0;
		  goto do_resume;
	      }

	      scb = ERTS_PROC_GET_SAVED_CALLS_BUF(p);
	      if (!scb)
		  p->def_arg_reg[4] = 0;
	      else {
		  p->def_arg_reg[4] = CONTEXT_REDS;
		  p->fcalls = -CONTEXT_REDS + reds_in;
	      }
	  }
	  HIPE_CHECK_PCB(p);
	  result = HIPE_MODE_SWITCH_RES_CALL_BEAM;
	  p->def_arg_reg[3] = result;
	  return p;
      }
      case HIPE_MODE_SWITCH_RES_APPLY: {
	  Eterm mfa[3], args;
	  unsigned int arity;
	  void *address;

	  hipe_pop_params(p, 3, &mfa[0]);

	  /* Unroll the arglist onto reg[]. */
	  args = mfa[2];
	  arity = 0;
	  while (is_list(args)) {
	      if (arity < 255) {
		  reg[arity++] = CAR(list_val(args));
		  args = CDR(list_val(args));
	      } else
		  goto do_apply_fail;
	  }
	  if (is_not_nil(args))
	      goto do_apply_fail;

	  /* find a native code entry point for {M,F,A} for a remote call */
	  address = hipe_get_remote_na(mfa[0], mfa[1], arity);
	  if (!address)
	      goto do_apply_fail;
	  p->hipe.u.ncallee = (void(*)(void)) address;
	  result = hipe_tailcall_to_native(p, arity, reg);
	  goto do_return_from_native;
      do_apply_fail:
	  p->freason = BADARG;
	  goto do_throw_to_native;
      }
      default:
	erts_exit(ERTS_ERROR_EXIT, "hipe_mode_switch: result %#x\r\n", result);
    }
    {
	/* restore the hidden "save calls" buffer on the way back to beam */
	struct saved_calls *scb = ERTS_PROC_SET_SUSPENDED_SAVED_CALLS_BUF(p, NULL);
	if (!scb)
	    p->def_arg_reg[4] = 0;
	else {
	    p->def_arg_reg[4] = CONTEXT_REDS;
	    p->fcalls -= CONTEXT_REDS;
	    ERTS_PROC_SET_SAVED_CALLS_BUF(p, scb);
	}
    }

    HIPE_CHECK_PCB(p);
    p->def_arg_reg[3] = result;
#if NR_ARG_REGS > 5
    /*
     * When NR_ARG_REGS > 5, we need to protect the process' input
     * reduction count (which BEAM stores in def_arg_reg[5]) from
     * being clobbered by the arch glue code.
     */
    p->def_arg_reg[5] = reds_in;
#endif
    p->flags &= ~F_HIPE_MODE;
    return p;
}
//cons - tuple - binary - fun term_t heap_marshal(term_t t, heap_t *hp) { term_box_t *box; if (is_immed(t)) return t; box = peel(t); if (is_cons(t)) { term_t first = nil; term_t last = nil; do { term_box_t *cb = peel(t); term_t v = heap_marshal(cb->cons.head, hp); cons_up(first, last, v, hp); t = cb->cons.tail; } while (is_cons(t)); if (t != nil) peel(last)->cons.tail = heap_marshal(t, hp); return first; } else if (is_tuple(t)) { int n = box->tuple.size; term_t tuple = heap_tuple(hp, n); term_box_t *tb = peel(tuple); int i; for (i = 0; i < n; i++) tb->tuple.elts[i] = heap_marshal(box->tuple.elts[i], hp); return tuple; } else if (is_binary(t)) { //NB: for shared binaries parent not copied; shared becomes root term_t binary = heap_binary(hp, box->binary.bit_size, box->binary.data); return binary; } else if (is_bignum(t)) { bignum_t *bb = (bignum_t *)peel(t); term_t biggie = heap_bignum(hp, bb->sign, bb->used, bb->dp); return biggie; } else if (is_float(t)) { term_t f = heap_float(hp, float_value(t)); return f; } else if (is_fun(t)) { term_t fun = heap_fun(hp, box->fun.module, box->fun.function, box->fun.arity, box->fun.uniq, box->fun.index, heap_marshal(box->fun.frozen, hp)); return fun; } else // long_id { term_t id; assert(is_long_id(t)); id = heap_long_id(hp, box->long_id.node, box->long_id.serial, box->long_id.tag_creation); return id; } }