/*
 * Attach a chain of heap fragments to a process: hand every fragment's
 * off-heap data over to the process' off-heap list, account for the
 * fragments' sizes, and link the whole chain onto the process' MBUF list.
 */
void erts_link_mbuf_to_proc(Process *proc, ErlHeapFragment *first_bp)
{
    ErlHeapFragment *frag;

    if (!first_bp)
	return;

    for (frag = first_bp; ; frag = frag->next) {
	/* Move any off_heap's into the process */
	if (frag->off_heap.first != NULL) {
	    struct erl_off_heap_header **tail = &frag->off_heap.first;
	    /* Find the end of the fragment's off-heap list... */
	    while (*tail != NULL)
		tail = &((*tail)->next);
	    /* ...splice the process' list after it, then make the
	     * combined list the process' off-heap list. */
	    *tail = MSO(proc).first;
	    MSO(proc).first = frag->off_heap.first;
	    frag->off_heap.first = NULL;
	    OH_OVERHEAD(&(MSO(proc)), frag->off_heap.overhead);
	}
	MBUF_SIZE(proc) += frag->used_size;
	if (frag->next == NULL)
	    break;
    }

    /* 'frag' is now the last fragment; link the chain first on MBUF. */
    frag->next = MBUF(proc);
    MBUF(proc) = first_bp;
}
ErlNifEnv* enif_alloc_env(void) { struct enif_msg_environment_t* msg_env = erts_alloc_fnf(ERTS_ALC_T_NIF, sizeof(struct enif_msg_environment_t)); Eterm* phony_heap = (Eterm*) msg_env; /* dummy non-NULL ptr */ msg_env->env.hp = phony_heap; msg_env->env.hp_end = phony_heap; msg_env->env.heap_frag = NULL; msg_env->env.mod_nif = NULL; msg_env->env.tmp_obj_list = NULL; msg_env->env.proc = &msg_env->phony_proc; memset(&msg_env->phony_proc, 0, sizeof(Process)); HEAP_START(&msg_env->phony_proc) = phony_heap; HEAP_TOP(&msg_env->phony_proc) = phony_heap; HEAP_LIMIT(&msg_env->phony_proc) = phony_heap; HEAP_END(&msg_env->phony_proc) = phony_heap; MBUF(&msg_env->phony_proc) = NULL; msg_env->phony_proc.id = ERTS_INVALID_PID; #ifdef FORCE_HEAP_FRAGS msg_env->phony_proc.space_verified = 0; msg_env->phony_proc.space_verified_from = NULL; #endif return &msg_env->env; }
/*
 * Return non-zero if 'ptr' points into memory belonging to process 'p':
 * the old heap, the young heap (up to 'real_htop' if supplied, else up
 * to HEAP_TOP), any linked heap fragment, or a heap fragment attached
 * to a message still in the queue.
 */
static int
within2(Eterm *ptr, Process *p, Eterm *real_htop)
{
    Eterm *htop = real_htop ? real_htop : HEAP_TOP(p);
    ErlHeapFragment *frag;
    ErlMessage *msg;

    if (OLD_HEAP(p) && OLD_HEAP(p) <= ptr && ptr < OLD_HEND(p))
	return 1;
    if (HEAP_START(p) <= ptr && ptr < htop)
	return 1;

    /* Heap fragments linked directly to the process. */
    for (frag = MBUF(p); frag != NULL; frag = frag->next) {
	if (frag->mem <= ptr && ptr < frag->mem + frag->used_size)
	    return 1;
    }

    /* Fragments still attached to queued messages. */
    for (msg = p->msg.first; msg; msg = msg->next) {
	if (msg->data.attached) {
	    ErlHeapFragment *hfp;
	    if (is_value(ERL_MESSAGE_TERM(msg)))
		hfp = msg->data.heap_frag;
	    else if (is_not_nil(ERL_MESSAGE_TOKEN(msg)))
		hfp = erts_dist_ext_trailer(msg->data.dist_ext);
	    else
		hfp = NULL;
	    if (hfp && hfp->mem <= ptr && ptr < hfp->mem + hfp->used_size)
		return 1;
	}
    }
    return 0;
}
/*
 * Attach a single heap fragment to a process: link it first on the
 * MBUF list, bump the off-heap overhead to provoke a garbage
 * collection, and move all off-heap data (binaries, funs, external
 * things) from the fragment into the corresponding process lists.
 */
static ERTS_INLINE void
link_mbuf_to_proc(Process *proc, ErlHeapFragment *bp)
{
    if (bp == NULL)
	return;

    /* Link the message buffer */
    bp->next = MBUF(proc);
    MBUF(proc) = bp;
    MBUF_SIZE(proc) += bp->size;
    MSO(proc).overhead += proc->heap_sz; /* Force GC */

    /* Move any binaries into the process */
    if (bp->off_heap.mso != NULL) {
	ProcBin **tail = &bp->off_heap.mso;
	while (*tail != NULL)
	    tail = &((*tail)->next);
	*tail = MSO(proc).mso;
	MSO(proc).mso = bp->off_heap.mso;
	bp->off_heap.mso = NULL;
	MSO(proc).overhead += bp->off_heap.overhead;
    }

    /* Move any funs into the process */
#ifndef HYBRID
    if (bp->off_heap.funs != NULL) {
	ErlFunThing **tail = &bp->off_heap.funs;
	while (*tail != NULL)
	    tail = &((*tail)->next);
	*tail = MSO(proc).funs;
	MSO(proc).funs = bp->off_heap.funs;
	bp->off_heap.funs = NULL;
    }
#endif

    /* Move any external things into the process */
    if (bp->off_heap.externals != NULL) {
	ExternalThing **tail = &bp->off_heap.externals;
	while (*tail != NULL)
	    tail = &((*tail)->next);
	*tail = MSO(proc).externals;
	MSO(proc).externals = bp->off_heap.externals;
	bp->off_heap.externals = NULL;
    }
}
/*
 * Reset a message environment created by enif_alloc_env() to its
 * freshly-allocated state: release any heap fragment and off-heap data
 * built in it, free temporary objects, and rewind the heap pointers of
 * the phony process.
 */
void enif_clear_env(ErlNifEnv* env)
{
    struct enif_msg_environment_t* menv = (struct enif_msg_environment_t*)env;
    Process* phony = &menv->phony_proc;

    ASSERT(phony == menv->env.proc);
    ASSERT(phony->id == ERTS_INVALID_PID);
    ASSERT(MBUF(phony) == menv->env.heap_frag);

    if (MBUF(phony)) {
	/* Off-heap terms are released before the fragment memory that
	 * holds their list is freed. */
	erts_cleanup_offheap(&MSO(phony));
	clear_offheap(&MSO(phony));
	free_message_buffer(MBUF(phony));
	MBUF(phony) = NULL;
	menv->env.heap_frag = NULL;
    }

    ASSERT(HEAP_TOP(phony) == HEAP_END(phony));
    menv->env.hp = menv->env.hp_end = HEAP_TOP(phony);

    ASSERT(!is_offheap(&MSO(phony)));
    free_tmp_objs(env);
}
/*
 * Debug check: verify that no holes (untagged words) have been left in
 * the parts of the heap and the heap fragments written since the
 * previous call. Remembers how far it has scanned (last_htop,
 * last_mbuf) so each area is only checked once.
 */
void erts_check_for_holes(Process* p)
{
    ErlHeapFragment* frag;
    Eterm* from;

    if (p->flags & F_DISABLE_GC)
	return;

    /* Only scan the part of the heap written since the last check. */
    from = p->last_htop ? p->last_htop : HEAP_START(p);
    check_memory(from, HEAP_TOP(p));
    p->last_htop = HEAP_TOP(p);

    /* Fragments are pushed on the front of the MBUF list, so stop at
     * the first fragment that was already seen. */
    for (frag = MBUF(p); frag != 0 && frag != p->last_mbuf; frag = frag->next)
	check_memory(frag->mem, frag->mem + frag->used_size);
    p->last_mbuf = MBUF(p);
}
/*
 * erts_check_heap and erts_check_memory will run through the heap
 * silently if everything is ok. If there are strange (untagged) data
 * in the heap or wild pointers, the system will be halted with an
 * error message.
 */
void erts_check_heap(Process *p)
{
    ErlHeapFragment* frag;

    /* Young heap, then the old heap if one exists, then every linked
     * heap fragment. */
    erts_check_memory(p, HEAP_START(p), HEAP_TOP(p));
    if (OLD_HEAP(p) != NULL)
	erts_check_memory(p, OLD_HEAP(p), OLD_HTOP(p));

    for (frag = MBUF(p); frag; frag = frag->next)
	erts_check_memory(p, frag->mem, frag->mem + frag->used_size);
}
/*
 * Attach a heap fragment to a process: link it first on the MBUF list,
 * flag the process for a forced garbage collection, and hand the
 * fragment's off-heap data over to the process' off-heap list.
 */
static ERTS_INLINE void
link_mbuf_to_proc(Process *proc, ErlHeapFragment *bp)
{
    if (bp == NULL)
	return;

    /* Link the message buffer */
    bp->next = MBUF(proc);
    MBUF(proc) = bp;
    MBUF_SIZE(proc) += bp->used_size;
    FLAGS(proc) |= F_FORCE_GC;

    /* Move any off_heap's into the process */
    if (bp->off_heap.first != NULL) {
	struct erl_off_heap_header **tail = &bp->off_heap.first;
	/* Walk to the end of the fragment's off-heap list and splice
	 * the process' list onto it. */
	while (*tail != NULL)
	    tail = &((*tail)->next);
	*tail = MSO(proc).first;
	MSO(proc).first = bp->off_heap.first;
	bp->off_heap.first = NULL;
	OH_OVERHEAD(&(MSO(proc)), bp->off_heap.overhead);
    }
}
/* Restore cached heap pointers to allow alloc_heap again. */
static void cache_env(ErlNifEnv* env)
{
    if (env->heap_frag == NULL) {
	/* Building directly on the process heap: just re-read the
	 * current heap top. */
	ASSERT(env->hp_end == HEAP_LIMIT(env->proc));
	ASSERT(env->hp <= HEAP_TOP(env->proc));
	ASSERT(env->hp <= HEAP_LIMIT(env->proc));
	env->hp = HEAP_TOP(env->proc);
	return;
    }

    /* Building in a heap fragment: the newest fragment may have
     * changed while the pointers were flushed, so re-read it from the
     * process' MBUF list. */
    ASSERT(env->hp_end != HEAP_LIMIT(env->proc));
    ASSERT(env->hp_end - env->hp <= env->heap_frag->alloc_size);
    env->heap_frag = MBUF(env->proc);
    ASSERT(env->heap_frag != NULL);
    env->hp = env->heap_frag->mem + env->heap_frag->used_size;
    env->hp_end = env->heap_frag->mem + env->heap_frag->alloc_size;
}
/*
 * Slow path of heap allocation in a NIF environment: commit how much
 * of the current heap (or fragment) has been used, allocate space for
 * 'need' more words (growing the fragment chain), and point the
 * environment at the new area. Returns the start of the reserved words.
 */
static Eterm* alloc_heap_heavy(ErlNifEnv* env, unsigned need, Eterm* hp)
{
    env->hp = hp;
    if (env->heap_frag == NULL) {
	/* Still on the process heap: commit the heap top. */
	ASSERT(HEAP_LIMIT(env->proc) == env->hp_end);
	HEAP_TOP(env->proc) = env->hp;
    }
    else {
	/* Close the current fragment at its used size. */
	env->heap_frag->used_size = hp - env->heap_frag->mem;
	ASSERT(env->heap_frag->used_size <= env->heap_frag->alloc_size);
    }

    hp = erts_heap_alloc(env->proc, need, MIN_HEAP_FRAG_SZ);

    /* erts_heap_alloc put the words in a (possibly new) fragment;
     * re-read it and set the write window past the reservation. */
    env->heap_frag = MBUF(env->proc);
    env->hp = hp + need;
    env->hp_end = env->heap_frag->mem + env->heap_frag->alloc_size;
    return hp;
}
/*
 * Debug aid: print a map of all memory belonging to process 'p':
 * selected PCB fields, the message queue, the process dictionary,
 * the live argument registers, the stack, the young heap, the old
 * heap (if any) and all linked heap fragments.
 *
 * NOTE(review): relies on file-scope 'dashes' and the PTR_SIZE macro,
 * presumably set up elsewhere in this file -- confirm.
 */
static void print_process_memory(Process *p)
{
    ErlHeapFragment* bp = MBUF(p);

    erts_printf("==============================\n");
    erts_printf("|| Memory info for %T ||\n",p->common.id);
    erts_printf("==============================\n");
    erts_printf("-- %-*s ---%s-%s-%s-%s--\n",
		PTR_SIZE, "PCB", dashes, dashes, dashes, dashes);

    /* Message queue: one line per message (term, seq-trace token). */
    if (p->msg.first != NULL) {
	ErtsMessage* mp;
	erts_printf(" Message Queue:\n");
	mp = p->msg.first;
	while (mp != NULL) {
	    erts_printf("| 0x%0*lx | 0x%0*lx |\n",PTR_SIZE,
			ERL_MESSAGE_TERM(mp),PTR_SIZE,ERL_MESSAGE_TOKEN(mp));
	    mp = mp->next;
	}
    }

    /* Process dictionary, dumped as raw words. */
    if (p->dictionary != NULL) {
	int n = ERTS_PD_SIZE(p->dictionary);
	Eterm *ptr = ERTS_PD_START(p->dictionary);
	erts_printf(" Dictionary: ");
	while (n--)
	    erts_printf("0x%0*lx ",PTR_SIZE,(unsigned long)ptr++);
	erts_printf("\n");
    }

    /* Argument registers live in the current call (arity of them). */
    if (p->arity > 0) {
	int n = p->arity;
	Eterm *ptr = p->arg_reg;
	erts_printf(" Argument Registers: ");
	while (n--)
	    erts_printf("0x%0*lx ",PTR_SIZE,(unsigned long)*ptr++);
	erts_printf("\n");
    }

    erts_printf(" Trace Token: 0x%0*lx\n",PTR_SIZE,p->seq_trace_token);
    erts_printf(" Group Leader: 0x%0*lx\n",PTR_SIZE,p->group_leader);
    erts_printf(" Fvalue: 0x%0*lx\n",PTR_SIZE,p->fvalue);
    erts_printf(" Ftrace: 0x%0*lx\n",PTR_SIZE,p->ftrace);

    /* Stack: printed from STACK_TOP up to STACK_START. */
    erts_printf("+- %-*s -+ 0x%0*lx 0x%0*lx %s-%s-+\n",
		PTR_SIZE, "Stack",
		PTR_SIZE, (unsigned long)STACK_TOP(p),
		PTR_SIZE, (unsigned long)STACK_START(p),
		dashes, dashes);
    print_untagged_memory(STACK_TOP(p),STACK_START(p));

    /* Young heap: start, high-water mark, top and end. */
    erts_printf("+- %-*s -+ 0x%0*lx 0x%0*lx 0x%0*lx 0x%0*lx +\n",
		PTR_SIZE, "Heap",
		PTR_SIZE, (unsigned long)HEAP_START(p),
		PTR_SIZE, (unsigned long)HIGH_WATER(p),
		PTR_SIZE, (unsigned long)HEAP_TOP(p),
		PTR_SIZE, (unsigned long)HEAP_END(p));
    print_untagged_memory(HEAP_START(p),HEAP_TOP(p));

    /* Old (tenured) heap, only present after a full sweep. */
    if (OLD_HEAP(p)) {
	erts_printf("+- %-*s -+ 0x%0*lx 0x%0*lx 0x%0*lx %s-+\n",
		    PTR_SIZE, "Old Heap",
		    PTR_SIZE, (unsigned long)OLD_HEAP(p),
		    PTR_SIZE, (unsigned long)OLD_HTOP(p),
		    PTR_SIZE, (unsigned long)OLD_HEND(p),
		    dashes);
	print_untagged_memory(OLD_HEAP(p),OLD_HTOP(p));
    }

    /* Heap fragments (message buffers) linked to the process. */
    if (bp)
	erts_printf("+- %-*s -+-%s-%s-%s-%s-+\n",
		    PTR_SIZE, "heap fragments",
		    dashes, dashes, dashes, dashes);
    while (bp) {
	print_untagged_memory(bp->mem,bp->mem + bp->used_size);
	bp = bp->next;
    }
}
/*
 * Garbage collect a process.
 *
 * p: Pointer to the process structure.
 * need: Number of Eterm words needed on the heap.
 * objv: Array of terms to add to rootset; that is to preserve.
 * nobj: Number of objects in objv.
 *
 * NOTE(review): this variant does not collect locally; it packages the
 * process state and delegates the collection to the master via
 * erts_master_syscall(SLAVE_SYSCALL_GC, ...), then swaps the updated
 * state back in.
 */
int erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj)
{
    int ret;
    struct slave_syscall_gc *cmd = erts_alloc(ERTS_ALC_T_TMP, sizeof(*cmd));
    Eterm *dram_objv;
    /* Root terms must be readable by the master; copy them into DRAM
     * when they are not already there (presumably objv may point into
     * slave-local memory -- confirm). */
    int copy_objv = nobj != 0 && !epiphany_in_dram(objv);

    if (copy_objv) {
	dram_objv = erts_alloc(ERTS_ALC_T_TMP, nobj*sizeof(Eterm));
	memcpy(dram_objv, objv, nobj*sizeof(Eterm));
    } else {
	dram_objv = objv;
    }

#ifdef DEBUG
    {
	int i;
	/* Immediates need no indirection; any boxed roots must already
	 * reside in DRAM for the master to follow them. */
	for (i = 0; i < nobj; i++)
	    ASSERT(is_immed(objv[i]) || epiphany_in_dram(ptr_val(objv[i])));
    }
#endif

    cmd->need = need;
    cmd->objv = dram_objv;
    cmd->nobj = nobj;
    /* Hand the process state to the master and perform the collection. */
    slave_state_swapout(p, &cmd->state);
    erts_master_syscall(SLAVE_SYSCALL_GC, cmd);

    /* Copy the (possibly rewritten) roots back and drop the DRAM copy. */
    if (copy_objv) {
	memcpy(objv, dram_objv, nobj*sizeof(Eterm));
	erts_free(ERTS_ALC_T_TMP, dram_objv);
    }

    /*
     * The garbage collector will have set mbuf to NULL without freeing it. We
     * do so here. See remove_message_buffers in the master.
     */
    if (MBUF(p) != NULL) {
	free_message_buffer(MBUF(p));
	ASSERT(cmd->state.mbuf == NULL);
    }

    slave_state_swapin(p, &cmd->state);

#ifdef CHECK_FOR_HOLES
    /*
     * We intentionally do not rescan the areas copied by the GC.
     * We trust the GC not to leave any holes.
     */
    p->last_htop = p->htop;
    p->last_mbuf = 0;
#endif

#ifdef DEBUG
    /*
     * The scanning for pointers from the old_heap into the new_heap or
     * heap fragments turned out to be costly, so we remember how far we
     * have scanned this time and will start scanning there next time.
     * (We will not detect wild writes into the old heap, or modifications
     * of the old heap in-between garbage collections.)
     */
    p->last_old_htop = p->old_htop;
#endif

    ret = cmd->ret;
    erts_free(ERTS_ALC_T_TMP, cmd);
    return ret;
}
/*
 * Implementation of enif_send(): deliver 'msg', built in the message
 * environment 'msg_env', to the process identified by 'to_pid'.
 *
 * env is the caller's environment, or NULL when sending from a
 * non-process context (allowed on SMP builds only).
 * Returns 1 on success, 0 if the receiver no longer exists.
 */
int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
	      ErlNifEnv* msg_env, ERL_NIF_TERM msg)
{
    struct enif_msg_environment_t* menv = (struct enif_msg_environment_t*)msg_env;
    ErtsProcLocks rp_locks = 0;
    Process* rp;
    Process* c_p;
    ErlHeapFragment* frags;
#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
    ErtsProcLocks rp_had_locks;
#endif
    Eterm receiver = to_pid->pid;
    int flush_me = 0;

    if (env != NULL) {
	c_p = env->proc;
	if (receiver == c_p->id) {
	    /* Sending to ourselves: we already hold our own main lock,
	     * and our heap pointers must be flushed/cached around the
	     * queueing (needed for ERTS_HOLE_CHECK). */
	    rp_locks = ERTS_PROC_LOCK_MAIN;
	    flush_me = 1;
	}
    }
    else {
#ifdef ERTS_SMP
	c_p = NULL;
#else
	erl_exit(ERTS_ABORT_EXIT,"enif_send: env==NULL on non-SMP VM");
#endif
    }

#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
    rp_had_locks = rp_locks;
#endif

    /* Look up the receiver; takes a reference so it cannot disappear
     * while we queue the message. */
    rp = erts_pid2proc_opt(c_p, ERTS_PROC_LOCK_MAIN,
			   receiver, rp_locks,
			   ERTS_P2P_FLG_SMP_INC_REFC);
    if (rp == NULL) {
	/* c_p is only dereferenced here when env != NULL. */
	ASSERT(env == NULL || receiver != c_p->id);
	return 0;
    }

    /* Seal the message environment and take over its fragment chain. */
    flush_env(msg_env);
    frags = menv->env.heap_frag;
    ASSERT(frags == MBUF(&menv->phony_proc));
    if (frags != NULL) {
	/* Move all offheap's from phony proc to the first fragment.
	   Quick and dirty, but erts_move_msg_mbuf_to_heap doesn't care. */
	ASSERT(!is_offheap(&frags->off_heap));
	frags->off_heap = MSO(&menv->phony_proc);
	clear_offheap(&MSO(&menv->phony_proc));
	menv->env.heap_frag = NULL;
	MBUF(&menv->phony_proc) = NULL;
    }
    ASSERT(!is_offheap(&MSO(&menv->phony_proc)));

    if (flush_me) {
	flush_env(env); /* Needed for ERTS_HOLE_CHECK */
    }
    erts_queue_message(rp, &rp_locks, frags, msg, am_undefined);
    if (rp_locks) {
	/* Release only the locks erts_queue_message added; keep our own
	 * main lock if this was a self-send. */
	ERTS_SMP_LC_ASSERT(rp_locks == (rp_had_locks | (ERTS_PROC_LOCK_MSGQ |
							ERTS_PROC_LOCK_STATUS)));
	erts_smp_proc_unlock(rp, (ERTS_PROC_LOCK_MSGQ | ERTS_PROC_LOCK_STATUS));
    }
    erts_smp_proc_dec_refc(rp);
    if (flush_me) {
	cache_env(env);
    }
    return 1;
}