Example #1
void
erts_link_mbuf_to_proc(struct process *proc, ErlHeapFragment *bp)
{
    Eterm* htop = HEAP_TOP(proc);

    link_mbuf_to_proc(proc, bp);
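    /* Fill any unused space up to HEAP_LIMIT with a dummy bignum
     * header, presumably so heap walkers skip the gap as a single
     * boxed object instead of reading untagged words. */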
    if (htop < HEAP_LIMIT(proc)) {
	*htop = make_pos_bignum_header(HEAP_LIMIT(proc)-htop-1);
	HEAP_TOP(proc) = HEAP_LIMIT(proc);
    }
}
Example #2
static BIF_RETTYPE lists_reverse_onheap(Process *c_p,
                                        Eterm list_in,
                                        Eterm tail_in)
{
    static const Uint CELLS_PER_RED = 60;

    Eterm *alloc_start, *alloc_top, *alloc_end;
    Uint cells_left, max_cells;
    Eterm list, tail;

    list = list_in;
    tail = tail_in;

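    /* Budget: reverse up to CELLS_PER_RED cons cells per remaining
     * reduction, building directly on the heap between HEAP_TOP and
     * HEAP_LIMIT (keeping a two-word margin off the limit). */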
    cells_left = max_cells = CELLS_PER_RED * (1 + ERTS_BIF_REDS_LEFT(c_p));

    ASSERT(HEAP_LIMIT(c_p) >= HEAP_TOP(c_p) + 2);
    alloc_start = HEAP_TOP(c_p);
    alloc_end = HEAP_LIMIT(c_p) - 2;
    alloc_top = alloc_start;

    /* Don't process more cells than we have reductions for. */
    alloc_end = MIN(alloc_top + (cells_left * 2), alloc_end);

    while (alloc_top < alloc_end && is_list(list)) {
        Eterm *pair = list_val(list);

        tail = CONS(alloc_top, CAR(pair), tail);
        list = CDR(pair);

        alloc_top += 2;
    }

    cells_left -= (alloc_top - alloc_start) / 2;
    HEAP_TOP(c_p) = alloc_top;

    ASSERT(cells_left >= 0 && cells_left <= max_cells);
    BUMP_REDS(c_p, (max_cells - cells_left) / CELLS_PER_RED);

    if (is_nil(list)) {
        BIF_RET(tail);
    } else if (is_list(list)) {
        ASSERT(is_list(tail));

        if (cells_left > CELLS_PER_RED) {
            return lists_reverse_alloc(c_p, list, tail);
        }

        BUMP_ALL_REDS(c_p);
        BIF_TRAP2(bif_export[BIF_lists_reverse_2], c_p, list, tail);
    }

    BIF_ERROR(c_p, BADARG);
}
Example #3
/* Flush out our cached heap pointers to allow an ordinary HAlloc. */
static void flush_env(ErlNifEnv* env)
{
    if (env->heap_frag == NULL) {
	ASSERT(env->hp_end == HEAP_LIMIT(env->proc));
	ASSERT(env->hp >= HEAP_TOP(env->proc));
	ASSERT(env->hp <= HEAP_LIMIT(env->proc));	
	HEAP_TOP(env->proc) = env->hp;
    }
    else {
	ASSERT(env->hp_end != HEAP_LIMIT(env->proc));
	ASSERT(env->hp_end - env->hp <= env->heap_frag->alloc_size);
	env->heap_frag->used_size = env->hp - env->heap_frag->mem;
	ASSERT(env->heap_frag->used_size <= env->heap_frag->alloc_size);
    }
}
Example #4
void erts_factory_message_init(ErtsHeapFactory* factory,
			       Process* rp,
			       Eterm* hp,
			       ErlHeapFragment* bp)
{
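    /* Two modes: given a heap fragment 'bp' the factory builds into
     * the fragment; without one it continues at 'hp' on the process
     * heap, up to the current HEAP_TOP. */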
    if (bp) {
	factory->mode     = FACTORY_HEAP_FRAGS;
	factory->p        = NULL;
	factory->hp_start = bp->mem;
	factory->hp       = hp ? hp : bp->mem;
	factory->hp_end   = bp->mem + bp->alloc_size;
	factory->off_heap = &bp->off_heap;
        factory->heap_frags = bp;
        factory->heap_frags_saved = bp;
        factory->alloc_type = ERTS_ALC_T_HEAP_FRAG;
        ASSERT(!bp->next);
    }
    else {
	factory->mode     = FACTORY_HALLOC;
	factory->p        = rp;
	factory->hp_start = hp;
	factory->hp       = hp;
	factory->hp_end   = HEAP_TOP(rp);
	factory->off_heap = &rp->off_heap;
        factory->heap_frags_saved = rp->mbuf;
        factory->heap_frags = NULL; /* not used */
        factory->alloc_type = 0; /* not used */
    }
    factory->off_heap_saved.first    = factory->off_heap->first;
    factory->off_heap_saved.overhead = factory->off_heap->overhead;

    ASSERT(factory->hp >= factory->hp_start && factory->hp <= factory->hp_end);
}
Example #5
void print_memory_info(Process *p)
{
    if (p != NULL) {
        erts_printf("======================================\n");
        erts_printf("|| Memory info for %-12T ||\n",p->common.id);
        erts_printf("======================================\n");
        erts_printf("+- local heap ----%s-%s-%s-%s-+\n",
                    dashes,dashes,dashes,dashes);
        erts_printf("| Young | 0x%0*lx - (0x%0*lx) - 0x%0*lx - 0x%0*lx |\n",
                    PTR_SIZE, (unsigned long)HEAP_START(p),
                    PTR_SIZE, (unsigned long)HIGH_WATER(p),
                    PTR_SIZE, (unsigned long)HEAP_TOP(p),
                    PTR_SIZE, (unsigned long)HEAP_END(p));
        if (OLD_HEAP(p) != NULL)
            erts_printf("| Old   | 0x%0*lx - 0x%0*lx - 0x%0*lx   %*s     |\n",
                        PTR_SIZE, (unsigned long)OLD_HEAP(p),
                        PTR_SIZE, (unsigned long)OLD_HTOP(p),
                        PTR_SIZE, (unsigned long)OLD_HEND(p),
                        PTR_SIZE, "");
    } else {
        erts_printf("=================\n");
        erts_printf("|| Memory info ||\n");
        erts_printf("=================\n");
    }
    erts_printf("+-----------------%s-%s-%s-%s-+\n",dashes,dashes,dashes,dashes);
}
Example #6
static int
within2(Eterm *ptr, Process *p, Eterm *real_htop)
{
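    /* Returns nonzero iff 'ptr' points into this process: the old
     * heap, the young heap below the effective top, a heap fragment,
     * or an attached message's fragment. */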
    ErlHeapFragment* bp = MBUF(p);
    ErlMessage* mp = p->msg.first;
    Eterm *htop = real_htop ? real_htop : HEAP_TOP(p);

    if (OLD_HEAP(p) && (OLD_HEAP(p) <= ptr && ptr < OLD_HEND(p))) {
        return 1;
    }
    if (HEAP_START(p) <= ptr && ptr < htop) {
        return 1;
    }
    while (bp != NULL) {
        if (bp->mem <= ptr && ptr < bp->mem + bp->used_size) {
            return 1;
        }
        bp = bp->next;
    }
    while (mp) {
	if (mp->data.attached) {
	    ErlHeapFragment *hfp;
	    if (is_value(ERL_MESSAGE_TERM(mp)))
		hfp = mp->data.heap_frag;
	    else if (is_not_nil(ERL_MESSAGE_TOKEN(mp)))
		hfp = erts_dist_ext_trailer(mp->data.dist_ext);
	    else
		hfp = NULL;
	    if (hfp && hfp->mem <= ptr && ptr < hfp->mem + hfp->used_size)
		return 1;
	}
        mp = mp->next;
    }
    return 0;
}
Example #7
ErlNifEnv* enif_alloc_env(void)
{
    struct enif_msg_environment_t* msg_env =
	erts_alloc_fnf(ERTS_ALC_T_NIF, sizeof(struct enif_msg_environment_t));
    Eterm* phony_heap = (Eterm*) msg_env; /* dummy non-NULL ptr */
	
    msg_env->env.hp = phony_heap; 
    msg_env->env.hp_end = phony_heap;
    msg_env->env.heap_frag = NULL;
    msg_env->env.mod_nif = NULL;
    msg_env->env.tmp_obj_list = NULL;
    msg_env->env.proc = &msg_env->phony_proc;
    memset(&msg_env->phony_proc, 0, sizeof(Process));
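    /* The phony process carries no real heap: every heap pointer is
     * aimed at the same dummy address, so the first allocation in
     * this environment has to spill into a heap fragment. */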
    HEAP_START(&msg_env->phony_proc) = phony_heap;
    HEAP_TOP(&msg_env->phony_proc) = phony_heap;
    HEAP_LIMIT(&msg_env->phony_proc) = phony_heap;
    HEAP_END(&msg_env->phony_proc) = phony_heap;
    MBUF(&msg_env->phony_proc) = NULL;
    msg_env->phony_proc.id = ERTS_INVALID_PID;
#ifdef FORCE_HEAP_FRAGS
    msg_env->phony_proc.space_verified = 0;
    msg_env->phony_proc.space_verified_from = NULL;
#endif
    return &msg_env->env;
}
Example #8
void erts_post_nif(ErlNifEnv* env)
{
    erts_unblock_fpe(env->fpe_was_unmasked);
    if (env->heap_frag == NULL) {
	ASSERT(env->hp_end == HEAP_LIMIT(env->proc));
	ASSERT(env->hp >= HEAP_TOP(env->proc));
	ASSERT(env->hp <= HEAP_LIMIT(env->proc));	
	HEAP_TOP(env->proc) = env->hp;
    }
    else {
	ASSERT(env->hp_end != HEAP_LIMIT(env->proc));
	ASSERT(env->hp_end - env->hp <= env->heap_frag->alloc_size);
	env->heap_frag->used_size = env->hp - env->heap_frag->mem;
	ASSERT(env->heap_frag->used_size <= env->heap_frag->alloc_size);
    }
    free_tmp_objs(env);
}
Example #9
/* Restore cached heap pointers to allow alloc_heap again. */
static void cache_env(ErlNifEnv* env)
{
    if (env->heap_frag == NULL) {
	ASSERT(env->hp_end == HEAP_LIMIT(env->proc));
	ASSERT(env->hp <= HEAP_TOP(env->proc));
	ASSERT(env->hp <= HEAP_LIMIT(env->proc));	
	env->hp = HEAP_TOP(env->proc);
    }
    else {
	ASSERT(env->hp_end != HEAP_LIMIT(env->proc));
	ASSERT(env->hp_end - env->hp <= env->heap_frag->alloc_size);       
	env->heap_frag = MBUF(env->proc);
	ASSERT(env->heap_frag != NULL);
	env->hp = env->heap_frag->mem + env->heap_frag->used_size;
	env->hp_end = env->heap_frag->mem + env->heap_frag->alloc_size;
    }
}
Example #10
/*
 * This file implements a WF2Q+ scheduler as it has been in dummynet
 * since 2000.
 * The scheduler supports per-flow queues and has O(log N) complexity.
 *
 * WF2Q+ needs to drain entries from the idle heap so that we
 * can keep the sum of weights up to date. We can do it whenever
 * we get a chance, or periodically, or following some other
 * strategy. The function idle_check() drains at most N elements
 * from the idle heap.
 */
static void
idle_check(struct wf2qp_si *si, int n, int force)
{
    struct dn_heap *h = &si->idle_heap;
    while (n-- > 0 && h->elements > 0 &&
		(force || DN_KEY_LT(HEAP_TOP(h)->key, si->V))) {
	struct dn_queue *q = HEAP_TOP(h)->object;
        struct wf2qp_queue *alg_fq = (struct wf2qp_queue *)q;

        heap_extract(h, NULL);
        /* XXX to let the flowset delete the queue we should
	 * mark it as 'unused' by the scheduler.
	 */
        alg_fq->S = alg_fq->F + 1; /* Mark timestamp as invalid. */
        si->wsum -= q->fs->fs.par[0];	/* adjust sum of weights */
	if (si->wsum > 0)
		si->inv_wsum = ONE_FP/si->wsum;
    }
}
Example #11
void erts_pre_nif(ErlNifEnv* env, Process* p, struct erl_module_nif* mod_nif)
{
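    /* Cache the process heap window: NIF-side allocations bump
     * env->hp between HEAP_TOP and HEAP_LIMIT, and erts_post_nif()
     * later flushes env->hp back to HEAP_TOP. */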
    env->mod_nif = mod_nif;
    env->proc = p;
    env->hp = HEAP_TOP(p);
    env->hp_end = HEAP_LIMIT(p);
    env->heap_frag = NULL;
    env->fpe_was_unmasked = erts_block_fpe();
    env->tmp_obj_list = NULL;
}
Example #12
void erts_check_for_holes(Process* p)
{
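    /* Check everything written since the previous call (from
     * last_htop, or HEAP_START on the first call) plus any new heap
     * fragments for untagged words. */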
    ErlHeapFragment* hf;
    Eterm* start;

    if (p->flags & F_DISABLE_GC)
	return;

    start = p->last_htop ? p->last_htop : HEAP_START(p);
    check_memory(start, HEAP_TOP(p));
    p->last_htop = HEAP_TOP(p);

    for (hf = MBUF(p); hf != 0; hf = hf->next) {
	if (hf == p->last_mbuf) {
	    break;
	}
	check_memory(hf->mem, hf->mem+hf->used_size);
    }
    p->last_mbuf = MBUF(p);
}
Example #13
void enif_clear_env(ErlNifEnv* env)
{
    struct enif_msg_environment_t* menv = (struct enif_msg_environment_t*)env;
    Process* p = &menv->phony_proc;
    ASSERT(p == menv->env.proc);
    ASSERT(p->id == ERTS_INVALID_PID);
    ASSERT(MBUF(p) == menv->env.heap_frag);
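    /* Reset the phony process to its initial empty state: release
     * off-heap data and heap fragments, then rewind the cached heap
     * pointers. */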
    if (MBUF(p) != NULL) {
	erts_cleanup_offheap(&MSO(p));
	clear_offheap(&MSO(p));
	free_message_buffer(MBUF(p));
	MBUF(p) = NULL;
	menv->env.heap_frag = NULL;
    }
    ASSERT(HEAP_TOP(p) == HEAP_END(p));
    menv->env.hp = menv->env.hp_end = HEAP_TOP(p);
    
    ASSERT(!is_offheap(&MSO(p)));
    free_tmp_objs(env);
}
Example #14
/*
 * erts_check_heap and erts_check_memory will run through the heap
 * silently if everything is ok.  If there are strange (untagged) data
 * in the heap or wild pointers, the system will be halted with an
 * error message.
 */
void erts_check_heap(Process *p)
{
    ErlHeapFragment* bp = MBUF(p);

    erts_check_memory(p,HEAP_START(p),HEAP_TOP(p));
    if (OLD_HEAP(p) != NULL) {
        erts_check_memory(p,OLD_HEAP(p),OLD_HTOP(p));
    }

    while (bp) {
	erts_check_memory(p,bp->mem,bp->mem + bp->used_size);
        bp = bp->next;
    }
}
Example #15
void erts_factory_proc_init(ErtsHeapFactory* factory, Process* p)
{
    /* This function does not use HAlloc to allocate on the heap,
       because we do not want INIT_HEAP_MEM run on the allocated
       area; that completely destroys the DEBUG emulator's
       performance. */
    ErlHeapFragment *bp = p->mbuf;
    factory->mode     = FACTORY_HALLOC;
    factory->p        = p;
    factory->hp_start = HEAP_TOP(p);
    factory->hp       = factory->hp_start;
    factory->hp_end   = HEAP_LIMIT(p);
    factory->off_heap = &p->off_heap;
    factory->message  = NULL;
    factory->off_heap_saved.first    = p->off_heap.first;
    factory->off_heap_saved.overhead = p->off_heap.overhead;
    factory->heap_frags_saved = bp;
    factory->heap_frags_saved_used = bp ? bp->used_size : 0;
    factory->heap_frags = NULL; /* not used */
    factory->alloc_type = 0; /* not used */

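    /* Hand the whole remaining young heap to the factory: raising
       HEAP_TOP to HEAP_LIMIT keeps other allocations out of this
       window until the factory is closed or undone. */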
    HEAP_TOP(p) = HEAP_LIMIT(p);
}
Example #16
static Eterm* alloc_heap_heavy(ErlNifEnv* env, unsigned need, Eterm* hp)
{    
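    /* Slow path: the cached hp..hp_end window is exhausted. Flush the
     * cached top back to the process heap or current fragment, then
     * allocate 'need' words, which lands in a new heap fragment. */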
    env->hp = hp;
    if (env->heap_frag == NULL) {       
	ASSERT(HEAP_LIMIT(env->proc) == env->hp_end);
	HEAP_TOP(env->proc) = env->hp;	
    }
    else {
	env->heap_frag->used_size = hp - env->heap_frag->mem;
	ASSERT(env->heap_frag->used_size <= env->heap_frag->alloc_size);
    }
    hp = erts_heap_alloc(env->proc, need, MIN_HEAP_FRAG_SZ);
    env->heap_frag = MBUF(env->proc);
    env->hp = hp + need;
    env->hp_end = env->heap_frag->mem + env->heap_frag->alloc_size;

    return hp;
}
Example #17
ErtsMessage *
erts_try_alloc_message_on_heap(Process *pp,
			       erts_aint32_t *psp,
			       ErtsProcLocks *plp,
			       Uint sz,
			       Eterm **hpp,
			       ErlOffHeap **ohpp,
			       int *on_heap_p)
{
    int locked_main = 0;
    ErtsMessage *mp;

    ASSERT(!(*psp & ERTS_PSFLG_OFF_HEAP_MSGQ));

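    /* Fast path: with the main lock held and more than sz words free
     * between HEAP_TOP and HEAP_LIMIT, the message is allocated
     * directly on the process heap; otherwise it goes into a message
     * fragment. */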
    if ((*psp) & ERTS_PSFLGS_VOLATILE_HEAP)
	goto in_message_fragment;
    else if (*plp & ERTS_PROC_LOCK_MAIN) {
    try_on_heap:
	if (((*psp) & ERTS_PSFLGS_VOLATILE_HEAP)
	    || (pp->flags & F_DISABLE_GC)
	    || HEAP_LIMIT(pp) - HEAP_TOP(pp) <= sz) {
	    /*
	     * The heap is either potentially in an inconsistent
	     * state, or not large enough.
	     */
	    if (locked_main) {
		*plp &= ~ERTS_PROC_LOCK_MAIN;
		erts_proc_unlock(pp, ERTS_PROC_LOCK_MAIN);
	    }
	    goto in_message_fragment;
	}

	*hpp = HEAP_TOP(pp);
	HEAP_TOP(pp) = *hpp + sz;
	*ohpp = &MSO(pp);
	mp = erts_alloc_message(0, NULL);
	mp->data.attached = NULL;
	*on_heap_p = !0;
    }
    else if (pp && erts_proc_trylock(pp, ERTS_PROC_LOCK_MAIN) == 0) {
	locked_main = 1;
	*psp = erts_atomic32_read_nob(&pp->state);
	*plp |= ERTS_PROC_LOCK_MAIN;
	goto try_on_heap;
    }
    else {
    in_message_fragment:
	if (!((*psp) & ERTS_PSFLG_ON_HEAP_MSGQ)) {
	    mp = erts_alloc_message(sz, hpp);
	    *ohpp = sz == 0 ? NULL : &mp->hfrag.off_heap;
	}
	else {
	    mp = erts_alloc_message(0, NULL);
	    if (!sz) {
		*hpp = NULL;
		*ohpp = NULL;
	    }
	    else {
		ErlHeapFragment *bp;
		bp = new_message_buffer(sz);
		*hpp = &bp->mem[0];
		mp->data.heap_frag = bp;
		*ohpp = &bp->off_heap;
	    }
	}
	*on_heap_p = 0;
    }

    return mp;
}
Example #18
void erts_factory_undo(ErtsHeapFactory* factory)
{
    ErlHeapFragment* bp;
    struct erl_off_heap_header *hdr, **hdr_nextp;

    switch (factory->mode) {
    case FACTORY_HALLOC:
    case FACTORY_STATIC:
	/* Cleanup off-heap
	 */
	hdr_nextp = NULL;
        for (hdr = factory->off_heap->first;
	     hdr != factory->off_heap_saved.first;
	     hdr = hdr->next) {

	    hdr_nextp = &hdr->next;
        }

        if (hdr_nextp != NULL) {
	    *hdr_nextp = NULL;
	    erts_cleanup_offheap(factory->off_heap);
	    factory->off_heap->first    = factory->off_heap_saved.first;
	    factory->off_heap->overhead = factory->off_heap_saved.overhead;
        }

        if (factory->mode == FACTORY_HALLOC) {
            /* Free heap frags
             */
            bp = factory->p->mbuf;
            if (bp != factory->heap_frags_saved) {
                do {
                    ErlHeapFragment *next_bp = bp->next;
                    ASSERT(bp->off_heap.first == NULL);
                    ERTS_HEAP_FREE(ERTS_ALC_T_HEAP_FRAG, (void *) bp,
                                   ERTS_HEAP_FRAG_SIZE(bp->alloc_size));
                    bp = next_bp;
                } while (bp != factory->heap_frags_saved);

                factory->p->mbuf = bp;
            }

            /* Rollback heap top
	     */

	    if (HEAP_START(factory->p) <= factory->hp_start
		&& factory->hp_start <= HEAP_LIMIT(factory->p)) {
		HEAP_TOP(factory->p) = factory->hp_start;
	    }

	    /* Fix last heap frag */
            if (factory->heap_frags_saved) {
                ASSERT(factory->heap_frags_saved == factory->p->mbuf);
                if (factory->hp_start != factory->heap_frags_saved->mem)
                    factory->heap_frags_saved->used_size = factory->heap_frags_saved_used;
		else {
                    factory->p->mbuf = factory->p->mbuf->next;
                    ERTS_HEAP_FREE(ERTS_ALC_T_HEAP_FRAG, factory->heap_frags_saved,
                                   ERTS_HEAP_FRAG_SIZE(factory->heap_frags_saved->alloc_size));
                }
            }
        }
        break;

    case FACTORY_MESSAGE:
	if (factory->message->data.attached == ERTS_MSG_COMBINED_HFRAG)
	    factory->message->hfrag.next = factory->heap_frags;
	else
	    factory->message->data.heap_frag = factory->heap_frags;
	erts_cleanup_messages(factory->message);
	break;
    case FACTORY_TMP:
    case FACTORY_HEAP_FRAGS:
	erts_cleanup_offheap(factory->off_heap);
	factory->off_heap->first = NULL;

        bp = factory->heap_frags;
        while (bp != NULL) {
            ErlHeapFragment* next_bp = bp->next;

            ASSERT(bp->off_heap.first == NULL);
            ERTS_HEAP_FREE(factory->alloc_type, (void *) bp,
                           ERTS_HEAP_FRAG_SIZE(bp->alloc_size));
            bp = next_bp;
        }
	break;

    case FACTORY_CLOSED: break;
    default:
	ASSERT(!"Invalid factory mode");
    }
    factory->mode = FACTORY_CLOSED;
#ifdef DEBUG
    factory->p = NULL;
    factory->hp = NULL;
    factory->heap_frags = NULL;
#endif
}
Example #19
void erts_factory_proc_init(ErtsHeapFactory* factory,
			    Process* p)
{
    erts_factory_proc_prealloc_init(factory, p, HEAP_LIMIT(p) - HEAP_TOP(p));
}
Example #20
BIF_RETTYPE lists_reverse_2(BIF_ALIST_2)
{
    Eterm list;
    Eterm tmp_list;
    Eterm result;
    Eterm* hp;
    Uint n;
    int max_iter;

    /*
     * Handle legal and illegal non-lists quickly.
     */
    if (is_nil(BIF_ARG_1)) {
	BIF_RET(BIF_ARG_2);
    } else if (is_not_list(BIF_ARG_1)) {
    error:
	BIF_ERROR(BIF_P, BADARG);
    }

    /*
     * First use the rest of the remaining heap space.
     */
    list = BIF_ARG_1;
    result = BIF_ARG_2;
    hp = HEAP_TOP(BIF_P);
    n = HeapWordsLeft(BIF_P) / 2;
    while (n != 0 && is_list(list)) {
	Eterm* pair = list_val(list);
	result = CONS(hp, CAR(pair), result);
	list = CDR(pair);
	hp += 2;
	n--;
    }
    HEAP_TOP(BIF_P) = hp;
    if (is_nil(list)) {
	BIF_RET(result);
    }

    /*
     * Calculate length of remaining list (up to a suitable limit).
     */
    max_iter = CONTEXT_REDS * 40;
    n = 0;
    tmp_list = list;
    while (max_iter-- > 0 && is_list(tmp_list)) {
	tmp_list = CDR(list_val(tmp_list));
	n++;
    }
    if (is_not_nil(tmp_list) && is_not_list(tmp_list)) {
	goto error;
    }

    /*
     * Now do one HAlloc() and continue reversing.
     */
    hp = HAlloc(BIF_P, 2*n);
    while (n != 0 && is_list(list)) {
	Eterm* pair = list_val(list);
	result = CONS(hp, CAR(pair), result);
	list = CDR(pair);
	hp += 2;
	n--;
    }
    if (is_nil(list)) {
	BIF_RET(result);
    } else {
	BUMP_ALL_REDS(BIF_P);
	BIF_TRAP2(bif_export[BIF_lists_reverse_2], BIF_P, list, result);
    }
}
Example #21
void erts_factory_undo(ErtsHeapFactory* factory)
{
    ErlHeapFragment* bp;
    struct erl_off_heap_header *hdr, **hdr_nextp;

    switch (factory->mode) {
    case FACTORY_HALLOC:
    case FACTORY_STATIC:
	/* Cleanup off-heap
	 */
	hdr_nextp = NULL;
        for (hdr = factory->off_heap->first;
	     hdr != factory->off_heap_saved.first;
	     hdr = hdr->next) {

	    hdr_nextp = &hdr->next;
        }

        if (hdr_nextp != NULL) {
	    *hdr_nextp = NULL;
	    erts_cleanup_offheap(factory->off_heap);
	    factory->off_heap->first    = factory->off_heap_saved.first;
	    factory->off_heap->overhead = factory->off_heap_saved.overhead;
        }

        if (factory->mode == FACTORY_HALLOC) {
            /* Free heap frags
             */
            bp = factory->p->mbuf;
            if (bp != factory->heap_frags_saved) {
                do {
                    ErlHeapFragment *next_bp = bp->next;
                    ASSERT(bp->off_heap.first == NULL);
                    ERTS_HEAP_FREE(ERTS_ALC_T_HEAP_FRAG, (void *) bp,
                                   ERTS_HEAP_FRAG_SIZE(bp->alloc_size));
                    bp = next_bp;
                } while (bp != factory->heap_frags_saved);

                factory->p->mbuf = bp;
            }

            /* Rollback heap top
	     */
            if (factory->heap_frags_saved == NULL) { /* No heap frags when we started */
                ASSERT(factory->hp_start >= HEAP_START(factory->p));
                ASSERT(factory->hp_start <= HEAP_LIMIT(factory->p));

                HEAP_TOP(factory->p) = factory->hp_start;
            }
            else {
                ASSERT(factory->heap_frags_saved == factory->p->mbuf);
                if (factory->hp_start == factory->heap_frags_saved->mem) {
                    factory->p->mbuf = factory->p->mbuf->next;
                    ERTS_HEAP_FREE(ERTS_ALC_T_HEAP_FRAG, factory->heap_frags_saved,
                                   ERTS_HEAP_FRAG_SIZE(factory->heap_frags_saved->alloc_size));
                }
                else if (factory->hp_start != factory->hp_end) {
                    unsigned remains = factory->hp_start - factory->heap_frags_saved->mem;
                    ASSERT(remains > 0 && remains < factory->heap_frags_saved->used_size);
                    factory->heap_frags_saved->used_size = remains;
                }
            }
        }
        break;

    case FACTORY_TMP:
    case FACTORY_HEAP_FRAGS:
	erts_cleanup_offheap(factory->off_heap);
	factory->off_heap->first = NULL;

        bp = factory->heap_frags;
        while (bp != NULL) {
            ErlHeapFragment* next_bp = bp->next;

            ASSERT(bp->off_heap.first == NULL);
            ERTS_HEAP_FREE(factory->alloc_type, (void *) bp,
                           ERTS_HEAP_FRAG_SIZE(bp->alloc_size));
            bp = next_bp;
        }
	break;

    case FACTORY_CLOSED: break;
    default:
	ASSERT(!"Invalid factory mode");
    }
    factory->mode = FACTORY_CLOSED;
#ifdef DEBUG
    factory->p = NULL;
    factory->hp = NULL;
    factory->heap_frags = NULL;
#endif
}
Example #22
/* XXX invariant: sch > 0 || V >= min(S in neh) */
static struct mbuf *
wf2qp_dequeue(struct dn_sch_inst *_si)
{
	/* Access scheduler instance private data */
	struct wf2qp_si *si = (struct wf2qp_si *)(_si + 1);
	struct mbuf *m;
	struct dn_queue *q;
	struct dn_heap *sch = &si->sch_heap;
	struct dn_heap *neh = &si->ne_heap;
	struct wf2qp_queue *alg_fq;

	if (sch->elements == 0 && neh->elements == 0) {
		/* we have nothing to do. We could kill the idle heap
		 * altogether and reset V
		 */
		idle_check(si, 0x7fffffff, 1);
		si->V = 0;
		si->wsum = 0;	/* should be set already */
		return NULL;	/* quick return if nothing to do */
	}
	idle_check(si, 1, 0);	/* drain something from the idle heap */

	/* make sure at least one element is eligible, bumping V
	 * and moving entries that have become eligible.
	 * We need to repeat the first part twice, before and
	 * after extracting the candidate, or enqueue() will
	 * find the data structure in a wrong state.
	 */
	m = NULL;
	for (;;) {
	/*
	 * Compute V = max(V, min(S_i)). Remember that all elements
	 * in sch have by definition S_i <= V so if sch is not empty,
	 * V is surely the max and we must not update it. Conversely,
	 * if sch is empty we only need to look at neh.
	 * We don't need to move the queues, as it will be done at the
	 * next enqueue
	 */
	if (sch->elements == 0 && neh->elements > 0) {
		si->V = MAX64(si->V, HEAP_TOP(neh)->key);
	}
	while (neh->elements > 0 &&
		    DN_KEY_LEQ(HEAP_TOP(neh)->key, si->V)) {
		q = HEAP_TOP(neh)->object;
		alg_fq = (struct wf2qp_queue *)q;
		heap_extract(neh, NULL);
		heap_insert(sch, alg_fq->F, q);
	}
	if (m) /* pkt found in previous iteration */
		break;
	/* ok we have at least one eligible pkt */
	q = HEAP_TOP(sch)->object;
	alg_fq = (struct wf2qp_queue *)q;
	m = dn_dequeue(q);
	heap_extract(sch, NULL); /* Remove queue from heap. */
	si->V += (uint64_t)(m->m_pkthdr.len) * si->inv_wsum;
	alg_fq->S = alg_fq->F;  /* Update start time. */
	if (q->mq.head == 0) {	/* not backlogged any more. */
		heap_insert(&si->idle_heap, alg_fq->F, q);
	} else {			/* Still backlogged. */
		/* Update F, store in neh or sch */
		uint64_t len = q->mq.head->m_pkthdr.len;
		alg_fq->F += len * alg_fq->inv_w;
		if (DN_KEY_LEQ(alg_fq->S, si->V)) {
			heap_insert(sch, alg_fq->F, q);
		} else {
			heap_insert(neh, alg_fq->S, q);
		}
	}
	}
	return m;
}
Example #23
static void print_process_memory(Process *p)
{
    ErlHeapFragment* bp = MBUF(p);

    erts_printf("==============================\n");
    erts_printf("|| Memory info for %T ||\n",p->common.id);
    erts_printf("==============================\n");

    erts_printf("-- %-*s ---%s-%s-%s-%s--\n",
                PTR_SIZE, "PCB", dashes, dashes, dashes, dashes);

    if (p->msg.first != NULL) {
        ErtsMessage* mp;
        erts_printf("  Message Queue:\n");
        mp = p->msg.first;
        while (mp != NULL) {
            erts_printf("| 0x%0*lx | 0x%0*lx |\n",PTR_SIZE,
                        ERL_MESSAGE_TERM(mp),PTR_SIZE,ERL_MESSAGE_TOKEN(mp));
            mp = mp->next;
        }
    }

    if (p->dictionary != NULL) {
        int n = ERTS_PD_SIZE(p->dictionary);
        Eterm *ptr = ERTS_PD_START(p->dictionary);
        erts_printf("  Dictionary: ");
        while (n--) erts_printf("0x%0*lx ",PTR_SIZE,(unsigned long)ptr++);
        erts_printf("\n");
    }

    if (p->arity > 0) {
        int n = p->arity;
        Eterm *ptr = p->arg_reg;
        erts_printf("  Argument Registers: ");
        while (n--) erts_printf("0x%0*lx ",PTR_SIZE,(unsigned long)*ptr++);
        erts_printf("\n");
    }

    erts_printf("  Trace Token: 0x%0*lx\n",PTR_SIZE,p->seq_trace_token);
    erts_printf("  Group Leader: 0x%0*lx\n",PTR_SIZE,p->group_leader);
    erts_printf("  Fvalue: 0x%0*lx\n",PTR_SIZE,p->fvalue);
    erts_printf("  Ftrace: 0x%0*lx\n",PTR_SIZE,p->ftrace);

    erts_printf("+- %-*s -+ 0x%0*lx 0x%0*lx %s-%s-+\n",
                PTR_SIZE, "Stack",
                PTR_SIZE, (unsigned long)STACK_TOP(p),
                PTR_SIZE, (unsigned long)STACK_START(p),
                dashes, dashes);
    print_untagged_memory(STACK_TOP(p),STACK_START(p));

    erts_printf("+- %-*s -+ 0x%0*lx 0x%0*lx 0x%0*lx 0x%0*lx +\n",
                PTR_SIZE, "Heap",
                PTR_SIZE, (unsigned long)HEAP_START(p),
                PTR_SIZE, (unsigned long)HIGH_WATER(p),
                PTR_SIZE, (unsigned long)HEAP_TOP(p),
                PTR_SIZE, (unsigned long)HEAP_END(p));
    print_untagged_memory(HEAP_START(p),HEAP_TOP(p));

    if (OLD_HEAP(p)) {
        erts_printf("+- %-*s -+ 0x%0*lx 0x%0*lx 0x%0*lx %s-+\n",
                    PTR_SIZE, "Old Heap",
                    PTR_SIZE, (unsigned long)OLD_HEAP(p),
                    PTR_SIZE, (unsigned long)OLD_HTOP(p),
                    PTR_SIZE, (unsigned long)OLD_HEND(p),
                    dashes);
        print_untagged_memory(OLD_HEAP(p),OLD_HTOP(p));
    }

    if (bp)
        erts_printf("+- %-*s -+-%s-%s-%s-%s-+\n",
                    PTR_SIZE, "heap fragments",
                    dashes, dashes, dashes, dashes);
    while (bp) {
	print_untagged_memory(bp->mem,bp->mem + bp->used_size);
        bp = bp->next;
    }
}
Example #24
static BIF_RETTYPE append(Process* p, Eterm A, Eterm B)
{
    Eterm list;
    Eterm copy;
    Eterm last;
    Eterm* hp = NULL;
    Sint i;

    list = A;

    if (is_nil(list)) {
        BIF_RET(B);
    }

    if (is_not_list(list)) {
        BIF_ERROR(p, BADARG);
    }

    /* optimistic append on heap first */

    if ((i = HeapWordsLeft(p) / 2) < 4) {
        goto list_tail;
    }

    hp   = HEAP_TOP(p);
    copy = last = CONS(hp, CAR(list_val(list)), make_list(hp+2));
    list = CDR(list_val(list));
    hp  += 2;
    i   -= 2; /* don't use the last 2 words (extra i--;) */

    while(i-- && is_list(list)) {
        Eterm* listp = list_val(list);
        last = CONS(hp, CAR(listp), make_list(hp+2));
        list = CDR(listp);
        hp += 2;
    }

    /* A is proper and B is NIL: return A as-is, don't update HTOP */

    if (is_nil(list) && is_nil(B)) {
        BIF_RET(A);
    }

    if (is_nil(list)) {
        HEAP_TOP(p) = hp;
        CDR(list_val(last)) = B;
        BIF_RET(copy);
    }

list_tail:

    if ((i = erts_list_length(list)) < 0) {
        BIF_ERROR(p, BADARG);
    }

    /* remaining list was proper and B is NIL */
    if (is_nil(B)) {
        BIF_RET(A);
    }

    if (hp) {
        /* Note: fall-through case, already written
         * on the heap. The last 2 words of the heap
         * are not written yet.
         */
        Eterm *hp_save = hp;
        ASSERT(i != 0);
        HEAP_TOP(p) = hp + 2;
        if (i == 1) {
            hp[0] = CAR(list_val(list));
            hp[1] = B;
            BIF_RET(copy);
        }
        hp   = HAlloc(p, 2*(i - 1));
        last = CONS(hp_save, CAR(list_val(list)), make_list(hp));
    } else {
        hp   = HAlloc(p, 2*i);
        copy = last = CONS(hp, CAR(list_val(list)), make_list(hp+2));
        hp  += 2;
    }

    list = CDR(list_val(list));
    i--;

    ASSERT(i > -1);
    while(i--) {
        Eterm* listp = list_val(list);
        last = CONS(hp, CAR(listp), make_list(hp+2));
        list = CDR(listp);
        hp  += 2;
    }

    CDR(list_val(last)) = B;
    BIF_RET(copy);
}
Example #25
/*
 * The timer handler for dummynet. Time is computed in ticks, but
 * the code is tolerant to the actual rate at which this is called.
 * Once complete, the function reschedules itself for the next tick.
 */
void
dummynet_task(void *context, int pending)
{
	struct timeval t;
	struct mq q = { NULL, NULL }; /* queue to accumulate results */

	CURVNET_SET((struct vnet *)context);

	DN_BH_WLOCK();

	/* Update number of lost (coalesced) ticks. */
	tick_lost += pending - 1;

	getmicrouptime(&t);
	/* Last tick duration (usec). */
	tick_last = (t.tv_sec - dn_cfg.prev_t.tv_sec) * 1000000 +
	    (t.tv_usec - dn_cfg.prev_t.tv_usec);
	/* Last tick vs standard tick difference (usec). */
	tick_delta = (tick_last * hz - 1000000) / hz;
	/* Accumulated tick difference (usec). */
	tick_delta_sum += tick_delta;

	dn_cfg.prev_t = t;

	/*
	 * Adjust curr_time if the accumulated tick difference is
	 * greater than the 'standard' tick. Since curr_time should
	 * be monotonically increasing, we do positive adjustments
	 * as required, and throttle curr_time in case of negative
	 * adjustment.
	 */
	dn_cfg.curr_time++;
	if (tick_delta_sum - tick >= 0) {
		int diff = tick_delta_sum / tick;

		dn_cfg.curr_time += diff;
		tick_diff += diff;
		tick_delta_sum %= tick;
		tick_adjustment++;
	} else if (tick_delta_sum + tick <= 0) {
		dn_cfg.curr_time--;
		tick_diff--;
		tick_delta_sum += tick;
		tick_adjustment++;
	}

	/* serve pending events, accumulate in q */
	for (;;) {
		struct dn_id *p;    /* generic parameter to handler */

		if (dn_cfg.evheap.elements == 0 ||
		    DN_KEY_LT(dn_cfg.curr_time, HEAP_TOP(&dn_cfg.evheap)->key))
			break;
		p = HEAP_TOP(&dn_cfg.evheap)->object;
		heap_extract(&dn_cfg.evheap, NULL);

		if (p->type == DN_SCH_I) {
			serve_sched(&q, (struct dn_sch_inst *)p, dn_cfg.curr_time);
		} else { /* extracted a delay line */
			transmit_event(&q, (struct delay_line *)p, dn_cfg.curr_time);
		}
	}
	if (dn_cfg.expire && ++dn_cfg.expire_cycle >= dn_cfg.expire) {
		dn_cfg.expire_cycle = 0;
		dn_drain_scheduler();
		dn_drain_queue();
	}

	DN_BH_WUNLOCK();
	dn_reschedule();
	if (q.head != NULL)
		dummynet_send(q.head);
	CURVNET_RESTORE();
}