Example #1
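/* Release the off-heap data referenced by the heap fragment, then free the
 * fragment itself. Only the fragment passed in is freed here.
 */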
void
free_message_buffer(ErlHeapFragment* bp)
{
    erts_cleanup_offheap(&bp->off_heap);
    ERTS_HEAP_FREE(ERTS_ALC_T_HEAP_FRAG,
		   (void *) bp,
		   (sizeof(ErlHeapFragment)
		    - sizeof(Eterm)
		    + bp->size*sizeof(Eterm)));
}
Example #2
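/* Walk the chain of heap fragments: for each fragment, release its off-heap
 * data and free the fragment, continuing until the end of the list.
 */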
void
free_message_buffer(ErlHeapFragment* bp)
{
    ASSERT(bp != NULL);
    do {
	ErlHeapFragment* next_bp = bp->next;

	erts_cleanup_offheap(&bp->off_heap);
	ERTS_HEAP_FREE(ERTS_ALC_T_HEAP_FRAG, (void *) bp,
		       ERTS_HEAP_FRAG_SIZE(bp->alloc_size));	
	bp = next_bp;
    } while (bp != NULL);
}
Example #3
File: erl_init.c Project: Luter/otp
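/* Exit-time cleanup. Only the first thread to get here proceeds (any later
 * caller blocks indefinitely); the actual cleanup is skipped for abnormal
 * exits, unfinished initialization, or a non-main-thread exit, and ends by
 * flushing the async threads.
 */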
static void
system_cleanup(int flush_async)
{
    /*
     * Make sure only one thread exits the runtime system.
     */
    if (erts_atomic_inc_read_nob(&exiting) != 1) {
	/*
	 * Another thread is currently exiting the system;
	 * wait for it to do its job.
	 */
#ifdef ERTS_SMP
	if (erts_thr_progress_is_managed_thread()) {
	    /*
	     * The exiting thread might be waiting for
	     * us to block; need to update status...
	     */
	    erts_thr_progress_active(NULL, 0);
	    erts_thr_progress_prepare_wait(NULL);
	}
#endif
	/* Wait forever... */
	while (1)
	    erts_milli_sleep(10000000);
    }

    /* No cleanup wanted if ...
     * 1. we are about to do an abnormal exit,
     * 2. we haven't finished initializing, or
     * 3. a thread other than the main thread is performing the exit
     *    (in the threaded non-SMP case).
     */

    if (!flush_async
	|| !erts_initialized
#if defined(USE_THREADS) && !defined(ERTS_SMP)
	|| !erts_equal_tids(main_thread, erts_thr_self())
#endif
	)
	return;

#ifdef ERTS_SMP
#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_lc_check_exact(NULL, 0);
#endif
#endif

#ifdef HYBRID
    if (ma_src_stack) erts_free(ERTS_ALC_T_OBJECT_STACK,
                                (void *)ma_src_stack);
    if (ma_dst_stack) erts_free(ERTS_ALC_T_OBJECT_STACK,
                                (void *)ma_dst_stack);
    if (ma_offset_stack) erts_free(ERTS_ALC_T_OBJECT_STACK,
                                   (void *)ma_offset_stack);
    ma_src_stack = NULL;
    ma_dst_stack = NULL;
    ma_offset_stack = NULL;
    erts_cleanup_offheap(&erts_global_offheap);
#endif

#if defined(HYBRID) && !defined(INCREMENTAL)
    if (global_heap) {
	ERTS_HEAP_FREE(ERTS_ALC_T_HEAP,
		       (void*) global_heap,
		       sizeof(Eterm) * global_heap_sz);
    }
    global_heap = NULL;
#endif

#ifdef INCREMENTAL
    erts_cleanup_incgc();
#endif

    erts_exit_flush_async();
}
Example #4
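/* Undo everything built through the factory since it was opened: depending
 * on the factory mode, off-heap data added in the meantime is unlinked and
 * released, heap fragments are freed, the heap top is rolled back, and the
 * factory is finally marked as closed.
 */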
void erts_factory_undo(ErtsHeapFactory* factory)
{
    ErlHeapFragment* bp;
    struct erl_off_heap_header *hdr, **hdr_nextp;

    switch (factory->mode) {
    case FACTORY_HALLOC:
    case FACTORY_STATIC:
	/* Cleanup off-heap
	 */
	hdr_nextp = NULL;
        for (hdr = factory->off_heap->first;
	     hdr != factory->off_heap_saved.first;
	     hdr = hdr->next) {

	    hdr_nextp = &hdr->next;
        }

        if (hdr_nextp != NULL) {
	    *hdr_nextp = NULL;
	    erts_cleanup_offheap(factory->off_heap);
	    factory->off_heap->first    = factory->off_heap_saved.first;
	    factory->off_heap->overhead = factory->off_heap_saved.overhead;
        }

        if (factory->mode == FACTORY_HALLOC) {
            /* Free heap frags
             */
            bp = factory->p->mbuf;
            if (bp != factory->heap_frags_saved) {
                do {
                    ErlHeapFragment *next_bp = bp->next;
                    ASSERT(bp->off_heap.first == NULL);
                    ERTS_HEAP_FREE(ERTS_ALC_T_HEAP_FRAG, (void *) bp,
                                   ERTS_HEAP_FRAG_SIZE(bp->alloc_size));
                    bp = next_bp;
                } while (bp != factory->heap_frags_saved);

                factory->p->mbuf = bp;
            }

            /* Rollback heap top
	     */

	    if (HEAP_START(factory->p) <= factory->hp_start
		&& factory->hp_start <= HEAP_LIMIT(factory->p)) {
		HEAP_TOP(factory->p) = factory->hp_start;
	    }

	    /* Fix last heap frag */
            if (factory->heap_frags_saved) {
                ASSERT(factory->heap_frags_saved == factory->p->mbuf);
                if (factory->hp_start != factory->heap_frags_saved->mem)
                    factory->heap_frags_saved->used_size = factory->heap_frags_saved_used;
		else {
                    factory->p->mbuf = factory->p->mbuf->next;
                    ERTS_HEAP_FREE(ERTS_ALC_T_HEAP_FRAG, factory->heap_frags_saved,
                                   ERTS_HEAP_FRAG_SIZE(factory->heap_frags_saved->alloc_size));
                }
            }
        }
        break;

    case FACTORY_MESSAGE:
	if (factory->message->data.attached == ERTS_MSG_COMBINED_HFRAG)
	    factory->message->hfrag.next = factory->heap_frags;
	else
	    factory->message->data.heap_frag = factory->heap_frags;
	erts_cleanup_messages(factory->message);
	break;
    case FACTORY_TMP:
    case FACTORY_HEAP_FRAGS:
	erts_cleanup_offheap(factory->off_heap);
	factory->off_heap->first = NULL;

        bp = factory->heap_frags;
        while (bp != NULL) {
            ErlHeapFragment* next_bp = bp->next;

            ASSERT(bp->off_heap.first == NULL);
            ERTS_HEAP_FREE(factory->alloc_type, (void *) bp,
                           ERTS_HEAP_FRAG_SIZE(bp->alloc_size));
            bp = next_bp;
        }
	break;

    case FACTORY_CLOSED: break;
    default:
	ASSERT(!"Invalid factory mode");
    }
    factory->mode = FACTORY_CLOSED;
#ifdef DEBUG
    factory->p = NULL;
    factory->hp = NULL;
    factory->heap_frags = NULL;
#endif
}
Example #5
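/* Roll back all allocations made through the factory: off-heap data is
 * unlinked and released, heap fragments are freed, and the saved heap top
 * or last heap fragment is restored before the factory is closed.
 */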
void erts_factory_undo(ErtsHeapFactory* factory)
{
    ErlHeapFragment* bp;
    struct erl_off_heap_header *hdr, **hdr_nextp;

    switch (factory->mode) {
    case FACTORY_HALLOC:
    case FACTORY_STATIC:
	/* Cleanup off-heap
	 */
	hdr_nextp = NULL;
        for (hdr = factory->off_heap->first;
	     hdr != factory->off_heap_saved.first;
	     hdr = hdr->next) {

	    hdr_nextp = &hdr->next;
        }

        if (hdr_nextp != NULL) {
	    *hdr_nextp = NULL;
	    erts_cleanup_offheap(factory->off_heap);
	    factory->off_heap->first    = factory->off_heap_saved.first;
	    factory->off_heap->overhead = factory->off_heap_saved.overhead;
        }

        if (factory->mode == FACTORY_HALLOC) {
            /* Free heap frags
             */
            bp = factory->p->mbuf;
            if (bp != factory->heap_frags_saved) {
                do {
                    ErlHeapFragment *next_bp = bp->next;
                    ASSERT(bp->off_heap.first == NULL);
                    ERTS_HEAP_FREE(ERTS_ALC_T_HEAP_FRAG, (void *) bp,
                                   ERTS_HEAP_FRAG_SIZE(bp->alloc_size));
                    bp = next_bp;
                } while (bp != factory->heap_frags_saved);

                factory->p->mbuf = bp;
            }

            /* Rollback heap top
	     */
            if (factory->heap_frags_saved == NULL) { /* No heap frags when we started */
                ASSERT(factory->hp_start >= HEAP_START(factory->p));
                ASSERT(factory->hp_start <= HEAP_LIMIT(factory->p));

                HEAP_TOP(factory->p) = factory->hp_start;
            }
            else {
                ASSERT(factory->heap_frags_saved == factory->p->mbuf);
                if (factory->hp_start == factory->heap_frags_saved->mem) {
                    factory->p->mbuf = factory->p->mbuf->next;
                    ERTS_HEAP_FREE(ERTS_ALC_T_HEAP_FRAG, factory->heap_frags_saved,
                                   ERTS_HEAP_FRAG_SIZE(factory->heap_frags_saved->alloc_size));
                }
                else if (factory->hp_start != factory->hp_end) {
                    unsigned remains = factory->hp_start - factory->heap_frags_saved->mem;
                    ASSERT(remains > 0 && remains < factory->heap_frags_saved->used_size);
                    factory->heap_frags_saved->used_size = remains;
                }
            }
        }
        break;

    case FACTORY_TMP:
    case FACTORY_HEAP_FRAGS:
	erts_cleanup_offheap(factory->off_heap);
	factory->off_heap->first = NULL;

        bp = factory->heap_frags;
        while (bp != NULL) {
            ErlHeapFragment* next_bp = bp->next;

            ASSERT(bp->off_heap.first == NULL);
            ERTS_HEAP_FREE(factory->alloc_type, (void *) bp,
                           ERTS_HEAP_FRAG_SIZE(bp->alloc_size));
            bp = next_bp;
        }
	break;

    case FACTORY_CLOSED: break;
    default:
	ASSERT(!"Invalid factory mode");
    }
    factory->mode = FACTORY_CLOSED;
#ifdef DEBUG
    factory->p = NULL;
    factory->hp = NULL;
    factory->heap_frags = NULL;
#endif
}
Example #6
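/* Exit-time cleanup. Skipped on an abnormal exit (non-zero exit code),
 * before initialization has finished, or when a thread other than the main
 * thread performs the exit; otherwise the remaining shutdown work (hybrid
 * heap, async threads, mseg allocator) is performed.
 */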
static void
system_cleanup(int exit_code)
{
    /* No cleanup wanted if ...
     * 1. we are about to do an abnormal exit,
     * 2. we haven't finished initializing, or
     * 3. a thread other than the main thread is performing the exit
     *    (in the threaded non-SMP case).
     */

    if (exit_code != 0
	|| !erts_initialized
#if defined(USE_THREADS) && !defined(ERTS_SMP)
	|| !erts_equal_tids(main_thread, erts_thr_self())
#endif
	)
	return;

#ifdef ERTS_SMP
#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_lc_check_exact(NULL, 0);
#endif
    erts_smp_block_system(ERTS_BS_FLG_ALLOW_GC); /* We never release it... */
#endif

#ifdef HYBRID
    if (ma_src_stack) erts_free(ERTS_ALC_T_OBJECT_STACK,
                                (void *)ma_src_stack);
    if (ma_dst_stack) erts_free(ERTS_ALC_T_OBJECT_STACK,
                                (void *)ma_dst_stack);
    if (ma_offset_stack) erts_free(ERTS_ALC_T_OBJECT_STACK,
                                   (void *)ma_offset_stack);
    ma_src_stack = NULL;
    ma_dst_stack = NULL;
    ma_offset_stack = NULL;
    erts_cleanup_offheap(&erts_global_offheap);
#endif

#if defined(HYBRID) && !defined(INCREMENTAL)
    if (global_heap) {
	ERTS_HEAP_FREE(ERTS_ALC_T_HEAP,
		       (void*) global_heap,
		       sizeof(Eterm) * global_heap_sz);
    }
    global_heap = NULL;
#endif

#ifdef INCREMENTAL
    erts_cleanup_incgc();
#endif

#if defined(USE_THREADS)
    exit_async();
#endif
#if HAVE_ERTS_MSEG
    erts_mseg_exit();
#endif

    /*
     * A lot more cleaning could/should have been done...
     */

}