Code example #1
File: duk_heap_alloc.c  Project: heisewangluo/duktape
DUK_INTERNAL void duk_heap_free(duk_heap *heap) {
	DUK_D(DUK_DPRINT("free heap: %p", (void *) heap));

#if defined(DUK_USE_DEBUG)
	duk_heap_dump_strtab(heap);
#endif

#if defined(DUK_USE_DEBUGGER_SUPPORT)
	/* Detach a debugger if attached (can be called multiple times
	 * safely).
	 */
	duk_debug_do_detach(heap);
#endif

	/* Execute finalizers before freeing the heap, even for reachable
	 * objects, and regardless of whether or not mark-and-sweep is
	 * enabled.  This gives finalizers the chance to free any native
	 * resources like file handles, allocations made outside Duktape,
	 * etc.
	 *
	 * XXX: this perhaps requires an execution time limit.
	 */
	DUK_D(DUK_DPRINT("execute finalizers before freeing heap"));
#ifdef DUK_USE_MARK_AND_SWEEP
	/* run mark-and-sweep a few times just in case (unreachable
	 * object finalizers run already here)
	 */
	duk_heap_mark_and_sweep(heap, 0);
	duk_heap_mark_and_sweep(heap, 0);
#endif
	duk__free_run_finalizers(heap);

	/* Note: heap->heap_thread, heap->curr_thread, and heap->heap_object
	 * are on the heap allocated list.
	 */

	DUK_D(DUK_DPRINT("freeing heap objects of heap: %p", (void *) heap));
	duk__free_allocated(heap);

#ifdef DUK_USE_REFERENCE_COUNTING
	DUK_D(DUK_DPRINT("freeing refzero list of heap: %p", (void *) heap));
	duk__free_refzero_list(heap);
#endif

#ifdef DUK_USE_MARK_AND_SWEEP
	DUK_D(DUK_DPRINT("freeing mark-and-sweep finalize list of heap: %p", (void *) heap));
	duk__free_markandsweep_finalize_list(heap);
#endif

	DUK_D(DUK_DPRINT("freeing string table of heap: %p", (void *) heap));
	duk__free_stringtable(heap);

	DUK_D(DUK_DPRINT("freeing heap structure: %p", (void *) heap));
	heap->free_func(heap->heap_udata, heap);
}
Code example #2
File: duk_heap_alloc.c  Project: rockybars/duktape
void duk_heap_free(duk_heap *heap) {
	DUK_D(DUK_DPRINT("free heap: %p", heap));

	/* Execute finalizers before freeing the heap, even for reachable
	 * objects, and regardless of whether or not mark-and-sweep is
	 * enabled.  This gives finalizers the chance to free any native
	 * resources like file handles, allocations made outside Duktape,
	 * etc.
	 *
	 * FIXME: this perhaps requires an execution time limit.
	 */
	DUK_D(DUK_DPRINT("execute finalizers before freeing heap"));
#ifdef DUK_USE_MARK_AND_SWEEP
	/* run mark-and-sweep a few times just in case (unreachable
	 * object finalizers run already here)
	 */
	duk_heap_mark_and_sweep(heap, 0);
	duk_heap_mark_and_sweep(heap, 0);
#endif
	duk__free_run_finalizers(heap);

	/* Note: heap->heap_thread, heap->curr_thread, heap->heap_object,
	 * and heap->log_buffer are on the heap allocated list.
	 */

	DUK_D(DUK_DPRINT("freeing heap objects of heap: %p", heap));
	duk__free_allocated(heap);

#ifdef DUK_USE_REFERENCE_COUNTING
	DUK_D(DUK_DPRINT("freeing refzero list of heap: %p", heap));
	duk__free_refzero_list(heap);
#endif

#ifdef DUK_USE_MARK_AND_SWEEP
	DUK_D(DUK_DPRINT("freeing mark-and-sweep finalize list of heap: %p", heap));
	duk__free_markandsweep_finalize_list(heap);
#endif

	DUK_D(DUK_DPRINT("freeing string table of heap: %p", heap));
	duk__free_stringtable(heap);

	DUK_D(DUK_DPRINT("freeing heap structure: %p", heap));
	heap->free_func(heap->alloc_udata, heap);
}
Code example #3
File: duk_heap_memory.c  Project: harold-b/duktape
DUK_LOCAL void duk__run_voluntary_gc(duk_heap *heap) {
	if (DUK_HEAP_HAS_MARKANDSWEEP_RUNNING(heap)) {
		DUK_DD(DUK_DDPRINT("mark-and-sweep in progress -> skip voluntary mark-and-sweep now"));
	} else {
		duk_small_uint_t flags;
		duk_bool_t rc;

		DUK_D(DUK_DPRINT("triggering voluntary mark-and-sweep"));
		flags = 0;
		rc = duk_heap_mark_and_sweep(heap, flags);
		DUK_UNREF(rc);
	}
}
Code example #4
int duk_builtin_duk_object_gc(duk_context *ctx) {
#ifdef DUK_USE_MARK_AND_SWEEP
	duk_hthread *thr = (duk_hthread *) ctx;
	int flags;
	int rc;

	flags = duk_get_int(ctx, 0);
	rc = duk_heap_mark_and_sweep(thr->heap, flags);
	duk_push_int(ctx, rc);
	return 1;
#else
	return 0;
#endif
}
Code example #5
File: duk_heap_memory.c  Project: fatcerberus/duktape
DUK_LOCAL DUK_INLINE void duk__check_voluntary_gc(duk_heap *heap) {
	if (DUK_UNLIKELY(--(heap)->ms_trigger_counter < 0)) {
#if defined(DUK_USE_DEBUG)
		if (heap->ms_prevent_count == 0) {
			DUK_D(DUK_DPRINT("triggering voluntary mark-and-sweep"));
		} else {
			DUK_DD(DUK_DDPRINT("gc blocked -> skip voluntary mark-and-sweep now"));
		}
#endif

		/* Prevention checks in the call target handle cases where
		 * voluntary GC is not allowed.  The voluntary GC trigger
		 * counter is only rewritten if mark-and-sweep actually runs.
		 */
		duk_heap_mark_and_sweep(heap, DUK_MS_FLAG_VOLUNTARY /*flags*/);
	}
}
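The helper above is normally reached from the allocation paths through a trigger macro (see DUK__VOLUNTARY_PERIODIC_GC in the duk_heap_mem_alloc()/realloc() examples below). A minimal sketch of such a wrapper; the exact definition in duk_heap_memory.c may differ between Duktape versions:

#if defined(DUK_USE_VOLUNTARY_GC)
#define DUK__VOLUNTARY_PERIODIC_GC(heap)  do { duk__check_voluntary_gc((heap)); } while (0)
#else
#define DUK__VOLUNTARY_PERIODIC_GC(heap)  /* voluntary GC disabled */
#endif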
Code example #6
File: duk_api_memory.c  Project: ndob/duktape
void duk_gc(duk_context *ctx, int flags) {
#ifdef DUK_USE_MARK_AND_SWEEP
	duk_hthread *thr = (duk_hthread *) ctx;
	duk_heap *heap;

	DUK_UNREF(flags);

	if (!ctx) {
		return;
	}
	heap = thr->heap;
	DUK_ASSERT(heap != NULL);

	DUK_DPRINT("mark-and-sweep requested by application");
	duk_heap_mark_and_sweep(heap, 0);
#else
	DUK_DPRINT("mark-and-sweep requested by application but mark-and-sweep not enabled, ignoring");
#endif
}
Code example #7
DUK_INTERNAL duk_ret_t duk_bi_duktape_object_gc(duk_context *ctx) {
#ifdef DUK_USE_MARK_AND_SWEEP
	duk_hthread *thr = (duk_hthread *) ctx;
	duk_small_uint_t flags;
	duk_bool_t rc;

	flags = (duk_small_uint_t) duk_get_uint(ctx, 0);
	rc = duk_heap_mark_and_sweep(thr->heap, flags);

	/* XXX: Not sure what the best return value would be in the API.
	 * Return a boolean for now.  Note that rc == 0 is success (true).
	 */
	duk_push_boolean(ctx, !rc);
	return 1;
#else
	DUK_UNREF(ctx);
	return 0;
#endif
}
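From native code this built-in can be exercised by evaluating a script snippet that calls it. A minimal sketch, assuming the function is exposed as Duktape.gc() as in recent Duktape versions; request_gc_via_script is an illustrative name, not part of the Duktape API:

#include <stdio.h>
#include "duktape.h"

static void request_gc_via_script(duk_context *ctx) {
	/* Evaluate a call to the built-in and read its boolean result
	 * (true means the mark-and-sweep pass reported success).
	 */
	duk_eval_string(ctx, "Duktape.gc()");
	if (duk_get_boolean(ctx, -1)) {
		printf("mark-and-sweep pass reported success\n");
	}
	duk_pop(ctx);
}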
Code example #8
File: duk_api_memory.c  Project: 3009420/civetweb
DUK_EXTERNAL void duk_gc(duk_context *ctx, duk_uint_t flags) {
#ifdef DUK_USE_MARK_AND_SWEEP
	duk_hthread *thr = (duk_hthread *) ctx;
	duk_heap *heap;

	DUK_UNREF(flags);

	/* NULL accepted */
	if (!ctx) {
		return;
	}
	DUK_ASSERT_CTX_VALID(ctx);
	heap = thr->heap;
	DUK_ASSERT(heap != NULL);

	DUK_D(DUK_DPRINT("mark-and-sweep requested by application"));
	duk_heap_mark_and_sweep(heap, 0);
#else
	DUK_D(DUK_DPRINT("mark-and-sweep requested by application but mark-and-sweep not enabled, ignoring"));
	DUK_UNREF(ctx);
	DUK_UNREF(flags);
#endif
}
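At the application level the same request goes through the public duk_gc() API shown above. A minimal usage sketch with a default heap; error handling is reduced to the NULL check:

#include "duktape.h"

int main(void) {
	duk_context *ctx = duk_create_heap_default();
	if (!ctx) {
		return 1;
	}

	/* ... load scripts, create objects ... */

	duk_gc(ctx, 0);  /* request a mark-and-sweep pass; flags are currently unused */
	duk_gc(ctx, 0);  /* a second pass collects objects whose finalizers ran in the first */

	duk_destroy_heap(ctx);
	return 0;
}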
Code example #9
File: duk_heap_memory.c  Project: harold-b/duktape
DUK_INTERNAL void *duk_heap_mem_alloc(duk_heap *heap, duk_size_t size) {
	void *res;
	duk_bool_t rc;
	duk_small_int_t i;

	DUK_ASSERT(heap != NULL);
	DUK_ASSERT_DISABLE(size >= 0);

	/*
	 *  Voluntary periodic GC (if enabled)
	 */

	DUK__VOLUNTARY_PERIODIC_GC(heap);

	/*
	 *  First attempt
	 */

#if defined(DUK_USE_GC_TORTURE)
	/* simulate alloc failure on every alloc (except when mark-and-sweep is running) */
	if (!DUK_HEAP_HAS_MARKANDSWEEP_RUNNING(heap)) {
		DUK_DDD(DUK_DDDPRINT("gc torture enabled, pretend that first alloc attempt fails"));
		res = NULL;
		DUK_UNREF(res);
		goto skip_attempt;
	}
#endif
	res = heap->alloc_func(heap->heap_udata, size);
	if (res || size == 0) {
		/* for zero size allocations NULL is allowed */
		return res;
	}
#if defined(DUK_USE_GC_TORTURE)
 skip_attempt:
#endif

	DUK_D(DUK_DPRINT("first alloc attempt failed, attempt to gc and retry"));

	/*
	 *  Avoid a GC if GC is already running.  This can happen at a late
	 *  stage in a GC when we try to e.g. resize the stringtable
	 *  or compact objects.
	 */

	if (DUK_HEAP_HAS_MARKANDSWEEP_RUNNING(heap)) {
		DUK_D(DUK_DPRINT("duk_heap_mem_alloc() failed, gc in progress (gc skipped), alloc size %ld", (long) size));
		return NULL;
	}

	/*
	 *  Retry with several GC attempts.  Initial attempts are made without
	 *  emergency mode; later attempts use emergency mode which minimizes
	 *  memory allocations forcibly.
	 */

	for (i = 0; i < DUK_HEAP_ALLOC_FAIL_MARKANDSWEEP_LIMIT; i++) {
		duk_small_uint_t flags;

		flags = 0;
		if (i >= DUK_HEAP_ALLOC_FAIL_MARKANDSWEEP_EMERGENCY_LIMIT - 1) {
			flags |= DUK_MS_FLAG_EMERGENCY;
		}

		rc = duk_heap_mark_and_sweep(heap, flags);
		DUK_UNREF(rc);

		res = heap->alloc_func(heap->heap_udata, size);
		if (res) {
			DUK_D(DUK_DPRINT("duk_heap_mem_alloc() succeeded after gc (pass %ld), alloc size %ld",
			                 (long) (i + 1), (long) size));
			return res;
		}
	}

	DUK_D(DUK_DPRINT("duk_heap_mem_alloc() failed even after gc, alloc size %ld", (long) size));
	return NULL;
}
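heap->alloc_func and heap->heap_udata used above are the allocator callback and opaque userdata registered when the heap is created. A minimal sketch of plugging in custom allocators with the public duk_create_heap() API; the my_* names and create_ctx are placeholders, not Duktape identifiers:

#include <stdlib.h>
#include "duktape.h"

static void *my_alloc(void *udata, duk_size_t size) {
	(void) udata;  /* this is what duk_heap_mem_alloc() later sees as heap->heap_udata */
	return malloc(size);
}

static void *my_realloc(void *udata, void *ptr, duk_size_t size) {
	(void) udata;
	return realloc(ptr, size);
}

static void my_free(void *udata, void *ptr) {
	(void) udata;
	free(ptr);
}

duk_context *create_ctx(void) {
	/* NULL userdata and NULL fatal handler for brevity. */
	return duk_create_heap(my_alloc, my_realloc, my_free, NULL, NULL);
}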
Code example #10
File: duk_heap_memory.c  Project: harold-b/duktape
DUK_INTERNAL void *duk_heap_mem_realloc_indirect(duk_heap *heap, duk_mem_getptr cb, void *ud, duk_size_t newsize) {
	void *res;
	duk_bool_t rc;
	duk_small_int_t i;

	DUK_ASSERT(heap != NULL);
	DUK_ASSERT_DISABLE(newsize >= 0);

	/*
	 *  Voluntary periodic GC (if enabled)
	 */

	DUK__VOLUNTARY_PERIODIC_GC(heap);

	/*
	 *  First attempt
	 */

#if defined(DUK_USE_GC_TORTURE)
	/* simulate alloc failure on every realloc (except when mark-and-sweep is running) */
	if (!DUK_HEAP_HAS_MARKANDSWEEP_RUNNING(heap)) {
		DUK_DDD(DUK_DDDPRINT("gc torture enabled, pretend that first indirect realloc attempt fails"));
		res = NULL;
		DUK_UNREF(res);
		goto skip_attempt;
	}
#endif
	res = heap->realloc_func(heap->heap_udata, cb(heap, ud), newsize);
	if (res || newsize == 0) {
		/* for zero size allocations NULL is allowed */
		return res;
	}
#if defined(DUK_USE_GC_TORTURE)
 skip_attempt:
#endif

	DUK_D(DUK_DPRINT("first indirect realloc attempt failed, attempt to gc and retry"));

	/*
	 *  Avoid a GC if GC is already running.  See duk_heap_mem_alloc().
	 */

	if (DUK_HEAP_HAS_MARKANDSWEEP_RUNNING(heap)) {
		DUK_D(DUK_DPRINT("duk_heap_mem_realloc_indirect() failed, gc in progress (gc skipped), alloc size %ld", (long) newsize));
		return NULL;
	}

	/*
	 *  Retry with several GC attempts.  Initial attempts are made without
	 *  emergency mode; later attempts use emergency mode which minimizes
	 *  memory allocations forcibly.
	 */

	for (i = 0; i < DUK_HEAP_ALLOC_FAIL_MARKANDSWEEP_LIMIT; i++) {
		duk_small_uint_t flags;

#if defined(DUK_USE_ASSERTIONS)
		void *ptr_pre;  /* ptr before mark-and-sweep */
		void *ptr_post;
#endif

#if defined(DUK_USE_ASSERTIONS)
		ptr_pre = cb(heap, ud);
#endif
		flags = 0;
		if (i >= DUK_HEAP_ALLOC_FAIL_MARKANDSWEEP_EMERGENCY_LIMIT - 1) {
			flags |= DUK_MS_FLAG_EMERGENCY;
		}

		rc = duk_heap_mark_and_sweep(heap, flags);
		DUK_UNREF(rc);
#if defined(DUK_USE_ASSERTIONS)
		ptr_post = cb(heap, ud);
		if (ptr_pre != ptr_post) {
			/* useful for debugging */
			DUK_DD(DUK_DDPRINT("note: base pointer changed by mark-and-sweep: %p -> %p",
			                   (void *) ptr_pre, (void *) ptr_post));
		}
#endif

		/* Note: key issue here is to re-lookup the base pointer on every attempt.
		 * The pointer being reallocated may change after every mark-and-sweep.
		 */

		res = heap->realloc_func(heap->heap_udata, cb(heap, ud), newsize);
		if (res || newsize == 0) {
			DUK_D(DUK_DPRINT("duk_heap_mem_realloc_indirect() succeeded after gc (pass %ld), alloc size %ld",
			                 (long) (i + 1), (long) newsize));
			return res;
		}
	}

	DUK_D(DUK_DPRINT("duk_heap_mem_realloc_indirect() failed even after gc, alloc size %ld", (long) newsize));
	return NULL;
}
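The indirect variant exists because the buffer being reallocated may be reachable only through a GC-managed structure, and a mark-and-sweep between retries can change the stored base pointer; that is why cb(heap, ud) is re-evaluated on every attempt, as the comment above notes. A hypothetical callback sketch (my_buffer and my_buffer_getptr are illustrative names, not from the Duktape source):

/* 'ud' points to a structure owning the buffer being resized; the callback
 * re-reads the current pointer each time it is asked, so a GC pass between
 * attempts cannot leave the reallocation working on a stale pointer.
 */
typedef struct {
	void *data;
	duk_size_t size;
} my_buffer;

static void *my_buffer_getptr(duk_heap *heap, void *ud) {
	my_buffer *buf = (my_buffer *) ud;
	(void) heap;
	return buf->data;
}

/* Usage sketch:
 *   void *p = duk_heap_mem_realloc_indirect(heap, my_buffer_getptr, buf, new_size);
 *   if (p != NULL || new_size == 0) { buf->data = p; buf->size = new_size; }
 */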
Code example #11
File: duk_heap_refcount.c  Project: cherry-wb/duktape
static void duk__refzero_free_pending(duk_hthread *thr) {
	duk_heaphdr *h1, *h2;
	duk_heap *heap;
	duk_int_t count = 0;

	DUK_ASSERT(thr != NULL);
	DUK_ASSERT(thr->heap != NULL);
	heap = thr->heap;
	DUK_ASSERT(heap != NULL);

	/*
	 *  Detect recursive invocation
	 */

	if (DUK_HEAP_HAS_REFZERO_FREE_RUNNING(heap)) {
		DUK_DDD(DUK_DDDPRINT("refzero free running, skip run"));
		return;
	}

	/*
	 *  Churn refzero_list until empty
	 */

	DUK_HEAP_SET_REFZERO_FREE_RUNNING(heap);
	while (heap->refzero_list) {
		duk_hobject *obj;
		duk_bool_t rescued = 0;

		/*
		 *  Pick an object from the head (don't remove yet).
		 */

		h1 = heap->refzero_list;
		obj = (duk_hobject *) h1;
		DUK_DD(DUK_DDPRINT("refzero processing %p: %!O", (void *) h1, (duk_heaphdr *) h1));
		DUK_ASSERT(DUK_HEAPHDR_GET_PREV(h1) == NULL);
		DUK_ASSERT(DUK_HEAPHDR_GET_TYPE(h1) == DUK_HTYPE_OBJECT);  /* currently, always the case */

		/*
		 *  Finalizer check.
		 *
		 *  Note: running a finalizer may have arbitrary side effects, e.g.
		 *  queue more objects on refzero_list (tail), or even trigger a
		 *  mark-and-sweep.
		 *
		 *  Note: quick reject check should match vast majority of
		 *  objects and must be safe (not throw any errors, ever).
		 */

		/* XXX: If object has FINALIZED, it was finalized by mark-and-sweep on
		 * its previous run.  Any point in running finalizer again here?  If
		 * finalization semantics is changed so that finalizer is only run once,
		 * checking for FINALIZED would happen here.
		 */

		/* A finalizer is looked up from the object and up its prototype chain
		 * (which allows inherited finalizers).
		 */
		if (duk_hobject_hasprop_raw(thr, obj, DUK_HTHREAD_STRING_INT_FINALIZER(thr))) {
			DUK_DDD(DUK_DDDPRINT("object has a finalizer, run it"));

			DUK_ASSERT(h1->h_refcount == 0);
			h1->h_refcount++;  /* bump refcount to prevent refzero during finalizer processing */

			duk_hobject_run_finalizer(thr, obj);  /* must never longjmp */

			h1->h_refcount--;  /* remove artificial bump */
			DUK_ASSERT_DISABLE(h1->h_refcount >= 0);  /* refcount is unsigned, so always true */

			if (h1->h_refcount != 0) {
				DUK_DDD(DUK_DDDPRINT("-> object refcount after finalization non-zero, object will be rescued"));
				rescued = 1;
			} else {
				DUK_DDD(DUK_DDDPRINT("-> object refcount still zero after finalization, object will be freed"));
			}
		}

		/* Refzero head is still the same.  This is the case even if finalizer
		 * inserted more refzero objects; they are inserted to the tail.
		 */
		DUK_ASSERT(h1 == heap->refzero_list);

		/*
		 *  Remove the object from the refzero list.  This cannot be done
		 *  before a possible finalizer has been executed; the finalizer
		 *  may trigger a mark-and-sweep, and mark-and-sweep must be able
		 *  to traverse a complete refzero_list.
		 */

		h2 = DUK_HEAPHDR_GET_NEXT(h1);
		if (h2) {
			DUK_HEAPHDR_SET_PREV(h2, NULL);  /* not strictly necessary */
			heap->refzero_list = h2;
		} else {
			heap->refzero_list = NULL;
			heap->refzero_list_tail = NULL;
		}

		/*
		 *  Rescue or free.
		 */

		if (rescued) {
			/* yes -> move back to heap allocated */
			DUK_DD(DUK_DDPRINT("object rescued during refcount finalization: %p", (void *) h1));
			DUK_HEAPHDR_SET_PREV(h1, NULL);
			DUK_HEAPHDR_SET_NEXT(h1, heap->heap_allocated);
			heap->heap_allocated = h1;
		} else {
			/* no -> decref members, then free */
			duk__refcount_finalize_hobject(thr, obj);
			duk_heap_free_heaphdr_raw(heap, h1);
		}

		count++;
	}
	DUK_HEAP_CLEAR_REFZERO_FREE_RUNNING(heap);

	DUK_DDD(DUK_DDDPRINT("refzero processed %ld objects", (long) count));

	/*
	 *  Once the whole refzero cascade has been freed, check for
	 *  a voluntary mark-and-sweep.
	 */

#if defined(DUK_USE_MARK_AND_SWEEP) && defined(DUK_USE_VOLUNTARY_GC)
	/* 'count' is more or less comparable to normal trigger counter update
	 * which happens in memory block (re)allocation.
	 */
	heap->mark_and_sweep_trigger_counter -= count;
	if (heap->mark_and_sweep_trigger_counter <= 0) {
		duk_bool_t rc;
		duk_small_uint_t flags = 0;  /* not emergency */
		DUK_D(DUK_DPRINT("refcount triggering mark-and-sweep"));
		rc = duk_heap_mark_and_sweep(heap, flags);
		DUK_UNREF(rc);
		DUK_D(DUK_DPRINT("refcount triggered mark-and-sweep => rc %ld", (long) rc));
	}
#endif  /* DUK_USE_MARK_AND_SWEEP && DUK_USE_VOLUNTARY_GC */
}
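The finalizer looked up through DUK_HTHREAD_STRING_INT_FINALIZER() above is normally attached from the public API. A minimal sketch using the documented duk_push_c_function()/duk_set_finalizer() pattern; my_finalizer and push_object_with_finalizer are placeholder names:

#include "duktape.h"

/* The finalizer receives the object being finalized as its first argument.
 * It should release native resources and must not assume it runs only once.
 */
static duk_ret_t my_finalizer(duk_context *ctx) {
	(void) ctx;  /* e.g. read a hidden pointer property and free it */
	return 0;
}

static void push_object_with_finalizer(duk_context *ctx) {
	duk_push_object(ctx);                       /* target object */
	duk_push_c_function(ctx, my_finalizer, 1);  /* finalizer, 1 argument */
	duk_set_finalizer(ctx, -2);                 /* attach; pops the finalizer */
}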
Code example #12
File: duk_heap_memory.c  Project: andoma/duktape
void *duk_heap_mem_realloc(duk_heap *heap, void *ptr, size_t newsize) {
	void *res;
	int rc;
	int i;

	DUK_ASSERT(heap != NULL);
	/* ptr may be NULL */
	DUK_ASSERT_DISABLE(newsize >= 0);

	/*
	 *  Voluntary periodic GC (if enabled)
	 */

	DUK__VOLUNTARY_PERIODIC_GC(heap);

	/*
	 *  First attempt
	 */

#ifdef DUK_USE_GC_TORTURE
	/* simulate alloc failure on every realloc (except when mark-and-sweep is running) */
	if (!DUK_HEAP_HAS_MARKANDSWEEP_RUNNING(heap)) {
		DUK_DDD(DUK_DDDPRINT("gc torture enabled, pretend that first realloc attempt fails"));
		res = NULL;
		DUK_UNREF(res);
		goto skip_attempt;
	}
#endif
	res = heap->realloc_func(heap->alloc_udata, ptr, newsize);
	if (res || newsize == 0) {
		/* for zero size allocations NULL is allowed */
		return res;
	}
#ifdef DUK_USE_GC_TORTURE
 skip_attempt:
#endif

	DUK_D(DUK_DPRINT("first realloc attempt failed, attempt to gc and retry"));

	/*
	 *  Avoid a GC if GC is already running.  See duk_heap_mem_alloc().
	 */

	if (DUK_HEAP_HAS_MARKANDSWEEP_RUNNING(heap)) {
		DUK_D(DUK_DPRINT("duk_heap_mem_realloc() failed, gc in progress (gc skipped), alloc size %ld", (long) newsize));
		return NULL;
	}

	/*
	 *  Retry with several GC attempts.  Initial attempts are made without
	 *  emergency mode; later attempts use emergency mode which minimizes
	 *  memory allocations forcibly.
	 */

	for (i = 0; i < DUK_HEAP_ALLOC_FAIL_MARKANDSWEEP_LIMIT; i++) {
		int flags;

		flags = 0;
		if (i >= DUK_HEAP_ALLOC_FAIL_MARKANDSWEEP_EMERGENCY_LIMIT - 1) {
			flags |= DUK_MS_FLAG_EMERGENCY;
		}

		rc = duk_heap_mark_and_sweep(heap, flags);
		DUK_UNREF(rc);

		res = heap->realloc_func(heap->alloc_udata, ptr, newsize);
		if (res) {
			DUK_D(DUK_DPRINT("duk_heap_mem_realloc() succeeded after gc (pass %d), alloc size %ld",
			                 i + 1, (long) newsize));
			return res;
		}
	}

	DUK_D(DUK_DPRINT("duk_heap_mem_realloc() failed even after gc, alloc size %ld", (long) newsize));
	return NULL;
}
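The DUK_USE_GC_TORTURE branch in this and the preceding allocation functions is only compiled in when the corresponding feature option is defined. A sketch of enabling it, assuming a hand-edited duk_config.h (newer Duktape releases generate this file with their config tooling):

/* duk_config.h (excerpt): pretend the first alloc/realloc attempt always
 * fails so the GC retry paths above get exercised.
 */
#define DUK_USE_GC_TORTURE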
Code example #13
DUK_INTERNAL void duk_heap_free(duk_heap *heap) {
	DUK_D(DUK_DPRINT("free heap: %p", (void *) heap));

#if defined(DUK_USE_DEBUG)
	duk_heap_dump_strtab(heap);
#endif

#if defined(DUK_USE_DEBUGGER_SUPPORT)
	/* Detach a debugger if attached (can be called multiple times
	 * safely).
	 */
	/* XXX: Add a flag to reject an attempt to re-attach?  Otherwise
	 * the detached callback may immediately reattach.
	 */
	duk_debug_do_detach(heap);
#endif

	/* Execute finalizers before freeing the heap, even for reachable
	 * objects, and regardless of whether or not mark-and-sweep is
	 * enabled.  This gives finalizers the chance to free any native
	 * resources like file handles, allocations made outside Duktape,
	 * etc.  This is quite tricky to get right, so that all finalizer
	 * guarantees are honored.
	 *
	 * XXX: this perhaps requires an execution time limit.
	 */
	DUK_D(DUK_DPRINT("execute finalizers before freeing heap"));
#if defined(DUK_USE_MARK_AND_SWEEP)
	/* Run mark-and-sweep a few times just in case (unreachable object
	 * finalizers run already here).  The last round must rescue objects
	 * from the previous round without running any more finalizers.  This
	 * ensures rescued objects get their FINALIZED flag cleared so that
	 * their finalizer is called once more in forced finalization to
	 * satisfy finalizer guarantees.  However, we don't want to run any
	 * more finalizers because that'd require one more loop, and so on.
	 */
	DUK_D(DUK_DPRINT("forced gc #1 in heap destruction"));
	duk_heap_mark_and_sweep(heap, 0);
	DUK_D(DUK_DPRINT("forced gc #2 in heap destruction"));
	duk_heap_mark_and_sweep(heap, 0);
	DUK_D(DUK_DPRINT("forced gc #3 in heap destruction (don't run finalizers)"));
	duk_heap_mark_and_sweep(heap, DUK_MS_FLAG_SKIP_FINALIZERS);  /* skip finalizers; queue finalizable objects to heap_allocated */
#endif

	DUK_HEAP_SET_FINALIZER_NORESCUE(heap);  /* rescue no longer supported */
	duk__free_run_finalizers(heap);

	/* Note: heap->heap_thread, heap->curr_thread, and heap->heap_object
	 * are on the heap allocated list.
	 */

	DUK_D(DUK_DPRINT("freeing heap objects of heap: %p", (void *) heap));
	duk__free_allocated(heap);

#if defined(DUK_USE_REFERENCE_COUNTING)
	DUK_D(DUK_DPRINT("freeing refzero list of heap: %p", (void *) heap));
	duk__free_refzero_list(heap);
#endif

#if defined(DUK_USE_MARK_AND_SWEEP)
	DUK_D(DUK_DPRINT("freeing mark-and-sweep finalize list of heap: %p", (void *) heap));
	duk__free_markandsweep_finalize_list(heap);
#endif

	DUK_D(DUK_DPRINT("freeing string table of heap: %p", (void *) heap));
	duk__free_stringtable(heap);

	DUK_D(DUK_DPRINT("freeing heap structure: %p", (void *) heap));
	heap->free_func(heap->heap_udata, heap);
}
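An embedding application never calls duk_heap_free() directly; teardown goes through the public duk_destroy_heap() API, which ends up here. A minimal sketch; shutdown_engine is an illustrative name:

#include "duktape.h"

void shutdown_engine(duk_context *ctx) {
	/* Accepts NULL as a no-op.  Destroying any context tears down the whole
	 * heap it belongs to: forced GC passes, forced finalization, then freeing
	 * of all heap structures as in duk_heap_free() above.
	 */
	duk_destroy_heap(ctx);
}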