Code Example #1
DISPATCH_NOINLINE
static void
_dispatch_alloc_maybe_madvise_page(dispatch_continuation_t c)
{
	void *page = madvisable_page_base_for_continuation(c);
	if (!page) {
		// page can't be madvised; maybe it contains non-continuations
		return;
	}
	// Are all the continuations in this page unallocated?
	volatile bitmap_t *page_bitmaps;
	get_maps_and_indices_for_continuation((dispatch_continuation_t)page, NULL,
			NULL, (bitmap_t **)&page_bitmaps, NULL);
	unsigned int i;
	for (i = 0; i < BITMAPS_PER_PAGE; i++) {
		if (page_bitmaps[i] != 0) {
			return;
		}
	}
	// They are all unallocated, so we could madvise the page. Try to
	// take ownership of them all.
	int last_locked = 0;
	do {
		if (!os_atomic_cmpxchg(&page_bitmaps[last_locked], BITMAP_C(0),
				BITMAP_ALL_ONES, relaxed)) {
			// We didn't get one; since there is a cont allocated in
			// the page, we can't madvise. Give up and unlock all.
			goto unlock;
		}
	} while (++last_locked < (signed)BITMAPS_PER_PAGE);
#if DISPATCH_DEBUG
	//fprintf(stderr, "%s: madvised page %p for cont %p (next = %p), "
	//		"[%u+1]=%u bitmaps at %p\n", __func__, page, c, c->do_next,
	//		last_locked-1, BITMAPS_PER_PAGE, &page_bitmaps[0]);
	// Scribble to expose use-after-free bugs
	// madvise (syscall) flushes these stores
	memset(page, DISPATCH_ALLOCATOR_SCRIBBLE, DISPATCH_ALLOCATOR_PAGE_SIZE);
#endif
	(void)dispatch_assume_zero(madvise(page, DISPATCH_ALLOCATOR_PAGE_SIZE,
			MADV_FREE));

unlock:
	while (last_locked > 1) {
		page_bitmaps[--last_locked] = BITMAP_C(0);
	}
	if (last_locked) {
		os_atomic_store(&page_bitmaps[0], BITMAP_C(0), relaxed);
	}
	return;
}
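For readers outside the libdispatch tree, here is a minimal sketch of the same "take ownership of every bitmap or give up" pattern, written with standard C11 atomics instead of the os_atomic shims. The toy_* names and TOY_BITMAPS_PER_PAGE constant are hypothetical, not libdispatch API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define TOY_BITMAPS_PER_PAGE 8

// Hypothetical stand-in for one page's worth of allocation bitmaps.
typedef struct {
	_Atomic uint64_t words[TOY_BITMAPS_PER_PAGE];
} toy_page_bitmaps;

// Atomically flip every word from 0 (all slots free) to all-ones (owned),
// mirroring the take-ownership loop above. On the first failure, roll back
// the words already taken and report that the page cannot be reclaimed.
static bool
toy_try_own_page(toy_page_bitmaps *p)
{
	int last_locked = 0;
	do {
		uint64_t expected = 0;
		if (!atomic_compare_exchange_strong_explicit(&p->words[last_locked],
				&expected, UINT64_MAX,
				memory_order_relaxed, memory_order_relaxed)) {
			// Someone allocated from this word; release what we took.
			while (last_locked-- > 0) {
				atomic_store_explicit(&p->words[last_locked], 0,
						memory_order_relaxed);
			}
			return false;
		}
	} while (++last_locked < TOY_BITMAPS_PER_PAGE);
	return true;	// caller may now scribble/madvise the page, then unlock
}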
Code Example #2
File: allocator.c  Project: Kentzo/AOS-libdispatch
DISPATCH_ALWAYS_INLINE_NDEBUG
static void
mark_bitmap_as_full_if_still_full(volatile bitmap_t *supermap,
		unsigned int bitmap_index, volatile bitmap_t *bitmap)
{
#if DISPATCH_DEBUG
	dispatch_assert(bitmap_index < BITMAPS_PER_SUPERMAP);
#endif
	const bitmap_t mask = BITMAP_C(1) << bitmap_index;
	bitmap_t s, s_new, s_masked;

	if (!bitmap_is_full(*bitmap)) {
		return;
	}
	s_new = *supermap;
	for (;;) {
		// No barriers because supermaps are only advisory, they
		// don't protect access to other memory.
		s = s_new;
		s_masked = s | mask;
		if (dispatch_atomic_cmpxchgvw(supermap, s, s_masked, &s_new, relaxed) ||
				!bitmap_is_full(*bitmap)) {
			return;
		}
	}
}
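The dispatch_atomic_cmpxchgvw call above is a compare-and-swap variant that also hands back the value it actually observed, so the retry loop never has to re-read the supermap. A rough equivalent using C11 atomics is sketched below; the toy_* names are illustrative, not libdispatch API, and the "bitmap is full" test is modeled as the word being all ones.

#include <stdatomic.h>
#include <stdint.h>

// Set `mask` in *supermap, but stop retrying as soon as the guarding
// condition (the child bitmap still being full) no longer holds.
static void
toy_mark_full_if_still_full(_Atomic uint64_t *supermap, uint64_t mask,
		_Atomic uint64_t *bitmap)
{
	uint64_t s = atomic_load_explicit(supermap, memory_order_relaxed);
	for (;;) {
		// On failure, atomic_compare_exchange_weak_explicit reloads the
		// current value into `s`, much like the &s_new out-parameter above.
		if (atomic_compare_exchange_weak_explicit(supermap, &s, s | mask,
				memory_order_relaxed, memory_order_relaxed)) {
			return;
		}
		if (atomic_load_explicit(bitmap, memory_order_relaxed) != UINT64_MAX) {
			return;	// bitmap is no longer full; leave the supermap alone
		}
	}
}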
Code Example #3
File: allocator.c  Project: outbackdingo/uBSD
// Return true if this bit was the last in the bitmap, and it is now all zeroes
DISPATCH_ALWAYS_INLINE_NDEBUG
static bool
bitmap_clear_bit(volatile bitmap_t *bitmap, unsigned int index,
		bool exclusively)
{
#if DISPATCH_DEBUG
	dispatch_assert(index < CONTINUATIONS_PER_BITMAP);
#endif
	const bitmap_t mask = BITMAP_C(1) << index;
	bitmap_t b;

	if (exclusively == CLEAR_EXCLUSIVELY) {
		if (slowpath((*bitmap & mask) == 0)) {
			DISPATCH_CRASH("Corruption: failed to clear bit exclusively");
		}
	}

	// and-and-fetch
	b = dispatch_atomic_and(bitmap, ~mask, release);
	return b == 0;
}
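A short usage sketch of the and-and-fetch idiom: the boolean result tells the caller whether the cleared bit was the last one set, so higher-level bookkeeping (supermap updates, page reclamation as in Code Example #1) only runs on the transition to an empty bitmap. This is a hypothetical illustration with C11 atomics; the toy_* names are not libdispatch API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

// Clear bit `index` and report whether the word became all zeroes.
static bool
toy_clear_bit(_Atomic uint64_t *word, unsigned index)
{
	const uint64_t mask = UINT64_C(1) << index;
	// atomic_fetch_and_explicit returns the *old* value, so and it with
	// ~mask once more to recover the value the word holds afterwards.
	uint64_t after = atomic_fetch_and_explicit(word, ~mask,
			memory_order_release) & ~mask;
	return after == 0;
}

// Caller sketch: react only when the bitmap drains completely.
static void
toy_free_slot(_Atomic uint64_t *word, unsigned index)
{
	if (toy_clear_bit(word, index)) {
		// e.g. clear the matching supermap bit, or consider handing the
		// page back to the OS.
	}
}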