Code example #1: _dispatch_sema4_create_slow (libdispatch) — lazily creating a Mach semaphore port
void
_dispatch_sema4_create_slow(_dispatch_sema4_t *s4, int policy)
{
	semaphore_t tmp = MACH_PORT_NULL;

	_dispatch_fork_becomes_unsafe();

	// lazily allocate the semaphore port

	// Someday:
	// 1) Switch to a doubly-linked FIFO in user-space.
	// 2) User-space timers for the timeout.

#if DISPATCH_USE_OS_SEMAPHORE_CACHE
	if (policy == _DSEMA4_POLICY_FIFO) {
		tmp = (_dispatch_sema4_t)os_get_cached_semaphore();
		// Publish the cached port; if another thread won the race,
		// return ours to the cache.
		if (!os_atomic_cmpxchg(s4, MACH_PORT_NULL, tmp, relaxed)) {
			os_put_cached_semaphore((os_semaphore_t)tmp);
		}
		return;
	}
#endif

	kern_return_t kr = semaphore_create(mach_task_self(), &tmp, policy, 0);
	DISPATCH_SEMAPHORE_VERIFY_KR(kr);

	// Publish the new port; if another thread beat us to it, destroy
	// ours and use theirs.
	if (!os_atomic_cmpxchg(s4, MACH_PORT_NULL, tmp, relaxed)) {
		kr = semaphore_destroy(mach_task_self(), tmp);
		DISPATCH_SEMAPHORE_VERIFY_KR(kr);
	}
}
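The shape of this function is a reusable idiom: create the resource speculatively, publish it with a single compare-and-swap, and make the loser of the race destroy (or recycle) its own copy. Below is a minimal, self-contained sketch of that publish-or-discard pattern using C11 atomics and a heap object in place of a Mach port; resource_t, make_resource, destroy_resource, and lazy_get are illustrative names, not libdispatch API. libdispatch gets away with relaxed ordering above, presumably because a port name is a kernel-managed handle; for an ordinary heap object the sketch uses the safer release/acquire pairing.

#include <stdatomic.h>
#include <stdlib.h>

typedef struct resource { int value; } resource_t; // stand-in for semaphore_t

static resource_t *make_resource(void) { return calloc(1, sizeof(resource_t)); }
static void destroy_resource(resource_t *r) { free(r); }

// Return the shared resource, creating it lazily on first use. Several
// threads may race here; exactly one compare-exchange succeeds and
// publishes its copy, and every loser destroys its own.
static resource_t *
lazy_get(_Atomic(resource_t *) *slot)
{
	resource_t *r = atomic_load_explicit(slot, memory_order_acquire);
	if (r) return r;

	resource_t *mine = make_resource();
	resource_t *expected = NULL;
	if (atomic_compare_exchange_strong_explicit(slot, &expected, mine,
			memory_order_release, memory_order_acquire)) {
		return mine;            // we won: our copy is now the shared one
	}
	destroy_resource(mine);     // we lost: `expected` holds the winner's copy
	return expected;
}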
Code example #2: _dispatch_alloc_maybe_madvise_page (libdispatch) — returning a fully free page to the kernel with madvise
DISPATCH_NOINLINE
static void
_dispatch_alloc_maybe_madvise_page(dispatch_continuation_t c)
{
	void *page = madvisable_page_base_for_continuation(c);
	if (!page) {
		// page can't be madvised; maybe it contains non-continuations
		return;
	}
	// Are all the continuations in this page unallocated?
	volatile bitmap_t *page_bitmaps;
	get_maps_and_indices_for_continuation((dispatch_continuation_t)page, NULL,
			NULL, (bitmap_t **)&page_bitmaps, NULL);
	unsigned int i;
	for (i = 0; i < BITMAPS_PER_PAGE; i++) {
		if (page_bitmaps[i] != 0) {
			return;
		}
	}
	// They are all unallocated, so we could madvise the page. Try to
	// take ownership of them all.
	int last_locked = 0;
	do {
		if (!os_atomic_cmpxchg(&page_bitmaps[last_locked], BITMAP_C(0),
				BITMAP_ALL_ONES, relaxed)) {
			// We didn't get one; since there is a cont allocated in
			// the page, we can't madvise. Give up and unlock all.
			goto unlock;
		}
	} while (++last_locked < (signed)BITMAPS_PER_PAGE);
#if DISPATCH_DEBUG
	//fprintf(stderr, "%s: madvised page %p for cont %p (next = %p), "
	//		"[%u+1]=%u bitmaps at %p\n", __func__, page, c, c->do_next,
	//		last_locked-1, BITMAPS_PER_PAGE, &page_bitmaps[0]);
	// Scribble to expose use-after-free bugs
	// madvise (syscall) flushes these stores
	memset(page, DISPATCH_ALLOCATOR_SCRIBBLE, DISPATCH_ALLOCATOR_PAGE_SIZE);
#endif
	(void)dispatch_assume_zero(madvise(page, DISPATCH_ALLOCATOR_PAGE_SIZE,
			MADV_FREE));

unlock:
	// Release, in reverse order, the bitmaps we managed to lock.
	while (last_locked > 1) {
		page_bitmaps[--last_locked] = BITMAP_C(0);
	}
	if (last_locked) {
		os_atomic_store(&page_bitmaps[0], BITMAP_C(0), relaxed);
	}
	return;
}
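The interesting part is the two-phase reclaim: a cheap racy scan first confirms every bitmap looks empty, then a chain of compare-and-swaps from zero to all-ones takes exclusive ownership of each word, and any failed CAS rolls back the words already claimed so allocation can resume. Here is a minimal sketch of that claim-or-roll-back loop; NBITMAPS and try_reclaim_page are illustrative names, not libdispatch API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define NBITMAPS 4
typedef uint64_t bitmap_t;
#define BITMAP_ALL_ONES (~(bitmap_t)0)

// Try to take exclusive ownership of a page by flipping each of its
// (expected-empty) bitmaps from 0 to all-ones. A failed CAS means an
// allocation sneaked in, so undo the words already claimed.
static bool
try_reclaim_page(_Atomic(bitmap_t) bitmaps[NBITMAPS])
{
	for (int locked = 0; locked < NBITMAPS; locked++) {
		bitmap_t expected = 0;
		if (!atomic_compare_exchange_strong_explicit(&bitmaps[locked],
				&expected, BITMAP_ALL_ONES,
				memory_order_relaxed, memory_order_relaxed)) {
			while (locked-- > 0) { // roll back what we already locked
				atomic_store_explicit(&bitmaps[locked], 0,
						memory_order_relaxed);
			}
			return false;
		}
	}
	// The caller now owns the page, may madvise(MADV_FREE) it, and then
	// releases the bitmaps the same way the rollback does.
	return true;
}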
Code example #3: _dispatch_alloc_try_create_heap (libdispatch) — mapping a magazine-aligned heap region
DISPATCH_NOINLINE
static void
_dispatch_alloc_try_create_heap(dispatch_heap_t *heap_ptr)
{
#if HAVE_MACH
	kern_return_t kr;
	mach_vm_size_t vm_size = MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE;
	mach_vm_offset_t vm_mask = ~MAGAZINE_MASK;
	mach_vm_address_t vm_addr = vm_page_size;
	// mach_vm_map honors the vm_mask alignment constraint, so the kernel
	// hands back a magazine-aligned region directly; only a transient
	// KERN_NO_SPACE is retried.
	while (slowpath(kr = mach_vm_map(mach_task_self(), &vm_addr, vm_size,
			vm_mask, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_LIBDISPATCH),
			MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
			VM_INHERIT_DEFAULT))) {
		if (kr != KERN_NO_SPACE) {
			DISPATCH_CLIENT_CRASH(kr, "Could not allocate heap");
		}
		_dispatch_temporary_resource_shortage();
		vm_addr = vm_page_size;
	}
	uintptr_t aligned_region = (uintptr_t)vm_addr;
#else // HAVE_MACH
	const size_t region_sz = (1 + MAGAZINES_PER_HEAP) * BYTES_PER_MAGAZINE;
	void *region_p;
	while (!dispatch_assume((region_p = mmap(NULL, region_sz,
			PROT_READ|PROT_WRITE, MAP_ANON | MAP_PRIVATE,
			VM_MAKE_TAG(VM_MEMORY_LIBDISPATCH), 0)) != MAP_FAILED)) {
		_dispatch_temporary_resource_shortage();
	}
	uintptr_t region = (uintptr_t)region_p;
	uintptr_t region_end = region + region_sz;
	uintptr_t aligned_region, aligned_region_end;
	uintptr_t bottom_slop_len, top_slop_len;
	// Realign if needed; find the slop at top/bottom to unmap
	if ((region & ~(MAGAZINE_MASK)) == 0) {
		bottom_slop_len = 0;
		aligned_region = region;
		aligned_region_end = region_end - BYTES_PER_MAGAZINE;
		top_slop_len = BYTES_PER_MAGAZINE;
	} else {
		aligned_region = (region & MAGAZINE_MASK) + BYTES_PER_MAGAZINE;
		aligned_region_end = aligned_region +
				(MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE);
		bottom_slop_len = aligned_region - region;
		top_slop_len = BYTES_PER_MAGAZINE - bottom_slop_len;
	}
#if DISPATCH_DEBUG
	// Double-check our math.
	dispatch_assert(aligned_region % DISPATCH_ALLOCATOR_PAGE_SIZE == 0);
	dispatch_assert(aligned_region % vm_kernel_page_size == 0);
	dispatch_assert(aligned_region_end % DISPATCH_ALLOCATOR_PAGE_SIZE == 0);
	dispatch_assert(aligned_region_end % vm_kernel_page_size == 0);
	dispatch_assert(aligned_region_end > aligned_region);
	dispatch_assert(top_slop_len % DISPATCH_ALLOCATOR_PAGE_SIZE == 0);
	dispatch_assert(bottom_slop_len % DISPATCH_ALLOCATOR_PAGE_SIZE == 0);
	dispatch_assert(aligned_region_end + top_slop_len == region_end);
	dispatch_assert(region + bottom_slop_len == aligned_region);
	dispatch_assert(region_sz == bottom_slop_len + top_slop_len +
			MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE);
	if (bottom_slop_len) {
		(void)dispatch_assume_zero(mprotect((void *)region, bottom_slop_len,
				PROT_NONE));
	}
	if (top_slop_len) {
		(void)dispatch_assume_zero(mprotect((void *)aligned_region_end,
				top_slop_len, PROT_NONE));
	}
#else
	if (bottom_slop_len) {
		(void)dispatch_assume_zero(munmap((void *)region, bottom_slop_len));
	}
	if (top_slop_len) {
		(void)dispatch_assume_zero(munmap((void *)aligned_region_end,
				top_slop_len));
	}
#endif // DISPATCH_DEBUG
#endif // HAVE_MACH

	if (!os_atomic_cmpxchg(heap_ptr, NULL, (void *)aligned_region,
			relaxed)) {
		// If we lost the race to link in the new region, unmap the whole thing.
#if DISPATCH_DEBUG
		(void)dispatch_assume_zero(mprotect((void *)aligned_region,
				MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE, PROT_NONE));
#else
		(void)dispatch_assume_zero(munmap((void *)aligned_region,
				MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE));
#endif
	}
}
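The non-Mach branch is the classic over-allocate-and-trim alignment technique: map one extra magazine's worth of address space, round the base up to the required alignment, and hand the misaligned slop at both ends back to the kernel. A minimal portable sketch follows; aligned_mmap is an illustrative name, align must be a power of two and a multiple of the page size, and the Darwin-specific VM_MAKE_TAG fd trick is replaced by the usual -1.

#include <stdint.h>
#include <sys/mman.h>

static void *
aligned_mmap(size_t size, size_t align)
{
	size_t total = size + align; // one extra alignment unit of slack
	void *raw = mmap(NULL, total, PROT_READ | PROT_WRITE,
			MAP_ANON | MAP_PRIVATE, -1, 0);
	if (raw == MAP_FAILED) return NULL;

	uintptr_t base = (uintptr_t)raw;
	uintptr_t aligned = (base + align - 1) & ~(uintptr_t)(align - 1);
	size_t bottom_slop = aligned - base;          // slop below the region
	size_t top_slop = total - size - bottom_slop; // slop above it

	if (bottom_slop) (void)munmap(raw, bottom_slop);
	if (top_slop) (void)munmap((void *)(aligned + size), top_slop);
	return (void *)aligned;
}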