Example 1
uint64_t
_dispatch_get_nanoseconds(void)
{
	struct timeval now;
	int r = gettimeofday(&now, NULL);
	dispatch_assert_zero(r);
	dispatch_assert(sizeof(NSEC_PER_SEC) == 8);
	dispatch_assert(sizeof(NSEC_PER_USEC) == 8);
	return now.tv_sec * NSEC_PER_SEC + now.tv_usec * NSEC_PER_USEC;
}
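Note that `dispatch_assert()` and `dispatch_assert_zero()` here are libdispatch-internal sanity checks, not the C standard `assert()`. As a rough sketch only (hypothetical names, and not the actual libdispatch macros), a debug-only assertion pair of this general shape could be modeled like this:

#include <stdio.h>
#include <stdlib.h>

/* Hedged sketch of a debug-only assertion pair in the spirit of
 * dispatch_assert()/dispatch_assert_zero(); the real macros differ. */
#ifndef NDEBUG
#define MY_ASSERT(e) \
	do { \
		if (!(e)) { \
			fprintf(stderr, "assertion failed: %s (%s:%d)\n", \
					#e, __FILE__, __LINE__); \
			abort(); \
		} \
	} while (0)
#else
#define MY_ASSERT(e) do { (void)(e); } while (0)
#endif
#define MY_ASSERT_ZERO(e) MY_ASSERT((e) == 0)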
Example 2
DISPATCH_ALWAYS_INLINE_NDEBUG DISPATCH_CONST
static dispatch_continuation_t
continuation_address(struct dispatch_magazine_s *magazine,
		unsigned int supermap, unsigned int map, unsigned int index)
{
#if DISPATCH_DEBUG
	dispatch_assert(supermap < SUPERMAPS_PER_MAGAZINE);
	dispatch_assert(map < BITMAPS_PER_SUPERMAP);
	dispatch_assert(index < CONTINUATIONS_PER_BITMAP);
#endif
	return (dispatch_continuation_t)&magazine->conts[supermap][map][index];
}
Example 3
void
_dispatch_vtable_init(void)
{
#if USE_OBJC
	// ObjC classes and dispatch vtables are co-located via linker order and
	// alias files, verify correct layout during initialization rdar://10640168
	DISPATCH_OBJC_CLASS_DECL(semaphore);
	dispatch_assert((char*)DISPATCH_VTABLE(semaphore) -
			(char*)DISPATCH_OBJC_CLASS(semaphore) == 0);
	dispatch_assert((char*)&DISPATCH_CONCAT(_,DISPATCH_CLASS(semaphore_vtable))
			- (char*)DISPATCH_OBJC_CLASS(semaphore) ==
			sizeof(_os_object_class_s));
#endif // USE_OBJC
}
Example 4
// Bitmap that controls the first few continuations in the same page as
// the continuations controlled by the passed bitmap. Undefined results if the
// passed bitmap controls continuations in the first page.
DISPATCH_ALWAYS_INLINE_NDEBUG DISPATCH_CONST
static bitmap_t *
first_bitmap_in_same_page(bitmap_t *b)
{
#if DISPATCH_DEBUG
	struct dispatch_magazine_s *m;
	m = magazine_for_continuation((void*)b);
	dispatch_assert(b >= &m->maps[0][0]);
	dispatch_assert(b <  &m->maps[SUPERMAPS_PER_MAGAZINE]
			[BITMAPS_PER_SUPERMAP]);
#endif
	const uintptr_t PAGE_BITMAP_MASK = (BITMAPS_PER_PAGE *
			BYTES_PER_BITMAP) - 1;
	return (bitmap_t *)((uintptr_t)b & ~PAGE_BITMAP_MASK);
}
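The return statement rounds the bitmap pointer down to the start of its page-sized group by masking off the low address bits, which relies on each group being naturally aligned to `BITMAPS_PER_PAGE * BYTES_PER_BITMAP` bytes. A self-contained illustration of the same round-down-by-mask trick, with made-up numbers:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	// Hypothetical: 4096-byte, naturally aligned groups; masking the low
	// bits of any address inside a group yields the group's start.
	const uintptr_t group_size = 4096;          // must be a power of two
	const uintptr_t mask = group_size - 1;

	uintptr_t addr_in_group = 0x40012340;       // arbitrary interior address
	uintptr_t group_start = addr_in_group & ~mask;

	assert(group_start == 0x40012000);
	assert(group_start % group_size == 0);
	return 0;
}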
Example 5
DISPATCH_ALWAYS_INLINE_NDEBUG
static void
mark_bitmap_as_full_if_still_full(volatile bitmap_t *supermap,
		unsigned int bitmap_index, volatile bitmap_t *bitmap)
{
#if DISPATCH_DEBUG
	dispatch_assert(bitmap_index < BITMAPS_PER_SUPERMAP);
#endif
	const bitmap_t mask = BITMAP_C(1) << bitmap_index;
	bitmap_t s, s_new, s_masked;

	if (!bitmap_is_full(*bitmap)) {
		return;
	}
	s_new = *supermap;
	for (;;) {
		// No barriers because supermaps are only advisory, they
		// don't protect access to other memory.
		s = s_new;
		s_masked = s | mask;
		if (dispatch_atomic_cmpxchgvw(supermap, s, s_masked, &s_new, relaxed) ||
				!bitmap_is_full(*bitmap)) {
			return;
		}
	}
}
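The loop is the usual compare-and-swap retry idiom: attempt the update and, on failure, re-check whether the work is still needed before retrying with the freshly reloaded value. A minimal sketch of the same idiom using C11 atomics (not libdispatch's own atomics wrappers) might look like this:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

// Hedged sketch: OR `mask` into *word, but stop early if `still_needed()`
// no longer holds; mirrors the advisory supermap update above.
static void set_mask_if_condition_holds(_Atomic uint64_t *word, uint64_t mask,
		bool (*still_needed)(void))
{
	uint64_t expected = atomic_load_explicit(word, memory_order_relaxed);
	for (;;) {
		if (atomic_compare_exchange_weak_explicit(word, &expected,
				expected | mask, memory_order_relaxed,
				memory_order_relaxed) || !still_needed()) {
			return;
		}
		// On failure `expected` was reloaded with the current value; retry.
	}
}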
Example 6
static void
_dispatch_malloc_init(void)
{
	_dispatch_ccache_zone = malloc_create_zone(0, 0);
	dispatch_assert(_dispatch_ccache_zone);
	malloc_set_zone_name(_dispatch_ccache_zone, "DispatchContinuations");
}
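`malloc_create_zone()` gives libdispatch a private, named zone so continuation allocations can be attributed separately in heap tools. A small stand-alone usage sketch of the Darwin zone API (zone name is illustrative):

#include <malloc/malloc.h>   // Darwin-only malloc zone API
#include <stdio.h>

int main(void)
{
	// Create and name a private zone, then allocate and free from it.
	malloc_zone_t *zone = malloc_create_zone(0, 0);
	malloc_set_zone_name(zone, "ExampleZone");

	void *p = malloc_zone_malloc(zone, 128);
	printf("usable size: %zu bytes\n", malloc_size(p));
	malloc_zone_free(zone, p);

	malloc_destroy_zone(zone);
	return 0;
}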
Example 7
DISPATCH_ALWAYS_INLINE
static bitmap_t *
last_found_page(void)
{
	dispatch_assert(_dispatch_main_heap);
	unsigned int cpu = _dispatch_cpu_number();
	return _dispatch_main_heap[cpu].header.last_found_page;
}
Example 8
void
dispatch_source_set_cancel_handler_f(dispatch_source_t ds,
	dispatch_function_t handler)
{
	dispatch_assert(!ds->ds_is_legacy);
	dispatch_barrier_async_f((dispatch_queue_t)ds,
							 handler, _dispatch_source_set_cancel_handler_f);
}
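This is the function-pointer variant of the cancel-handler setter; it trampolines the swap onto the source's queue via `dispatch_barrier_async_f` so it is serialized against the source's other work. For context, a minimal caller-side example using the public libdispatch source API (file, queue label, and handler names are illustrative):

#include <dispatch/dispatch.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static void on_readable(void *ctxt)
{
	(void)ctxt; // a real program would drain the descriptor here
}

static void on_cancel(void *ctxt)
{
	// Runs on the source's queue after dispatch_source_cancel(); the
	// conventional place to close the watched descriptor.
	close((int)(intptr_t)ctxt);
	puts("descriptor closed in cancel handler");
}

int main(void)
{
	int fd = open("/dev/null", O_RDONLY);
	dispatch_queue_t q = dispatch_queue_create("example.read", NULL);
	dispatch_source_t src = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ,
			(uintptr_t)fd, 0, q);
	dispatch_set_context(src, (void *)(intptr_t)fd);
	dispatch_source_set_event_handler_f(src, on_readable);
	dispatch_source_set_cancel_handler_f(src, on_cancel);
	dispatch_resume(src);

	dispatch_source_cancel(src);  // on_cancel will run on q
	dispatch_release(src);
	dispatch_release(q);
	sleep(1);                     // crude wait so the handler can run
	return 0;
}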
Example 9
// max_index is the 0-based position of the most significant bit that is
// allowed to be set.
DISPATCH_ALWAYS_INLINE_NDEBUG
static unsigned int
bitmap_set_first_unset_bit_upto_index(volatile bitmap_t *bitmap,
		unsigned int max_index)
{
	// No barriers needed in acquire path: the just-allocated
	// continuation is "uninitialized", so the caller shouldn't
	// load from it before storing, so we don't need to guard
	// against reordering those loads.
#if defined(__x86_64__) // TODO rdar://problem/11477843
	dispatch_assert(sizeof(*bitmap) == sizeof(uint64_t));
	return dispatch_atomic_set_first_bit((volatile uint64_t *)bitmap,max_index);
#else
	dispatch_assert(sizeof(*bitmap) == sizeof(uint32_t));
	return dispatch_atomic_set_first_bit((volatile uint32_t *)bitmap,max_index);
#endif
}
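`dispatch_atomic_set_first_bit()` atomically claims the lowest clear bit whose position is no greater than `max_index` and returns that position, or a sentinel when every permitted bit is already set. A hedged, portable sketch of that operation with C11 atomics and a compiler builtin; the sentinel name is made up:

#include <stdatomic.h>
#include <stdint.h>

#define NO_BIT_AVAILABLE 0xFFFFFFFFu   // hypothetical sentinel

static unsigned int set_first_unset_bit_upto(_Atomic uint64_t *word,
		unsigned int max_index)
{
	uint64_t value = atomic_load_explicit(word, memory_order_relaxed);
	for (;;) {
		// Bits we are allowed to claim: positions 0..max_index.
		uint64_t allowed = (max_index >= 63) ? ~UINT64_C(0)
				: ((UINT64_C(1) << (max_index + 1)) - 1);
		uint64_t candidates = ~value & allowed;
		if (candidates == 0) {
			return NO_BIT_AVAILABLE;
		}
		unsigned int index = (unsigned int)__builtin_ctzll(candidates);
		if (atomic_compare_exchange_weak_explicit(word, &value,
				value | (UINT64_C(1) << index),
				memory_order_relaxed, memory_order_relaxed)) {
			return index;
		}
		// CAS failure reloaded `value`; recompute the candidates and retry.
	}
}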
Example 10
void
dispatch_source_set_event_handler(dispatch_source_t ds, dispatch_block_t handler)
{
	dispatch_assert(!ds->ds_is_legacy);
	handler = _dispatch_Block_copy(handler);
	dispatch_barrier_async_f((dispatch_queue_t)ds,
		handler, _dispatch_source_set_event_handler2);
}
Example 11
DISPATCH_ALWAYS_INLINE
static void
set_last_found_page(bitmap_t *val)
{
	dispatch_assert(_dispatch_main_heap);
	unsigned int cpu = _dispatch_cpu_number();
	_dispatch_main_heap[cpu].header.last_found_page = val;
}
Example 12
static void
_dispatch_alloc_init(void)
{
	// Double-check our math. These are all compile time checks and don't
	// generate code.

	dispatch_assert(sizeof(bitmap_t) == BYTES_PER_BITMAP);
	dispatch_assert(sizeof(bitmap_t) == BYTES_PER_SUPERMAP);
	dispatch_assert(sizeof(struct dispatch_magazine_header_s) ==
			SIZEOF_HEADER);

	dispatch_assert(sizeof(struct dispatch_continuation_s) <=
			DISPATCH_CONTINUATION_SIZE);

	// Magazines should be the right size, so they pack neatly into an array of
	// heaps.
	dispatch_assert(sizeof(struct dispatch_magazine_s) == BYTES_PER_MAGAZINE);

	// The header and maps sizes should match what we computed.
	dispatch_assert(SIZEOF_HEADER ==
			sizeof(((struct dispatch_magazine_s *)0x0)->header));
	dispatch_assert(SIZEOF_MAPS ==
			sizeof(((struct dispatch_magazine_s *)0x0)->maps));

	// The main array of continuations should start at the second page,
	// self-aligned.
	dispatch_assert(offsetof(struct dispatch_magazine_s, conts) %
			(CONTINUATIONS_PER_BITMAP * DISPATCH_CONTINUATION_SIZE) == 0);
	dispatch_assert(offsetof(struct dispatch_magazine_s, conts) ==
			DISPATCH_ALLOCATOR_PAGE_SIZE);

#if PACK_FIRST_PAGE_WITH_CONTINUATIONS
	// The continuations in the first page should actually fit within the first
	// page.
	dispatch_assert(offsetof(struct dispatch_magazine_s, fp_conts) <
			DISPATCH_ALLOCATOR_PAGE_SIZE);
	dispatch_assert(offsetof(struct dispatch_magazine_s, fp_conts) %
			DISPATCH_CONTINUATION_SIZE == 0);
	dispatch_assert(offsetof(struct dispatch_magazine_s, fp_conts) +
			sizeof(((struct dispatch_magazine_s *)0x0)->fp_conts) ==
					DISPATCH_ALLOCATOR_PAGE_SIZE);
#endif // PACK_FIRST_PAGE_WITH_CONTINUATIONS
	// Make sure our alignment will be correct: that is, that we are correctly
	// aligning to both.
	dispatch_assert(ROUND_UP_TO_BITMAP_ALIGNMENT(ROUND_UP_TO_BITMAP_ALIGNMENT_AND_CONTINUATION_SIZE(1)) ==
			ROUND_UP_TO_BITMAP_ALIGNMENT_AND_CONTINUATION_SIZE(1));
	dispatch_assert(ROUND_UP_TO_CONTINUATION_SIZE(ROUND_UP_TO_BITMAP_ALIGNMENT_AND_CONTINUATION_SIZE(1)) ==
			ROUND_UP_TO_BITMAP_ALIGNMENT_AND_CONTINUATION_SIZE(1));
}
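All of the expressions above are compile-time constants, so (as the comment notes) these asserts generate no code. On C11 toolchains the same kind of layout check could also be expressed with `_Static_assert`; a hedged sketch with invented stand-in constants:

#include <assert.h>    // static_assert (C11)
#include <stddef.h>    // offsetof

// Hypothetical stand-ins for the allocator's real constants and layout.
#define MY_PAGE_SIZE          4096u
#define MY_CONTINUATION_SIZE  64u

struct my_magazine_s {
	char header[MY_PAGE_SIZE];
	char conts[15][MY_PAGE_SIZE];
};

static_assert(offsetof(struct my_magazine_s, conts) == MY_PAGE_SIZE,
		"continuations must start at the second page");
static_assert(offsetof(struct my_magazine_s, conts) % MY_CONTINUATION_SIZE == 0,
		"continuation array must be continuation-size aligned");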
Example 13
// 6618342 Contact the team that owns the Instrument DTrace probe before renaming this symbol
static void
_dispatch_source_set_cancel_handler2(void *context)
{
	dispatch_source_t ds = (dispatch_source_t)_dispatch_queue_get_current();
	dispatch_assert(ds->do_vtable == &_dispatch_source_kevent_vtable);
	
	if (ds->ds_cancel_is_block && ds->ds_cancel_handler) {
		Block_release(ds->ds_cancel_handler);
	}
	ds->ds_cancel_handler = context;
	ds->ds_cancel_is_block = true;
}
Example 14
// max_index is the 0-based position of the most significant bit that is
// allowed to be set.
DISPATCH_ALWAYS_INLINE_NDEBUG
static unsigned int
bitmap_set_first_unset_bit_upto_index(volatile bitmap_t *bitmap,
		unsigned int max_index)
{
	// No barriers needed in acquire path: the just-allocated
	// continuation is "uninitialized", so the caller shouldn't
	// load from it before storing, so we don't need to guard
	// against reordering those loads.
	dispatch_assert(sizeof(*bitmap) == sizeof(unsigned long));
	return os_atomic_set_first_bit(bitmap, max_index);
}
Example 15
uint64_t
_dispatch_get_nanoseconds(void)
{
#if !TARGET_OS_WIN32
	struct timeval now;
	int r = gettimeofday(&now, NULL);
	dispatch_assert_zero(r);
	dispatch_assert(sizeof(NSEC_PER_SEC) == 8);
	dispatch_assert(sizeof(NSEC_PER_USEC) == 8);
	return (uint64_t)now.tv_sec * NSEC_PER_SEC +
			(uint64_t)now.tv_usec * NSEC_PER_USEC;
#else /* TARGET_OS_WIN32 */
	// FILETIME is 100-nanosecond intervals since January 1, 1601 (UTC).
	FILETIME ft;
	ULARGE_INTEGER li;
	GetSystemTimeAsFileTime(&ft);
	li.LowPart = ft.dwLowDateTime;
	li.HighPart = ft.dwHighDateTime;
	return li.QuadPart * 100ull;
#endif /* TARGET_OS_WIN32 */
}
Example 16
static void
_dispatch_source_set_cancel_handler_f(void *context)
{
	dispatch_source_t ds = (dispatch_source_t)_dispatch_queue_get_current();
	dispatch_assert(ds->do_vtable == &_dispatch_source_kevent_vtable);
	
#ifdef __BLOCKS__
	if (ds->ds_cancel_is_block && ds->ds_cancel_handler) {
		Block_release(ds->ds_cancel_handler);
	}
#endif
	ds->ds_cancel_handler = context;
	ds->ds_cancel_is_block = false;
}
Example 17
static inline voucher_t
_voucher_alloc(mach_voucher_attr_recipe_size_t extra)
{
	voucher_t voucher;
	size_t voucher_size = sizeof(voucher_s) + extra;
	voucher = (voucher_t)_os_object_alloc_realized(VOUCHER_CLASS, voucher_size);
#if VOUCHER_ENABLE_RECIPE_OBJECTS
	voucher->v_recipe_extra_size = extra;
	voucher->v_recipe_extra_offset = voucher_size - extra;
#else
	dispatch_assert(!extra);
#endif
	_dispatch_voucher_debug("alloc", voucher);
	return voucher;
}
Example 18
// 6618342 Contact the team that owns the Instrument DTrace probe before renaming this symbol
static void
_dispatch_source_set_event_handler2(void *context)
{
	struct Block_layout *bl = context;

	dispatch_source_t ds = (dispatch_source_t)_dispatch_queue_get_current();
	dispatch_assert(ds->do_vtable == &_dispatch_source_kevent_vtable);
	
	if (ds->ds_handler_is_block && ds->ds_handler_ctxt) {
		Block_release(ds->ds_handler_ctxt);
	}
	ds->ds_handler_func = bl ? (void *)bl->invoke : NULL;
	ds->ds_handler_ctxt = bl;
	ds->ds_handler_is_block = true;
}
Example 19
dispatch_source_t
dispatch_source_create(dispatch_source_type_t type,
	uintptr_t handle,
	uintptr_t mask,
	dispatch_queue_t q)
{
	dispatch_source_t ds = NULL;
	static char source_label[sizeof(ds->dq_label)] = "source";

	// input validation
	if (type == NULL || (mask & ~type->mask)) {
		goto out_bad;
	}

	ds = calloc(1ul, sizeof(struct dispatch_source_s));
	if (slowpath(!ds)) {
		goto out_bad;
	}

	// Initialize as a queue first, then override some settings below.
	_dispatch_queue_init((dispatch_queue_t)ds);
	memcpy(ds->dq_label, source_label, sizeof(source_label));

	// Dispatch Object
	ds->do_vtable = &_dispatch_source_kevent_vtable;
	ds->do_ref_cnt++; // the reference the manager queue holds
	ds->do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_INTERVAL;
	// do_targetq will be retained below, past point of no-return
	ds->do_targetq = q;

	if (slowpath(!type->init(ds, type, handle, mask, q))) {
		goto out_bad;
	}

	dispatch_assert(!(ds->ds_is_level && ds->ds_is_adder));
#if DISPATCH_DEBUG
	dispatch_debug(as_do(ds), __FUNCTION__);
#endif

	_dispatch_retain(as_do(ds->do_targetq));
	return ds;
	
out_bad:
	free(ds);
	return NULL;
}
Example 20
DISPATCH_ALWAYS_INLINE_NDEBUG
static void
get_cont_and_indices_for_bitmap_and_index(bitmap_t *bitmap,
		unsigned int index, dispatch_continuation_t *continuation_out,
		bitmap_t **supermap_out, unsigned int *bitmap_index_out)
{
	// m_for_c wants a continuation not a bitmap, but it works because it
	// just masks off the bottom bits of the address.
	struct dispatch_magazine_s *m = magazine_for_continuation((void *)bitmap);
	unsigned int mindex = (unsigned int)(bitmap - m->maps[0]);
	unsigned int bindex = mindex % BITMAPS_PER_SUPERMAP;
	unsigned int sindex = mindex / BITMAPS_PER_SUPERMAP;
	dispatch_assert(&m->maps[sindex][bindex] == bitmap);
	if (fastpath(continuation_out)) {
		*continuation_out = continuation_address(m, sindex, bindex, index);
	}
	if (fastpath(supermap_out)) *supermap_out = supermap_address(m, sindex);
	if (fastpath(bitmap_index_out)) *bitmap_index_out = bindex;
}
Example 21
// Return true if this bit was the last in the bitmap, and it is now all zeroes
DISPATCH_ALWAYS_INLINE_NDEBUG
static bool
bitmap_clear_bit(volatile bitmap_t *bitmap, unsigned int index,
		bool exclusively)
{
#if DISPATCH_DEBUG
	dispatch_assert(index < CONTINUATIONS_PER_BITMAP);
#endif
	const bitmap_t mask = BITMAP_C(1) << index;
	bitmap_t b;

	if (exclusively == CLEAR_EXCLUSIVELY) {
		if (slowpath((*bitmap & mask) == 0)) {
			DISPATCH_CRASH("Corruption: failed to clear bit exclusively");
		}
	}

	// and-and-fetch
	b = dispatch_atomic_and(bitmap, ~mask, release);
	return b == 0;
}
Example 22
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_apply_f2(dispatch_queue_t dq, dispatch_apply_t da,
		dispatch_function_t func)
{
	uint32_t i = 0;
	dispatch_continuation_t head = NULL, tail = NULL;

	// The current thread does not need a continuation
	uint32_t continuation_cnt = da->da_thr_cnt - 1;

	dispatch_assert(continuation_cnt);

	for (i = 0; i < continuation_cnt; i++) {
		dispatch_continuation_t next = _dispatch_continuation_alloc();
		next->do_vtable = (void *)DISPATCH_OBJ_ASYNC_BIT;
		next->dc_func = func;
		next->dc_ctxt = da;

		next->do_next = head;
		head = next;

		if (!tail) {
			tail = next;
		}
	}

	_dispatch_thread_semaphore_t sema = _dispatch_get_thread_semaphore();
	da->da_sema = sema;

	_dispatch_queue_push_list(dq, head, tail, continuation_cnt);
	// Call the first element directly
	_dispatch_apply2(da);
	_dispatch_workitem_inc();

	_dispatch_thread_semaphore_wait(sema);
	_dispatch_put_thread_semaphore(sema);

}
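This is the internal worker behind `dispatch_apply()`: it queues `da_thr_cnt - 1` continuations, runs the first chunk directly on the calling thread, and then waits on a thread semaphore until the rest drain. A minimal caller-side example using the public API (illustrative names):

#include <dispatch/dispatch.h>
#include <stdio.h>

static void square_one(void *ctxt, size_t i)
{
	long *results = ctxt;
	results[i] = (long)(i * i);   // each iteration fills its own slot
}

int main(void)
{
	long results[8] = {0};
	dispatch_queue_t q =
			dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
	// dispatch_apply_f blocks until all 8 iterations have completed.
	dispatch_apply_f(8, q, results, square_one);
	for (size_t i = 0; i < 8; i++) {
		printf("%zu^2 = %ld\n", i, results[i]);
	}
	return 0;
}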
Example 23
DISPATCH_NOINLINE
static dispatch_continuation_t
_dispatch_alloc_continuation_from_heap(dispatch_heap_t heap)
{
	dispatch_continuation_t cont;

	unsigned int cpu_number = _dispatch_cpu_number();
#ifdef DISPATCH_DEBUG
	dispatch_assert(cpu_number < NUM_CPU);
#endif

#if PACK_FIRST_PAGE_WITH_CONTINUATIONS
	// First try the continuations in the first page for this CPU
	cont = alloc_continuation_from_first_page(&(heap[cpu_number]));
	if (fastpath(cont)) {
		return cont;
	}
#endif
	// Next, try the rest of the magazine for this CPU
	cont = alloc_continuation_from_magazine(&(heap[cpu_number]));
	return cont;
}
Example 24
int pim_assert_recv(struct interface *ifp,
		    struct pim_neighbor *neigh,
		    struct in_addr src_addr,
		    uint8_t *buf, int buf_size)
{
  struct prefix            msg_group_addr;
  struct prefix            msg_source_addr;
  struct pim_assert_metric msg_metric;
  int offset;
  uint8_t *curr;
  int curr_size;

  on_trace(__PRETTY_FUNCTION__, ifp, src_addr);

  curr      = buf;
  curr_size = buf_size;

  /*
    Parse assert group addr
   */
  offset = pim_parse_addr_group(ifp->name, src_addr,
				&msg_group_addr,
				curr, curr_size);
  if (offset < 1) {
    char src_str[100];
    pim_inet4_dump("<src?>", src_addr, src_str, sizeof(src_str));
    zlog_warn("%s: pim_parse_addr_group() failure: from %s on %s",
	      __PRETTY_FUNCTION__,
	      src_str, ifp->name);
    return -1;
  }
  curr      += offset;
  curr_size -= offset;

  /*
    Parse assert source addr
  */
  offset = pim_parse_addr_ucast(ifp->name, src_addr,
				&msg_source_addr,
				curr, curr_size);
  if (offset < 1) {
    char src_str[100];
    pim_inet4_dump("<src?>", src_addr, src_str, sizeof(src_str));
    zlog_warn("%s: pim_parse_addr_ucast() failure: from %s on %s",
	      __PRETTY_FUNCTION__,
	      src_str, ifp->name);
    return -2;
  }
  curr      += offset;
  curr_size -= offset;

  if (curr_size != 8) {
    char src_str[100];
    pim_inet4_dump("<src?>", src_addr, src_str, sizeof(src_str));
    zlog_warn("%s: preference/metric size is not 8: size=%d from %s on interface %s",
	      __PRETTY_FUNCTION__,
	      curr_size,
	      src_str, ifp->name);
    return -3;
  }

  /*
    Parse assert metric preference
  */

  msg_metric.metric_preference = pim_read_uint32_host(curr);

  msg_metric.rpt_bit_flag = msg_metric.metric_preference & 0x80000000; /* save highest bit */
  msg_metric.metric_preference &= ~0x80000000; /* clear highest bit */

  curr += 4;

  /*
    Parse assert route metric
  */

  msg_metric.route_metric = pim_read_uint32_host(curr);

  if (PIM_DEBUG_PIM_TRACE) {
    char neigh_str[100];
    char source_str[100];
    char group_str[100];
    pim_inet4_dump("<neigh?>", src_addr, neigh_str, sizeof(neigh_str));
    pim_inet4_dump("<src?>", msg_source_addr.u.prefix4, source_str, sizeof(source_str));
    pim_inet4_dump("<grp?>", msg_group_addr.u.prefix4, group_str, sizeof(group_str));
    zlog_debug("%s: from %s on %s: (S,G)=(%s,%s) pref=%u metric=%u rpt_bit=%u",
	       __PRETTY_FUNCTION__, neigh_str, ifp->name,
	       source_str, group_str,
	       msg_metric.metric_preference,
	       msg_metric.route_metric,
	       PIM_FORCE_BOOLEAN(msg_metric.rpt_bit_flag));
  }

  msg_metric.ip_address = src_addr;

  return dispatch_assert(ifp,
			 msg_source_addr.u.prefix4,
			 msg_group_addr.u.prefix4,
			 msg_metric);
}
Example 25
DISPATCH_NOINLINE
static void
_dispatch_alloc_try_create_heap(dispatch_heap_t *heap_ptr)
{
#if HAVE_MACH
	kern_return_t kr;
	mach_vm_size_t vm_size = MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE;
	mach_vm_offset_t vm_mask = ~MAGAZINE_MASK;
	mach_vm_address_t vm_addr = vm_page_size;
	while (slowpath(kr = mach_vm_map(mach_task_self(), &vm_addr, vm_size,
			vm_mask, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_LIBDISPATCH),
			MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
			VM_INHERIT_DEFAULT))) {
		if (kr != KERN_NO_SPACE) {
			(void)dispatch_assume_zero(kr);
			DISPATCH_CLIENT_CRASH("Could not allocate heap");
		}
		_dispatch_temporary_resource_shortage();
		vm_addr = vm_page_size;
	}
	uintptr_t aligned_region = (uintptr_t)vm_addr;
#else // HAVE_MACH
	const size_t region_sz = (1 + MAGAZINES_PER_HEAP) * BYTES_PER_MAGAZINE;
	void *region_p;
	while (!dispatch_assume((region_p = mmap(NULL, region_sz,
			PROT_READ|PROT_WRITE, MAP_ANON | MAP_PRIVATE,
			VM_MAKE_TAG(VM_MEMORY_LIBDISPATCH), 0)) != MAP_FAILED)) {
		_dispatch_temporary_resource_shortage();
	}
	uintptr_t region = (uintptr_t)region_p;
	uintptr_t region_end = region + region_sz;
	uintptr_t aligned_region, aligned_region_end;
	uintptr_t bottom_slop_len, top_slop_len;
	// Realign if needed; find the slop at top/bottom to unmap
	if ((region & ~(MAGAZINE_MASK)) == 0) {
		bottom_slop_len = 0;
		aligned_region = region;
		aligned_region_end = region_end - BYTES_PER_MAGAZINE;
		top_slop_len = BYTES_PER_MAGAZINE;
	} else {
		aligned_region = (region & MAGAZINE_MASK) + BYTES_PER_MAGAZINE;
		aligned_region_end = aligned_region +
				(MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE);
		bottom_slop_len = aligned_region - region;
		top_slop_len = BYTES_PER_MAGAZINE - bottom_slop_len;
	}
#if DISPATCH_DEBUG
	// Double-check our math.
	dispatch_assert(aligned_region % PAGE_SIZE == 0);
	dispatch_assert(aligned_region_end % PAGE_SIZE == 0);
	dispatch_assert(aligned_region_end > aligned_region);
	dispatch_assert(top_slop_len % PAGE_SIZE == 0);
	dispatch_assert(bottom_slop_len % PAGE_SIZE == 0);
	dispatch_assert(aligned_region_end + top_slop_len == region_end);
	dispatch_assert(region + bottom_slop_len == aligned_region);
	dispatch_assert(region_sz == bottom_slop_len + top_slop_len +
			MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE);
	if (bottom_slop_len) {
		(void)dispatch_assume_zero(mprotect((void *)region, bottom_slop_len,
				PROT_NONE));
	}
	if (top_slop_len) {
		(void)dispatch_assume_zero(mprotect((void *)aligned_region_end,
				top_slop_len, PROT_NONE));
	}
#else
	if (bottom_slop_len) {
		(void)dispatch_assume_zero(munmap((void *)region, bottom_slop_len));
	}
	if (top_slop_len) {
		(void)dispatch_assume_zero(munmap((void *)aligned_region_end,
				top_slop_len));
	}
#endif // DISPATCH_DEBUG
#endif // HAVE_MACH

	if (!dispatch_atomic_cmpxchg(heap_ptr, NULL, (void *)aligned_region,
			relaxed)) {
		// If we lost the race to link in the new region, unmap the whole thing.
#if DISPATCH_DEBUG
		(void)dispatch_assume_zero(mprotect((void *)aligned_region,
				MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE, PROT_NONE));
#else
		(void)dispatch_assume_zero(munmap((void *)aligned_region,
				MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE));
#endif
	}
}
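The non-Mach path over-allocates by one extra magazine and then trims the unaligned slop at both ends, the standard way to carve an aligned region out of `mmap()`. A stand-alone sketch of just that arithmetic, with a hypothetical 2 MiB magazine size and a pretend mapping address:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uintptr_t magazine = 2u << 20;          // granule, a power of two
	const uintptr_t want = 8 * magazine;          // bytes actually needed
	const uintptr_t region_sz = want + magazine;  // over-allocate by one granule

	uintptr_t region = 0x40567000;                // pretend mmap() returned this
	uintptr_t region_end = region + region_sz;

	uintptr_t aligned, aligned_end, bottom_slop, top_slop;
	if ((region & (magazine - 1)) == 0) {
		aligned = region;                         // already aligned: all slop on top
		aligned_end = region_end - magazine;
		bottom_slop = 0;
		top_slop = magazine;
	} else {
		aligned = (region & ~(magazine - 1)) + magazine;  // round up
		aligned_end = aligned + want;
		bottom_slop = aligned - region;
		top_slop = magazine - bottom_slop;
	}

	assert(aligned % magazine == 0);
	assert(region + bottom_slop == aligned);
	assert(aligned_end + top_slop == region_end);
	assert(region_sz == bottom_slop + top_slop + want);
	return 0;
}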