Example #1
void
flowadv_init(void)
{
	STAILQ_INIT(&fadv_list);

	/* Setup lock group and attribute for fadv_lock */
	fadv_lock_grp_attr = lck_grp_attr_alloc_init();
	fadv_lock_grp = lck_grp_alloc_init("fadv_lock", fadv_lock_grp_attr);
	lck_mtx_init(&fadv_lock, fadv_lock_grp, NULL);

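	/*
	 * zinit() takes the element size, the maximum memory the zone may
	 * consume, the allocation chunk size (0 selects the default), and
	 * the zone's name.
	 */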
	fadv_zone_size = P2ROUNDUP(sizeof (struct flowadv_fcentry),
	    sizeof (u_int64_t));
	fadv_zone = zinit(fadv_zone_size,
	    FADV_ZONE_MAX * fadv_zone_size, 0, FADV_ZONE_NAME);
	if (fadv_zone == NULL) {
		panic("%s: failed allocating %s", __func__, FADV_ZONE_NAME);
		/* NOTREACHED */
	}
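	/* Z_EXPAND lets the zone grow on demand; Z_CALLERACCT=FALSE keeps
	 * these allocations billed to the zone rather than the caller */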
	zone_change(fadv_zone, Z_EXPAND, TRUE);
	zone_change(fadv_zone, Z_CALLERACCT, FALSE);

	if (kernel_thread_start(flowadv_thread_func, NULL, &fadv_thread) !=
	    KERN_SUCCESS) {
		panic("%s: couldn't create flow event advisory thread",
		    __func__);
		/* NOTREACHED */
	}
	thread_deallocate(fadv_thread);
}
Example #2
/*
 *	thread_call_initialize:
 *
 *	Initialize this module, called
 *	early during system initialization.
 */
void
thread_call_initialize(void)
{
	thread_call_t			call;
	thread_call_group_t		group = &thread_call_group0;
	kern_return_t			result;
	thread_t				thread;
	int						i;
	spl_t					s;

	i = sizeof (thread_call_data_t);
	thread_call_zone = zinit(i, 4096 * i, 16 * i, "thread_call");
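	/* exempt these allocations from caller accounting and keep the
	 * zone's pages out of the encrypted hibernation image */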
	zone_change(thread_call_zone, Z_CALLERACCT, FALSE);
	zone_change(thread_call_zone, Z_NOENCRYPT, TRUE);

	lck_attr_setdefault(&thread_call_lck_attr);
	lck_grp_attr_setdefault(&thread_call_lck_grp_attr);
	lck_grp_init(&thread_call_queues_lck_grp, "thread_call_queues", &thread_call_lck_grp_attr);
	lck_grp_init(&thread_call_lck_grp, "thread_call", &thread_call_lck_grp_attr);

#if defined(__i386__) || defined(__x86_64__)
	lck_mtx_init(&thread_call_lock_data, &thread_call_lck_grp, &thread_call_lck_attr);
#else
	lck_spin_init(&thread_call_lock_data, &thread_call_lck_grp, &thread_call_lck_attr);
#endif
	queue_init(&group->pending_queue);
	queue_init(&group->delayed_queue);

	s = splsched();
	thread_call_lock_spin();

	timer_call_setup(&group->delayed_timer, thread_call_delayed_timer, group);

	wait_queue_init(&group->idle_wqueue, SYNC_POLICY_FIFO);
	wait_queue_init(&group->daemon_wqueue, SYNC_POLICY_FIFO);

	queue_init(&thread_call_internal_queue);
	for (call = internal_call_storage;
	    call < &internal_call_storage[internal_call_count];
	    call++) {
		enqueue_tail(&thread_call_internal_queue, qe(call));
	}

	thread_call_daemon_awake = TRUE;

	thread_call_unlock();
	splx(s);

	result = kernel_thread_start_priority((thread_continue_t)thread_call_daemon, group, BASEPRI_PREEMPT + 1, &thread);
	if (result != KERN_SUCCESS)
		panic("thread_call_initialize");

	thread_deallocate(thread);
}
Example #3
void
mac_labelzone_init(void)
{

	zone_label = zinit(sizeof(struct label),
	    8192 * sizeof(struct label),
	    sizeof(struct label), "MAC Labels");
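	/* the zone may expand past its nominal maximum (Z_EXPAND) and is
	 * never marked exhaustible, so label allocation does not fail hard */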
	zone_change(zone_label, Z_EXPAND, TRUE);
	zone_change(zone_label, Z_EXHAUST, FALSE);
}
Example #4
/*
 *	thread_call_initialize:
 *
 *	Initialize this module, called
 *	early during system initialization.
 */
void
thread_call_initialize(void)
{
	thread_call_t			call;
	kern_return_t			result;
	thread_t			thread;
	int				i;

	i = sizeof (thread_call_data_t);
	thread_call_zone = zinit(i, 4096 * i, 16 * i, "thread_call");
	zone_change(thread_call_zone, Z_CALLERACCT, FALSE);
	zone_change(thread_call_zone, Z_NOENCRYPT, TRUE);

	lck_attr_setdefault(&thread_call_lck_attr);
	lck_grp_attr_setdefault(&thread_call_lck_grp_attr);
	lck_grp_init(&thread_call_queues_lck_grp, "thread_call_queues", &thread_call_lck_grp_attr);
	lck_grp_init(&thread_call_lck_grp, "thread_call", &thread_call_lck_grp_attr);

#if defined(__i386__) || defined(__x86_64__)
	lck_mtx_init(&thread_call_lock_data, &thread_call_lck_grp, &thread_call_lck_attr);
#else
	lck_spin_init(&thread_call_lock_data, &thread_call_lck_grp, &thread_call_lck_attr);
#endif

	nanotime_to_absolutetime(0, THREAD_CALL_DEALLOC_INTERVAL_NS, &thread_call_dealloc_interval_abs);
	wait_queue_init(&daemon_wqueue, SYNC_POLICY_FIFO);

	thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_LOW], THREAD_CALL_PRIORITY_LOW, 0, TRUE);
	thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_USER], THREAD_CALL_PRIORITY_USER, 0, TRUE);
	thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_KERNEL], THREAD_CALL_PRIORITY_KERNEL, 1, TRUE);
	thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_HIGH], THREAD_CALL_PRIORITY_HIGH, THREAD_CALL_THREAD_MIN, FALSE);

	disable_ints_and_lock();

	queue_init(&thread_call_internal_queue);
	for (call = internal_call_storage;
	    call < &internal_call_storage[INTERNAL_CALL_COUNT];
	    call++) {
		enqueue_tail(&thread_call_internal_queue, qe(call));
	}

	thread_call_daemon_awake = TRUE;

	enable_ints_and_unlock();

	result = kernel_thread_start_priority((thread_continue_t)thread_call_daemon, NULL, BASEPRI_PREEMPT + 1, &thread);
	if (result != KERN_SUCCESS)
		panic("thread_call_initialize");

	thread_deallocate(thread);
}
Example #5
void
rtalloc_init(
	void)
{
	kern_return_t retval;
	vm_offset_t min, addr;
	vm_size_t size;
	register int i;

	retval = kmem_suballoc(kernel_map, &min, rtalloc_map_size,
			       FALSE, TRUE, &rtalloc_map);
	if (retval != KERN_SUCCESS)
		panic("rtalloc_init: kmem_suballoc failed");

	/*
	 *	Ensure that zones up to size 8192 bytes exist.
	 *	This is desirable because messages are allocated
	 *	with rtalloc, and messages up through size 8192 are common.
	 */

	rtalloc_max = 16 * 1024;
	rtalloc_max_prerounded = rtalloc_max / 2 + 1;

	/*
	 *	Allocate a zone for each size we are going to handle.
	 *	We specify non-paged memory.  Make zone exhaustible.
	 */
	for (i = 0, size = 1; size < rtalloc_max; i++, size <<= 1) {
		if (size < RTALLOC_MINSIZE) {
			rt_zone[i] = 0;
			continue;
		}
		if (size == RTALLOC_MINSIZE) {
			first_rt_zone = i;
		}
		rt_zone[i] = zinit(size, rt_zone_max[i] * size, size,
				  rt_zone_name[i]);
		zone_change(rt_zone[i], Z_EXHAUST, TRUE);
		zone_change(rt_zone[i], Z_COLLECT, FALSE);
		zone_change(rt_zone[i], Z_EXPAND, FALSE);
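		/* exhaustible, non-collectable, non-expandable: allocations
		 * fail outright once the preallocated pool is used up */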

		/*
		 * Get space from the zone_map.  Since these zones are
		 * not collectable, no pages containing elements from these
		 * zones will ever be reclaimed by the garbage collection
		 * scheme below.
		 */

		zprealloc(rt_zone[i], rt_zone_max[i] * size);
	}
}
Example #6
/*
 * Initialize the kernel memory allocator
 */
void
kmeminit(void)
{
	struct kmzones	*kmz;

	if ((sizeof(kmzones)/sizeof(kmzones[0])) != (sizeof(memname)/sizeof(memname[0]))) {
		panic("kmeminit: kmzones has %lu elements but memname has %lu\n",
			  (sizeof(kmzones)/sizeof(kmzones[0])), (sizeof(memname)/sizeof(memname[0])));
	}

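	/* First pass: create a dedicated zone (or look up the matching
	 * kalloc zone) for every malloc type that needs one */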
	kmz = kmzones;
	while (kmz < &kmzones[M_LAST]) {
/* XXX */
		if (kmz->kz_elemsize == (size_t)(-1))
			;
		else
/* XXX */
		if (kmz->kz_zalloczone == KMZ_CREATEZONE ||
		    kmz->kz_zalloczone == KMZ_CREATEZONE_ACCT) {
			/* capture the accounting flag before kz_zalloczone
			 * is overwritten with the newly created zone */
			boolean_t kz_acct =
			    (kmz->kz_zalloczone == KMZ_CREATEZONE_ACCT);

			kmz->kz_zalloczone = zinit(kmz->kz_elemsize,
						1024 * 1024, PAGE_SIZE,
						memname[kmz - kmzones]);
			zone_change(kmz->kz_zalloczone, Z_CALLERACCT, kz_acct);

			if (kmz->kz_noencrypt == TRUE)
				zone_change(kmz->kz_zalloczone, Z_NOENCRYPT, TRUE);
		}
		else if (kmz->kz_zalloczone == KMZ_LOOKUPZONE)
			kmz->kz_zalloczone = kalloc_zone(kmz->kz_elemsize);

		kmz++;
	}

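	/* Second pass: KMZ_SHAREZONE entries alias another type's zone;
	 * their kz_elemsize initially holds the index of the shared type */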
	kmz = kmzones;
	while (kmz < &kmzones[M_LAST]) {
/* XXX */
		if (kmz->kz_elemsize == (size_t)(-1))
			;
		else
/* XXX */
		if (kmz->kz_zalloczone == KMZ_SHAREZONE) {
			kmz->kz_zalloczone =
				kmzones[kmz->kz_elemsize].kz_zalloczone;
			kmz->kz_elemsize =
				kmzones[kmz->kz_elemsize].kz_elemsize;
		}

		kmz++;
	}
}
Example #7
void
red_init(void)
{
	_CASSERT(REDF_ECN4 == CLASSQF_ECN4);
	_CASSERT(REDF_ECN6 == CLASSQF_ECN6);

	red_size = sizeof (red_t);
	red_zone = zinit(red_size, RED_ZONE_MAX * red_size,
	    0, RED_ZONE_NAME);
	if (red_zone == NULL) {
		panic("%s: failed allocating %s", __func__, RED_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(red_zone, Z_EXPAND, TRUE);
	zone_change(red_zone, Z_CALLERACCT, TRUE);
}
Example #8
void
vnode_pager_bootstrap(void)
{
	register vm_size_t      size;

	size = (vm_size_t) sizeof(struct vnode_pager);
	vnode_pager_zone = zinit(size, (vm_size_t) MAX_VNODE*size,
				PAGE_SIZE, "vnode pager structures");
	zone_change(vnode_pager_zone, Z_CALLERACCT, FALSE);
	zone_change(vnode_pager_zone, Z_NOENCRYPT, TRUE);

#if CONFIG_CODE_DECRYPTION
	apple_protect_pager_bootstrap();
#endif	/* CONFIG_CODE_DECRYPTION */
	swapfile_pager_bootstrap();
	return;
}
Example #9
/*
 *	ROUTINE:	semaphore_init		[private]
 *
 *	Initialize the semaphore mechanisms.
 *	Right now, we only need to initialize the semaphore zone.
 */      
void
semaphore_init(void)
{
	semaphore_zone = zinit(sizeof(struct semaphore),
	    semaphore_max * sizeof(struct semaphore),
	    sizeof(struct semaphore),
	    "semaphores");
	zone_change(semaphore_zone, Z_NOENCRYPT, TRUE);
}
Example #10
/*
 * Called by nd6_init() during initialization time.
 */
void
nd6_prproxy_init(void)
{
	ndprl_size = sizeof (struct nd6_prproxy_prelist);
	ndprl_zone = zinit(ndprl_size, NDPRL_ZONE_MAX * ndprl_size, 0,
	    NDPRL_ZONE_NAME);
	if (ndprl_zone == NULL)
		panic("%s: failed allocating ndprl_zone", __func__);

	zone_change(ndprl_zone, Z_EXPAND, TRUE);
	zone_change(ndprl_zone, Z_CALLERACCT, FALSE);

	solsrc_size = sizeof (struct nd6_prproxy_solsrc);
	solsrc_zone = zinit(solsrc_size, SOLSRC_ZONE_MAX * solsrc_size, 0,
	    SOLSRC_ZONE_NAME);
	if (solsrc_zone == NULL)
		panic("%s: failed allocating solsrc_zone", __func__);

	zone_change(solsrc_zone, Z_EXPAND, TRUE);
	zone_change(solsrc_zone, Z_CALLERACCT, FALSE);

	soltgt_size = sizeof (struct nd6_prproxy_soltgt);
	soltgt_zone = zinit(soltgt_size, SOLTGT_ZONE_MAX * soltgt_size, 0,
	    SOLTGT_ZONE_NAME);
	if (soltgt_zone == NULL)
		panic("%s: failed allocating soltgt_zone", __func__);

	zone_change(soltgt_zone, Z_EXPAND, TRUE);
	zone_change(soltgt_zone, Z_CALLERACCT, FALSE);
}
Example #11
static void
uthread_zone_init(void)
{
	if (!uthread_zone_inited) {
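		/* backing memory grows THREAD_CHUNK uthreads at a time */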
		uthread_zone = zinit(sizeof(struct uthread),
					thread_max * sizeof(struct uthread),
					THREAD_CHUNK * sizeof(struct uthread),
					"uthreads");
		uthread_zone_inited = 1;

		zone_change(uthread_zone, Z_NOENCRYPT, TRUE);
	}
}
Example #12
void
wait_queue_bootstrap(void)
{
	wait_queues_init();
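	/* three zones: bare wait queues, wait-queue sets, and the link
	 * structures that tie queues into sets */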
	_wait_queue_zone = zinit(sizeof(struct wait_queue),
				      WAIT_QUEUE_MAX * sizeof(struct wait_queue),
				      sizeof(struct wait_queue),
				      "wait queues");
	zone_change(_wait_queue_zone, Z_NOENCRYPT, TRUE);

	_wait_queue_set_zone = zinit(sizeof(struct wait_queue_set),
				      WAIT_QUEUE_SET_MAX * sizeof(struct wait_queue_set),
				      sizeof(struct wait_queue_set),
				      "wait queue sets");
	zone_change(_wait_queue_set_zone, Z_NOENCRYPT, TRUE);

	_wait_queue_link_zone = zinit(sizeof(struct _wait_queue_link),
				      WAIT_QUEUE_LINK_MAX * sizeof(struct _wait_queue_link),
				      sizeof(struct _wait_queue_link),
				      "wait queue links");
	zone_change(_wait_queue_link_zone, Z_NOENCRYPT, TRUE);
}
Example #13
void
qfq_init(void)
{
	qfq_size = sizeof (struct qfq_if);
	qfq_zone = zinit(qfq_size, QFQ_ZONE_MAX * qfq_size,
	    0, QFQ_ZONE_NAME);
	if (qfq_zone == NULL) {
		panic("%s: failed allocating %s", __func__, QFQ_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(qfq_zone, Z_EXPAND, TRUE);
	zone_change(qfq_zone, Z_CALLERACCT, TRUE);

	qfq_cl_size = sizeof (struct qfq_class);
	qfq_cl_zone = zinit(qfq_cl_size, QFQ_CL_ZONE_MAX * qfq_cl_size,
	    0, QFQ_CL_ZONE_NAME);
	if (qfq_cl_zone == NULL) {
		panic("%s: failed allocating %s", __func__, QFQ_CL_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(qfq_cl_zone, Z_EXPAND, TRUE);
	zone_change(qfq_cl_zone, Z_CALLERACCT, TRUE);
}
Example #14
void
priq_init(void)
{
	priq_size = sizeof (struct priq_if);
	priq_zone = zinit(priq_size, PRIQ_ZONE_MAX * priq_size,
	    0, PRIQ_ZONE_NAME);
	if (priq_zone == NULL) {
		panic("%s: failed allocating %s", __func__, PRIQ_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(priq_zone, Z_EXPAND, TRUE);
	zone_change(priq_zone, Z_CALLERACCT, TRUE);

	priq_cl_size = sizeof (struct priq_class);
	priq_cl_zone = zinit(priq_cl_size, PRIQ_CL_ZONE_MAX * priq_cl_size,
	    0, PRIQ_CL_ZONE_NAME);
	if (priq_cl_zone == NULL) {
		panic("%s: failed allocating %s", __func__, PRIQ_CL_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(priq_cl_zone, Z_EXPAND, TRUE);
	zone_change(priq_cl_zone, Z_CALLERACCT, TRUE);
}
Example #15
void
tcq_init(void)
{
	tcq_size = sizeof (struct tcq_if);
	tcq_zone = zinit(tcq_size, TCQ_ZONE_MAX * tcq_size,
	    0, TCQ_ZONE_NAME);
	if (tcq_zone == NULL) {
		panic("%s: failed allocating %s", __func__, TCQ_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(tcq_zone, Z_EXPAND, TRUE);
	zone_change(tcq_zone, Z_CALLERACCT, TRUE);

	tcq_cl_size = sizeof (struct tcq_class);
	tcq_cl_zone = zinit(tcq_cl_size, TCQ_CL_ZONE_MAX * tcq_cl_size,
	    0, TCQ_CL_ZONE_NAME);
	if (tcq_cl_zone == NULL) {
		panic("%s: failed allocating %s", __func__, TCQ_CL_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(tcq_cl_zone, Z_EXPAND, TRUE);
	zone_change(tcq_cl_zone, Z_CALLERACCT, TRUE);
}
Example #16
void
fairq_init(void)
{
	fairq_size = sizeof (struct fairq_if);
	fairq_zone = zinit(fairq_size, FAIRQ_ZONE_MAX * fairq_size,
	    0, FAIRQ_ZONE_NAME);
	if (fairq_zone == NULL) {
		panic("%s: failed allocating %s", __func__, FAIRQ_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(fairq_zone, Z_EXPAND, TRUE);
	zone_change(fairq_zone, Z_CALLERACCT, TRUE);

	fairq_cl_size = sizeof (struct fairq_class);
	fairq_cl_zone = zinit(fairq_cl_size, FAIRQ_CL_ZONE_MAX * fairq_cl_size,
	    0, FAIRQ_CL_ZONE_NAME);
	if (fairq_cl_zone == NULL) {
		panic("%s: failed allocating %s", __func__, FAIRQ_CL_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(fairq_cl_zone, Z_EXPAND, TRUE);
	zone_change(fairq_cl_zone, Z_CALLERACCT, TRUE);
}
Example #17
/*
 * Initialize the framework; this is currently called as part of BSD init.
 */
__private_extern__ void
mcache_init(void)
{
	mcache_bkttype_t *btp;
	unsigned int i;
	char name[32];

	ncpu = ml_get_max_cpus();

	mcache_llock_grp_attr = lck_grp_attr_alloc_init();
	mcache_llock_grp = lck_grp_alloc_init("mcache.list",
	    mcache_llock_grp_attr);
	mcache_llock_attr = lck_attr_alloc_init();
	mcache_llock = lck_mtx_alloc_init(mcache_llock_grp, mcache_llock_attr);

	mcache_zone = zinit(MCACHE_ALLOC_SIZE, 256 * MCACHE_ALLOC_SIZE,
	    PAGE_SIZE, "mcache");
	if (mcache_zone == NULL)
		panic("mcache_init: failed to allocate mcache zone\n");
	zone_change(mcache_zone, Z_CALLERACCT, FALSE);

	LIST_INIT(&mcache_head);

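	/* one bucket cache per bucket type; a bucket holds bt_bktsize
	 * object pointers plus one pointer used for linkage */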
	for (i = 0; i < sizeof (mcache_bkttype) / sizeof (*btp); i++) {
		btp = &mcache_bkttype[i];
		(void) snprintf(name, sizeof (name), "bkt_%d",
		    btp->bt_bktsize);
		btp->bt_cache = mcache_create(name,
		    (btp->bt_bktsize + 1) * sizeof (void *), 0, 0, MCR_SLEEP);
	}

	PE_parse_boot_argn("mcache_flags", &mcache_flags, sizeof (mcache_flags));
	mcache_flags &= MCF_FLAGS_MASK;

	mcache_audit_cache = mcache_create("audit", sizeof (mcache_audit_t),
	    0, 0, MCR_SLEEP);

	mcache_reap_interval = 15 * hz;
	mcache_applyall(mcache_cache_bkt_enable);
	mcache_ready = 1;
}
Example #18
void
arp_init(void)
{
	if (arpinit_done) {
		log(LOG_NOTICE, "arp_init called more than once (ignored)\n");
		return;
	}

	LIST_INIT(&llinfo_arp);

	llinfo_arp_zone = zinit(sizeof (struct llinfo_arp),
	    LLINFO_ARP_ZONE_MAX * sizeof (struct llinfo_arp), 0,
	    LLINFO_ARP_ZONE_NAME);
	if (llinfo_arp_zone == NULL)
		panic("%s: failed allocating llinfo_arp_zone", __func__);

	zone_change(llinfo_arp_zone, Z_EXPAND, TRUE);

	arpinit_done = 1;

	/* start timer */
	timeout(arptimer, (caddr_t)0, hz);
}
Example #19
void
sfb_init(void)
{
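	/* compile-time checks: SFB's ECN flag values must mirror classq's */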
	_CASSERT(SFBF_ECN4 == CLASSQF_ECN4);
	_CASSERT(SFBF_ECN6 == CLASSQF_ECN6);

	sfb_size = sizeof (struct sfb);
	sfb_zone = zinit(sfb_size, SFB_ZONE_MAX * sfb_size,
	    0, SFB_ZONE_NAME);
	if (sfb_zone == NULL) {
		panic("%s: failed allocating %s", __func__, SFB_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(sfb_zone, Z_EXPAND, TRUE);
	zone_change(sfb_zone, Z_CALLERACCT, TRUE);

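	/* sizeof applied through a cast null pointer yields the size of
	 * the pointed-to structure without needing an instance */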
	sfb_bins_size = sizeof (*((struct sfb *)0)->sfb_bins);
	sfb_bins_zone = zinit(sfb_bins_size, SFB_BINS_ZONE_MAX * sfb_bins_size,
	    0, SFB_BINS_ZONE_NAME);
	if (sfb_bins_zone == NULL) {
		panic("%s: failed allocating %s", __func__, SFB_BINS_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(sfb_bins_zone, Z_EXPAND, TRUE);
	zone_change(sfb_bins_zone, Z_CALLERACCT, TRUE);

	sfb_fcl_size = sizeof (*((struct sfb *)0)->sfb_fc_lists);
	sfb_fcl_zone = zinit(sfb_fcl_size, SFB_FCL_ZONE_MAX * sfb_fcl_size,
	    0, SFB_FCL_ZONE_NAME);
	if (sfb_fcl_zone == NULL) {
		panic("%s: failed allocating %s", __func__, SFB_FCL_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(sfb_fcl_zone, Z_EXPAND, TRUE);
	zone_change(sfb_fcl_zone, Z_CALLERACCT, TRUE);
}
Example #20
void
kalloc_init(
	void)
{
	kern_return_t retval;
	vm_offset_t min;
	vm_size_t size, kalloc_map_size;
	register int i;

	/* 
	 * Scale the kalloc_map_size to physical memory size: stay below 
	 * 1/8th the total zone map size, or 128 MB (for a 32-bit kernel).
	 */
	kalloc_map_size = (vm_size_t)(sane_size >> 5);
#if !__LP64__
	if (kalloc_map_size > KALLOC_MAP_SIZE_MAX)
		kalloc_map_size = KALLOC_MAP_SIZE_MAX;
#endif /* !__LP64__ */
	if (kalloc_map_size < KALLOC_MAP_SIZE_MIN)
		kalloc_map_size = KALLOC_MAP_SIZE_MIN;

	retval = kmem_suballoc(kernel_map, &min, kalloc_map_size,
			       FALSE, VM_FLAGS_ANYWHERE | VM_FLAGS_PERMANENT,
			       &kalloc_map);

	if (retval != KERN_SUCCESS)
		panic("kalloc_init: kmem_suballoc failed");

	kalloc_map_min = min;
	kalloc_map_max = min + kalloc_map_size - 1;

	/*
	 *	Ensure that zones up to size 8192 bytes exist.
	 *	This is desirable because messages are allocated
	 *	with kalloc, and messages up through size 8192 are common.
	 */

	if (PAGE_SIZE < 16*1024)
		kalloc_max = 16*1024;
	else
		kalloc_max = PAGE_SIZE;
	kalloc_max_prerounded = kalloc_max / 2 + 1;
	/* size it to be more than 16 times kalloc_max (256k) for allocations from kernel map */
	kalloc_kernmap_size = (kalloc_max * 16) + 1;
	kalloc_largest_allocated = kalloc_kernmap_size;

	/*
	 *	Allocate a zone for each size we are going to handle.
	 *	We specify non-paged memory.  Don't charge the caller
	 *	for the allocation, as we aren't sure how the memory
	 *	will be handled.
	 */
	for (i = 0; (size = k_zone_size[i]) < kalloc_max; i++) {
		k_zone[i] = zinit(size, k_zone_max[i] * size, size,
				  k_zone_name[i]);
		zone_change(k_zone[i], Z_CALLERACCT, FALSE);
	}

	/*
	 * Build the Direct LookUp Table for small allocations
	 */
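	/* each table slot maps a request size (in KALLOC_MINALIGN steps)
	 * to the smallest zone that fits it; sizes beyond the table fall
	 * back to a linear scan starting at k_zindex_start */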
	for (i = 0, size = 0; i <= N_K_ZDLUT; i++, size += KALLOC_MINALIGN) {
		int zindex = 0;

		while ((vm_size_t)k_zone_size[zindex] < size)
			zindex++;

		if (i == N_K_ZDLUT) {
			k_zindex_start = zindex;
			break;
		}
		k_zone_dlut[i] = (int8_t)zindex;
	}

#ifdef KALLOC_DEBUG
	printf("kalloc_init: k_zindex_start %d\n", k_zindex_start);

	/*
	 * Do a quick synthesis to see how well/badly we can
	 * find-a-zone for a given size.
	 * Useful when debugging/tweaking the array of zone sizes.
	 * Cache misses probably more critical than compare-branches!
	 */
	for (i = 0; i < (int)N_K_ZONE; i++) {
		vm_size_t testsize = (vm_size_t)k_zone_size[i] - 1;
		int compare = 0;
		int zindex;

		if (testsize < MAX_SIZE_ZDLUT) {
			compare += 1;	/* 'if' (T) */

			long dindex = INDEX_ZDLUT(testsize);
			zindex = (int)k_zone_dlut[dindex];

		} else if (testsize < kalloc_max_prerounded) {

			compare += 2;	/* 'if' (F), 'if' (T) */

			zindex = k_zindex_start;
			while ((vm_size_t)k_zone_size[zindex] < testsize) {
				zindex++;
				compare++;	/* 'while' (T) */
			}
			compare++;	/* 'while' (F) */
		} else
			break;	/* not zone-backed */

		zone_t z = k_zone[zindex];
		printf("kalloc_init: req size %4lu: %11s took %d compare%s\n",
		    (unsigned long)testsize, z->zone_name, compare,
		    compare == 1 ? "" : "s");
	}
#endif
	kalloc_lck_grp = lck_grp_alloc_init("kalloc.large", LCK_GRP_ATTR_NULL);
	lck_mtx_init(&kalloc_lock, kalloc_lck_grp, LCK_ATTR_NULL);
	OSMalloc_init();
#ifdef	MUTEX_ZONE	
	lck_mtx_zone = zinit(sizeof(struct _lck_mtx_), 1024*256, 4096, "lck_mtx");
#endif	
}
Example #21
void
default_pager_initialize(void)
{
	kern_return_t		kr;
	__unused static char	here[] = "default_pager_initialize";

	lck_grp_attr_setdefault(&default_pager_lck_grp_attr);
	lck_grp_init(&default_pager_lck_grp, "default_pager", &default_pager_lck_grp_attr);
	lck_attr_setdefault(&default_pager_lck_attr);	

	/*
	 * Vm variables.
	 */
#ifndef MACH_KERNEL
	vm_page_mask = vm_page_size - 1;
	assert((unsigned int) vm_page_size == vm_page_size);
	vm_page_shift = local_log2((unsigned int) vm_page_size);
#endif

	/*
	 * List of all vstructs.
	 */
	vstruct_zone = zinit(sizeof(struct vstruct),
			     10000 * sizeof(struct vstruct),
			     8192, "vstruct zone");
	zone_change(vstruct_zone, Z_CALLERACCT, FALSE);
	zone_change(vstruct_zone, Z_NOENCRYPT, TRUE);

	VSL_LOCK_INIT();
	queue_init(&vstruct_list.vsl_queue);
	vstruct_list.vsl_count = 0;

	VSTATS_LOCK_INIT(&global_stats.gs_lock);

	bs_initialize();

	/*
	 * Exported DMM port.
	 */
	default_pager_object = ipc_port_alloc_kernel();

	/*
	 * Export pager interfaces.
	 */
#ifdef	USER_PAGER
	if ((kr = netname_check_in(name_server_port, "UserPager",
				   default_pager_self,
				   default_pager_object))
	    != KERN_SUCCESS) {
		dprintf(("netname_check_in returned 0x%x\n", kr));
		exit(1);
	}
#else	/* USER_PAGER */
	{
		unsigned int clsize;
		memory_object_default_t dmm;

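		/* register this pager as the host's default memory manager,
		 * advertising its cluster size in bytes */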
		dmm = default_pager_object;
		assert((unsigned int) vm_page_size == vm_page_size);
		clsize = ((unsigned int) vm_page_size << vstruct_def_clshift);
		kr = host_default_memory_manager(host_priv_self(), &dmm, clsize);
		if ((kr != KERN_SUCCESS) ||
		    (dmm != MEMORY_OBJECT_DEFAULT_NULL))
			Panic("default memory manager");

	}
#endif	/* USER_PAGER */
}