Example No. 1
/*
 * Initialize the eventhandler mutex and list.
 */
void
eventhandler_init(void)
{
	eventhandler_mutex_grp_attr = lck_grp_attr_alloc_init();
	eventhandler_mutex_grp = lck_grp_alloc_init("eventhandler",
	    eventhandler_mutex_grp_attr);
	eventhandler_mutex_attr = lck_attr_alloc_init();

	el_lock_grp_attr = lck_grp_attr_alloc_init();
	el_lock_grp = lck_grp_alloc_init("eventhandler list",
	    el_lock_grp_attr);
	el_lock_attr = lck_attr_alloc_init();

	eventhandler_lists_ctxt_init(&evthdlr_lists_ctxt_glb);
}
Example No. 2
/* initial setup done at time of sysinit */
void
pipeinit(void)
{
	vm_size_t zone_size;

	nbigpipe = 0;

	zone_size = 8192 * sizeof(struct pipe);
	pipe_zone = zinit(sizeof(struct pipe), zone_size, 4096, "pipe zone");

	/* allocate lock group attribute and group for pipe mutexes */
	pipe_mtx_grp_attr = lck_grp_attr_alloc_init();
	pipe_mtx_grp = lck_grp_alloc_init("pipe", pipe_mtx_grp_attr);

	/* allocate the lock attribute for pipe mutexes */
	pipe_mtx_attr = lck_attr_alloc_init();

	/*
	 * Set up garbage collection for dead pipes
	 */
	zone_size = (PIPE_GARBAGE_QUEUE_LIMIT + 20) *
	    sizeof(struct pipe_garbage);
	pipe_garbage_zone = (zone_t)zinit(sizeof(struct pipe_garbage),
	    zone_size, 4096, "pipe garbage zone");
	pipe_garbage_lock = lck_mtx_alloc_init(pipe_mtx_grp, pipe_mtx_attr);
}
Example No. 3
void
kpc_thread_init(void)
{
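	/* Allocate the lock group and initialize the mutex guarding kpc per-thread state. */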
	kpc_thread_lckgrp_attr = lck_grp_attr_alloc_init();
	kpc_thread_lckgrp = lck_grp_alloc_init("kpc", kpc_thread_lckgrp_attr);
	lck_mtx_init(&kpc_thread_lock, kpc_thread_lckgrp, LCK_ATTR_NULL);
}
Example No. 4
funnel_t *
funnel_alloc(
	int type)
{
	lck_mtx_t	*m;
	funnel_t	*fnl;

	if (funnel_lck_grp == LCK_GRP_NULL) {
		funnel_lck_grp_attr = lck_grp_attr_alloc_init();

		funnel_lck_grp = lck_grp_alloc_init("Funnel",  funnel_lck_grp_attr);

		funnel_lck_attr = lck_attr_alloc_init();
	}
	if ((fnl = (funnel_t *)kalloc(sizeof(funnel_t))) != 0){
		bzero((void *)fnl, sizeof(funnel_t));
		if ((m = lck_mtx_alloc_init(funnel_lck_grp, funnel_lck_attr)) == (lck_mtx_t *)NULL) {
			kfree(fnl, sizeof(funnel_t));
			return(THR_FUNNEL_NULL);
		}
		fnl->fnl_mutex = m;
		fnl->fnl_type = type;
	}
	return(fnl);
}
Example No. 5
void
tcp_lro_init(void)
{
	int i;

	bzero(lro_flow_list, sizeof (struct lro_flow) * TCP_LRO_NUM_FLOWS);
	for (i = 0; i < TCP_LRO_FLOW_MAP; i++) {
		lro_flow_map[i] = TCP_LRO_FLOW_UNINIT;
	}

	/*
	 * allocate lock group attribute, group, and lock attribute for tcp_lro_lock
	 */
	tcp_lro_mtx_grp_attr = lck_grp_attr_alloc_init();
	tcp_lro_mtx_grp = lck_grp_alloc_init("tcplro", tcp_lro_mtx_grp_attr);
	tcp_lro_mtx_attr = lck_attr_alloc_init();
	lck_mtx_init(&tcp_lro_lock, tcp_lro_mtx_grp, tcp_lro_mtx_attr);

	tcp_lro_timer = thread_call_allocate(tcp_lro_timer_proc, NULL);
	if (tcp_lro_timer == NULL) {
		panic_plain("%s: unable to allocate lro timer", __func__);
	}

	return;
}
Example No. 6
void
os_reason_init()
{
	int reasons_allocated = 0;

	/*
	 * Initialize OS reason group and lock attributes
	 */
	os_reason_lock_grp_attr =  lck_grp_attr_alloc_init();
	os_reason_lock_grp = lck_grp_alloc_init("os_reason_lock", os_reason_lock_grp_attr);
	os_reason_lock_attr = lck_attr_alloc_init();

	/*
	 * Create OS reason zone.
	 */
	os_reason_zone = zinit(sizeof(struct os_reason), OS_REASON_MAX_COUNT * sizeof(struct os_reason),
				OS_REASON_MAX_COUNT, "os reasons");
	if (os_reason_zone == NULL) {
		panic("failed to initialize os_reason_zone");
	}

	/*
	 * We pre-fill the OS reason zone to reduce the likelihood that
	 * the jetsam thread and others block when they create an exit
	 * reason. This pre-filled memory is not-collectable since it's
	 * foreign memory crammed in as part of zfill().
	 */
	reasons_allocated = zfill(os_reason_zone, OS_REASON_RESERVE_COUNT);
	assert(reasons_allocated > 0);
}
Example No. 7
void
kpc_common_init(void)
{
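	/* Allocate the lock group and initialize the mutex guarding kpc configuration. */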
	kpc_config_lckgrp_attr = lck_grp_attr_alloc_init();
	kpc_config_lckgrp = lck_grp_alloc_init("kpc", kpc_config_lckgrp_attr);
	lck_mtx_init(&kpc_config_lock, kpc_config_lckgrp, LCK_ATTR_NULL);
}
Example No. 8
/**
 * Mount helper: allocate locking group attribute and locking group itself.
 * Store allocated data into VBoxVFS private data.
 *
 * @param pMount   VBoxVFS private mount data which will be updated with the
 *                 locking group and its attribute on success; otherwise
 *                 pMount is left unchanged.
 *
 * @return 0 on success or BSD error code otherwise.
 *
 */
static int
vboxvfs_prepare_locking(vboxvfs_mount_t *pMount)
{
    lck_grp_attr_t *pGrpAttr;
    lck_grp_t      *pGrp;

    AssertReturn(pMount, EINVAL);

    pGrpAttr = lck_grp_attr_alloc_init();
    if (pGrpAttr)
    {
        pGrp = lck_grp_alloc_init("VBoxVFS", pGrpAttr);
        if (pGrp)
        {
            pMount->pLockGroupAttr = pGrpAttr;
            pMount->pLockGroup      = pGrp;

            return 0;
        }
        else
            PERROR("Unable to allocate locking group");

        lck_grp_attr_free(pGrpAttr);
    }
    else
        PERROR("Unable to allocate locking group attribute");

    return ENOMEM;
}
Example No. 9
void
flowadv_init(void)
{
	STAILQ_INIT(&fadv_list);

	/* Setup lock group and attribute for fadv_lock */
	fadv_lock_grp_attr = lck_grp_attr_alloc_init();
	fadv_lock_grp = lck_grp_alloc_init("fadv_lock", fadv_lock_grp_attr);
	lck_mtx_init(&fadv_lock, fadv_lock_grp, NULL);

	fadv_zone_size = P2ROUNDUP(sizeof (struct flowadv_fcentry),
	    sizeof (u_int64_t));
	fadv_zone = zinit(fadv_zone_size,
	    FADV_ZONE_MAX * fadv_zone_size, 0, FADV_ZONE_NAME);
	if (fadv_zone == NULL) {
		panic("%s: failed allocating %s", __func__, FADV_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(fadv_zone, Z_EXPAND, TRUE);
	zone_change(fadv_zone, Z_CALLERACCT, FALSE);

	if (kernel_thread_start(flowadv_thread_func, NULL, &fadv_thread) !=
	    KERN_SUCCESS) {
		panic("%s: couldn't create flow event advisory thread",
		    __func__);
		/* NOTREACHED */
	}
	thread_deallocate(fadv_thread);
}
Example No. 10
void
cs_init(void)
{
#if MACH_ASSERT && __x86_64__
	panic_on_cs_killed = 1;
#endif /* MACH_ASSERT && __x86_64__ */
	PE_parse_boot_argn("panic_on_cs_killed", &panic_on_cs_killed,
			   sizeof (panic_on_cs_killed));
#if !SECURE_KERNEL
	int disable_cs_enforcement = 0;
	PE_parse_boot_argn("cs_enforcement_disable", &disable_cs_enforcement, 
			   sizeof (disable_cs_enforcement));
	if (disable_cs_enforcement) {
		cs_enforcement_enable = 0;
	} else {
		int panic = 0;
		PE_parse_boot_argn("cs_enforcement_panic", &panic, sizeof(panic));
		cs_enforcement_panic = (panic != 0);
	}

	PE_parse_boot_argn("cs_debug", &cs_debug, sizeof (cs_debug));

#if !CONFIG_ENFORCE_LIBRARY_VALIDATION
	PE_parse_boot_argn("cs_library_val_enable", &cs_library_val_enable,
			   sizeof (cs_library_val_enable));
#endif
#endif /* !SECURE_KERNEL */

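	/* Allocate the lock group used for code-signing locks. */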
	lck_grp_attr_t *attr = lck_grp_attr_alloc_init();
	cs_lockgrp = lck_grp_alloc_init("KERNCS", attr);
}
Example No. 11
/*
 * Initialize cnode hash table.
 */
__private_extern__
void
hfs_chashinit()
{
	chash_lck_grp_attr= lck_grp_attr_alloc_init();
	chash_lck_grp  = lck_grp_alloc_init("cnode_hash", chash_lck_grp_attr);
	chash_lck_attr = lck_attr_alloc_init();
}
Example No. 12
void
init_system_override()
{
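	/* Allocate the lock group/attribute and initialize the mutex serializing system overrides. */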
	sys_override_mtx_grp_attr = lck_grp_attr_alloc_init();
	sys_override_mtx_grp = lck_grp_alloc_init("system_override", sys_override_mtx_grp_attr);
	sys_override_mtx_attr = lck_attr_alloc_init();
	lck_mtx_init(&sys_override_lock, sys_override_mtx_grp, sys_override_mtx_attr);
	io_throttle_assert_cnt = cpu_throttle_assert_cnt = 0;
}
Example No. 13
/* Initialize the mutex governing access to the SysV msg subsystem */
__private_extern__ void
sysv_msg_lock_init( void )
{
	sysv_msg_subsys_lck_grp_attr = lck_grp_attr_alloc_init();

	sysv_msg_subsys_lck_grp = lck_grp_alloc_init("sysv_msg_subsys_lock", sysv_msg_subsys_lck_grp_attr);

	sysv_msg_subsys_lck_attr = lck_attr_alloc_init();
	lck_mtx_init(&sysv_msg_subsys_mutex, sysv_msg_subsys_lck_grp, sysv_msg_subsys_lck_attr);
}
Example No. 14
void
kperf_bootstrap(void)
{
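	/* Allocate the lock group and initialize the mutex protecting kperf configuration. */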
	kperf_cfg_lckgrp_attr = lck_grp_attr_alloc_init();
	kperf_cfg_lckgrp = lck_grp_alloc_init("kperf cfg", 
                                          kperf_cfg_lckgrp_attr);
	lck_mtx_init(&kperf_cfg_lock, kperf_cfg_lckgrp, LCK_ATTR_NULL);

	kperf_cfg_initted = TRUE;
}
Example No. 15
static inline void setup_locks() {
    /* Create locks.  Cannot be done on the stack. */
    osquery.lck_grp_attr = lck_grp_attr_alloc_init();
    lck_grp_attr_setstat(osquery.lck_grp_attr);

    osquery.lck_grp = lck_grp_alloc_init("osquery", osquery.lck_grp_attr);

    osquery.lck_attr = lck_attr_alloc_init();

    osquery.mtx = lck_mtx_alloc_init(osquery.lck_grp, osquery.lck_attr);
}
Example No. 16
static inline void setup_locks() {
  // Create locks. Cannot be done on the stack.
  osquery.lck_grp_attr = lck_grp_attr_alloc_init();
  lck_grp_attr_setstat(osquery.lck_grp_attr);
  osquery.lck_grp = lck_grp_alloc_init("osquery", osquery.lck_grp_attr);
  osquery.lck_attr = lck_attr_alloc_init();

  // MTX is the IOCTL API handling lock.
  // This assures only one daemon will use the kernel API simultaneously.
  osquery.mtx = lck_mtx_alloc_init(osquery.lck_grp, osquery.lck_attr);
}
Example No. 17
File: bpf.c Project: SbIm/xnu-env
void
bpf_init(__unused void *unused)
{
#ifdef __APPLE__
	int 	i;
	int	maj;

	if (bpf_devsw_installed == 0) {
		bpf_devsw_installed = 1;

		bpf_mlock_grp_attr = lck_grp_attr_alloc_init();

		bpf_mlock_grp = lck_grp_alloc_init("bpf", bpf_mlock_grp_attr);

		bpf_mlock_attr = lck_attr_alloc_init();

		bpf_mlock = lck_mtx_alloc_init(bpf_mlock_grp, bpf_mlock_attr);

		if (bpf_mlock == 0) {
			printf("bpf_init: failed to allocate bpf_mlock\n");
			bpf_devsw_installed = 0;
			return;
		}
		
		maj = cdevsw_add(CDEV_MAJOR, &bpf_cdevsw);
		if (maj == -1) {
			if (bpf_mlock)
				lck_mtx_free(bpf_mlock, bpf_mlock_grp);
			if (bpf_mlock_attr)
				lck_attr_free(bpf_mlock_attr);
			if (bpf_mlock_grp)
				lck_grp_free(bpf_mlock_grp);
			if (bpf_mlock_grp_attr)
				lck_grp_attr_free(bpf_mlock_grp_attr);
			
			bpf_mlock = NULL;
			bpf_mlock_attr = NULL;
			bpf_mlock_grp = NULL;
			bpf_mlock_grp_attr = NULL;
			bpf_devsw_installed = 0;
			printf("bpf_init: failed to allocate a major number!\n");
			return;
		}

		for (i = 0 ; i < NBPFILTER; i++)
			bpf_make_dev_t(maj);
	}
#else
	cdevsw_add(&bpf_cdevsw);
#endif
}
Example No. 18
rMutex
    rpal_mutex_create( void )
{
    lck_mtx_t* mutex = NULL;
    
    lck_grp_attr_t* gattr = NULL;
    lck_attr_t* lattr = NULL;
    
    if( 0 == g_lck_group )
    {
        rpal_debug_info( "mutex group not created, creating" );
        
        gattr = lck_grp_attr_alloc_init();
        
        if( NULL == gattr )
        {
            rpal_debug_critical( "could not create mutex group" );
            return NULL;
        }
        
        lck_grp_attr_setstat( gattr );
        
        g_lck_group = lck_grp_alloc_init( "hcphbs", gattr );
        
        lck_grp_attr_free( gattr );
    }
    
    if( NULL == g_lck_group )
    {
        return NULL;
    }
    
    lattr = lck_attr_alloc_init();
    
    if( NULL != lattr )
    {
        mutex = lck_mtx_alloc_init( g_lck_group, lattr );
        lck_attr_free( lattr );
    }
    else
    {
        rpal_debug_critical( "could not create mutex attributes" );
    }
    
    return mutex;
}
Example No. 19
/*
 * Initialize raw connection block q.
 */
void
rip_init(struct protosw *pp, struct domain *dp)
{
#pragma unused(dp)
	static int rip_initialized = 0;
	struct inpcbinfo *pcbinfo;

	VERIFY((pp->pr_flags & (PR_INITIALIZED|PR_ATTACHED)) == PR_ATTACHED);

	if (rip_initialized)
		return;
	rip_initialized = 1;

	LIST_INIT(&ripcb);
	ripcbinfo.ipi_listhead = &ripcb;
	/*
	 * XXX We don't use the hash list for raw IP, but it's easier
	 * to allocate a one entry hash list than it is to check all
	 * over the place for ipi_hashbase == NULL.
	 */
	ripcbinfo.ipi_hashbase = hashinit(1, M_PCB, &ripcbinfo.ipi_hashmask);
	ripcbinfo.ipi_porthashbase = hashinit(1, M_PCB, &ripcbinfo.ipi_porthashmask);

	ripcbinfo.ipi_zone = zinit(sizeof(struct inpcb),
	    (4096 * sizeof(struct inpcb)), 4096, "ripzone");

	pcbinfo = &ripcbinfo;
	/*
	 * allocate lock group attribute and group for raw IP PCB mutexes
	 */
	pcbinfo->ipi_lock_grp_attr = lck_grp_attr_alloc_init();
	pcbinfo->ipi_lock_grp = lck_grp_alloc_init("ripcb", pcbinfo->ipi_lock_grp_attr);

	/*
	 * allocate the lock attribute for raw IP PCB mutexes
	 */
	pcbinfo->ipi_lock_attr = lck_attr_alloc_init();
	if ((pcbinfo->ipi_lock = lck_rw_alloc_init(pcbinfo->ipi_lock_grp,
	    pcbinfo->ipi_lock_attr)) == NULL) {
		panic("%s: unable to allocate PCB lock\n", __func__);
		/* NOTREACHED */
	}

	in_pcbinfo_attach(&ripcbinfo);
}
Example No. 20
/* This should only be called from the bootstrap thread. */
void
ktrace_init(void)
{
	static lck_grp_attr_t *lock_grp_attr = NULL;
	static lck_grp_t *lock_grp = NULL;
	static boolean_t initialized = FALSE;

	if (initialized) {
		return;
	}

	lock_grp_attr = lck_grp_attr_alloc_init();
	lock_grp = lck_grp_alloc_init("ktrace", lock_grp_attr);
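	/* The attribute is only needed to create the group, so free it right away. */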
	lck_grp_attr_free(lock_grp_attr);

	ktrace_lock = lck_mtx_alloc_init(lock_grp, LCK_ATTR_NULL);
	assert(ktrace_lock);
	initialized = TRUE;
}
Example No. 21
void lpx_datagram_init()
{
	DEBUG_PRINT(DEBUG_MASK_DGRAM_TRACE, ("lpx_datagram_init: Entered.\n"));
	
	// Init Lock.
	datagram_mtx_grp_attr = lck_grp_attr_alloc_init();
	lck_grp_attr_setdefault(datagram_mtx_grp_attr);
	
	datagram_mtx_grp = lck_grp_alloc_init("datagrampcb", datagram_mtx_grp_attr);
	
	datagram_mtx_attr = lck_attr_alloc_init();
	lck_attr_setdefault(datagram_mtx_attr);
	
	if ((lpx_datagram_pcb.lpxp_list_rw = lck_rw_alloc_init(datagram_mtx_grp, datagram_mtx_attr)) == NULL) {
		DEBUG_PRINT(DEBUG_MASK_STREAM_ERROR, ("lpx_datagram_init: Can't alloc mtx\n"));
	}
	
	return;
}
Example No. 22
static kern_return_t
register_locks(void)
{
	/* already allocated? */
	if (ucode_slock_grp_attr && ucode_slock_grp && ucode_slock_attr && ucode_slock)
		return KERN_SUCCESS;

	/* allocate lock group attribute and group */
	if (!(ucode_slock_grp_attr = lck_grp_attr_alloc_init()))
		goto nomem_out;

	lck_grp_attr_setstat(ucode_slock_grp_attr);

	if (!(ucode_slock_grp = lck_grp_alloc_init("ucode_lock", ucode_slock_grp_attr)))
		goto nomem_out;

	/* Allocate lock attribute */
	if (!(ucode_slock_attr = lck_attr_alloc_init()))
		goto nomem_out;

	/* Allocate the spin lock */
	/* We keep one global spin-lock. We could have one per update
	 * request... but srsly, why would you update microcode like that?
	 */
	if (!(ucode_slock = lck_spin_alloc_init(ucode_slock_grp, ucode_slock_attr)))
		goto nomem_out;

	return KERN_SUCCESS;

nomem_out:
	/* clean up */
	if (ucode_slock)
		lck_spin_free(ucode_slock, ucode_slock_grp);
	if (ucode_slock_attr)
		lck_attr_free(ucode_slock_attr);
	if (ucode_slock_grp)
		lck_grp_free(ucode_slock_grp);
	if (ucode_slock_grp_attr)
		lck_grp_attr_free(ucode_slock_grp_attr);

	return KERN_NO_SPACE;
}
Example No. 23
ADT_LOCK _adt_xnu_rw_lock_init(int max_threads)
{
    ADT_LOCK rwlock;
    
    rwlock = adt_malloc(sizeof(adt_lock_t));
    if(rwlock == NULL)
    {
        goto end;
    }

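    /* Allocate a dedicated lock group, lock attribute, and the reader/writer lock itself. */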
    rwlock->rw_lock_grp_attr = lck_grp_attr_alloc_init();
    // TODO: not sure if the name needs to be unique
    rwlock->rw_lock_grp = lck_grp_alloc_init("adt_rw_lock", rwlock->rw_lock_grp_attr);
    rwlock->rw_lock_attr = lck_attr_alloc_init();
    rwlock->rw_lock = lck_rw_alloc_init(rwlock->rw_lock_grp, rwlock->rw_lock_attr);

end:
    return rwlock;
}
Example No. 24
File: mcache.c Project: Algozjb/xnu
/*
 * Initialize the framework; this is currently called as part of BSD init.
 */
__private_extern__ void
mcache_init(void)
{
	mcache_bkttype_t *btp;
	unsigned int i;
	char name[32];

	ncpu = ml_get_max_cpus();

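	/* mcache_llock serializes access to the global list of caches (mcache_head). */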
	mcache_llock_grp_attr = lck_grp_attr_alloc_init();
	mcache_llock_grp = lck_grp_alloc_init("mcache.list",
	    mcache_llock_grp_attr);
	mcache_llock_attr = lck_attr_alloc_init();
	mcache_llock = lck_mtx_alloc_init(mcache_llock_grp, mcache_llock_attr);

	mcache_zone = zinit(MCACHE_ALLOC_SIZE, 256 * MCACHE_ALLOC_SIZE,
	    PAGE_SIZE, "mcache");
	if (mcache_zone == NULL)
		panic("mcache_init: failed to allocate mcache zone\n");
	zone_change(mcache_zone, Z_CALLERACCT, FALSE);

	LIST_INIT(&mcache_head);

	for (i = 0; i < sizeof (mcache_bkttype) / sizeof (*btp); i++) {
		btp = &mcache_bkttype[i];
		(void) snprintf(name, sizeof (name), "bkt_%d",
		    btp->bt_bktsize);
		btp->bt_cache = mcache_create(name,
		    (btp->bt_bktsize + 1) * sizeof (void *), 0, 0, MCR_SLEEP);
	}

	PE_parse_boot_argn("mcache_flags", &mcache_flags, sizeof (mcache_flags));
	mcache_flags &= MCF_FLAGS_MASK;

	mcache_audit_cache = mcache_create("audit", sizeof (mcache_audit_t),
	    0, 0, MCR_SLEEP);

	mcache_reap_interval = 15 * hz;
	mcache_applyall(mcache_cache_bkt_enable);
	mcache_ready = 1;
}
Example No. 25
/*
 * Initialise cache headers
 */
int
nullfs_init(__unused struct vfsconf * vfsp)
{
	NULLFSDEBUG("%s\n", __FUNCTION__);

	/* assuming for now that this happens immediately and by default after fs
	 * installation */
	null_hashlck_grp_attr = lck_grp_attr_alloc_init();
	if (null_hashlck_grp_attr == NULL) {
		goto error;
	}
	null_hashlck_grp = lck_grp_alloc_init("com.apple.filesystems.nullfs", null_hashlck_grp_attr);
	if (null_hashlck_grp == NULL) {
		goto error;
	}
	null_hashlck_attr = lck_attr_alloc_init();
	if (null_hashlck_attr == NULL) {
		goto error;
	}

	lck_mtx_init(&null_hashmtx, null_hashlck_grp, null_hashlck_attr);
	null_node_hashtbl = hashinit(NULL_HASH_SIZE, M_TEMP, &null_hash_mask);
	NULLFSDEBUG("%s finished\n", __FUNCTION__);
	return (0);
error:
	printf("NULLFS: failed to get lock element\n");
	if (null_hashlck_grp_attr) {
		lck_grp_attr_free(null_hashlck_grp_attr);
		null_hashlck_grp_attr = NULL;
	}
	if (null_hashlck_grp) {
		lck_grp_free(null_hashlck_grp);
		null_hashlck_grp = NULL;
	}
	if (null_hashlck_attr) {
		lck_attr_free(null_hashlck_attr);
		null_hashlck_attr = NULL;
	}
	return KERN_FAILURE;
}
Example No. 26
void
zfs_context_init(void)
{
	uint64_t kern_mem_size;

	zfs_lock_attr = lck_attr_alloc_init();
	zfs_group_attr = lck_grp_attr_alloc_init();
#if 0
	lck_attr_setdebug(zfs_lock_attr);
#endif
	zfs_mutex_group  = lck_grp_alloc_init("zfs-mutex", zfs_group_attr);
	zfs_rwlock_group = lck_grp_alloc_init("zfs-rwlock", zfs_group_attr);
	zfs_spinlock_group = lck_grp_alloc_init("zfs-spinlock", zfs_group_attr);

	zfs_kmem_alloc_tag = OSMalloc_Tagalloc("ZFS general purpose", 
			OSMT_DEFAULT);

	max_ncpus = 1;

	/* kernel memory space is 4 GB max */
	kern_mem_size = MIN(max_mem, (uint64_t)0x0FFFFFFFFULL);

	/* Calculate number of pages of memory on the system */
	physmem = kern_mem_size / PAGE_SIZE;

	/* Constrain our memory use on smaller memory systems */
	if (kern_mem_size <= 0x20000000)
		zfs_footprint.maximum = kern_mem_size / 7;    /* 512MB: ~15 % */
	else if (kern_mem_size <= 0x30000000)
		zfs_footprint.maximum = kern_mem_size / 5;    /* 768MB: ~20 % */
	else if (kern_mem_size <= 0x40000000)
		zfs_footprint.maximum = kern_mem_size / 3;    /* 1GB: ~33 % */
	else	/* set to 1GB limit maximum*/ 
		zfs_footprint.maximum = MIN((kern_mem_size / 2), 0x40000000);

	recalc_target_footprint(100);

	printf("zfs_context_init: footprint.maximum=%lu, footprint.target=%lu\n",
		zfs_footprint.maximum, zfs_footprint.target);
}
Example No. 27
/*
 * Initialise reassembly queue and fragment identifier.
 */
void
frag6_init(void)
{
	/* ip6q_alloc() uses mbufs for IPv6 fragment queue structures */
	_CASSERT(sizeof (struct ip6q) <= _MLEN);
	/* ip6af_alloc() uses mbufs for IPv6 fragment queue structures */
	_CASSERT(sizeof (struct ip6asfrag) <= _MLEN);

	/* IPv6 fragment reassembly queue lock */
	ip6qlock_grp_attr  = lck_grp_attr_alloc_init();
	ip6qlock_grp = lck_grp_alloc_init("ip6qlock", ip6qlock_grp_attr);
	ip6qlock_attr = lck_attr_alloc_init();
	lck_mtx_init(&ip6qlock, ip6qlock_grp, ip6qlock_attr);

	lck_mtx_lock(&ip6qlock);
	/* Initialize IPv6 reassembly queue. */
	ip6q.ip6q_next = ip6q.ip6q_prev = &ip6q;

	/* same limits as IPv4 */
	ip6_maxfragpackets = nmbclusters / 32;
	ip6_maxfrags = ip6_maxfragpackets * 2;
	ip6q_updateparams();
	lck_mtx_unlock(&ip6qlock);
}
Example No. 28
int
union_init(__unused struct vfsconf *vfsp)
{
	int i;

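	/* Allocate the lock group, lock attribute, and global union filesystem mutex. */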
	union_lck_grp_attr= lck_grp_attr_alloc_init();
#if DIAGNOSTIC
	lck_grp_attr_setstat(union_lck_grp_attr);
#endif
	union_lck_grp = lck_grp_alloc_init("union",  union_lck_grp_attr);
	union_lck_attr = lck_attr_alloc_init();
#if DIAGNOSTIC
	lck_attr_setdebug(union_lck_attr);
#endif
	union_mtxp = lck_mtx_alloc_init(union_lck_grp, union_lck_attr);

	for (i = 0; i < NHASH; i++)
		LIST_INIT(&unhead[i]);
	bzero((caddr_t) unvplock, sizeof(unvplock));
	/* add the hook for getdirentries */
	union_dircheckp = union_dircheck;
	
	return (0);
}
Example No. 29
/*
 * IP6 initialization: fill in IP6 protocol switch table.
 * All protocols not implemented in kernel go to raw IP6 protocol handler.
 */
void
ip6_init()
{
	struct ip6protosw *pr;
	int i;
	struct timeval tv;

#if DIAGNOSTIC
	if (sizeof(struct protosw) != sizeof(struct ip6protosw))
		panic("sizeof(protosw) != sizeof(ip6protosw)");
#endif
	pr = (struct ip6protosw *)pffindproto_locked(PF_INET6, IPPROTO_RAW, SOCK_RAW);
	if (pr == 0)
		panic("ip6_init");
	for (i = 0; i < IPPROTO_MAX; i++)
		ip6_protox[i] = pr;
	for (pr = (struct ip6protosw*)inet6domain.dom_protosw; pr; pr = pr->pr_next) {
		if(!(pr->pr_domain)) continue;    /* If uninitialized, skip */
		if (pr->pr_domain->dom_family == PF_INET6 &&
		    pr->pr_protocol && pr->pr_protocol != IPPROTO_RAW) {
			ip6_protox[pr->pr_protocol] = pr;
		}
	}

	ip6_mutex_grp_attr  = lck_grp_attr_alloc_init();

	ip6_mutex_grp = lck_grp_alloc_init("ip6", ip6_mutex_grp_attr);
	ip6_mutex_attr = lck_attr_alloc_init();

	if ((ip6_mutex = lck_mtx_alloc_init(ip6_mutex_grp, ip6_mutex_attr)) == NULL) {
		panic("ip6_init: can't alloc ip6_mutex\n");
	}
	if ((dad6_mutex = lck_mtx_alloc_init(ip6_mutex_grp, ip6_mutex_attr)) == NULL) {
		panic("ip6_init: can't alloc dad6_mutex\n");
	}
	if ((nd6_mutex = lck_mtx_alloc_init(ip6_mutex_grp, ip6_mutex_attr)) == NULL) {
		panic("ip6_init: can't alloc nd6_mutex\n");
	}

	if ((prefix6_mutex = lck_mtx_alloc_init(ip6_mutex_grp, ip6_mutex_attr)) == NULL) {
		panic("ip6_init: can't alloc prefix6_mutex\n");
	}

	if ((scope6_mutex = lck_mtx_alloc_init(ip6_mutex_grp, ip6_mutex_attr)) == NULL) {
		panic("ip6_init: can't alloc scope6_mutex\n");
	}


	inet6domain.dom_flags = DOM_REENTRANT;	

	ip6intrq.ifq_maxlen = ip6qmaxlen;
	in6_ifaddr_init();
	nd6_init();
	frag6_init();
	icmp6_init();
	/*
	 * in many cases, random() here does NOT return a random number, as
	 * initialization during bootstrap time occurs in a fixed order.
	 */
	microtime(&tv);
	ip6_flow_seq = random() ^ tv.tv_usec;
	microtime(&tv);
	ip6_desync_factor = (random() ^ tv.tv_usec) % MAX_TEMP_DESYNC_FACTOR;
	timeout(ip6_init2, (caddr_t)0, 1 * hz);

	lck_mtx_unlock(domain_proto_mtx);	
	proto_register_input(PF_INET6, ip6_proto_input, NULL, 0);
	lck_mtx_lock(domain_proto_mtx);	
}
Example No. 30
void
bsd_init(void)
{
	struct uthread *ut;
	unsigned int i;
	struct vfs_context context;
	kern_return_t	ret;
	struct ucred temp_cred;
	struct posix_cred temp_pcred;
#if NFSCLIENT || CONFIG_IMAGEBOOT
	boolean_t       netboot = FALSE;
#endif

#define bsd_init_kprintf(x...) /* kprintf("bsd_init: " x) */

	throttle_init();

	printf(copyright);
	
	bsd_init_kprintf("calling kmeminit\n");
	kmeminit();
	
	bsd_init_kprintf("calling parse_bsd_args\n");
	parse_bsd_args();

#if CONFIG_DEV_KMEM
	bsd_init_kprintf("calling dev_kmem_init\n");
	dev_kmem_init();
#endif

	/* Initialize kauth subsystem before instancing the first credential */
	bsd_init_kprintf("calling kauth_init\n");
	kauth_init();

	/* Initialize process and pgrp structures. */
	bsd_init_kprintf("calling procinit\n");
	procinit();

	/* Initialize the ttys (MUST be before kminit()/bsd_autoconf()!)*/
	tty_init();

	kernproc = &proc0;	/* implicitly bzero'ed */

	/* kernel_task->proc = kernproc; */
	set_bsdtask_info(kernel_task,(void *)kernproc);

	/* give kernproc a name */
	bsd_init_kprintf("calling process_name\n");
	process_name("kernel_task", kernproc);

	/* allocate proc lock group attribute and group */
	bsd_init_kprintf("calling lck_grp_attr_alloc_init\n");
	proc_lck_grp_attr= lck_grp_attr_alloc_init();

	proc_lck_grp = lck_grp_alloc_init("proc",  proc_lck_grp_attr);
#if CONFIG_FINE_LOCK_GROUPS
	proc_slock_grp = lck_grp_alloc_init("proc-slock",  proc_lck_grp_attr);
	proc_fdmlock_grp = lck_grp_alloc_init("proc-fdmlock",  proc_lck_grp_attr);
	proc_ucred_mlock_grp = lck_grp_alloc_init("proc-ucred-mlock",  proc_lck_grp_attr);
	proc_mlock_grp = lck_grp_alloc_init("proc-mlock",  proc_lck_grp_attr);
#endif
	/* Allocate proc lock attribute */
	proc_lck_attr = lck_attr_alloc_init();
#if 0
#if __PROC_INTERNAL_DEBUG
	lck_attr_setdebug(proc_lck_attr);
#endif
#endif

#if CONFIG_FINE_LOCK_GROUPS
	proc_list_mlock = lck_mtx_alloc_init(proc_mlock_grp, proc_lck_attr);
	proc_klist_mlock = lck_mtx_alloc_init(proc_mlock_grp, proc_lck_attr);
	lck_mtx_init(&kernproc->p_mlock, proc_mlock_grp, proc_lck_attr);
	lck_mtx_init(&kernproc->p_fdmlock, proc_fdmlock_grp, proc_lck_attr);
	lck_mtx_init(&kernproc->p_ucred_mlock, proc_ucred_mlock_grp, proc_lck_attr);
	lck_spin_init(&kernproc->p_slock, proc_slock_grp, proc_lck_attr);
#else
	proc_list_mlock = lck_mtx_alloc_init(proc_lck_grp, proc_lck_attr);
	proc_klist_mlock = lck_mtx_alloc_init(proc_lck_grp, proc_lck_attr);
	lck_mtx_init(&kernproc->p_mlock, proc_lck_grp, proc_lck_attr);
	lck_mtx_init(&kernproc->p_fdmlock, proc_lck_grp, proc_lck_attr);
	lck_mtx_init(&kernproc->p_ucred_mlock, proc_lck_grp, proc_lck_attr);
	lck_spin_init(&kernproc->p_slock, proc_lck_grp, proc_lck_attr);
#endif

	assert(bsd_simul_execs != 0);
	execargs_cache_lock = lck_mtx_alloc_init(proc_lck_grp, proc_lck_attr);
	execargs_cache_size = bsd_simul_execs;
	execargs_free_count = bsd_simul_execs;
	execargs_cache = (vm_offset_t *)kalloc(bsd_simul_execs * sizeof(vm_offset_t));
	bzero(execargs_cache, bsd_simul_execs * sizeof(vm_offset_t));
	
	if (current_task() != kernel_task)
		printf("bsd_init: We have a problem, "
				"current task is not kernel task\n");
	
	bsd_init_kprintf("calling get_bsdthread_info\n");
	ut = (uthread_t)get_bsdthread_info(current_thread());

#if CONFIG_MACF
	/*
	 * Initialize the MAC Framework
	 */
	mac_policy_initbsd();
	kernproc->p_mac_enforce = 0;

#if defined (__i386__) || defined (__x86_64__)
	/*
	 * We currently only support this on i386/x86_64, as that is the
	 * only lock code we have instrumented so far.
	 */
	check_policy_init(policy_check_flags);
#endif
#endif /* MAC */

	/* Initialize System Override call */
	init_system_override();
	
	/*
	 * Create process 0.
	 */
	proc_list_lock();
	LIST_INSERT_HEAD(&allproc, kernproc, p_list);
	kernproc->p_pgrp = &pgrp0;
	LIST_INSERT_HEAD(PGRPHASH(0), &pgrp0, pg_hash);
	LIST_INIT(&pgrp0.pg_members);
#ifdef CONFIG_FINE_LOCK_GROUPS
	lck_mtx_init(&pgrp0.pg_mlock, proc_mlock_grp, proc_lck_attr);
#else
	lck_mtx_init(&pgrp0.pg_mlock, proc_lck_grp, proc_lck_attr);
#endif
	/* No other BSD thread exists at this point, so this is safe without the pgrp lock */
	LIST_INSERT_HEAD(&pgrp0.pg_members, kernproc, p_pglist);
	kernproc->p_listflag |= P_LIST_INPGRP;
	kernproc->p_pgrpid = 0;
	kernproc->p_uniqueid = 0;

	pgrp0.pg_session = &session0;
	pgrp0.pg_membercnt = 1;

	session0.s_count = 1;
	session0.s_leader = kernproc;
	session0.s_listflags = 0;
#ifdef CONFIG_FINE_LOCK_GROUPS
	lck_mtx_init(&session0.s_mlock, proc_mlock_grp, proc_lck_attr);
#else
	lck_mtx_init(&session0.s_mlock, proc_lck_grp, proc_lck_attr);
#endif
	LIST_INSERT_HEAD(SESSHASH(0), &session0, s_hash);
	proc_list_unlock();

	kernproc->task = kernel_task;
	
	kernproc->p_stat = SRUN;
	kernproc->p_flag = P_SYSTEM;
	kernproc->p_lflag = 0;
	kernproc->p_ladvflag = 0;
	
#if DEVELOPMENT || DEBUG
	if (bootarg_disable_aslr)
		kernproc->p_flag |= P_DISABLE_ASLR;
#endif

	kernproc->p_nice = NZERO;
	kernproc->p_pptr = kernproc;

	TAILQ_INIT(&kernproc->p_uthlist);
	TAILQ_INSERT_TAIL(&kernproc->p_uthlist, ut, uu_list);
	
	kernproc->sigwait = FALSE;
	kernproc->sigwait_thread = THREAD_NULL;
	kernproc->exit_thread = THREAD_NULL;
	kernproc->p_csflags = CS_VALID;

	/*
	 * Create credential.  This also Initializes the audit information.
	 */
	bsd_init_kprintf("calling bzero\n");
	bzero(&temp_cred, sizeof(temp_cred));
	bzero(&temp_pcred, sizeof(temp_pcred));
	temp_pcred.cr_ngroups = 1;
	/* kern_proc, shouldn't call up to DS for group membership */
	temp_pcred.cr_flags = CRF_NOMEMBERD;
	temp_cred.cr_audit.as_aia_p = audit_default_aia_p;
	
	bsd_init_kprintf("calling kauth_cred_create\n");
	/*
	 * We have to label the temp cred before we create from it to
	 * properly set cr_ngroups, or the create will fail.
	 */
	posix_cred_label(&temp_cred, &temp_pcred);
	kernproc->p_ucred = kauth_cred_create(&temp_cred); 

	/* update cred on proc */
	PROC_UPDATE_CREDS_ONPROC(kernproc);

	/* give the (already existing) initial thread a reference on it */
	bsd_init_kprintf("calling kauth_cred_ref\n");
	kauth_cred_ref(kernproc->p_ucred);
	ut->uu_context.vc_ucred = kernproc->p_ucred;
	ut->uu_context.vc_thread = current_thread();

	TAILQ_INIT(&kernproc->p_aio_activeq);
	TAILQ_INIT(&kernproc->p_aio_doneq);
	kernproc->p_aio_total_count = 0;
	kernproc->p_aio_active_count = 0;

	bsd_init_kprintf("calling file_lock_init\n");
	file_lock_init();

#if CONFIG_MACF
	mac_cred_label_associate_kernel(kernproc->p_ucred);
#endif

	/* Create the file descriptor table. */
	kernproc->p_fd = &filedesc0;
	filedesc0.fd_cmask = cmask;
	filedesc0.fd_knlistsize = -1;
	filedesc0.fd_knlist = NULL;
	filedesc0.fd_knhash = NULL;
	filedesc0.fd_knhashmask = 0;

	/* Create the limits structures. */
	kernproc->p_limit = &limit0;
	for (i = 0; i < sizeof(kernproc->p_rlimit)/sizeof(kernproc->p_rlimit[0]); i++)
		limit0.pl_rlimit[i].rlim_cur = 
			limit0.pl_rlimit[i].rlim_max = RLIM_INFINITY;
	limit0.pl_rlimit[RLIMIT_NOFILE].rlim_cur = NOFILE;
	limit0.pl_rlimit[RLIMIT_NPROC].rlim_cur = maxprocperuid;
	limit0.pl_rlimit[RLIMIT_NPROC].rlim_max = maxproc;
	limit0.pl_rlimit[RLIMIT_STACK] = vm_initial_limit_stack;
	limit0.pl_rlimit[RLIMIT_DATA] = vm_initial_limit_data;
	limit0.pl_rlimit[RLIMIT_CORE] = vm_initial_limit_core;
	limit0.pl_refcnt = 1;

	kernproc->p_stats = &pstats0;
	kernproc->p_sigacts = &sigacts0;

	/*
	 * Charge root for one process: launchd.
	 */
	bsd_init_kprintf("calling chgproccnt\n");
	(void)chgproccnt(0, 1);

	/*
	 *	Allocate a kernel submap for pageable memory
	 *	for temporary copying (execve()).
	 */
	{
		vm_offset_t	minimum;

		bsd_init_kprintf("calling kmem_suballoc\n");
		assert(bsd_pageable_map_size != 0);
		ret = kmem_suballoc(kernel_map,
				&minimum,
				(vm_size_t)bsd_pageable_map_size,
				TRUE,
				VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_BSD),
				&bsd_pageable_map);
		if (ret != KERN_SUCCESS) 
			panic("bsd_init: Failed to allocate bsd pageable map");
	}

	/*
	 * Initialize buffers and hash links for buffers
	 *
	 * SIDE EFFECT: Starts a thread for bcleanbuf_thread(), so must
	 *		happen after a credential has been associated with
	 *		the kernel task.
	 */
	bsd_init_kprintf("calling bsd_bufferinit\n");
	bsd_bufferinit();

	/* Initialize the execve() semaphore */
	bsd_init_kprintf("calling semaphore_create\n");

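	/* Note: ret still holds the result of the kmem_suballoc() call above. */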
	if (ret != KERN_SUCCESS)
		panic("bsd_init: Failed to create execve semaphore");

	/*
	 * Initialize the calendar.
	 */
	bsd_init_kprintf("calling IOKitInitializeTime\n");
	IOKitInitializeTime();

	bsd_init_kprintf("calling ubc_init\n");
	ubc_init();

	/*
	 * Initialize device-switches.
	 */
	bsd_init_kprintf("calling devsw_init() \n");
	devsw_init();

	/* Initialize the file systems. */
	bsd_init_kprintf("calling vfsinit\n");
	vfsinit();

#if CONFIG_PROC_UUID_POLICY
	/* Initialize the proc_uuid_policy subsystem */
	bsd_init_kprintf("calling proc_uuid_policy_init()\n");
	proc_uuid_policy_init();
#endif

#if SOCKETS
	/* Initialize per-CPU cache allocator */
	mcache_init();

	/* Initialize mbuf's. */
	bsd_init_kprintf("calling mbinit\n");
	mbinit();
	net_str_id_init(); /* for mbuf tags */
#endif /* SOCKETS */

	/*
	 * Initializes security event auditing.
	 * XXX: Should/could this occur later?
	 */
#if CONFIG_AUDIT
	bsd_init_kprintf("calling audit_init\n");
 	audit_init();  
#endif

	/* Initialize kqueues */
	bsd_init_kprintf("calling knote_init\n");
	knote_init();

	/* Initialize for async IO */
	bsd_init_kprintf("calling aio_init\n");
	aio_init();

	/* Initialize pipes */
	bsd_init_kprintf("calling pipeinit\n");
	pipeinit();

	/* Initialize SysV shm subsystem locks; the subsystem proper is
	 * initialized through a sysctl.
	 */
#if SYSV_SHM
	bsd_init_kprintf("calling sysv_shm_lock_init\n");
	sysv_shm_lock_init();
#endif
#if SYSV_SEM
	bsd_init_kprintf("calling sysv_sem_lock_init\n");
	sysv_sem_lock_init();
#endif
#if SYSV_MSG
	bsd_init_kprintf("sysv_msg_lock_init\n");
	sysv_msg_lock_init();
#endif
	bsd_init_kprintf("calling pshm_lock_init\n");
	pshm_lock_init();
	bsd_init_kprintf("calling psem_lock_init\n");
	psem_lock_init();

	pthread_init();
	/* POSIX Shm and Sem */
	bsd_init_kprintf("calling pshm_cache_init\n");
	pshm_cache_init();
	bsd_init_kprintf("calling psem_cache_init\n");
	psem_cache_init();
	bsd_init_kprintf("calling time_zone_slock_init\n");
	time_zone_slock_init();
	bsd_init_kprintf("calling select_waitq_init\n");
	select_waitq_init();

	/*
	 * Initialize protocols.  Block reception of incoming packets
	 * until everything is ready.
	 */
	bsd_init_kprintf("calling sysctl_register_fixed\n");
	sysctl_register_fixed(); 
	bsd_init_kprintf("calling sysctl_mib_init\n");
	sysctl_mib_init();
#if NETWORKING
	bsd_init_kprintf("calling dlil_init\n");
	dlil_init();
	bsd_init_kprintf("calling proto_kpi_init\n");
	proto_kpi_init();
#endif /* NETWORKING */
#if SOCKETS
	bsd_init_kprintf("calling socketinit\n");
	socketinit();
	bsd_init_kprintf("calling domaininit\n");
	domaininit();
	iptap_init();
#if FLOW_DIVERT
	flow_divert_init();
#endif	/* FLOW_DIVERT */
#endif /* SOCKETS */

	kernproc->p_fd->fd_cdir = NULL;
	kernproc->p_fd->fd_rdir = NULL;

#if CONFIG_FREEZE
#ifndef CONFIG_MEMORYSTATUS
    #error "CONFIG_FREEZE defined without matching CONFIG_MEMORYSTATUS"
#endif
	/* Initialise background freezing */
	bsd_init_kprintf("calling memorystatus_freeze_init\n");
	memorystatus_freeze_init();
#endif

#if CONFIG_MEMORYSTATUS
	/* Initialize kernel memory status notifications */
	bsd_init_kprintf("calling memorystatus_init\n");
	memorystatus_init();
#endif /* CONFIG_MEMORYSTATUS */

	bsd_init_kprintf("calling macx_init\n");
	macx_init();

	bsd_init_kprintf("calling acct_init\n");
	acct_init();

#ifdef GPROF
	/* Initialize kernel profiling. */
	kmstartup();
#endif

	bsd_init_kprintf("calling bsd_autoconf\n");
	bsd_autoconf();

#if CONFIG_DTRACE
	dtrace_postinit();
#endif

	/*
	 * We attach the loopback interface *way* down here to ensure
	 * it happens after autoconf(), otherwise it becomes the
	 * "primary" interface.
	 */
#include <loop.h>
#if NLOOP > 0
	bsd_init_kprintf("calling loopattach\n");
	loopattach();			/* XXX */
#endif
#if NGIF
	/* Initialize gif interface (after lo0) */
	gif_init();
#endif

#if PFLOG
	/* Initialize packet filter log interface */
	pfloginit();
#endif /* PFLOG */

#if NETHER > 0
	/* Register the built-in dlil ethernet interface family */
	bsd_init_kprintf("calling ether_family_init\n");
	ether_family_init();
#endif /* ETHER */

#if NETWORKING
	/* Call any kext code that wants to run just after network init */
	bsd_init_kprintf("calling net_init_run\n");
	net_init_run();
	
#if CONTENT_FILTER
	cfil_init();
#endif

#if PACKET_MANGLER
	pkt_mnglr_init();
#endif	

#if NECP
	/* Initialize Network Extension Control Policies */
	necp_init();
#endif

	netagent_init();

	/* register user tunnel kernel control handler */
	utun_register_control();
#if IPSEC
	ipsec_register_control();
#endif /* IPSEC */
	netsrc_init();
	nstat_init();
	tcp_cc_init();
#if MPTCP
	mptcp_control_register();
#endif /* MPTCP */
#endif /* NETWORKING */

	bsd_init_kprintf("calling vnode_pager_bootstrap\n");
	vnode_pager_bootstrap();

	bsd_init_kprintf("calling inittodr\n");
	inittodr(0);

	/* Mount the root file system. */
	while( TRUE) {
		int err;

		bsd_init_kprintf("calling setconf\n");
		setconf();
#if NFSCLIENT
		netboot = (mountroot == netboot_mountroot);
#endif

		bsd_init_kprintf("vfs_mountroot\n");
		if (0 == (err = vfs_mountroot()))
			break;
		rootdevice[0] = '\0';
#if NFSCLIENT
		if (netboot) {
			PE_display_icon( 0, "noroot");  /* XXX a netboot-specific icon would be nicer */
			vc_progress_set(FALSE, 0);
			for (i=1; 1; i*=2) {
				printf("bsd_init: failed to mount network root, error %d, %s\n",
					err, PE_boot_args());
				printf("We are hanging here...\n");
				IOSleep(i*60*1000);
			}
			/*NOTREACHED*/
		}
#endif
		printf("cannot mount root, errno = %d\n", err);
		boothowto |= RB_ASKNAME;
	}

	IOSecureBSDRoot(rootdevice);

	context.vc_thread = current_thread();
	context.vc_ucred = kernproc->p_ucred;
	mountlist.tqh_first->mnt_flag |= MNT_ROOTFS;

	bsd_init_kprintf("calling VFS_ROOT\n");
	/* Get the vnode for '/'.  Set fdp->fd_fd.fd_cdir to reference it. */
	if (VFS_ROOT(mountlist.tqh_first, &rootvnode, &context))
		panic("bsd_init: cannot find root vnode: %s", PE_boot_args());
	rootvnode->v_flag |= VROOT;
	(void)vnode_ref(rootvnode);
	(void)vnode_put(rootvnode);
	filedesc0.fd_cdir = rootvnode;

#if NFSCLIENT
	if (netboot) {
		int err;

		netboot = TRUE;
		/* post mount setup */
		if ((err = netboot_setup()) != 0) {
			PE_display_icon( 0, "noroot");  /* XXX a netboot-specific icon would be nicer */
			vc_progress_set(FALSE, 0);
			for (i=1; 1; i*=2) {
				printf("bsd_init: NetBoot could not find root, error %d: %s\n",
					err, PE_boot_args());
				printf("We are hanging here...\n");
				IOSleep(i*60*1000);
			}
			/*NOTREACHED*/
		}
	}
#endif
	

#if CONFIG_IMAGEBOOT
	/*
	 * See if a system disk image is present. If so, mount it and
	 * switch the root vnode to point to it
	 */ 
	if (netboot == FALSE && imageboot_needed()) {
		/* 
		 * An image was found.  No turning back: we're booted
		 * with a kernel from the disk image.
		 */
		imageboot_setup(); 
	}
#endif /* CONFIG_IMAGEBOOT */
  
	/* set initial time; all other resource data is already zero'ed */
	microtime_with_abstime(&kernproc->p_start, &kernproc->p_stats->ps_start);

#if DEVFS
	{
	    char mounthere[] = "/dev";	/* !const because of internal casting */

	    bsd_init_kprintf("calling devfs_kernel_mount\n");
	    devfs_kernel_mount(mounthere);
	}
#endif /* DEVFS */

	/* Initialize signal state for process 0. */
	bsd_init_kprintf("calling siginit\n");
	siginit(kernproc);

	bsd_init_kprintf("calling bsd_utaskbootstrap\n");
	bsd_utaskbootstrap();

#if defined(__LP64__)
	kernproc->p_flag |= P_LP64;
#endif

	pal_kernel_announce();

	bsd_init_kprintf("calling mountroot_post_hook\n");

	/* invoke post-root-mount hook */
	if (mountroot_post_hook != NULL)
		mountroot_post_hook();

#if 0 /* not yet */
	consider_zone_gc(FALSE);
#endif


	bsd_init_kprintf("done\n");
}