Example #1
File: IOLib.cpp Project: Prajna/xnu
void IOLibInit(void)
{
    kern_return_t ret;

    static bool libInitialized;

    if(libInitialized)
        return;	

    gIOKitPageableSpace.maps[0].address = 0;
    ret = kmem_suballoc(kernel_map,
                    &gIOKitPageableSpace.maps[0].address,
                    kIOPageableMapSize,
                    TRUE,
                    VM_FLAGS_ANYWHERE,
                    &gIOKitPageableSpace.maps[0].map);
    if (ret != KERN_SUCCESS)
        panic("failed to allocate iokit pageable map\n");

    IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL);

    gIOKitPageableSpace.lock 		= lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    gIOKitPageableSpace.maps[0].end	= gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
    gIOKitPageableSpace.hint		= 0;
    gIOKitPageableSpace.count		= 1;

    gIOMallocContiguousEntriesLock 	= lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    queue_init( &gIOMallocContiguousEntries );

    libInitialized = true;
}
Example #2
int Lpx_PCB_alloc( struct socket *so,
				   struct lpxpcb *head,
				   struct proc *td )
{
    register struct lpxpcb *lpxp;

    DEBUG_PRINT(DEBUG_MASK_PCB_TRACE, ("Lpx_PCB_alloc\n"));

    MALLOC(lpxp, struct lpxpcb *, sizeof *lpxp, M_PCB, M_WAITOK);
    if (lpxp == NULL) {
        DEBUG_PRINT(DEBUG_MASK_PCB_ERROR, ("Lpx_PCB_alloc:==> Failed\n"));
        return (ENOBUFS);
    }
    bzero(lpxp, sizeof(*lpxp));

    lpxp->lpxp_socket = so;
    if (lpxcksum)
        lpxp->lpxp_flags |= LPXP_CHECKSUM;

    read_random(&lpxp->lpxp_messageid, sizeof(lpxp->lpxp_messageid));

    lck_rw_lock_exclusive(head->lpxp_list_rw);
    insque(lpxp, head);
    lck_rw_unlock_exclusive(head->lpxp_list_rw);

    lpxp->lpxp_head = head;

    so->so_pcb = (caddr_t)lpxp;
    //so->so_options |= SO_DONTROUTE;

    if (so->so_proto->pr_flags & PR_PCBLOCK) {

        if (head == &lpx_stream_pcb) {
            lpxp->lpxp_mtx = lck_mtx_alloc_init(stream_mtx_grp, stream_mtx_attr);
            lpxp->lpxp_mtx_grp = stream_mtx_grp;
        } else {
            lpxp->lpxp_mtx = lck_mtx_alloc_init(datagram_mtx_grp, datagram_mtx_attr);
            lpxp->lpxp_mtx_grp = datagram_mtx_grp;
        }

        if (lpxp->lpxp_mtx == NULL) {
            DEBUG_PRINT(DEBUG_MASK_PCB_ERROR, ("Lpx_PCB_alloc: can't alloc mutex! so=%p\n", so));

            /* Back out the list insertion and socket linkage before freeing. */
            lck_rw_lock_exclusive(head->lpxp_list_rw);
            remque(lpxp);
            lck_rw_unlock_exclusive(head->lpxp_list_rw);
            so->so_pcb = NULL;

            FREE(lpxp, M_PCB);

            return (ENOMEM);
        }
    }

    return (0);
}
Example #3
void
init_domain(struct domain *dp)
{
	struct protosw  *pr;
	
	if ((dp->dom_mtx = lck_mtx_alloc_init(domain_proto_mtx_grp, domain_proto_mtx_attr)) == NULL) {
		printf("init_domain: can't init domain mtx for domain=%s\n", dp->dom_name);
		return;	/* we have a problem... */
	}

	if (dp->dom_init)
		(*dp->dom_init)();

	/* and then init the currently installed protos in this domain */

	for (pr = dp->dom_protosw; pr; pr = pr->pr_next) {
		if (pr->pr_usrreqs == 0)
			panic("domaininit: %ssw[%d] has no usrreqs!",
			      dp->dom_name, 
			      (int)(pr - dp->dom_protosw));

		init_proto(pr);

	}

	/* Recompute for new protocol */
	if (max_linkhdr < 16)		/* XXX - Sheesh; everything's ether? */
		max_linkhdr = 16;
	if (dp->dom_protohdrlen > max_protohdr)
		max_protohdr = dp->dom_protohdrlen;
	max_hdr = max_linkhdr + max_protohdr;
	max_datalen = MHLEN - max_hdr;
}
Example #4
/* initial setup done at time of sysinit */
void
pipeinit(void)
{
	vm_size_t zone_size;

	nbigpipe = 0;

	zone_size = 8192 * sizeof(struct pipe);
	pipe_zone = zinit(sizeof(struct pipe), zone_size, 4096, "pipe zone");

	/* allocate lock group attribute and group for pipe mutexes */
	pipe_mtx_grp_attr = lck_grp_attr_alloc_init();
	pipe_mtx_grp = lck_grp_alloc_init("pipe", pipe_mtx_grp_attr);

	/* allocate the lock attribute for pipe mutexes */
	pipe_mtx_attr = lck_attr_alloc_init();

	/*
	 * Set up garbage collection for dead pipes
	 */
	zone_size = (PIPE_GARBAGE_QUEUE_LIMIT + 20) *
	    sizeof(struct pipe_garbage);
	pipe_garbage_zone = (zone_t)zinit(sizeof(struct pipe_garbage),
	    zone_size, 4096, "pipe garbage zone");
	pipe_garbage_lock = lck_mtx_alloc_init(pipe_mtx_grp, pipe_mtx_attr);
}
Example #5
funnel_t *
funnel_alloc(
	int type)
{
	lck_mtx_t	*m;
	funnel_t	*fnl;

	if (funnel_lck_grp == LCK_GRP_NULL) {
		funnel_lck_grp_attr = lck_grp_attr_alloc_init();

		funnel_lck_grp = lck_grp_alloc_init("Funnel",  funnel_lck_grp_attr);

		funnel_lck_attr = lck_attr_alloc_init();
	}
	if ((fnl = (funnel_t *)kalloc(sizeof(funnel_t))) != 0){
		bzero((void *)fnl, sizeof(funnel_t));
		if ((m = lck_mtx_alloc_init(funnel_lck_grp, funnel_lck_attr)) == (lck_mtx_t *)NULL) {
			kfree(fnl, sizeof(funnel_t));
			return(THR_FUNNEL_NULL);
		}
		fnl->fnl_mutex = m;
		fnl->fnl_type = type;
	}
	return(fnl);
}
Example #6
/*
 * Initialize hash links for nfsnodes
 * and build nfsnode free list.
 */
void
nfs_nhinit(void)
{
	nfs_node_hash_lck_grp = lck_grp_alloc_init("nfs_node_hash", LCK_GRP_ATTR_NULL);
	nfs_node_hash_mutex = lck_mtx_alloc_init(nfs_node_hash_lck_grp, LCK_ATTR_NULL);
	nfs_node_lck_grp = lck_grp_alloc_init("nfs_node", LCK_GRP_ATTR_NULL);
	nfs_data_lck_grp = lck_grp_alloc_init("nfs_data", LCK_GRP_ATTR_NULL);
}
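
The groups and the mutex above are allocated once at boot and never released, so nfs_nhinit() has no teardown path. For a subsystem that could unload, the allocation would normally be paired with a symmetric release; a hypothetical sketch (nfs_nhdestroy is not in the NFS sources; lck_mtx_free() and lck_grp_free() are the real counterparts, visible in Example #10 below) frees each mutex before the group that owns it:

void
nfs_nhdestroy(void)
{
	/* Hypothetical teardown mirroring nfs_nhinit(): mutex first, then groups. */
	lck_mtx_free(nfs_node_hash_mutex, nfs_node_hash_lck_grp);
	lck_grp_free(nfs_node_hash_lck_grp);
	lck_grp_free(nfs_node_lck_grp);
	lck_grp_free(nfs_data_lck_grp);
}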
Example #7
static inline void setup_locks() {
    /* Create locks.  Cannot be done on the stack. */
    osquery.lck_grp_attr = lck_grp_attr_alloc_init();
    lck_grp_attr_setstat(osquery.lck_grp_attr);

    osquery.lck_grp = lck_grp_alloc_init("osquery", osquery.lck_grp_attr);

    osquery.lck_attr = lck_attr_alloc_init();

    osquery.mtx = lck_mtx_alloc_init(osquery.lck_grp, osquery.lck_attr);
}
Example #8
static inline void setup_locks() {
  // Create locks. Cannot be done on the stack.
  osquery.lck_grp_attr = lck_grp_attr_alloc_init();
  lck_grp_attr_setstat(osquery.lck_grp_attr);
  osquery.lck_grp = lck_grp_alloc_init("osquery", osquery.lck_grp_attr);
  osquery.lck_attr = lck_attr_alloc_init();

  // MTX is the IOCTL API handling lock.
  // This assures only one daemon will use the kernel API simultaneously.
  osquery.mtx = lck_mtx_alloc_init(osquery.lck_grp, osquery.lck_attr);
}
Example #9
/* hv_support boot initialization */
void
hv_support_init(void) {
#if defined(__x86_64__) && CONFIG_VMX
	hv_support_available = vmx_hv_support();
#endif

	hv_support_lck_grp = lck_grp_alloc_init("hv_support", LCK_GRP_ATTR_NULL);
	assert(hv_support_lck_grp);

	hv_support_lck_mtx = lck_mtx_alloc_init(hv_support_lck_grp, LCK_ATTR_NULL);
	assert(hv_support_lck_mtx);
}
Example #10
File: bpf.c Project: SbIm/xnu-env
void
bpf_init(__unused void *unused)
{
#ifdef __APPLE__
	int 	i;
	int	maj;

	if (bpf_devsw_installed == 0) {
		bpf_devsw_installed = 1;

		bpf_mlock_grp_attr = lck_grp_attr_alloc_init();

		bpf_mlock_grp = lck_grp_alloc_init("bpf", bpf_mlock_grp_attr);

		bpf_mlock_attr = lck_attr_alloc_init();

		bpf_mlock = lck_mtx_alloc_init(bpf_mlock_grp, bpf_mlock_attr);

		if (bpf_mlock == 0) {
			printf("bpf_init: failed to allocate bpf_mlock\n");
			bpf_devsw_installed = 0;
			return;
		}
		
		maj = cdevsw_add(CDEV_MAJOR, &bpf_cdevsw);
		if (maj == -1) {
			if (bpf_mlock)
				lck_mtx_free(bpf_mlock, bpf_mlock_grp);
			if (bpf_mlock_attr)
				lck_attr_free(bpf_mlock_attr);
			if (bpf_mlock_grp)
				lck_grp_free(bpf_mlock_grp);
			if (bpf_mlock_grp_attr)
				lck_grp_attr_free(bpf_mlock_grp_attr);
			
			bpf_mlock = NULL;
			bpf_mlock_attr = NULL;
			bpf_mlock_grp = NULL;
			bpf_mlock_grp_attr = NULL;
			bpf_devsw_installed = 0;
			printf("bpf_init: failed to allocate a major number!\n");
			return;
		}

		for (i = 0 ; i < NBPFILTER; i++)
			bpf_make_dev_t(maj);
	}
#else
	cdevsw_add(&bpf_cdevsw);
#endif
}
Example #11
void
fuse_sysctl_start(void)
{
    int i;
#if OSXFUSE_ENABLE_MACFUSE_MODE
    osxfuse_lock_group  = lck_grp_alloc_init("osxfuse", NULL);
    osxfuse_sysctl_lock = lck_mtx_alloc_init(osxfuse_lock_group, NULL);
#endif

    sysctl_register_oid(&sysctl__osxfuse);
    for (i = 0; fuse_sysctl_list[i]; i++) {
       sysctl_register_oid(fuse_sysctl_list[i]);
    }
}
Example #12
rMutex
rpal_mutex_create(void)
{
    lck_mtx_t* mutex = NULL;
    
    lck_grp_attr_t* gattr = NULL;
    lck_attr_t* lattr = NULL;
    
    if( NULL == g_lck_group )
    {
        rpal_debug_info( "mutex group not created, creating" );
        
        gattr = lck_grp_attr_alloc_init();
        
        if( NULL == gattr )
        {
            rpal_debug_critical( "could not create mutex group" );
            return NULL;
        }
        
        lck_grp_attr_setstat( gattr );
        
        g_lck_group = lck_grp_alloc_init( "hcphbs", gattr );
        
        lck_grp_attr_free( gattr );
    }
    
    if( NULL == g_lck_group )
    {
        return NULL;
    }
    
    lattr = lck_attr_alloc_init();
    
    if( NULL != lattr )
    {
        mutex = lck_mtx_alloc_init( g_lck_group, lattr );
        lck_attr_free( lattr );
    }
    else
    {
        rpal_debug_critical( "could not create mutex attributes" );
    }
    
    return mutex;
}
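
Note that the lock attribute is freed immediately after lck_mtx_alloc_init() returns: the mutex keeps no reference to its attributes, while the group must outlive every lock created from it. A matching destroy routine, sketched under the assumption that g_lck_group stays allocated for the module's lifetime (rpal_mutex_free is a hypothetical name, and rMutex is assumed to be the lck_mtx_t* returned above):

void
rpal_mutex_free( rMutex mutex )
{
    /* Sketch: return the mutex to the shared group; the group itself
       is left alive for any other mutexes still using it. */
    if( NULL != mutex )
    {
        lck_mtx_free( (lck_mtx_t*)mutex, g_lck_group );
    }
}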
Example #13
bool OSSymbolPool::init()
{
    count = 0;
    nBuckets = INITIAL_POOL_SIZE;
    buckets = (Bucket *) kalloc(nBuckets * sizeof(Bucket));
    ACCUMSIZE(nBuckets * sizeof(Bucket));
    if (!buckets)
        return false;

    bzero(buckets, nBuckets * sizeof(Bucket));

    poolGate = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);

    return poolGate != 0;
}
Example #14
/* This should only be called from the bootstrap thread. */
void
ktrace_init(void)
{
	static lck_grp_attr_t *lock_grp_attr = NULL;
	static lck_grp_t *lock_grp = NULL;
	static boolean_t initialized = FALSE;

	if (initialized) {
		return;
	}

	lock_grp_attr = lck_grp_attr_alloc_init();
	lock_grp = lck_grp_alloc_init("ktrace", lock_grp_attr);
	lck_grp_attr_free(lock_grp_attr);

	ktrace_lock = lck_mtx_alloc_init(lock_grp, LCK_ATTR_NULL);
	assert(ktrace_lock);
	initialized = TRUE;
}
Example #15
static errno_t alloc_locks()
{
    errno_t ret = 0;

    global_mutex_group = lck_grp_alloc_init(BUNDLE_ID, LCK_GRP_ATTR_NULL);
    if (global_mutex_group == NULL) {
        pp("lck_grp_alloc_init failed");
        ret = ENOMEM;
    }

    if (ret == 0) {
        global_mutex = lck_mtx_alloc_init(global_mutex_group, LCK_ATTR_NULL);
        if (global_mutex == NULL) {
            pp("lck_mtx_alloc_init failed");
            ret = ENOMEM;
        }
    }

    return ret;
}
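
A matching free_locks() (a sketch with a hypothetical name, assuming no thread still holds global_mutex when it runs) releases the mutex before the group that owns it:

static void free_locks()
{
    /* Sketch: order matters -- free the mutex before its lock group. */
    if (global_mutex != NULL) {
        lck_mtx_free(global_mutex, global_mutex_group);
        global_mutex = NULL;
    }
    if (global_mutex_group != NULL) {
        lck_grp_free(global_mutex_group);
        global_mutex_group = NULL;
    }
}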
Example #16
File: mcache.c Project: Algozjb/xnu
/*
 * Initialize the framework; this is currently called as part of BSD init.
 */
__private_extern__ void
mcache_init(void)
{
	mcache_bkttype_t *btp;
	unsigned int i;
	char name[32];

	ncpu = ml_get_max_cpus();

	mcache_llock_grp_attr = lck_grp_attr_alloc_init();
	mcache_llock_grp = lck_grp_alloc_init("mcache.list",
	    mcache_llock_grp_attr);
	mcache_llock_attr = lck_attr_alloc_init();
	mcache_llock = lck_mtx_alloc_init(mcache_llock_grp, mcache_llock_attr);

	mcache_zone = zinit(MCACHE_ALLOC_SIZE, 256 * MCACHE_ALLOC_SIZE,
	    PAGE_SIZE, "mcache");
	if (mcache_zone == NULL)
		panic("mcache_init: failed to allocate mcache zone\n");
	zone_change(mcache_zone, Z_CALLERACCT, FALSE);

	LIST_INIT(&mcache_head);

	for (i = 0; i < sizeof (mcache_bkttype) / sizeof (*btp); i++) {
		btp = &mcache_bkttype[i];
		(void) snprintf(name, sizeof (name), "bkt_%d",
		    btp->bt_bktsize);
		btp->bt_cache = mcache_create(name,
		    (btp->bt_bktsize + 1) * sizeof (void *), 0, 0, MCR_SLEEP);
	}

	PE_parse_boot_argn("mcache_flags", &mcache_flags, sizeof (mcache_flags));
	mcache_flags &= MCF_FLAGS_MASK;

	mcache_audit_cache = mcache_create("audit", sizeof (mcache_audit_t),
	    0, 0, MCR_SLEEP);

	mcache_reap_interval = 15 * hz;
	mcache_applyall(mcache_cache_bkt_enable);
	mcache_ready = 1;
}
Example #17
RTDECL(int)  RTSemFastMutexCreate(PRTSEMFASTMUTEX phFastMtx)
{
    AssertCompile(sizeof(RTSEMFASTMUTEXINTERNAL) > sizeof(void *));
    AssertPtrReturn(phFastMtx, VERR_INVALID_POINTER);
    RT_ASSERT_PREEMPTIBLE();

    PRTSEMFASTMUTEXINTERNAL pThis = (PRTSEMFASTMUTEXINTERNAL)RTMemAlloc(sizeof(*pThis));
    if (pThis)
    {
        pThis->u32Magic = RTSEMFASTMUTEX_MAGIC;
        Assert(g_pDarwinLockGroup);
        pThis->pMtx = lck_mtx_alloc_init(g_pDarwinLockGroup, LCK_ATTR_NULL);
        if (pThis->pMtx)
        {
            *phFastMtx = pThis;
            return VINF_SUCCESS;
        }

        RTMemFree(pThis);
    }
    return VERR_NO_MEMORY;
}
Example #18
int
union_init(__unused struct vfsconf *vfsp)
{
	int i;

	union_lck_grp_attr= lck_grp_attr_alloc_init();
#if DIAGNOSTIC
	lck_grp_attr_setstat(union_lck_grp_attr);
#endif
	union_lck_grp = lck_grp_alloc_init("union",  union_lck_grp_attr);
	union_lck_attr = lck_attr_alloc_init();
#if DIAGNOSTIC
	lck_attr_setdebug(union_lck_attr);
#endif
	union_mtxp = lck_mtx_alloc_init(union_lck_grp, union_lck_attr);

	for (i = 0; i < NHASH; i++)
		LIST_INIT(&unhead[i]);
	bzero((caddr_t) unvplock, sizeof(unvplock));
	/* add the hook for getdirentries */
	union_dircheckp = union_dircheck;
	
	return (0);
}
Example #19
/*
 * gre_ipfilter_init(), initialize resources required by ip filter
 */
errno_t gre_ipfilter_init(void)
{
#ifdef DEBUG
    printf("%s ...\n", __FUNCTION__);
#endif

    if (gre_ipf_mtx != NULL) {
#ifdef DEBUG
        printf("%s: gre_ipf_mtx already inited\n", __FUNCTION__);
#endif
        goto success;
    }

    gre_ipf_mtx = lck_mtx_alloc_init(gre_lck_grp, NULL);

    if (gre_ipf_mtx == NULL)
        goto failed;

    if (gre_ipfilter_attach()) {/* attach ip filter */
        lck_mtx_free(gre_ipf_mtx, gre_lck_grp);
        gre_ipf_mtx = NULL;
        goto failed;
    }

success:
#ifdef DEBUG
    printf("%s: done\n", __FUNCTION__);
#endif
    return 0;

failed:
#ifdef DEBUG
    printf("%s: fail\n", __FUNCTION__);
#endif
    return -1;
}
Example #20
/*
 * This function is called very early on in the Mach startup, from the
 * function start_kernel_threads() in osfmk/kern/startup.c.  It's called
 * in the context of the current (startup) task using a call to the
 * function kernel_thread_create() to jump into start_kernel_threads().
 * Internally, kernel_thread_create() calls thread_create_internal(),
 * which calls uthread_alloc().  The function of uthread_alloc() is
 * normally to allocate a uthread structure, and fill out the uu_sigmask,
 * uu_context fields.  It skips filling these out in the case of the "task"
 * being "kernel_task", because the order of operation is inverted.  To
 * account for that, we need to manually fill in at least the contents
 * of the uu_context.vc_ucred field so that the uthread structure can be
 * used like any other.
 */
void
bsd_init(void)
{
	struct uthread *ut;
	unsigned int i;
#if __i386__ || __x86_64__
	int error;
#endif	
	struct vfs_context context;
	kern_return_t	ret;
	struct ucred temp_cred;

#define bsd_init_kprintf(x...) /* kprintf("bsd_init: " x) */

	kernel_flock = funnel_alloc(KERNEL_FUNNEL);
	if (kernel_flock == (funnel_t *)0 ) {
		panic("bsd_init: Failed to allocate kernel funnel");
	}
        
	printf(copyright);
	
	bsd_init_kprintf("calling kmeminit\n");
	kmeminit();
	
	bsd_init_kprintf("calling parse_bsd_args\n");
	parse_bsd_args();

	/* Initialize kauth subsystem before instancing the first credential */
	bsd_init_kprintf("calling kauth_init\n");
	kauth_init();

	/* Initialize process and pgrp structures. */
	bsd_init_kprintf("calling procinit\n");
	procinit();

	/* Initialize the ttys (MUST be before kminit()/bsd_autoconf()!)*/
	tty_init();

	kernproc = &proc0;	/* implicitly bzero'ed */

	/* kernel_task->proc = kernproc; */
	set_bsdtask_info(kernel_task,(void *)kernproc);

	/* give kernproc a name */
	bsd_init_kprintf("calling process_name\n");
	process_name("kernel_task", kernproc);

	/* allocate proc lock group attribute and group */
	bsd_init_kprintf("calling lck_grp_attr_alloc_init\n");
	proc_lck_grp_attr= lck_grp_attr_alloc_init();

	proc_lck_grp = lck_grp_alloc_init("proc",  proc_lck_grp_attr);
#ifndef CONFIG_EMBEDDED
	proc_slock_grp = lck_grp_alloc_init("proc-slock",  proc_lck_grp_attr);
	proc_fdmlock_grp = lck_grp_alloc_init("proc-fdmlock",  proc_lck_grp_attr);
	proc_mlock_grp = lck_grp_alloc_init("proc-mlock",  proc_lck_grp_attr);
#endif
	/* Allocate proc lock attribute */
	proc_lck_attr = lck_attr_alloc_init();
#if 0
#if __PROC_INTERNAL_DEBUG
	lck_attr_setdebug(proc_lck_attr);
#endif
#endif

#ifdef CONFIG_EMBEDDED
	proc_list_mlock = lck_mtx_alloc_init(proc_lck_grp, proc_lck_attr);
	proc_klist_mlock = lck_mtx_alloc_init(proc_lck_grp, proc_lck_attr);
	lck_mtx_init(&kernproc->p_mlock, proc_lck_grp, proc_lck_attr);
	lck_mtx_init(&kernproc->p_fdmlock, proc_lck_grp, proc_lck_attr);
	lck_spin_init(&kernproc->p_slock, proc_lck_grp, proc_lck_attr);
#else	
	proc_list_mlock = lck_mtx_alloc_init(proc_mlock_grp, proc_lck_attr);
	proc_klist_mlock = lck_mtx_alloc_init(proc_mlock_grp, proc_lck_attr);
	lck_mtx_init(&kernproc->p_mlock, proc_mlock_grp, proc_lck_attr);
	lck_mtx_init(&kernproc->p_fdmlock, proc_fdmlock_grp, proc_lck_attr);
	lck_spin_init(&kernproc->p_slock, proc_slock_grp, proc_lck_attr);
#endif

	execargs_cache_lock = lck_mtx_alloc_init(proc_lck_grp, proc_lck_attr);
	execargs_cache_size = bsd_simul_execs;
	execargs_free_count = bsd_simul_execs;
	execargs_cache = (vm_offset_t *)kalloc(bsd_simul_execs * sizeof(vm_offset_t));
	bzero(execargs_cache, bsd_simul_execs * sizeof(vm_offset_t));
	
	if (current_task() != kernel_task)
		printf("bsd_init: We have a problem, "
				"current task is not kernel task\n");
	
	bsd_init_kprintf("calling get_bsdthread_info\n");
	ut = (uthread_t)get_bsdthread_info(current_thread());

#if CONFIG_MACF
	/*
	 * Initialize the MAC Framework
	 */
	mac_policy_initbsd();
	kernproc->p_mac_enforce = 0;
#endif /* MAC */

	/*
	 * Create process 0.
	 */
	proc_list_lock();
	LIST_INSERT_HEAD(&allproc, kernproc, p_list);
	kernproc->p_pgrp = &pgrp0;
	LIST_INSERT_HEAD(PGRPHASH(0), &pgrp0, pg_hash);
	LIST_INIT(&pgrp0.pg_members);
#ifdef CONFIG_EMBEDDED
	lck_mtx_init(&pgrp0.pg_mlock, proc_lck_grp, proc_lck_attr);	
#else
	lck_mtx_init(&pgrp0.pg_mlock, proc_mlock_grp, proc_lck_attr);
#endif
	/* There is no other bsd thread at this point, so this is safe without the pgrp lock */
	LIST_INSERT_HEAD(&pgrp0.pg_members, kernproc, p_pglist);
	kernproc->p_listflag |= P_LIST_INPGRP;
	kernproc->p_pgrpid = 0;

	pgrp0.pg_session = &session0;
	pgrp0.pg_membercnt = 1;

	session0.s_count = 1;
	session0.s_leader = kernproc;
	session0.s_listflags = 0;
#ifdef CONFIG_EMBEDDED
	lck_mtx_init(&session0.s_mlock, proc_lck_grp, proc_lck_attr);
#else
	lck_mtx_init(&session0.s_mlock, proc_mlock_grp, proc_lck_attr);
#endif
	LIST_INSERT_HEAD(SESSHASH(0), &session0, s_hash);
	proc_list_unlock();

#if CONFIG_LCTX
	kernproc->p_lctx = NULL;
#endif

	kernproc->task = kernel_task;
	
	kernproc->p_stat = SRUN;
	kernproc->p_flag = P_SYSTEM;
	kernproc->p_nice = NZERO;
	kernproc->p_pptr = kernproc;

	TAILQ_INIT(&kernproc->p_uthlist);
	TAILQ_INSERT_TAIL(&kernproc->p_uthlist, ut, uu_list);
	
	kernproc->sigwait = FALSE;
	kernproc->sigwait_thread = THREAD_NULL;
	kernproc->exit_thread = THREAD_NULL;
	kernproc->p_csflags = CS_VALID;

	/*
	 * Create credential.  This also Initializes the audit information.
	 */
	bsd_init_kprintf("calling bzero\n");
	bzero(&temp_cred, sizeof(temp_cred));
	temp_cred.cr_ngroups = 1;

	temp_cred.cr_audit.as_aia_p = &audit_default_aia;
        /* XXX the following will go away with cr_au */
	temp_cred.cr_au.ai_auid = AU_DEFAUDITID;

	bsd_init_kprintf("calling kauth_cred_create\n");
	kernproc->p_ucred = kauth_cred_create(&temp_cred); 

	/* give the (already existing) initial thread a reference on it */
	bsd_init_kprintf("calling kauth_cred_ref\n");
	kauth_cred_ref(kernproc->p_ucred);
	ut->uu_context.vc_ucred = kernproc->p_ucred;
	ut->uu_context.vc_thread = current_thread();

	TAILQ_INIT(&kernproc->p_aio_activeq);
	TAILQ_INIT(&kernproc->p_aio_doneq);
	kernproc->p_aio_total_count = 0;
	kernproc->p_aio_active_count = 0;

	bsd_init_kprintf("calling file_lock_init\n");
	file_lock_init();

#if CONFIG_MACF
	mac_cred_label_associate_kernel(kernproc->p_ucred);
	mac_task_label_update_cred (kernproc->p_ucred, (struct task *) kernproc->task);
#endif

	/* Create the file descriptor table. */
	filedesc0.fd_refcnt = 1+1;	/* +1 so shutdown will not _FREE_ZONE */
	kernproc->p_fd = &filedesc0;
	filedesc0.fd_cmask = cmask;
	filedesc0.fd_knlistsize = -1;
	filedesc0.fd_knlist = NULL;
	filedesc0.fd_knhash = NULL;
	filedesc0.fd_knhashmask = 0;

	/* Create the limits structures. */
	kernproc->p_limit = &limit0;
	for (i = 0; i < sizeof(kernproc->p_rlimit)/sizeof(kernproc->p_rlimit[0]); i++)
		limit0.pl_rlimit[i].rlim_cur = 
			limit0.pl_rlimit[i].rlim_max = RLIM_INFINITY;
	limit0.pl_rlimit[RLIMIT_NOFILE].rlim_cur = NOFILE;
	limit0.pl_rlimit[RLIMIT_NPROC].rlim_cur = maxprocperuid;
	limit0.pl_rlimit[RLIMIT_NPROC].rlim_max = maxproc;
	limit0.pl_rlimit[RLIMIT_STACK] = vm_initial_limit_stack;
	limit0.pl_rlimit[RLIMIT_DATA] = vm_initial_limit_data;
	limit0.pl_rlimit[RLIMIT_CORE] = vm_initial_limit_core;
	limit0.pl_refcnt = 1;

	kernproc->p_stats = &pstats0;
	kernproc->p_sigacts = &sigacts0;

	/*
	 * Charge root for two  processes: init and mach_init.
	 */
	bsd_init_kprintf("calling chgproccnt\n");
	(void)chgproccnt(0, 1);

	/*
	 *	Allocate a kernel submap for pageable memory
	 *	for temporary copying (execve()).
	 */
	{
		vm_offset_t	minimum;

		bsd_init_kprintf("calling kmem_suballoc\n");
		ret = kmem_suballoc(kernel_map,
				&minimum,
				(vm_size_t)bsd_pageable_map_size,
				TRUE,
				VM_FLAGS_ANYWHERE,
				&bsd_pageable_map);
		if (ret != KERN_SUCCESS) 
			panic("bsd_init: Failed to allocate bsd pageable map");
	}

	/*
	 * Initialize buffers and hash links for buffers
	 *
	 * SIDE EFFECT: Starts a thread for bcleanbuf_thread(), so must
	 *		happen after a credential has been associated with
	 *		the kernel task.
	 */
	bsd_init_kprintf("calling bsd_bufferinit\n");
	bsd_bufferinit();

	/* Initialize the execve() semaphore */
	bsd_init_kprintf("calling semaphore_create\n");

	if (ret != KERN_SUCCESS)
		panic("bsd_init: Failed to create execve semaphore");

	/*
	 * Initialize the calendar.
	 */
	bsd_init_kprintf("calling IOKitInitializeTime\n");
	IOKitInitializeTime();

	if (turn_on_log_leaks && !new_nkdbufs)
		new_nkdbufs = 200000;
	start_kern_tracing(new_nkdbufs);
	if (turn_on_log_leaks)
		log_leaks = 1;

	bsd_init_kprintf("calling ubc_init\n");
	ubc_init();

	/* Initialize the file systems. */
	bsd_init_kprintf("calling vfsinit\n");
	vfsinit();

#if SOCKETS
	/* Initialize per-CPU cache allocator */
	mcache_init();

	/* Initialize mbuf's. */
	bsd_init_kprintf("calling mbinit\n");
	mbinit();
	net_str_id_init(); /* for mbuf tags */
#endif /* SOCKETS */

	/*
	 * Initializes security event auditing.
	 * XXX: Should/could this occur later?
	 */
#if CONFIG_AUDIT
	bsd_init_kprintf("calling audit_init\n");
 	audit_init();  
#endif

	/* Initialize kqueues */
	bsd_init_kprintf("calling knote_init\n");
	knote_init();

	/* Initialize for async IO */
	bsd_init_kprintf("calling aio_init\n");
	aio_init();

	/* Initialize pipes */
	bsd_init_kprintf("calling pipeinit\n");
	pipeinit();

	/* Initialize SysV shm subsystem locks; the subsystem proper is
	 * initialized through a sysctl.
	 */
#if SYSV_SHM
	bsd_init_kprintf("calling sysv_shm_lock_init\n");
	sysv_shm_lock_init();
#endif
#if SYSV_SEM
	bsd_init_kprintf("calling sysv_sem_lock_init\n");
	sysv_sem_lock_init();
#endif
#if SYSV_MSG
	bsd_init_kprintf("sysv_msg_lock_init\n");
	sysv_msg_lock_init();
#endif
	bsd_init_kprintf("calling pshm_lock_init\n");
	pshm_lock_init();
	bsd_init_kprintf("calling psem_lock_init\n");
	psem_lock_init();

	pthread_init();
	/* POSIX Shm and Sem */
	bsd_init_kprintf("calling pshm_cache_init\n");
	pshm_cache_init();
	bsd_init_kprintf("calling psem_cache_init\n");
	psem_cache_init();
	bsd_init_kprintf("calling time_zone_slock_init\n");
	time_zone_slock_init();

	/* Stack snapshot facility lock */
	stackshot_lock_init();
	/*
	 * Initialize protocols.  Block reception of incoming packets
	 * until everything is ready.
	 */
	bsd_init_kprintf("calling sysctl_register_fixed\n");
	sysctl_register_fixed(); 
	bsd_init_kprintf("calling sysctl_mib_init\n");
	sysctl_mib_init();
#if NETWORKING
	bsd_init_kprintf("calling dlil_init\n");
	dlil_init();
	bsd_init_kprintf("calling proto_kpi_init\n");
	proto_kpi_init();
#endif /* NETWORKING */
#if SOCKETS
	bsd_init_kprintf("calling socketinit\n");
	socketinit();
	bsd_init_kprintf("calling domaininit\n");
	domaininit();
#endif /* SOCKETS */

	kernproc->p_fd->fd_cdir = NULL;
	kernproc->p_fd->fd_rdir = NULL;

#if CONFIG_EMBEDDED
	/* Initialize kernel memory status notifications */
	bsd_init_kprintf("calling kern_memorystatus_init\n");
	kern_memorystatus_init();
#endif

#ifdef GPROF
	/* Initialize kernel profiling. */
	kmstartup();
#endif

	/* kick off timeout driven events by calling first time */
	thread_wakeup(&lbolt);
	timeout(lightning_bolt, 0, hz);

	bsd_init_kprintf("calling bsd_autoconf\n");
	bsd_autoconf();

#if CONFIG_DTRACE
	dtrace_postinit();
#endif

	/*
	 * We attach the loopback interface *way* down here to ensure
	 * it happens after autoconf(), otherwise it becomes the
	 * "primary" interface.
	 */
#include <loop.h>
#if NLOOP > 0
	bsd_init_kprintf("calling loopattach\n");
	loopattach();			/* XXX */
#endif

#if PFLOG
	/* Initialize packet filter log interface */
	pfloginit();
#endif /* PFLOG */

#if NETHER > 0
	/* Register the built-in dlil ethernet interface family */
	bsd_init_kprintf("calling ether_family_init\n");
	ether_family_init();
#endif /* ETHER */

#if NETWORKING
	/* Call any kext code that wants to run just after network init */
	bsd_init_kprintf("calling net_init_run\n");
	net_init_run();
	
	/* register user tunnel kernel control handler */
	utun_register_control();
#endif /* NETWORKING */

	bsd_init_kprintf("calling vnode_pager_bootstrap\n");
	vnode_pager_bootstrap();
#if 0
	/* XXX Hack for early debug stop */
	printf("\nabout to sleep for 10 seconds\n");
	IOSleep( 10 * 1000 );
	/* Debugger("hello"); */
#endif

	bsd_init_kprintf("calling inittodr\n");
	inittodr(0);

#if CONFIG_EMBEDDED
	{
		/* print out early VM statistics */
		kern_return_t kr1;
		vm_statistics_data_t stat;
		mach_msg_type_number_t count;

		count = HOST_VM_INFO_COUNT;
		kr1 = host_statistics(host_self(),
				      HOST_VM_INFO,
				      (host_info_t)&stat,
				      &count);
		kprintf("Mach Virtual Memory Statistics (page size of 4096) bytes\n"
			"Pages free:\t\t\t%u.\n"
			"Pages active:\t\t\t%u.\n"
			"Pages inactive:\t\t\t%u.\n"
			"Pages wired down:\t\t%u.\n"
			"\"Translation faults\":\t\t%u.\n"
			"Pages copy-on-write:\t\t%u.\n"
			"Pages zero filled:\t\t%u.\n"
			"Pages reactivated:\t\t%u.\n"
			"Pageins:\t\t\t%u.\n"
			"Pageouts:\t\t\t%u.\n"
			"Object cache: %u hits of %u lookups (%d%% hit rate)\n",

			stat.free_count,
			stat.active_count,
			stat.inactive_count,
			stat.wire_count,
			stat.faults,
			stat.cow_faults,
			stat.zero_fill_count,
			stat.reactivations,
			stat.pageins,
			stat.pageouts,
			stat.hits,
			stat.lookups,
			(stat.hits == 0) ? 100 :
			                   ((stat.lookups * 100) / stat.hits));
	}
#endif /* CONFIG_EMBEDDED */
	
	/* Mount the root file system. */
	while( TRUE) {
		int err;

		bsd_init_kprintf("calling setconf\n");
		setconf();

		bsd_init_kprintf("vfs_mountroot\n");
		if (0 == (err = vfs_mountroot()))
			break;
		rootdevice[0] = '\0';
#if NFSCLIENT
		if (mountroot == netboot_mountroot) {
			PE_display_icon( 0, "noroot");  /* XXX a netboot-specific icon would be nicer */
			vc_progress_set(FALSE, 0);
			for (i=1; 1; i*=2) {
				printf("bsd_init: failed to mount network root, error %d, %s\n",
					err, PE_boot_args());
				printf("We are hanging here...\n");
				IOSleep(i*60*1000);
			}
			/*NOTREACHED*/
		}
#endif
		printf("cannot mount root, errno = %d\n", err);
		boothowto |= RB_ASKNAME;
	}

	IOSecureBSDRoot(rootdevice);

	context.vc_thread = current_thread();
	context.vc_ucred = kernproc->p_ucred;
	mountlist.tqh_first->mnt_flag |= MNT_ROOTFS;

	bsd_init_kprintf("calling VFS_ROOT\n");
	/* Get the vnode for '/'.  Set fdp->fd_fd.fd_cdir to reference it. */
	if (VFS_ROOT(mountlist.tqh_first, &rootvnode, &context))
		panic("bsd_init: cannot find root vnode: %s", PE_boot_args());
	rootvnode->v_flag |= VROOT;
	(void)vnode_ref(rootvnode);
	(void)vnode_put(rootvnode);
	filedesc0.fd_cdir = rootvnode;

#if NFSCLIENT
	if (mountroot == netboot_mountroot) {
		int err;
		/* post mount setup */
		if ((err = netboot_setup()) != 0) {
			PE_display_icon( 0, "noroot");  /* XXX a netboot-specific icon would be nicer */
			vc_progress_set(FALSE, 0);
			for (i=1; 1; i*=2) {
				printf("bsd_init: NetBoot could not find root, error %d: %s\n",
					err, PE_boot_args());
				printf("We are hanging here...\n");
				IOSleep(i*60*1000);
			}
			/*NOTREACHED*/
		}
	}
#endif
	

#if CONFIG_IMAGEBOOT
	/*
	 * See if a system disk image is present. If so, mount it and
	 * switch the root vnode to point to it
	 */ 
  
	if(imageboot_needed()) {
		int err;

		/* An image was found */
		if((err = imageboot_setup())) {
			/*
			 * this is not fatal. Keep trying to root
			 * off the original media
			 */
			printf("%s: imageboot could not find root, %d\n",
				__FUNCTION__, err);
		}
	}
#endif /* CONFIG_IMAGEBOOT */
  
	/* set initial time; all other resource data is  already zero'ed */
	microtime(&kernproc->p_start);
	kernproc->p_stats->p_start = kernproc->p_start;	/* for compat */

#if DEVFS
	{
	    char mounthere[] = "/dev";	/* !const because of internal casting */

	    bsd_init_kprintf("calling devfs_kernel_mount\n");
	    devfs_kernel_mount(mounthere);
	}
#endif /* DEVFS */
	
	/* Initialize signal state for process 0. */
	bsd_init_kprintf("calling siginit\n");
	siginit(kernproc);

	bsd_init_kprintf("calling bsd_utaskbootstrap\n");
	bsd_utaskbootstrap();

#if defined(__LP64__)
	kernproc->p_flag |= P_LP64;
	printf("Kernel is LP64\n");
#endif
#if __i386__ || __x86_64__
	/* this should be done after the root filesystem is mounted */
	error = set_archhandler(kernproc, CPU_TYPE_POWERPC);
	// 10/30/08 - gab: <rdar://problem/6324501>
	// if default 'translate' can't be found, see if the understudy is available
	if (ENOENT == error) {
		strlcpy(exec_archhandler_ppc.path, kRosettaStandIn_str, MAXPATHLEN);
		error = set_archhandler(kernproc, CPU_TYPE_POWERPC);
	}
	if (error) /* XXX make more generic */
		exec_archhandler_ppc.path[0] = 0;
#endif	

	bsd_init_kprintf("calling mountroot_post_hook\n");

	/* invoke post-root-mount hook */
	if (mountroot_post_hook != NULL)
		mountroot_post_hook();

#if 0 /* not yet */
	consider_zone_gc(FALSE);
#endif

	bsd_init_kprintf("done\n");
}
Example #21
/* ARGSUSED */
int
pipe(proc_t p, __unused struct pipe_args *uap, int32_t *retval)
{
	struct fileproc *rf, *wf;
	struct pipe *rpipe, *wpipe;
	lck_mtx_t   *pmtx;
	int fd, error;

	if ((pmtx = lck_mtx_alloc_init(pipe_mtx_grp, pipe_mtx_attr)) == NULL)
	        return (ENOMEM);
	
	rpipe = wpipe = NULL;
	if (pipe_create(&rpipe) || pipe_create(&wpipe)) {
	        error = ENFILE;
		goto freepipes;
	}
	/*
	 * allocate the space for the normal I/O direction up
	 * front... we'll delay the allocation for the other
	 * direction until a write actually occurs (most likely it won't)...
	 */
	error = pipespace(rpipe, choose_pipespace(rpipe->pipe_buffer.size, 0));
	if (error)
		goto freepipes;

	TAILQ_INIT(&rpipe->pipe_evlist);
	TAILQ_INIT(&wpipe->pipe_evlist);

	error = falloc(p, &rf, &fd, vfs_context_current());
	if (error) {
	        goto freepipes;
	}
	retval[0] = fd;

	/*
	 * for now we'll create half-duplex pipes(refer returns section above). 
	 * this is what we've always supported..
	 */
	rf->f_flag = FREAD;
	rf->f_data = (caddr_t)rpipe;
	rf->f_ops = &pipeops;

	error = falloc(p, &wf, &fd, vfs_context_current());
	if (error) {
		fp_free(p, retval[0], rf);
	        goto freepipes;
	}
	wf->f_flag = FWRITE;
	wf->f_data = (caddr_t)wpipe;
	wf->f_ops = &pipeops;

	rpipe->pipe_peer = wpipe;
	wpipe->pipe_peer = rpipe;
	/* both structures share the same mutex */
	rpipe->pipe_mtxp = wpipe->pipe_mtxp = pmtx; 

	retval[1] = fd;
#if CONFIG_MACF
	/*
	 * XXXXXXXX SHOULD NOT HOLD FILE_LOCK() XXXXXXXXXXXX
	 *
	 * struct pipe represents a pipe endpoint.  The MAC label is shared
	 * between the connected endpoints.  As a result mac_pipe_label_init() and
	 * mac_pipe_label_associate() should only be called on one of the endpoints
	 * after they have been connected.
	 */
	mac_pipe_label_init(rpipe);
	mac_pipe_label_associate(kauth_cred_get(), rpipe);
	wpipe->pipe_label = rpipe->pipe_label;
#endif
	proc_fdlock_spin(p);
	procfdtbl_releasefd(p, retval[0], NULL);
	procfdtbl_releasefd(p, retval[1], NULL);
	fp_drop(p, retval[0], rf, 1);
	fp_drop(p, retval[1], wf, 1);
	proc_fdunlock(p);


	return (0);

freepipes:
	pipeclose(rpipe); 
	pipeclose(wpipe); 
	lck_mtx_free(pmtx, pipe_mtx_grp);

	return (error);
}
Example #22
errno_t
FSNodeGetOrCreateFileVNodeByID(vnode_t              *vnPtr,
                               uint32_t              flags,
                               struct fuse_abi_data *feo,
                               mount_t               mp,
                               vnode_t               dvp,
                               vfs_context_t         context,
                               uint32_t             *oflags)
{
    int   err;

    vnode_t  vn    = NULLVP;
    HNodeRef hn    = NULL;

    struct fuse_vnode_data *fvdat   = NULL;
    struct fuse_data       *mntdata = NULL;
    fuse_device_t           dummy_device;

    struct fuse_abi_data fa;

    enum vtype vtyp;

    fuse_abi_data_init(&fa, feo->fad_version, fuse_entry_out_get_attr(feo));

    vtyp = IFTOVT(fuse_attr_get_mode(&fa));

    if ((vtyp >= VBAD) || (vtyp == VNON)) {
        return EINVAL;
    }

    int      markroot   = (flags & FN_IS_ROOT) ? 1 : 0;
    uint64_t size       = (flags & FN_IS_ROOT) ? 0 : fuse_attr_get_size(&fa);
    uint32_t rdev       = (flags & FN_IS_ROOT) ? 0 : fuse_attr_get_rdev(&fa);
    uint64_t generation = fuse_entry_out_get_generation(feo);

    mntdata = fuse_get_mpdata(mp);
    dummy_device = mntdata->fdev;

    err = HNodeLookupCreatingIfNecessary(dummy_device, fuse_entry_out_get_nodeid(feo),
                                         0 /* fork index */, &hn, &vn);
    if ((err == 0) && (vn == NULL)) {

        struct vnode_fsparam params;

        fvdat = (struct fuse_vnode_data *)FSNodeGenericFromHNode(hn);

        if (!fvdat->fInitialised) {

            fvdat->fInitialised = true;

            /* self */
            fvdat->vp           = NULLVP; /* hold on */
            fvdat->nodeid       = fuse_entry_out_get_nodeid(feo);
            fvdat->generation   = generation;

            /* parent */
            fvdat->parentvp     = dvp;
            if (dvp) {
                fvdat->parent_nodeid = VTOI(dvp);
            } else {
                fvdat->parent_nodeid = 0;
            }

            /* I/O */
            {
                int k;
                for (k = 0; k < FUFH_MAXTYPE; k++) {
                    FUFH_USE_RESET(&(fvdat->fufh[k]));
                }
            }

            /* flags */
            fvdat->flag         = flags;
            fvdat->c_flag       = 0;

            /* meta */

            /* XXX: truncation */
            fvdat->entry_valid.tv_sec  = (time_t)fuse_entry_out_get_entry_valid(feo);

            fvdat->entry_valid.tv_nsec = fuse_entry_out_get_entry_valid_nsec(feo);

            /* XXX: truncation */
            fvdat->attr_valid.tv_sec   = 0;

            fvdat->attr_valid.tv_nsec  = 0;

            /* XXX: truncation */
            fvdat->modify_time.tv_sec  = (time_t)fuse_attr_get_mtime(&fa);

            fvdat->modify_time.tv_nsec = fuse_attr_get_mtimensec(&fa);

            fvdat->filesize            = size;
            fvdat->nlookup             = 0;
            fvdat->vtype               = vtyp;

            /* locking */
            fvdat->createlock = lck_mtx_alloc_init(fuse_lock_group,
                                                   fuse_lock_attr);
            fvdat->creator = current_thread();
#if M_OSXFUSE_ENABLE_TSLOCKING
            fvdat->nodelock = lck_rw_alloc_init(fuse_lock_group,
                                                fuse_lock_attr);
            fvdat->nodelockowner = NULL;
            fvdat->truncatelock  = lck_rw_alloc_init(fuse_lock_group,
                                                     fuse_lock_attr);
#endif
        }

        if (err == 0) {
            params.vnfs_mp     = mp;
            params.vnfs_vtype  = vtyp;
            params.vnfs_str    = NULL;
            params.vnfs_dvp    = dvp; /* NULLVP for the root vnode */
            params.vnfs_fsnode = hn;

#if M_OSXFUSE_ENABLE_SPECFS
            if ((vtyp == VBLK) || (vtyp == VCHR)) {
                params.vnfs_vops = fuse_spec_operations;
                params.vnfs_rdev = (dev_t)rdev;
#else
            if (0) {
#endif
#if M_OSXFUSE_ENABLE_FIFOFS
            } else if (vtyp == VFIFO) {
                params.vnfs_vops = fuse_fifo_operations;
                params.vnfs_rdev = 0;
                (void)rdev;
#else
            } else if (0) {
#endif
            } else {
                params.vnfs_vops = fuse_vnode_operations;
                params.vnfs_rdev = 0;
                (void)rdev;
            }

            params.vnfs_marksystem = 0;
            params.vnfs_cnp        = NULL;
            params.vnfs_flags      = VNFS_NOCACHE | VNFS_CANTCACHE;
            params.vnfs_filesize   = size;
            params.vnfs_markroot   = markroot;

#if M_OSXFUSE_ENABLE_BIG_LOCK
            fuse_biglock_unlock(mntdata->biglock);
#endif
            err = vnode_create(VNCREATE_FLAVOR, (uint32_t)sizeof(params),
                               &params, &vn);
#if M_OSXFUSE_ENABLE_BIG_LOCK
            fuse_biglock_lock(mntdata->biglock);
#endif
        }

        if (err == 0) {
            if (markroot) {
                fvdat->parentvp = vn;
            } else {
                fvdat->parentvp = dvp;
            }
            if (oflags) {
                *oflags |= MAKEENTRY;
            }

            /* Need VT_OSXFUSE from xnu */
            vnode_settag(vn, VT_OTHER);

            cache_attrs(vn, fuse_entry_out, feo);

            HNodeAttachVNodeSucceeded(hn, 0 /* forkIndex */, vn);
            FUSE_OSAddAtomic(1, (SInt32 *)&fuse_vnodes_current);
        } else {
            if (HNodeAttachVNodeFailed(hn, 0 /* forkIndex */)) {
                FSNodeScrub(fvdat);
                HNodeScrubDone(hn);
            }
        }
    }

    if (err == 0) {
        if (vnode_vtype(vn) != vtyp) {
            IOLog("osxfuse: vnode changed type behind us (old=%d, new=%d)\n",
                  vnode_vtype(vn), vtyp);
#if M_OSXFUSE_ENABLE_BIG_LOCK
            fuse_biglock_unlock(mntdata->biglock);
#endif
            fuse_internal_vnode_disappear(vn, context, REVOKE_SOFT);
            vnode_put(vn);
#if M_OSXFUSE_ENABLE_BIG_LOCK
            fuse_biglock_lock(mntdata->biglock);
#endif
            err = EIO;
        } else if (VTOFUD(vn)->generation != generation) {
            IOLog("osxfuse: vnode changed generation\n");
#if M_OSXFUSE_ENABLE_BIG_LOCK
            fuse_biglock_unlock(mntdata->biglock);
#endif
            fuse_internal_vnode_disappear(vn, context, REVOKE_SOFT);
            vnode_put(vn);
#if M_OSXFUSE_ENABLE_BIG_LOCK
            fuse_biglock_lock(mntdata->biglock);
#endif
            err = ESTALE;
        }
    }

    if (err == 0) {
        *vnPtr = vn;
    }

    /* assert((err == 0) == (*vnPtr != NULL)); */

    return err;
}

int
fuse_vget_i(vnode_t              *vpp,
            uint32_t              flags,
            struct fuse_abi_data *feo,
            struct componentname *cnp,
            vnode_t               dvp,
            mount_t               mp,
            vfs_context_t         context)
{
    int err = 0;

    if (!feo) {
        return EINVAL;
    }

    err = FSNodeGetOrCreateFileVNodeByID(vpp, flags, feo, mp, dvp,
                                         context, NULL);
    if (err) {
        return err;
    }

    if (!fuse_isnovncache_mp(mp) && (cnp->cn_flags & MAKEENTRY)) {
        fuse_vncache_enter(dvp, *vpp, cnp);
    }

/* found: */

    VTOFUD(*vpp)->nlookup++;

    return 0;
}
Example #23
void
domaininit(void)
{
	register struct domain *dp;

	/*
	 * allocate lock group attribute and group for domain mutexes
	 */
	domain_proto_mtx_grp_attr = lck_grp_attr_alloc_init();

	domain_proto_mtx_grp = lck_grp_alloc_init("domain", domain_proto_mtx_grp_attr);
		
	/*
	 * allocate the lock attribute for per domain mutexes
	 */
	domain_proto_mtx_attr = lck_attr_alloc_init();

	if ((domain_proto_mtx = lck_mtx_alloc_init(domain_proto_mtx_grp, domain_proto_mtx_attr)) == NULL) {
		printf("domaininit: can't init domain mtx for domain list\n");
		return;	/* we have a problem... */
	}
	/*
	 * Add all the static domains to the domains list
	 */

	lck_mtx_lock(domain_proto_mtx);

	concat_domain(&localdomain);
	concat_domain(&routedomain);
	concat_domain(&inetdomain);
#if NETAT
	concat_domain(&atalkdomain);
#endif
#if INET6
	concat_domain(&inet6domain);
#endif
#if IPSEC
	concat_domain(&keydomain);
#endif

#if NS
	concat_domain(&nsdomain);
#endif
#if ISO
	concat_domain(&isodomain);
#endif
#if CCITT
	concat_domain(&ccittdomain);
#endif
	concat_domain(&ndrvdomain);

	concat_domain(&systemdomain);

	/*
	 * Now ask them all to init (XXX including the routing domain,
	 * see above)
	 */
	for (dp = domains; dp; dp = dp->dom_next)
		init_domain(dp);

	lck_mtx_unlock(domain_proto_mtx);
	timeout(pffasttimo, NULL, 1);
	timeout(pfslowtimo, NULL, 1);
}
Example #24
void
bsd_init(void)
{
	struct uthread *ut;
	unsigned int i;
	struct vfs_context context;
	kern_return_t	ret;
	struct ucred temp_cred;
	struct posix_cred temp_pcred;
#if NFSCLIENT || CONFIG_IMAGEBOOT
	boolean_t       netboot = FALSE;
#endif

#define bsd_init_kprintf(x...) /* kprintf("bsd_init: " x) */

	throttle_init();

	printf(copyright);
	
	bsd_init_kprintf("calling kmeminit\n");
	kmeminit();
	
	bsd_init_kprintf("calling parse_bsd_args\n");
	parse_bsd_args();

#if CONFIG_DEV_KMEM
	bsd_init_kprintf("calling dev_kmem_init\n");
	dev_kmem_init();
#endif

	/* Initialize kauth subsystem before instancing the first credential */
	bsd_init_kprintf("calling kauth_init\n");
	kauth_init();

	/* Initialize process and pgrp structures. */
	bsd_init_kprintf("calling procinit\n");
	procinit();

	/* Initialize the ttys (MUST be before kminit()/bsd_autoconf()!)*/
	tty_init();

	kernproc = &proc0;	/* implicitly bzero'ed */

	/* kernel_task->proc = kernproc; */
	set_bsdtask_info(kernel_task,(void *)kernproc);

	/* give kernproc a name */
	bsd_init_kprintf("calling process_name\n");
	process_name("kernel_task", kernproc);

	/* allocate proc lock group attribute and group */
	bsd_init_kprintf("calling lck_grp_attr_alloc_init\n");
	proc_lck_grp_attr= lck_grp_attr_alloc_init();

	proc_lck_grp = lck_grp_alloc_init("proc",  proc_lck_grp_attr);
#if CONFIG_FINE_LOCK_GROUPS
	proc_slock_grp = lck_grp_alloc_init("proc-slock",  proc_lck_grp_attr);
	proc_fdmlock_grp = lck_grp_alloc_init("proc-fdmlock",  proc_lck_grp_attr);
	proc_ucred_mlock_grp = lck_grp_alloc_init("proc-ucred-mlock",  proc_lck_grp_attr);
	proc_mlock_grp = lck_grp_alloc_init("proc-mlock",  proc_lck_grp_attr);
#endif
	/* Allocate proc lock attribute */
	proc_lck_attr = lck_attr_alloc_init();
#if 0
#if __PROC_INTERNAL_DEBUG
	lck_attr_setdebug(proc_lck_attr);
#endif
#endif

#if CONFIG_FINE_LOCK_GROUPS
	proc_list_mlock = lck_mtx_alloc_init(proc_mlock_grp, proc_lck_attr);
	proc_klist_mlock = lck_mtx_alloc_init(proc_mlock_grp, proc_lck_attr);
	lck_mtx_init(&kernproc->p_mlock, proc_mlock_grp, proc_lck_attr);
	lck_mtx_init(&kernproc->p_fdmlock, proc_fdmlock_grp, proc_lck_attr);
	lck_mtx_init(&kernproc->p_ucred_mlock, proc_ucred_mlock_grp, proc_lck_attr);
	lck_spin_init(&kernproc->p_slock, proc_slock_grp, proc_lck_attr);
#else
	proc_list_mlock = lck_mtx_alloc_init(proc_lck_grp, proc_lck_attr);
	proc_klist_mlock = lck_mtx_alloc_init(proc_lck_grp, proc_lck_attr);
	lck_mtx_init(&kernproc->p_mlock, proc_lck_grp, proc_lck_attr);
	lck_mtx_init(&kernproc->p_fdmlock, proc_lck_grp, proc_lck_attr);
	lck_mtx_init(&kernproc->p_ucred_mlock, proc_lck_grp, proc_lck_attr);
	lck_spin_init(&kernproc->p_slock, proc_lck_grp, proc_lck_attr);
#endif

	assert(bsd_simul_execs != 0);
	execargs_cache_lock = lck_mtx_alloc_init(proc_lck_grp, proc_lck_attr);
	execargs_cache_size = bsd_simul_execs;
	execargs_free_count = bsd_simul_execs;
	execargs_cache = (vm_offset_t *)kalloc(bsd_simul_execs * sizeof(vm_offset_t));
	bzero(execargs_cache, bsd_simul_execs * sizeof(vm_offset_t));
	
	if (current_task() != kernel_task)
		printf("bsd_init: We have a problem, "
				"current task is not kernel task\n");
	
	bsd_init_kprintf("calling get_bsdthread_info\n");
	ut = (uthread_t)get_bsdthread_info(current_thread());

#if CONFIG_MACF
	/*
	 * Initialize the MAC Framework
	 */
	mac_policy_initbsd();
	kernproc->p_mac_enforce = 0;

#if defined (__i386__) || defined (__x86_64__)
	/*
	 * We currently only support this on i386/x86_64, as that is the
	 * only lock code we have instrumented so far.
	 */
	check_policy_init(policy_check_flags);
#endif
#endif /* MAC */

	/* Initialize System Override call */
	init_system_override();
	
	/*
	 * Create process 0.
	 */
	proc_list_lock();
	LIST_INSERT_HEAD(&allproc, kernproc, p_list);
	kernproc->p_pgrp = &pgrp0;
	LIST_INSERT_HEAD(PGRPHASH(0), &pgrp0, pg_hash);
	LIST_INIT(&pgrp0.pg_members);
#ifdef CONFIG_FINE_LOCK_GROUPS
	lck_mtx_init(&pgrp0.pg_mlock, proc_mlock_grp, proc_lck_attr);
#else
	lck_mtx_init(&pgrp0.pg_mlock, proc_lck_grp, proc_lck_attr);
#endif
	/* There is no other bsd thread at this point, so this is safe without the pgrp lock */
	LIST_INSERT_HEAD(&pgrp0.pg_members, kernproc, p_pglist);
	kernproc->p_listflag |= P_LIST_INPGRP;
	kernproc->p_pgrpid = 0;
	kernproc->p_uniqueid = 0;

	pgrp0.pg_session = &session0;
	pgrp0.pg_membercnt = 1;

	session0.s_count = 1;
	session0.s_leader = kernproc;
	session0.s_listflags = 0;
#ifdef CONFIG_FINE_LOCK_GROUPS
	lck_mtx_init(&session0.s_mlock, proc_mlock_grp, proc_lck_attr);
#else
	lck_mtx_init(&session0.s_mlock, proc_lck_grp, proc_lck_attr);
#endif
	LIST_INSERT_HEAD(SESSHASH(0), &session0, s_hash);
	proc_list_unlock();

	kernproc->task = kernel_task;
	
	kernproc->p_stat = SRUN;
	kernproc->p_flag = P_SYSTEM;
	kernproc->p_lflag = 0;
	kernproc->p_ladvflag = 0;
	
#if DEVELOPMENT || DEBUG
	if (bootarg_disable_aslr)
		kernproc->p_flag |= P_DISABLE_ASLR;
#endif

	kernproc->p_nice = NZERO;
	kernproc->p_pptr = kernproc;

	TAILQ_INIT(&kernproc->p_uthlist);
	TAILQ_INSERT_TAIL(&kernproc->p_uthlist, ut, uu_list);
	
	kernproc->sigwait = FALSE;
	kernproc->sigwait_thread = THREAD_NULL;
	kernproc->exit_thread = THREAD_NULL;
	kernproc->p_csflags = CS_VALID;

	/*
	 * Create credential.  This also Initializes the audit information.
	 */
	bsd_init_kprintf("calling bzero\n");
	bzero(&temp_cred, sizeof(temp_cred));
	bzero(&temp_pcred, sizeof(temp_pcred));
	temp_pcred.cr_ngroups = 1;
	/* kern_proc, shouldn't call up to DS for group membership */
	temp_pcred.cr_flags = CRF_NOMEMBERD;
	temp_cred.cr_audit.as_aia_p = audit_default_aia_p;
	
	bsd_init_kprintf("calling kauth_cred_create\n");
	/*
	 * We have to label the temp cred before we create from it to
	 * properly set cr_ngroups, or the create will fail.
	 */
	posix_cred_label(&temp_cred, &temp_pcred);
	kernproc->p_ucred = kauth_cred_create(&temp_cred); 

	/* update cred on proc */
	PROC_UPDATE_CREDS_ONPROC(kernproc);

	/* give the (already existing) initial thread a reference on it */
	bsd_init_kprintf("calling kauth_cred_ref\n");
	kauth_cred_ref(kernproc->p_ucred);
	ut->uu_context.vc_ucred = kernproc->p_ucred;
	ut->uu_context.vc_thread = current_thread();

	TAILQ_INIT(&kernproc->p_aio_activeq);
	TAILQ_INIT(&kernproc->p_aio_doneq);
	kernproc->p_aio_total_count = 0;
	kernproc->p_aio_active_count = 0;

	bsd_init_kprintf("calling file_lock_init\n");
	file_lock_init();

#if CONFIG_MACF
	mac_cred_label_associate_kernel(kernproc->p_ucred);
#endif

	/* Create the file descriptor table. */
	kernproc->p_fd = &filedesc0;
	filedesc0.fd_cmask = cmask;
	filedesc0.fd_knlistsize = -1;
	filedesc0.fd_knlist = NULL;
	filedesc0.fd_knhash = NULL;
	filedesc0.fd_knhashmask = 0;

	/* Create the limits structures. */
	kernproc->p_limit = &limit0;
	for (i = 0; i < sizeof(kernproc->p_rlimit)/sizeof(kernproc->p_rlimit[0]); i++)
		limit0.pl_rlimit[i].rlim_cur = 
			limit0.pl_rlimit[i].rlim_max = RLIM_INFINITY;
	limit0.pl_rlimit[RLIMIT_NOFILE].rlim_cur = NOFILE;
	limit0.pl_rlimit[RLIMIT_NPROC].rlim_cur = maxprocperuid;
	limit0.pl_rlimit[RLIMIT_NPROC].rlim_max = maxproc;
	limit0.pl_rlimit[RLIMIT_STACK] = vm_initial_limit_stack;
	limit0.pl_rlimit[RLIMIT_DATA] = vm_initial_limit_data;
	limit0.pl_rlimit[RLIMIT_CORE] = vm_initial_limit_core;
	limit0.pl_refcnt = 1;

	kernproc->p_stats = &pstats0;
	kernproc->p_sigacts = &sigacts0;

	/*
	 * Charge root for one process: launchd.
	 */
	bsd_init_kprintf("calling chgproccnt\n");
	(void)chgproccnt(0, 1);

	/*
	 *	Allocate a kernel submap for pageable memory
	 *	for temporary copying (execve()).
	 */
	{
		vm_offset_t	minimum;

		bsd_init_kprintf("calling kmem_suballoc\n");
		assert(bsd_pageable_map_size != 0);
		ret = kmem_suballoc(kernel_map,
				&minimum,
				(vm_size_t)bsd_pageable_map_size,
				TRUE,
				VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_BSD),
				&bsd_pageable_map);
		if (ret != KERN_SUCCESS) 
			panic("bsd_init: Failed to allocate bsd pageable map");
	}

	/*
	 * Initialize buffers and hash links for buffers
	 *
	 * SIDE EFFECT: Starts a thread for bcleanbuf_thread(), so must
	 *		happen after a credential has been associated with
	 *		the kernel task.
	 */
	bsd_init_kprintf("calling bsd_bufferinit\n");
	bsd_bufferinit();

	/* Initialize the execve() semaphore */
	bsd_init_kprintf("calling semaphore_create\n");

	if (ret != KERN_SUCCESS)
		panic("bsd_init: Failed to create execve semaphore");

	/*
	 * Initialize the calendar.
	 */
	bsd_init_kprintf("calling IOKitInitializeTime\n");
	IOKitInitializeTime();

	bsd_init_kprintf("calling ubc_init\n");
	ubc_init();

	/*
	 * Initialize device-switches.
	 */
	bsd_init_kprintf("calling devsw_init() \n");
	devsw_init();

	/* Initialize the file systems. */
	bsd_init_kprintf("calling vfsinit\n");
	vfsinit();

#if CONFIG_PROC_UUID_POLICY
	/* Initial proc_uuid_policy subsystem */
	bsd_init_kprintf("calling proc_uuid_policy_init()\n");
	proc_uuid_policy_init();
#endif

#if SOCKETS
	/* Initialize per-CPU cache allocator */
	mcache_init();

	/* Initialize mbuf's. */
	bsd_init_kprintf("calling mbinit\n");
	mbinit();
	net_str_id_init(); /* for mbuf tags */
#endif /* SOCKETS */

	/*
	 * Initializes security event auditing.
	 * XXX: Should/could this occur later?
	 */
#if CONFIG_AUDIT
	bsd_init_kprintf("calling audit_init\n");
 	audit_init();  
#endif

	/* Initialize kqueues */
	bsd_init_kprintf("calling knote_init\n");
	knote_init();

	/* Initialize for async IO */
	bsd_init_kprintf("calling aio_init\n");
	aio_init();

	/* Initialize pipes */
	bsd_init_kprintf("calling pipeinit\n");
	pipeinit();

	/* Initialize SysV shm subsystem locks; the subsystem proper is
	 * initialized through a sysctl.
	 */
#if SYSV_SHM
	bsd_init_kprintf("calling sysv_shm_lock_init\n");
	sysv_shm_lock_init();
#endif
#if SYSV_SEM
	bsd_init_kprintf("calling sysv_sem_lock_init\n");
	sysv_sem_lock_init();
#endif
#if SYSV_MSG
	bsd_init_kprintf("sysv_msg_lock_init\n");
	sysv_msg_lock_init();
#endif
	bsd_init_kprintf("calling pshm_lock_init\n");
	pshm_lock_init();
	bsd_init_kprintf("calling psem_lock_init\n");
	psem_lock_init();

	pthread_init();
	/* POSIX Shm and Sem */
	bsd_init_kprintf("calling pshm_cache_init\n");
	pshm_cache_init();
	bsd_init_kprintf("calling psem_cache_init\n");
	psem_cache_init();
	bsd_init_kprintf("calling time_zone_slock_init\n");
	time_zone_slock_init();
	bsd_init_kprintf("calling select_waitq_init\n");
	select_waitq_init();

	/*
	 * Initialize protocols.  Block reception of incoming packets
	 * until everything is ready.
	 */
	bsd_init_kprintf("calling sysctl_register_fixed\n");
	sysctl_register_fixed(); 
	bsd_init_kprintf("calling sysctl_mib_init\n");
	sysctl_mib_init();
#if NETWORKING
	bsd_init_kprintf("calling dlil_init\n");
	dlil_init();
	bsd_init_kprintf("calling proto_kpi_init\n");
	proto_kpi_init();
#endif /* NETWORKING */
#if SOCKETS
	bsd_init_kprintf("calling socketinit\n");
	socketinit();
	bsd_init_kprintf("calling domaininit\n");
	domaininit();
	iptap_init();
#if FLOW_DIVERT
	flow_divert_init();
#endif	/* FLOW_DIVERT */
#endif /* SOCKETS */

	kernproc->p_fd->fd_cdir = NULL;
	kernproc->p_fd->fd_rdir = NULL;

#if CONFIG_FREEZE
#ifndef CONFIG_MEMORYSTATUS
    #error "CONFIG_FREEZE defined without matching CONFIG_MEMORYSTATUS"
#endif
	/* Initialise background freezing */
	bsd_init_kprintf("calling memorystatus_freeze_init\n");
	memorystatus_freeze_init();
#endif

#if CONFIG_MEMORYSTATUS
	/* Initialize kernel memory status notifications */
	bsd_init_kprintf("calling memorystatus_init\n");
	memorystatus_init();
#endif /* CONFIG_MEMORYSTATUS */

	bsd_init_kprintf("calling macx_init\n");
	macx_init();

	bsd_init_kprintf("calling acct_init\n");
	acct_init();

#ifdef GPROF
	/* Initialize kernel profiling. */
	kmstartup();
#endif

	bsd_init_kprintf("calling bsd_autoconf\n");
	bsd_autoconf();

#if CONFIG_DTRACE
	dtrace_postinit();
#endif

	/*
	 * We attach the loopback interface *way* down here to ensure
	 * it happens after autoconf(), otherwise it becomes the
	 * "primary" interface.
	 */
#include <loop.h>
#if NLOOP > 0
	bsd_init_kprintf("calling loopattach\n");
	loopattach();			/* XXX */
#endif
#if NGIF
	/* Initialize gif interface (after lo0) */
	gif_init();
#endif

#if PFLOG
	/* Initialize packet filter log interface */
	pfloginit();
#endif /* PFLOG */

#if NETHER > 0
	/* Register the built-in dlil ethernet interface family */
	bsd_init_kprintf("calling ether_family_init\n");
	ether_family_init();
#endif /* ETHER */

#if NETWORKING
	/* Call any kext code that wants to run just after network init */
	bsd_init_kprintf("calling net_init_run\n");
	net_init_run();
	
#if CONTENT_FILTER
	cfil_init();
#endif

#if PACKET_MANGLER
	pkt_mnglr_init();
#endif	

#if NECP
	/* Initialize Network Extension Control Policies */
	necp_init();
#endif

	netagent_init();

	/* register user tunnel kernel control handler */
	utun_register_control();
#if IPSEC
	ipsec_register_control();
#endif /* IPSEC */
	netsrc_init();
	nstat_init();
	tcp_cc_init();
#if MPTCP
	mptcp_control_register();
#endif /* MPTCP */
#endif /* NETWORKING */

	bsd_init_kprintf("calling vnode_pager_bootstrap\n");
	vnode_pager_bootstrap();

	bsd_init_kprintf("calling inittodr\n");
	inittodr(0);

	/* Mount the root file system. */
	while( TRUE) {
		int err;

		bsd_init_kprintf("calling setconf\n");
		setconf();
#if NFSCLIENT
		netboot = (mountroot == netboot_mountroot);
#endif

		bsd_init_kprintf("vfs_mountroot\n");
		if (0 == (err = vfs_mountroot()))
			break;
		rootdevice[0] = '\0';
#if NFSCLIENT
		if (netboot) {
			PE_display_icon( 0, "noroot");  /* XXX a netboot-specific icon would be nicer */
			vc_progress_set(FALSE, 0);
			for (i=1; 1; i*=2) {
				printf("bsd_init: failed to mount network root, error %d, %s\n",
					err, PE_boot_args());
				printf("We are hanging here...\n");
				IOSleep(i*60*1000);
			}
			/*NOTREACHED*/
		}
#endif
		printf("cannot mount root, errno = %d\n", err);
		boothowto |= RB_ASKNAME;
	}

	IOSecureBSDRoot(rootdevice);

	context.vc_thread = current_thread();
	context.vc_ucred = kernproc->p_ucred;
	mountlist.tqh_first->mnt_flag |= MNT_ROOTFS;

	bsd_init_kprintf("calling VFS_ROOT\n");
	/* Get the vnode for '/'.  Set fdp->fd_fd.fd_cdir to reference it. */
	if (VFS_ROOT(mountlist.tqh_first, &rootvnode, &context))
		panic("bsd_init: cannot find root vnode: %s", PE_boot_args());
	rootvnode->v_flag |= VROOT;
	(void)vnode_ref(rootvnode);
	(void)vnode_put(rootvnode);
	filedesc0.fd_cdir = rootvnode;

#if NFSCLIENT
	if (netboot) {
		int err;

		netboot = TRUE;
		/* post mount setup */
		if ((err = netboot_setup()) != 0) {
			PE_display_icon( 0, "noroot");  /* XXX a netboot-specific icon would be nicer */
			vc_progress_set(FALSE, 0);
			for (i=1; 1; i*=2) {
				printf("bsd_init: NetBoot could not find root, error %d: %s\n",
					err, PE_boot_args());
				printf("We are hanging here...\n");
				IOSleep(i*60*1000);
			}
			/*NOTREACHED*/
		}
	}
#endif
	

#if CONFIG_IMAGEBOOT
	/*
	 * See if a system disk image is present. If so, mount it and
	 * switch the root vnode to point to it
	 */ 
	if (netboot == FALSE && imageboot_needed()) {
		/* 
		 * An image was found.  No turning back: we're booted
		 * with a kernel from the disk image.
		 */
		imageboot_setup(); 
	}
#endif /* CONFIG_IMAGEBOOT */
  
	/* set initial time; all other resource data is  already zero'ed */
	microtime_with_abstime(&kernproc->p_start, &kernproc->p_stats->ps_start);

#if DEVFS
	{
	    char mounthere[] = "/dev";	/* !const because of internal casting */

	    bsd_init_kprintf("calling devfs_kernel_mount\n");
	    devfs_kernel_mount(mounthere);
	}
#endif /* DEVFS */

	/* Initialize signal state for process 0. */
	bsd_init_kprintf("calling siginit\n");
	siginit(kernproc);

	bsd_init_kprintf("calling bsd_utaskbootstrap\n");
	bsd_utaskbootstrap();

#if defined(__LP64__)
	kernproc->p_flag |= P_LP64;
#endif

	pal_kernel_announce();

	bsd_init_kprintf("calling mountroot_post_hook\n");

	/* invoke post-root-mount hook */
	if (mountroot_post_hook != NULL)
		mountroot_post_hook();

#if 0 /* not yet */
	consider_zone_gc(FALSE);
#endif


	bsd_init_kprintf("done\n");
}
Example #25
/*
 * IP6 initialization: fill in IP6 protocol switch table.
 * All protocols not implemented in kernel go to raw IP6 protocol handler.
 */
void
ip6_init(void)
{
	struct ip6protosw *pr;
	int i;
	struct timeval tv;

#if DIAGNOSTIC
	if (sizeof(struct protosw) != sizeof(struct ip6protosw))
		panic("sizeof(protosw) != sizeof(ip6protosw)");
#endif
	pr = (struct ip6protosw *)pffindproto_locked(PF_INET6, IPPROTO_RAW, SOCK_RAW);
	if (pr == 0)
		panic("ip6_init");
	for (i = 0; i < IPPROTO_MAX; i++)
		ip6_protox[i] = pr;
	for (pr = (struct ip6protosw*)inet6domain.dom_protosw; pr; pr = pr->pr_next) {
		if (!(pr->pr_domain)) continue;    /* If uninitialized, skip */
		if (pr->pr_domain->dom_family == PF_INET6 &&
		    pr->pr_protocol && pr->pr_protocol != IPPROTO_RAW) {
			ip6_protox[pr->pr_protocol] = pr;
		}
	}

	ip6_mutex_grp_attr  = lck_grp_attr_alloc_init();

	ip6_mutex_grp = lck_grp_alloc_init("ip6", ip6_mutex_grp_attr);
	ip6_mutex_attr = lck_attr_alloc_init();

	if ((ip6_mutex = lck_mtx_alloc_init(ip6_mutex_grp, ip6_mutex_attr)) == NULL) {
		panic("ip6_init: can't alloc ip6_mutex\n");
	}
	if ((dad6_mutex = lck_mtx_alloc_init(ip6_mutex_grp, ip6_mutex_attr)) == NULL) {
		panic("ip6_init: can't alloc dad6_mutex\n");
	}
	if ((nd6_mutex = lck_mtx_alloc_init(ip6_mutex_grp, ip6_mutex_attr)) == NULL) {
		panic("ip6_init: can't alloc nd6_mutex\n");
	}

	if ((prefix6_mutex = lck_mtx_alloc_init(ip6_mutex_grp, ip6_mutex_attr)) == NULL) {
		panic("ip6_init: can't alloc prefix6_mutex\n");
	}

	if ((scope6_mutex = lck_mtx_alloc_init(ip6_mutex_grp, ip6_mutex_attr)) == NULL) {
		panic("ip6_init: can't alloc scope6_mutex\n");
	}


	inet6domain.dom_flags = DOM_REENTRANT;	

	ip6intrq.ifq_maxlen = ip6qmaxlen;
	in6_ifaddr_init();
	nd6_init();
	frag6_init();
	icmp6_init();
	/*
	 * In many cases, random() here does NOT return a random number,
	 * because initialization during bootstrap occurs in a fixed order.
	 */
	microtime(&tv);
	ip6_flow_seq = random() ^ tv.tv_usec;
	microtime(&tv);
	ip6_desync_factor = (random() ^ tv.tv_usec) % MAX_TEMP_DESYNC_FACTOR;
	timeout(ip6_init2, (caddr_t)0, 1 * hz);

	lck_mtx_unlock(domain_proto_mtx);	
	proto_register_input(PF_INET6, ip6_proto_input, NULL, 0);
	lck_mtx_lock(domain_proto_mtx);	
}
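
The five mutexes above all come from the same "ip6" lock group and are never freed, since they live for the kernel's lifetime. For completeness, a hedged sketch of what the matching teardown would look like using the standard XNU free routines; the helper name ip6_locks_free is ours, not part of ip6_init:

static void
ip6_locks_free(void)
{
	/* Return each mutex to the group it was allocated from... */
	lck_mtx_free(scope6_mutex, ip6_mutex_grp);
	lck_mtx_free(prefix6_mutex, ip6_mutex_grp);
	lck_mtx_free(nd6_mutex, ip6_mutex_grp);
	lck_mtx_free(dad6_mutex, ip6_mutex_grp);
	lck_mtx_free(ip6_mutex, ip6_mutex_grp);

	/* ...then release the group and both attribute objects. */
	lck_grp_free(ip6_mutex_grp);
	lck_attr_free(ip6_mutex_attr);
	lck_grp_attr_free(ip6_mutex_grp_attr);
}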
Example #26
lck_mtx_t *
mutex_alloc_EXT(
	__unused unsigned short		tag)
{
	return(lck_mtx_alloc_init(&LockCompatGroup, LCK_ATTR_NULL));
}
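
A minimal usage sketch for this compatibility shim, assuming the matching mutex_free_EXT shim from the same compatibility layer is available to release the lock; the wrapper name example_mutex_usage is ours:

static void
example_mutex_usage(void)
{
	lck_mtx_t *m = mutex_alloc_EXT(0);	/* the tag argument is unused */

	lck_mtx_lock(m);
	/* ... critical section ... */
	lck_mtx_unlock(m);

	mutex_free_EXT(m);	/* hands the mutex back to LockCompatGroup */
}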
Example #27
/*
 * Initialize static data used in this file, which is required when the first
 * mount occurs.
 */
void
procfsnode_start_init(void) {
    // Allocate the lock group and the mutex lock for the hash table.
    procfsnode_lck_grp = lck_grp_alloc_init("com.kadmas.procfs.procfsnode_locks", LCK_GRP_ATTR_NULL);
    procfsnode_hash_mutex = lck_mtx_alloc_init(procfsnode_lck_grp, LCK_ATTR_NULL);
}
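
Nothing allocated here is freed until the kext is unloaded; a hedged sketch of the matching cleanup, under the hypothetical name procfsnode_stop_uninit (the real project may name this differently):

void
procfsnode_stop_uninit(void) {
    // Free the mutex first, then the group it was allocated from.
    lck_mtx_free(procfsnode_hash_mutex, procfsnode_lck_grp);
    lck_grp_free(procfsnode_lck_grp);
}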
Example #28
static int
vfs_mount_9p(mount_t mp, vnode_t devvp, user_addr_t data, vfs_context_t ctx)
{
#pragma unused(devvp)
	struct sockaddr *addr, *authaddr;
	struct vfsstatfs *sp;
	char authkey[DESKEYLEN+1];
	kauth_cred_t cred;
	user_args_9p args;
	mount_9p *nmp;
	size_t size;
	fid_9p fid;
	qid_9p qid;
	char *vers;
	int e;

	TRACE();
	nmp = NULL;
	addr = NULL;
	authaddr = NULL;
	fid = NOFID;

	if (vfs_isupdate(mp))
		return ENOTSUP;

	if (vfs_context_is64bit(ctx)) {
		if ((e=copyin(data, &args, sizeof(args))))
			goto error;
	} else {
		args_9p args32;
		if ((e=copyin(data, &args32, sizeof(args32))))
			goto error;
		args.spec			= CAST_USER_ADDR_T(args32.spec);
		args.addr			= CAST_USER_ADDR_T(args32.addr);
		args.addrlen		= args32.addrlen;
		args.authaddr		= CAST_USER_ADDR_T(args32.authaddr);
		args.authaddrlen	= args32.authaddrlen;
		args.volume			= CAST_USER_ADDR_T(args32.volume);
		args.uname			= CAST_USER_ADDR_T(args32.uname);
		args.aname			= CAST_USER_ADDR_T(args32.aname);
		args.authkey		= CAST_USER_ADDR_T(args32.authkey);
		args.flags			= args32.flags;
	}
	e = ENOMEM;
	nmp = malloc_9p(sizeof(*nmp));
	if (nmp == NULL)
		return e;

	nmp->mp = mp;
	TAILQ_INIT(&nmp->req);
	nmp->lck = lck_mtx_alloc_init(lck_grp_9p, LCK_ATTR_NULL);
	nmp->reqlck = lck_mtx_alloc_init(lck_grp_9p, LCK_ATTR_NULL);
	nmp->nodelck = lck_mtx_alloc_init(lck_grp_9p, LCK_ATTR_NULL);
	nmp->node = hashinit(desiredvnodes, M_TEMP, &nmp->nodelen);
	if (nmp->lck==NULL || nmp->reqlck==NULL || nmp->nodelck==NULL || nmp->node==NULL)
		goto error;

	if ((e=nameget_9p(args.volume, &nmp->volume)))
		goto error;
	if ((e=nameget_9p(args.uname, &nmp->uname)))
		goto error;
	if ((e=nameget_9p(args.aname, &nmp->aname)))
		goto error;

	cred = vfs_context_ucred(ctx);
	if (IS_VALID_CRED(cred)) {
		nmp->uid = kauth_cred_getuid(cred);
		nmp->gid = kauth_cred_getgid(cred);
	} else {
		nmp->uid = KAUTH_UID_NONE;
		nmp->gid = KAUTH_GID_NONE;
	}
	
	vfs_getnewfsid(mp);
	vfs_setfsprivate(mp, nmp);
	
	nmp->flags = args.flags;
	if ((e=addrget_9p(args.addr, args.addrlen, &addr)))
		goto error;
	if ((e=connect_9p(nmp, addr)))
		goto error;

	vers = VERSION9P;
	if (ISSET(nmp->flags, FLAG_DOTU))
		vers = VERSION9PDOTU;
	if ((e=version_9p(nmp, vers, &nmp->version)))
		goto error;
	if (ISSET(nmp->flags, FLAG_DOTU) && strcmp(VERSION9PDOTU, nmp->version)==0)
		SET(nmp->flags, F_DOTU);

	nmp->afid = NOFID;
	if (args.authaddr && args.authaddrlen && args.authkey) {
		if ((e=copyin(args.authkey, authkey, DESKEYLEN)))
			goto error;
		if ((e=addrget_9p(args.authaddr, args.authaddrlen, &authaddr)))
			goto error;
		if ((e=auth_9p(nmp, nmp->uname, nmp->aname, nmp->uid, &nmp->afid, &qid)))
			goto error;
		if (nmp->afid!=NOFID &&
			(e=authp9any_9p(nmp, nmp->afid, authaddr, nmp->uname, authkey)))
			goto error;
		bzero(authkey, DESKEYLEN);
	}
	if ((e=attach_9p(nmp, nmp->uname, nmp->aname, nmp->afid, nmp->uid, &fid, &qid)))
		goto error;

	if ((e=nget_9p(nmp, fid, qid, NULL, &nmp->root, NULL, ctx)))
		goto error;

	nunlock_9p(NTO9P(nmp->root));
	e = vnode_ref(nmp->root);
	vnode_put(nmp->root);
	if (e)
		goto error;

	vfs_setauthopaque(mp);
	vfs_clearauthopaqueaccess(mp);
	vfs_setlocklocal(mp);

	// init stats
	sp = vfs_statfs(nmp->mp);
	copyinstr(args.spec, sp->f_mntfromname, MNAMELEN-1, &size);
	bzero(sp->f_mntfromname+size, MNAMELEN-size);
	sp->f_bsize = PAGE_SIZE;
	sp->f_iosize = nmp->msize-IOHDRSZ;
	sp->f_blocks = sp->f_bfree = sp->f_bavail = sp->f_bused = 0;
	sp->f_files = 65535;
	sp->f_ffree = sp->f_files-2;
	sp->f_flags = vfs_flags(mp);
	
	free_9p(addr);
	free_9p(authaddr);
	return 0;

error:
	bzero(authkey, DESKEYLEN);
	free_9p(addr);
	free_9p(authaddr);
	/* nmp is NULL if the initial copyin failed; guard the teardown */
	if (nmp != NULL) {
		if (nmp->so) {
			clunk_9p(nmp, fid);
			disconnect_9p(nmp);
		}
		freemount_9p(nmp);
		vfs_setfsprivate(mp, NULL);
	}
	return e;
}
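
freemount_9p is not shown above; a hedged sketch of the lock cleanup it would have to perform for the three mutexes allocated during mount (the helper name freelocks_9p and its body are our assumption, not the project's actual code):

static void
freelocks_9p(mount_9p *nmp)
{
	/* Each mutex goes back to the shared 9p lock group. */
	if (nmp->lck != NULL)
		lck_mtx_free(nmp->lck, lck_grp_9p);
	if (nmp->reqlck != NULL)
		lck_mtx_free(nmp->reqlck, lck_grp_9p);
	if (nmp->nodelck != NULL)
		lck_mtx_free(nmp->nodelck, lck_grp_9p);
}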