Example No. 1
/*
 * Initialize hash links for nfsnodes
 * and build nfsnode free list.
 */
void
nfs_nhinit(void)
{
	nfs_node_hash_lck_grp = lck_grp_alloc_init("nfs_node_hash", LCK_GRP_ATTR_NULL);
	nfs_node_hash_mutex = lck_mtx_alloc_init(nfs_node_hash_lck_grp, LCK_ATTR_NULL);
	nfs_node_lck_grp = lck_grp_alloc_init("nfs_node", LCK_GRP_ATTR_NULL);
	nfs_data_lck_grp = lck_grp_alloc_init("nfs_data", LCK_GRP_ATTR_NULL);
}
Example No. 2
/*
 * Initialize the eventhandler mutex and list.
 */
void
eventhandler_init(void)
{
	eventhandler_mutex_grp_attr = lck_grp_attr_alloc_init();
	eventhandler_mutex_grp = lck_grp_alloc_init("eventhandler",
	    eventhandler_mutex_grp_attr);
	eventhandler_mutex_attr = lck_attr_alloc_init();

	el_lock_grp_attr = lck_grp_attr_alloc_init();
	el_lock_grp = lck_grp_alloc_init("eventhandler list",
	    el_lock_grp_attr);
	el_lock_attr = lck_attr_alloc_init();

	eventhandler_lists_ctxt_init(&evthdlr_lists_ctxt_glb);
}
Example No. 3
/**
 * Mount helper: allocate locking group attribute and locking group itself.
 * Store allocated data into VBoxVFS private data.
 *
 * @param pMount   VBoxVFS global data which will be updated with
 *                 the locking group and its attribute on success;
 *                 otherwise pMount is left unchanged.
 *
 * @return 0 on success or BSD error code otherwise.
 *
 */
static int
vboxvfs_prepare_locking(vboxvfs_mount_t *pMount)
{
    lck_grp_attr_t *pGrpAttr;
    lck_grp_t      *pGrp;

    AssertReturn(pMount, EINVAL);

    pGrpAttr = lck_grp_attr_alloc_init();
    if (pGrpAttr)
    {
        pGrp = lck_grp_alloc_init("VBoxVFS", pGrpAttr);
        if (pGrp)
        {
            pMount->pLockGroupAttr = pGrpAttr;
            pMount->pLockGroup     = pGrp;

            return 0;
        }
        else
            PERROR("Unable to allocate locking group");

        lck_grp_attr_free(pGrpAttr);
    }
    else
        PERROR("Unable to allocate locking group attribute");

    return ENOMEM;
}
Example No. 4
void
kpc_thread_init(void)
{
	kpc_thread_lckgrp_attr = lck_grp_attr_alloc_init();
	kpc_thread_lckgrp = lck_grp_alloc_init("kpc", kpc_thread_lckgrp_attr);
	lck_mtx_init(&kpc_thread_lock, kpc_thread_lckgrp, LCK_ATTR_NULL);
}
Example No. 5
funnel_t *
funnel_alloc(
	int type)
{
	lck_mtx_t	*m;
	funnel_t	*fnl;

	if (funnel_lck_grp == LCK_GRP_NULL) {
		funnel_lck_grp_attr = lck_grp_attr_alloc_init();

		funnel_lck_grp = lck_grp_alloc_init("Funnel",  funnel_lck_grp_attr);

		funnel_lck_attr = lck_attr_alloc_init();
	}
	if ((fnl = (funnel_t *)kalloc(sizeof(funnel_t))) != 0){
		bzero((void *)fnl, sizeof(funnel_t));
		if ((m = lck_mtx_alloc_init(funnel_lck_grp, funnel_lck_attr)) == (lck_mtx_t *)NULL) {
			kfree(fnl, sizeof(funnel_t));
			return(THR_FUNNEL_NULL);
		}
		fnl->fnl_mutex = m;
		fnl->fnl_type = type;
	}
	return(fnl);
}
Example No. 6
void
os_reason_init(void)
{
	int reasons_allocated = 0;

	/*
	 * Initialize OS reason group and lock attributes
	 */
	os_reason_lock_grp_attr = lck_grp_attr_alloc_init();
	os_reason_lock_grp = lck_grp_alloc_init("os_reason_lock", os_reason_lock_grp_attr);
	os_reason_lock_attr = lck_attr_alloc_init();

	/*
	 * Create OS reason zone.
	 */
	os_reason_zone = zinit(sizeof(struct os_reason), OS_REASON_MAX_COUNT * sizeof(struct os_reason),
				OS_REASON_MAX_COUNT, "os reasons");
	if (os_reason_zone == NULL) {
		panic("failed to initialize os_reason_zone");
	}

	/*
	 * We pre-fill the OS reason zone to reduce the likelihood that
	 * the jetsam thread and others block when they create an exit
	 * reason. This pre-filled memory is not-collectable since it's
	 * foreign memory crammed in as part of zfill().
	 */
	reasons_allocated = zfill(os_reason_zone, OS_REASON_RESERVE_COUNT);
	assert(reasons_allocated > 0);
}
Example No. 7
void
cs_init(void)
{
#if MACH_ASSERT && __x86_64__
	panic_on_cs_killed = 1;
#endif /* MACH_ASSERT && __x86_64__ */
	PE_parse_boot_argn("panic_on_cs_killed", &panic_on_cs_killed,
			   sizeof (panic_on_cs_killed));
#if !SECURE_KERNEL
	int disable_cs_enforcement = 0;
	PE_parse_boot_argn("cs_enforcement_disable", &disable_cs_enforcement, 
			   sizeof (disable_cs_enforcement));
	if (disable_cs_enforcement) {
		cs_enforcement_enable = 0;
	} else {
		int panic = 0;
		PE_parse_boot_argn("cs_enforcement_panic", &panic, sizeof(panic));
		cs_enforcement_panic = (panic != 0);
	}

	PE_parse_boot_argn("cs_debug", &cs_debug, sizeof (cs_debug));

#if !CONFIG_ENFORCE_LIBRARY_VALIDATION
	PE_parse_boot_argn("cs_library_val_enable", &cs_library_val_enable,
			   sizeof (cs_library_val_enable));
#endif
#endif /* !SECURE_KERNEL */

	lck_grp_attr_t *attr = lck_grp_attr_alloc_init();
	cs_lockgrp = lck_grp_alloc_init("KERNCS", attr);
}
Example No. 8
void
tcp_lro_init(void)
{
	int i;

	bzero(lro_flow_list, sizeof (struct lro_flow) * TCP_LRO_NUM_FLOWS);
	for (i = 0; i < TCP_LRO_FLOW_MAP; i++) {
		lro_flow_map[i] = TCP_LRO_FLOW_UNINIT;
	}

	/*
	 * allocate lock group attribute, group and attribute for tcp_lro_lock
	 */
	tcp_lro_mtx_grp_attr = lck_grp_attr_alloc_init();
	tcp_lro_mtx_grp = lck_grp_alloc_init("tcplro", tcp_lro_mtx_grp_attr);
	tcp_lro_mtx_attr = lck_attr_alloc_init();
	lck_mtx_init(&tcp_lro_lock, tcp_lro_mtx_grp, tcp_lro_mtx_attr);

	tcp_lro_timer = thread_call_allocate(tcp_lro_timer_proc, NULL);
	if (tcp_lro_timer == NULL) {
		panic_plain("%s: unable to allocate lro timer", __func__);
	}

	return;
}
Example No. 9
/* initial setup done at time of sysinit */
void
pipeinit(void)
{
	vm_size_t zone_size;

	nbigpipe = 0;

	zone_size = 8192 * sizeof(struct pipe);
	pipe_zone = zinit(sizeof(struct pipe), zone_size, 4096, "pipe zone");

	/* allocate lock group attribute and group for pipe mutexes */
	pipe_mtx_grp_attr = lck_grp_attr_alloc_init();
	pipe_mtx_grp = lck_grp_alloc_init("pipe", pipe_mtx_grp_attr);

	/* allocate the lock attribute for pipe mutexes */
	pipe_mtx_attr = lck_attr_alloc_init();

	/*
	 * Set up garbage collection for dead pipes
	 */
	zone_size = (PIPE_GARBAGE_QUEUE_LIMIT + 20) *
	    sizeof(struct pipe_garbage);
	pipe_garbage_zone = (zone_t)zinit(sizeof(struct pipe_garbage),
	    zone_size, 4096, "pipe garbage zone");
	pipe_garbage_lock = lck_mtx_alloc_init(pipe_mtx_grp, pipe_mtx_attr);
}
Example No. 10
File: IOLib.cpp Project: Prajna/xnu
void IOLibInit(void)
{
    kern_return_t ret;

    static bool libInitialized;

    if(libInitialized)
        return;	

    gIOKitPageableSpace.maps[0].address = 0;
    ret = kmem_suballoc(kernel_map,
                    &gIOKitPageableSpace.maps[0].address,
                    kIOPageableMapSize,
                    TRUE,
                    VM_FLAGS_ANYWHERE,
                    &gIOKitPageableSpace.maps[0].map);
    if (ret != KERN_SUCCESS)
        panic("failed to allocate iokit pageable map\n");

    IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL);

    gIOKitPageableSpace.lock 		= lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    gIOKitPageableSpace.maps[0].end	= gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
    gIOKitPageableSpace.hint		= 0;
    gIOKitPageableSpace.count		= 1;

    gIOMallocContiguousEntriesLock 	= lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    queue_init( &gIOMallocContiguousEntries );

    libInitialized = true;
}
Example No. 11
void
flowadv_init(void)
{
	STAILQ_INIT(&fadv_list);

	/* Setup lock group and attribute for fadv_lock */
	fadv_lock_grp_attr = lck_grp_attr_alloc_init();
	fadv_lock_grp = lck_grp_alloc_init("fadv_lock", fadv_lock_grp_attr);
	lck_mtx_init(&fadv_lock, fadv_lock_grp, NULL);

	fadv_zone_size = P2ROUNDUP(sizeof (struct flowadv_fcentry),
	    sizeof (u_int64_t));
	fadv_zone = zinit(fadv_zone_size,
	    FADV_ZONE_MAX * fadv_zone_size, 0, FADV_ZONE_NAME);
	if (fadv_zone == NULL) {
		panic("%s: failed allocating %s", __func__, FADV_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(fadv_zone, Z_EXPAND, TRUE);
	zone_change(fadv_zone, Z_CALLERACCT, FALSE);

	if (kernel_thread_start(flowadv_thread_func, NULL, &fadv_thread) !=
	    KERN_SUCCESS) {
		panic("%s: couldn't create flow event advisory thread",
		    __func__);
		/* NOTREACHED */
	}
	thread_deallocate(fadv_thread);
}
Example No. 12
void
kpc_common_init(void)
{
	kpc_config_lckgrp_attr = lck_grp_attr_alloc_init();
	kpc_config_lckgrp = lck_grp_alloc_init("kpc", kpc_config_lckgrp_attr);
	lck_mtx_init(&kpc_config_lock, kpc_config_lckgrp, LCK_ATTR_NULL);
}
Example No. 13
void
ecc_log_init(void)
{
	ecc_prefer_panic = !PE_reboot_on_panic();
	ecc_data_lock_group = lck_grp_alloc_init("ecc-data", NULL);
	lck_spin_init(&ecc_data_lock, ecc_data_lock_group, NULL);
	OSMemoryBarrier();
}
Example No. 14
/*
 * Initialize cnode hash table.
 */
__private_extern__
void
hfs_chashinit(void)
{
	chash_lck_grp_attr = lck_grp_attr_alloc_init();
	chash_lck_grp  = lck_grp_alloc_init("cnode_hash", chash_lck_grp_attr);
	chash_lck_attr = lck_attr_alloc_init();
}
Example No. 15
void
OSMalloc_init(
	void)
{
	queue_init(&OSMalloc_tag_list);

	OSMalloc_tag_lck_grp = lck_grp_alloc_init("OSMalloc_tag", LCK_GRP_ATTR_NULL);
	lck_mtx_init(&OSMalloc_tag_lock, OSMalloc_tag_lck_grp, LCK_ATTR_NULL);
}
Example No. 16
void
init_system_override(void)
{
	sys_override_mtx_grp_attr = lck_grp_attr_alloc_init();
	sys_override_mtx_grp = lck_grp_alloc_init("system_override", sys_override_mtx_grp_attr);
	sys_override_mtx_attr = lck_attr_alloc_init();
	lck_mtx_init(&sys_override_lock, sys_override_mtx_grp, sys_override_mtx_attr);
	io_throttle_assert_cnt = cpu_throttle_assert_cnt = 0;
}
Example No. 17
/* Initialize the mutex governing access to the SysV msg subsystem */
__private_extern__ void
sysv_msg_lock_init( void )
{
	sysv_msg_subsys_lck_grp_attr = lck_grp_attr_alloc_init();

	sysv_msg_subsys_lck_grp = lck_grp_alloc_init("sysv_msg_subsys_lock", sysv_msg_subsys_lck_grp_attr);

	sysv_msg_subsys_lck_attr = lck_attr_alloc_init();
	lck_mtx_init(&sysv_msg_subsys_mutex, sysv_msg_subsys_lck_grp, sysv_msg_subsys_lck_attr);
}
Example No. 18
void
kperf_bootstrap(void)
{
	kperf_cfg_lckgrp_attr = lck_grp_attr_alloc_init();
	kperf_cfg_lckgrp = lck_grp_alloc_init("kperf cfg", kperf_cfg_lckgrp_attr);
	lck_mtx_init(&kperf_cfg_lock, kperf_cfg_lckgrp, LCK_ATTR_NULL);

	kperf_cfg_initted = TRUE;
}
Example No. 19
static inline void setup_locks() {
  // Create locks. Cannot be done on the stack.
  osquery.lck_grp_attr = lck_grp_attr_alloc_init();
  lck_grp_attr_setstat(osquery.lck_grp_attr);
  osquery.lck_grp = lck_grp_alloc_init("osquery", osquery.lck_grp_attr);
  osquery.lck_attr = lck_attr_alloc_init();

  // MTX is the IOCTL API handling lock.
  // This assures only one daemon will use the kernel API simultaneously.
  osquery.mtx = lck_mtx_alloc_init(osquery.lck_grp, osquery.lck_attr);
}
Example No. 20
static inline void setup_locks() {
    /* Create locks.  Cannot be done on the stack. */
    osquery.lck_grp_attr = lck_grp_attr_alloc_init();
    lck_grp_attr_setstat(osquery.lck_grp_attr);

    osquery.lck_grp = lck_grp_alloc_init("osquery", osquery.lck_grp_attr);

    osquery.lck_attr = lck_attr_alloc_init();

    osquery.mtx = lck_mtx_alloc_init(osquery.lck_grp, osquery.lck_attr);
}
Example No. 21
/* hv_support boot initialization */
void
hv_support_init(void) {
#if defined(__x86_64__) && CONFIG_VMX
	hv_support_available = vmx_hv_support();
#endif

	hv_support_lck_grp = lck_grp_alloc_init("hv_support", LCK_GRP_ATTR_NULL);
	assert(hv_support_lck_grp);

	hv_support_lck_mtx = lck_mtx_alloc_init(hv_support_lck_grp, LCK_ATTR_NULL);
	assert(hv_support_lck_mtx);
}
Example No. 22
void
zfs_context_init(void)
{
	uint64_t kern_mem_size;

	zfs_lock_attr = lck_attr_alloc_init();
	zfs_group_attr = lck_grp_attr_alloc_init();
#if 0
	lck_attr_setdebug(zfs_lock_attr);
#endif
	zfs_mutex_group  = lck_grp_alloc_init("zfs-mutex", zfs_group_attr);
	zfs_rwlock_group = lck_grp_alloc_init("zfs-rwlock", zfs_group_attr);
	zfs_spinlock_group = lck_grp_alloc_init("zfs-spinlock", zfs_group_attr);

	zfs_kmem_alloc_tag = OSMalloc_Tagalloc("ZFS general purpose", 
			OSMT_DEFAULT);

	max_ncpus = 1;

	/* kernel memory space is 4 GB max */
	kern_mem_size = MIN(max_mem, (uint64_t)0x0FFFFFFFFULL);

	/* Calculate number of pages of memory on the system */
	physmem = kern_mem_size / PAGE_SIZE;

	/* Constrain our memory use on smaller memory systems */
	if (kern_mem_size <= 0x20000000)
		zfs_footprint.maximum = kern_mem_size / 7;    /* 512MB: ~15 % */
	else if (kern_mem_size <= 0x30000000)
		zfs_footprint.maximum = kern_mem_size / 5;    /* 768MB: ~20 % */
	else if (kern_mem_size <= 0x40000000)
		zfs_footprint.maximum = kern_mem_size / 3;    /* 1GB: ~33 % */
	else	/* set to 1GB limit maximum*/ 
		zfs_footprint.maximum = MIN((kern_mem_size / 2), 0x40000000);

	recalc_target_footprint(100);

	printf("zfs_context_init: footprint.maximum=%lu, footprint.target=%lu\n",
		zfs_footprint.maximum, zfs_footprint.target);
}
Example No. 23
File: bpf.c Project: SbIm/xnu-env
void
bpf_init(__unused void *unused)
{
#ifdef __APPLE__
	int 	i;
	int	maj;

	if (bpf_devsw_installed == 0) {
		bpf_devsw_installed = 1;

		bpf_mlock_grp_attr = lck_grp_attr_alloc_init();
		bpf_mlock_grp = lck_grp_alloc_init("bpf", bpf_mlock_grp_attr);
		bpf_mlock_attr = lck_attr_alloc_init();
		bpf_mlock = lck_mtx_alloc_init(bpf_mlock_grp, bpf_mlock_attr);

		if (bpf_mlock == 0) {
			printf("bpf_init: failed to allocate bpf_mlock\n");
			bpf_devsw_installed = 0;
			return;
		}
		
		maj = cdevsw_add(CDEV_MAJOR, &bpf_cdevsw);
		if (maj == -1) {
			if (bpf_mlock)
				lck_mtx_free(bpf_mlock, bpf_mlock_grp);
			if (bpf_mlock_attr)
				lck_attr_free(bpf_mlock_attr);
			if (bpf_mlock_grp)
				lck_grp_free(bpf_mlock_grp);
			if (bpf_mlock_grp_attr)
				lck_grp_attr_free(bpf_mlock_grp_attr);
			
			bpf_mlock = NULL;
			bpf_mlock_attr = NULL;
			bpf_mlock_grp = NULL;
			bpf_mlock_grp_attr = NULL;
			bpf_devsw_installed = 0;
			printf("bpf_init: failed to allocate a major number!\n");
			return;
		}

		for (i = 0 ; i < NBPFILTER; i++)
			bpf_make_dev_t(maj);
	}
#else
	cdevsw_add(&bpf_cdevsw);
#endif
}
Example No. 24
__private_extern__ kern_return_t
kext_start_9p(kmod_info_t *ki, void *d)
{
#pragma unused(ki)
#pragma unused(d)
	int e;

	TRACE();
	lck_grp_9p = lck_grp_alloc_init(VFS9PNAME, LCK_GRP_ATTR_NULL);
	if ((e = vfs_fsadd(&vfs_fsentry_9p, &vfstable_9p)) != 0)
		return KERN_FAILURE;

	return KERN_SUCCESS;
}
Example No. 25
void
fuse_sysctl_start(void)
{
    int i;
#if OSXFUSE_ENABLE_MACFUSE_MODE
    osxfuse_lock_group  = lck_grp_alloc_init("osxfuse", NULL);
    osxfuse_sysctl_lock = lck_mtx_alloc_init(osxfuse_lock_group, NULL);
#endif

    sysctl_register_oid(&sysctl__osxfuse);
    for (i = 0; fuse_sysctl_list[i]; i++) {
       sysctl_register_oid(fuse_sysctl_list[i]);
    }
}
Example No. 26
rMutex
rpal_mutex_create( void )
{
    lck_mtx_t* mutex = NULL;
    
    lck_grp_attr_t* gattr = NULL;
    lck_attr_t* lattr = NULL;
    
    if( NULL == g_lck_group )
    {
        rpal_debug_info( "mutex group not created, creating" );
        
        gattr = lck_grp_attr_alloc_init();
        
        if( NULL == gattr )
        {
            rpal_debug_critical( "could not create mutex group" );
            return NULL;
        }
        
        lck_grp_attr_setstat( gattr );
        
        g_lck_group = lck_grp_alloc_init( "hcphbs", gattr );
        
        lck_grp_attr_free( gattr );
    }
    
    if( NULL == g_lck_group )
    {
        return NULL;
    }
    
    lattr = lck_attr_alloc_init();
    
    if( NULL != lattr )
    {
        mutex = lck_mtx_alloc_init( g_lck_group, lattr );
        lck_attr_free( lattr );
    }
    else
    {
        rpal_debug_critical( "could not create mutex attributes" );
    }
    
    return mutex;
}
Example No. 27
/*
 * Initialize raw connection block q.
 */
void
rip_init(struct protosw *pp, struct domain *dp)
{
#pragma unused(dp)
	static int rip_initialized = 0;
	struct inpcbinfo *pcbinfo;

	VERIFY((pp->pr_flags & (PR_INITIALIZED|PR_ATTACHED)) == PR_ATTACHED);

	if (rip_initialized)
		return;
	rip_initialized = 1;

	LIST_INIT(&ripcb);
	ripcbinfo.ipi_listhead = &ripcb;
	/*
	 * XXX We don't use the hash list for raw IP, but it's easier
	 * to allocate a one entry hash list than it is to check all
	 * over the place for ipi_hashbase == NULL.
	 */
	ripcbinfo.ipi_hashbase = hashinit(1, M_PCB, &ripcbinfo.ipi_hashmask);
	ripcbinfo.ipi_porthashbase = hashinit(1, M_PCB, &ripcbinfo.ipi_porthashmask);

	ripcbinfo.ipi_zone = zinit(sizeof(struct inpcb),
	    (4096 * sizeof(struct inpcb)), 4096, "ripzone");

	pcbinfo = &ripcbinfo;
	/*
	 * allocate lock group attribute and group for raw IP PCB mutexes
	 */
	pcbinfo->ipi_lock_grp_attr = lck_grp_attr_alloc_init();
	pcbinfo->ipi_lock_grp = lck_grp_alloc_init("ripcb", pcbinfo->ipi_lock_grp_attr);

	/*
	 * allocate the lock attribute for raw IP PCB mutexes
	 */
	pcbinfo->ipi_lock_attr = lck_attr_alloc_init();
	if ((pcbinfo->ipi_lock = lck_rw_alloc_init(pcbinfo->ipi_lock_grp,
	    pcbinfo->ipi_lock_attr)) == NULL) {
		panic("%s: unable to allocate PCB lock\n", __func__);
		/* NOTREACHED */
	}

	in_pcbinfo_attach(&ripcbinfo);
}
Example No. 28
void lpx_datagram_init(void)
{
	DEBUG_PRINT(DEBUG_MASK_DGRAM_TRACE, ("lpx_datagram_init: Entered.\n"));
	
	// Init Lock.
	datagram_mtx_grp_attr = lck_grp_attr_alloc_init();
	lck_grp_attr_setdefault(datagram_mtx_grp_attr);
	
	datagram_mtx_grp = lck_grp_alloc_init("datagrampcb", datagram_mtx_grp_attr);
	
	datagram_mtx_attr = lck_attr_alloc_init();
	lck_attr_setdefault(datagram_mtx_attr);
	
	if ((lpx_datagram_pcb.lpxp_list_rw = lck_rw_alloc_init(datagram_mtx_grp, datagram_mtx_attr)) == NULL) {
		DEBUG_PRINT(DEBUG_MASK_STREAM_ERROR, ("lpx_datagram_init: Can't alloc mtx\n"));
	}
	
	return;
}
Example No. 29
/* This should only be called from the bootstrap thread. */
void
ktrace_init(void)
{
	static lck_grp_attr_t *lock_grp_attr = NULL;
	static lck_grp_t *lock_grp = NULL;
	static boolean_t initialized = FALSE;

	if (initialized) {
		return;
	}

	lock_grp_attr = lck_grp_attr_alloc_init();
	lock_grp = lck_grp_alloc_init("ktrace", lock_grp_attr);
	lck_grp_attr_free(lock_grp_attr);

	ktrace_lock = lck_mtx_alloc_init(lock_grp, LCK_ATTR_NULL);
	assert(ktrace_lock);
	initialized = TRUE;
}
Example No. 30
static kern_return_t
register_locks(void)
{
	/* already allocated? */
	if (ucode_slock_grp_attr && ucode_slock_grp && ucode_slock_attr && ucode_slock)
		return KERN_SUCCESS;

	/* allocate lock group attribute and group */
	if (!(ucode_slock_grp_attr = lck_grp_attr_alloc_init()))
		goto nomem_out;

	lck_grp_attr_setstat(ucode_slock_grp_attr);

	if (!(ucode_slock_grp = lck_grp_alloc_init("uccode_lock", ucode_slock_grp_attr)))
		goto nomem_out;

	/* Allocate lock attribute */
	if (!(ucode_slock_attr = lck_attr_alloc_init()))
		goto nomem_out;

	/* Allocate the spin lock */
	/* We keep one global spin-lock. We could have one per update
	 * request... but srsly, why would you update microcode like that?
	 */
	if (!(ucode_slock = lck_spin_alloc_init(ucode_slock_grp, ucode_slock_attr)))
		goto nomem_out;

	return KERN_SUCCESS;

nomem_out:
	/* clean up */
	if (ucode_slock)
		lck_spin_free(ucode_slock, ucode_slock_grp);
	if (ucode_slock_attr)
		lck_attr_free(ucode_slock_attr);
	if (ucode_slock_grp)
		lck_grp_free(ucode_slock_grp);
	if (ucode_slock_grp_attr)
		lck_grp_attr_free(ucode_slock_grp_attr);

	return KERN_NO_SPACE;
}