Example #1
0
static int
null_reclaim(struct vnop_reclaim_args * ap)
{
	struct vnode * vp;
	struct null_node * xp;
	struct vnode * lowervp;
	struct null_mount * null_mp = MOUNTTONULLMOUNT(vnode_mount(ap->a_vp));

	NULLFSDEBUG("%s %p\n", __FUNCTION__, ap->a_vp);

	vp = ap->a_vp;

	xp      = VTONULL(vp);
	lowervp = xp->null_lowervp;

	lck_mtx_lock(&null_mp->nullm_lock);

	vnode_removefsref(vp);

	if (lowervp != NULL) {
		/* root and second don't have a lowervp, so nothing to release and nothing
		 * got hashed */
		if (xp->null_flags & NULL_FLAG_HASHED) {
			/* only call this if we actually made it into the hash list; reclaim is
			   also called to clean up a vnode that was created unnecessarily under
			   race conditions */
			null_hashrem(xp);
		}
		vnode_getwithref(lowervp);
		vnode_rele(lowervp);
		vnode_put(lowervp);
	}

	if (vp == null_mp->nullm_rootvp) {
		null_mp->nullm_rootvp = NULL;
	} else if (vp == null_mp->nullm_secondvp) {
		null_mp->nullm_secondvp = NULL;
	} else if (vp == null_mp->nullm_thirdcovervp) {
		null_mp->nullm_thirdcovervp = NULL;
	}

	lck_mtx_unlock(&null_mp->nullm_lock);

	cache_purge(vp);
	vnode_clearfsnode(vp);

	FREE(xp, M_TEMP);

	return 0;
}
Example #2
0
int
bpfkqfilter(dev_t dev, struct knote *kn)
{
	struct bpf_d *d;

	/*
	 * Is this device a bpf?
	 */
	if (major(dev) != CDEV_MAJOR) {
		return (EINVAL);
	}

	if (kn->kn_filter != EVFILT_READ) {
		return (EINVAL);
	}

	lck_mtx_lock(bpf_mlock);

	d = bpf_dtab[minor(dev)];
	if (d == 0 || d == (void *)1) {
		lck_mtx_unlock(bpf_mlock);
		return (ENXIO);
	}

	/*
	 * An imitation of the FIONREAD ioctl code.
	 */
	if (d->bd_bif == NULL) {
		lck_mtx_unlock(bpf_mlock);
		return (ENXIO);
	}

	kn->kn_hook = d;
	kn->kn_fop = &bpfread_filtops;
	KNOTE_ATTACH(&d->bd_sel.si_note, kn);
	lck_mtx_unlock(bpf_mlock);
	return 0;
}
Example #3
0
void
net_add_domain(struct domain *dp)
{
	kprintf("Adding domain %s (family %d)\n", dp->dom_name,
		dp->dom_family);
	/* First, link in the domain */

	lck_mtx_lock(domain_proto_mtx);
	concat_domain(dp);

	init_domain(dp);
	lck_mtx_unlock(domain_proto_mtx);

}
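For illustration only, a minimal sketch of how a protocol family might be handed to net_add_domain(); the my_domain/my_protosw names, the field selection, and the designated-initializer form are assumptions rather than anything taken from the examples in this listing.

/* Hypothetical domain registration (sketch; names and fields are assumed). */
static struct protosw my_protosw[1];        /* assumed per-protocol switch table */

static struct domain my_domain = {
	.dom_family  = PF_MAX - 1,          /* assumed unused protocol family */
	.dom_name    = "mydomain",
	.dom_protosw = my_protosw,
};

static void
my_domain_attach(void)
{
	/* net_add_domain() takes domain_proto_mtx itself, so call it unlocked */
	net_add_domain(&my_domain);
}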
Example #4
0
static void
in6_rtqtimo(void *rock)
{
	struct radix_node_head *rnh = rock;
	struct rtqk_arg arg;
	struct timeval atv;
	static time_t last_adjusted_timeout = 0;
	struct timeval timenow;

	lck_mtx_lock(rnh_lock);
	/* Get the timestamp after we acquire the lock for better accuracy */
	getmicrotime(&timenow);

	arg.found = arg.killed = 0;
	arg.rnh = rnh;
	arg.nextstop = timenow.tv_sec + rtq_timeout;
	arg.draining = arg.updating = 0;
	rnh->rnh_walktree(rnh, in6_rtqkill, &arg);

	/*
	 * Attempt to be somewhat dynamic about this:
	 * If there are ``too many'' routes sitting around taking up space,
	 * then crank down the timeout, and see if we can't make some more
	 * go away.  However, we make sure that we will never adjust more
	 * than once in rtq_timeout seconds, to keep from cranking down too
	 * hard.
	 */
	if ((arg.found - arg.killed > rtq_toomany)
	   && (timenow.tv_sec - last_adjusted_timeout >= rtq_timeout)
	   && rtq_reallyold > rtq_minreallyold) {
		rtq_reallyold = 2*rtq_reallyold / 3;
		if (rtq_reallyold < rtq_minreallyold) {
			rtq_reallyold = rtq_minreallyold;
		}

		last_adjusted_timeout = timenow.tv_sec;
#if DIAGNOSTIC
		log(LOG_DEBUG, "in6_rtqtimo: adjusted rtq_reallyold to %d",
		    rtq_reallyold);
#endif
		arg.found = arg.killed = 0;
		arg.updating = 1;
		rnh->rnh_walktree(rnh, in6_rtqkill, &arg);
	}

	atv.tv_usec = 0;
	atv.tv_sec = arg.nextstop - timenow.tv_sec;
	lck_mtx_unlock(rnh_lock);
	timeout(in6_rtqtimo, rock, tvtohz(&atv));
}
Example #5
0
/* helper function to handle locking where possible */
static int
nullfs_checkspecialvp(struct vnode* vp)
{
	int result = 0;
	struct null_mount * null_mp;

	null_mp = MOUNTTONULLMOUNT(vnode_mount(vp));

	lck_mtx_lock(&null_mp->nullm_lock);
	result = (nullfs_isspecialvp(vp));
	lck_mtx_unlock(&null_mp->nullm_lock);

	return result;
}
Example #6
void atalk_load()
{
	atp_init();
	atp_link();
	adspInited = 0;

/*	adsp_init(); 
		for 2225395
		this happens in adsp_open and is undone on ADSP_UNLINK 
*/
	lck_mtx_unlock(domain_proto_mtx);
	proto_register_input(PF_APPLETALK, at_input_packet, NULL, 0);
	lck_mtx_lock(domain_proto_mtx);
} /* atalk_load */
Example #7
0
void
in_rtqdrain(void)
{
	struct radix_node_head *rnh = rt_tables[AF_INET];
	struct rtqk_arg arg;
	arg.found = arg.killed = 0;
	arg.rnh = rnh;
	arg.nextstop = 0;
	arg.draining = 1;
	arg.updating = 0;
	lck_mtx_lock(rt_mtx);
	rnh->rnh_walktree(rnh, in_rtqkill, &arg);
	lck_mtx_unlock(rt_mtx);
}
Example #8
0
/* register callbacks for certain task/thread events for tasks/threads with
   associated hv objects */
kern_return_t
hv_set_callbacks(hv_callbacks_t callbacks) {
	kern_return_t kr = KERN_FAILURE;

	lck_mtx_lock(hv_support_lck_mtx);
	if (hv_callbacks_enabled == 0) {	
		hv_callbacks = callbacks;
		hv_callbacks_enabled = 1;
		kr = KERN_SUCCESS;
	}
	lck_mtx_unlock(hv_support_lck_mtx);

	return kr;
}
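As a hedged illustration of the register-once contract above, a caller might treat anything other than KERN_SUCCESS as "another client already owns the callbacks"; the my_hv_client_init name and the NULL handlers are placeholders, not part of the kernel interface, and only the .dispatch/.preempt field names come from the examples in this listing.

/* Hypothetical caller (sketch): register callbacks exactly once. */
static kern_return_t
my_hv_client_init(void)
{
	hv_callbacks_t cb = {
		.dispatch = NULL,   /* real handlers would go here */
		.preempt  = NULL,
	};

	if (hv_set_callbacks(cb) != KERN_SUCCESS) {
		/* callbacks already registered by someone else */
		return KERN_FAILURE;
	}
	return KERN_SUCCESS;
}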
Example #9
0
void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
			   IOPhysicalAddress * physicalAddress)
{
    mach_vm_address_t	address = 0;

    if (size == 0)
	return 0;
    if (alignment == 0) 
	alignment = 1;

    /* Do we want a physical address? */
    if (!physicalAddress)
    {
	address = IOKernelAllocateWithPhysicalRestrict(size, 0 /*maxPhys*/, alignment, true);
    }
    else do
    {
	IOBufferMemoryDescriptor * bmd;
	mach_vm_address_t          physicalMask;
	vm_offset_t		   alignMask;

	alignMask = alignment - 1;
	physicalMask = (0xFFFFFFFF ^ alignMask);

	bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
		kernel_task, kIOMemoryPhysicallyContiguous, size, physicalMask);
	if (!bmd)
	    break;
	
	_IOMallocContiguousEntry *
	entry = IONew(_IOMallocContiguousEntry, 1);
	if (!entry)
	{
	    bmd->release();
	    break;
	}
	entry->virtualAddr = (mach_vm_address_t) bmd->getBytesNoCopy();
	entry->md          = bmd;
	lck_mtx_lock(gIOMallocContiguousEntriesLock);
	queue_enter( &gIOMallocContiguousEntries, entry, 
		    _IOMallocContiguousEntry *, link );
	lck_mtx_unlock(gIOMallocContiguousEntriesLock);

	address          = (mach_vm_address_t) entry->virtualAddr;
	*physicalAddress = bmd->getPhysicalAddress();
    }
    while (false);

    return (void *) address;
}
Example #10
0
static void
flowadv_thread_func(void *v, wait_result_t w)
{
#pragma unused(v, w)
	lck_mtx_lock(&fadv_lock);
	(void) msleep0(&fadv_list, &fadv_lock, (PSOCK | PSPIN),
	    "flowadv", 0, flowadv_thread_cont);
	/*
	 * msleep0() shouldn't have returned as PCATCH was not set;
	 * therefore assert in this case.
	 */
	lck_mtx_unlock(&fadv_lock);
	VERIFY(0);
}
Example #11
0
void
pffasttimo(__unused void *arg)
{
	register struct domain *dp;
	register struct protosw *pr;

	lck_mtx_lock(domain_proto_mtx);
	for (dp = domains; dp; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr; pr = pr->pr_next)
			if (pr->pr_fasttimo)
				(*pr->pr_fasttimo)();
	lck_mtx_unlock(domain_proto_mtx);
	timeout(pffasttimo, NULL, hz/PR_FASTHZ);
}
Example #12
0
int
net_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp, 
           user_addr_t newp, size_t newlen, __unused struct proc *p)
{
	register struct domain *dp;
	register struct protosw *pr;
	int family, protocol, error;

	/*
	 * All sysctl names at this level are nonterminal;
	 * next two components are protocol family and protocol number,
	 * then at least one additional component.
	 */
	if (namelen < 3)
		return (EISDIR);		/* overloaded */
	family = name[0];
	protocol = name[1];

	if (family == 0)
		return (0);
	lck_mtx_lock(domain_proto_mtx);
	for (dp = domains; dp; dp = dp->dom_next)
		if (dp->dom_family == family)
			goto found;
	lck_mtx_unlock(domain_proto_mtx);
	return (ENOPROTOOPT);
found:
	for (pr = dp->dom_protosw; pr; pr = pr->pr_next)
		if (pr->pr_protocol == protocol && pr->pr_sysctl) {
			error = (*pr->pr_sysctl)(name + 2, namelen - 2,
			    (void *)(uintptr_t)oldp, oldlenp, (void *)(uintptr_t)newp, newlen);
			lck_mtx_unlock(domain_proto_mtx);
			return (error);
		}
	lck_mtx_unlock(domain_proto_mtx);
	return (ENOPROTOOPT);
}
Example #13
0
/* release callbacks for task/thread events */
void
hv_release_callbacks(void) {
	lck_mtx_lock(hv_support_lck_mtx);	
	hv_callbacks = (hv_callbacks_t) {
		.dispatch = NULL,
		.preempt = NULL,
		.thread_destroy = NULL,
		.task_destroy = NULL,
		.volatile_state = NULL,
		.memory_pressure = NULL
	};

	hv_callbacks_enabled = 0;
	lck_mtx_unlock(hv_support_lck_mtx);
}
Example #14
0
/* -----------------------------------------------------------------------------
Called by socket layer when a new socket is created
Should create all the structures and prepare for L2TP dialog
----------------------------------------------------------------------------- */
int l2tp_attach (struct socket *so, int proto, struct proc *p)
{
    int			error;

    //IOLog("l2tp_attach, so = %p, dom_ref = %d\n", so, so->so_proto->pr_domain->dom_refs);
    if (so->so_pcb)
        return EINVAL;

    if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
        error = soreserve(so, 8192, 8192);
        if (error)
            return error;
    }
   
    // call l2tp init with the rfc specific structure
	lck_mtx_lock(ppp_domain_mutex);
    if (l2tp_rfc_new_client(so, (void**)&(so->so_pcb), l2tp_input, l2tp_event)) {
		lck_mtx_unlock(ppp_domain_mutex);
        return ENOMEM;
    }

	lck_mtx_unlock(ppp_domain_mutex);
    return 0;
}
Example #15
0
/*
 * Drops a reference on the passed reason, deallocates
 * the reason if no references remain.
 */
void
os_reason_free(os_reason_t cur_reason)
{
	if (cur_reason == OS_REASON_NULL) {
		return;
	}

	lck_mtx_lock(&cur_reason->osr_lock);

	assert(cur_reason->osr_refcount > 0);

	cur_reason->osr_refcount--;
	if (cur_reason->osr_refcount != 0) {
		lck_mtx_unlock(&cur_reason->osr_lock);
		return;
	}

	os_reason_dealloc_buffer(cur_reason);

	lck_mtx_unlock(&cur_reason->osr_lock);
	lck_mtx_destroy(&cur_reason->osr_lock, os_reason_lock_grp);

	zfree(os_reason_zone, cur_reason);
}
Example #16
0
void
pfctlinput2(int cmd, struct sockaddr *sa, void *ctlparam)
{
	struct domain *dp;
	struct protosw *pr;

	if (!sa)
		return;

	lck_mtx_lock(domain_proto_mtx);
	for (dp = domains; dp; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr; pr = pr->pr_next)
			if (pr->pr_ctlinput)
				(*pr->pr_ctlinput)(cmd, sa, ctlparam);
	lck_mtx_unlock(domain_proto_mtx);
}
Example #17
0
void
flowadv_add(struct flowadv_fclist *fcl)
{
	if (STAILQ_EMPTY(fcl))
		return;

	lck_mtx_lock_spin(&fadv_lock);

	STAILQ_CONCAT(&fadv_list, fcl);
	VERIFY(!STAILQ_EMPTY(&fadv_list));

	if (!fadv_active && fadv_thread != THREAD_NULL)
		wakeup_one((caddr_t)&fadv_list);

	lck_mtx_unlock(&fadv_lock);
}
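For context, a sketch of the producer side: assemble a local flowadv_fclist and hand it to flowadv_add() in one call. The flowadv_alloc_entry() helper and its argument are assumptions here; only the fce_* fields and FLOWSRC_INPCB come from the examples in this listing (see Example #20).

/* Hypothetical producer (sketch): queue one advisory and hand off the list. */
static void
my_report_flow_resumed(uint32_t flowid)
{
	struct flowadv_fclist fcl;
	struct flowadv_fcentry *fce;

	STAILQ_INIT(&fcl);

	fce = flowadv_alloc_entry(M_WAITOK);    /* assumed allocator */
	if (fce == NULL)
		return;

	fce->fce_flowsrc = FLOWSRC_INPCB;
	fce->fce_flowid  = flowid;
	STAILQ_INSERT_TAIL(&fcl, fce, fce_link);

	flowadv_add(&fcl);                      /* wakes the worker thread if idle */
}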
Example #18
0
/*
 * Takes a reference on the passed reason.
 */
void
os_reason_ref(os_reason_t cur_reason)
{
	if (cur_reason == OS_REASON_NULL) {
		return;
	}

	lck_mtx_lock(&cur_reason->osr_lock);

	assert(cur_reason->osr_refcount > 0);
	cur_reason->osr_refcount++;

	lck_mtx_unlock(&cur_reason->osr_lock);

	return;
}
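Taken together with os_reason_free() above, a hedged sketch of the intended usage: any structure that stores a reason takes its own reference and drops it on teardown. The my_holder type is purely illustrative; only os_reason_ref(), os_reason_free(), os_reason_t, and OS_REASON_NULL come from the examples in this listing.

/* Hypothetical holder (sketch): pair os_reason_ref() with os_reason_free(). */
struct my_holder {
	os_reason_t mh_reason;
};

static void
my_holder_set_reason(struct my_holder *mh, os_reason_t reason)
{
	os_reason_ref(reason);            /* +1 held on behalf of the holder */
	mh->mh_reason = reason;
}

static void
my_holder_destroy(struct my_holder *mh)
{
	os_reason_free(mh->mh_reason);    /* drop the reference; freed at zero */
	mh->mh_reason = OS_REASON_NULL;
}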
Example #19
0
static int osquery_open(dev_t dev, int oflags, int devtype, struct proc *p) {
  // Close is not working so leave these out for now.
  int err = 0;
  lck_mtx_lock(osquery.mtx);
  if (osquery.open_count == 0) {
    osquery.open_count++;
  }
#ifndef KERNEL_TEST
  else {
    err = -EACCES;
  }
#endif // !KERNEL_TEST

  lck_mtx_unlock(osquery.mtx);
  return err;
}
Example #20
0
static int
flowadv_thread_cont(int err)
{
#pragma unused(err)
	for (;;) {
		lck_mtx_assert(&fadv_lock, LCK_MTX_ASSERT_OWNED);
		while (STAILQ_EMPTY(&fadv_list)) {
			VERIFY(!fadv_active);
			(void) msleep0(&fadv_list, &fadv_lock, (PSOCK | PSPIN),
			    "flowadv_cont", 0, flowadv_thread_cont);
			/* NOTREACHED */
		}

		fadv_active = 1;
		for (;;) {
			struct flowadv_fcentry *fce;

			VERIFY(!STAILQ_EMPTY(&fadv_list));
			fce = STAILQ_FIRST(&fadv_list);
			STAILQ_REMOVE(&fadv_list, fce,
			    flowadv_fcentry, fce_link);
			STAILQ_NEXT(fce, fce_link) = NULL;

			lck_mtx_unlock(&fadv_lock);
			switch (fce->fce_flowsrc) {
			case FLOWSRC_INPCB:
				inp_flowadv(fce->fce_flowid);
				break;

			case FLOWSRC_IFNET:
				ifnet_flowadv(fce->fce_flowid);
				break;

			case FLOWSRC_PF:
			default:
				break;
			}
			flowadv_free_entry(fce);
			lck_mtx_lock_spin(&fadv_lock);

			/* if there's no pending request, we're done */
			if (STAILQ_EMPTY(&fadv_list))
				break;
		}
		fadv_active = 0;
	}
}
Example #21
0
/*
 * Insertion is O(n) due to the priority scan, but optimises to O(1)
 * if all priorities are identical.
 */
static eventhandler_tag
eventhandler_register_internal(
    struct eventhandler_lists_ctxt *evthdlr_lists_ctxt,
    struct eventhandler_list *list,
    const char *name, eventhandler_tag epn)
{
	struct eventhandler_list		*new_list;
	struct eventhandler_entry		*ep;

	if (evthdlr_lists_ctxt == NULL)
		evthdlr_lists_ctxt = &evthdlr_lists_ctxt_glb;

	VERIFY(evthdlr_lists_ctxt->eventhandler_lists_initted); /* eventhandler registered too early */
	VERIFY(epn != NULL); /* cannot register NULL event */

	/* lock the eventhandler lists */
	lck_mtx_lock(&evthdlr_lists_ctxt->eventhandler_mutex);

	/* Do we need to find/create the (slow) list? */
	if (list == NULL) {
		/* look for a matching, existing list */
		list = _eventhandler_find_list(evthdlr_lists_ctxt, name);

		/* Do we need to create the list? */
		if (list == NULL) {
			lck_mtx_unlock(&evthdlr_lists_ctxt->eventhandler_mutex);

			MALLOC(new_list, struct eventhandler_list *,
			    sizeof(struct eventhandler_list) + strlen(name) + 1,
			    M_EVENTHANDLER, M_WAITOK);

			/* If someone else created it already, then use that one. */
			lck_mtx_lock(&evthdlr_lists_ctxt->eventhandler_mutex);
			list = _eventhandler_find_list(evthdlr_lists_ctxt, name);
			if (list != NULL) {
				FREE(new_list, M_EVENTHANDLER);
			} else {
				evhlog((LOG_DEBUG, "%s: creating list \"%s\"", __func__, name));
				list = new_list;
				list->el_flags = 0;
				list->el_runcount = 0;
				bzero(&list->el_lock, sizeof(list->el_lock));
				list->el_name = (char *)list + sizeof(struct eventhandler_list);
				strlcpy(list->el_name, name, strlen(name) + 1);
				TAILQ_INSERT_HEAD(&evthdlr_lists_ctxt->eventhandler_lists, list, el_link);
			}
		}
Example #22
0
static int
pflog_clone_destroy(struct ifnet *ifp)
{
	struct pflog_softc *pflogif = ifp->if_softc;

	lck_rw_lock_shared(pf_perim_lock);
	lck_mtx_lock(pf_lock);
	pflogifs[pflogif->sc_unit] = NULL;
	LIST_REMOVE(pflogif, sc_list);
	lck_mtx_unlock(pf_lock);
	lck_rw_done(pf_perim_lock);

	/* bpfdetach() is taken care of as part of interface detach */
	(void) ifnet_detach(ifp);

	return 0;
}
Example #23
0
/* register a list of trap handlers for the hv_*_trap syscalls */
kern_return_t
hv_set_traps(hv_trap_type_t trap_type, const hv_trap_t *traps,
	unsigned trap_count)
{
	hv_trap_table_t *trap_table = &hv_trap_table[trap_type];
	kern_return_t kr = KERN_FAILURE;

	lck_mtx_lock(hv_support_lck_mtx);
	if (trap_table->trap_count == 0) {	
		trap_table->traps = traps;
		OSMemoryBarrier();
		trap_table->trap_count = trap_count;
		kr = KERN_SUCCESS;
	}
	lck_mtx_unlock(hv_support_lck_mtx);

	return kr;
}
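A hedged sketch of the consumer side this ordering protects: a dispatcher that bounds-checks trap_count before indexing traps only stays safe because hv_set_traps() stores the array, issues OSMemoryBarrier(), and only then sets the count. The my_hv_dispatch name and the single-uint64_t hv_trap_t signature are assumptions, not part of the interface shown above.

/* Hypothetical dispatcher (sketch): the count is checked before the array is used. */
static kern_return_t
my_hv_dispatch(hv_trap_type_t trap_type, unsigned index, uint64_t arg)
{
	hv_trap_table_t *trap_table = &hv_trap_table[trap_type];

	if (index >= trap_table->trap_count)
		return KERN_INVALID_ARGUMENT;

	/* a nonzero trap_count implies the traps pointer is already visible */
	return trap_table->traps[index](arg);
}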
Example #24
0
/*
 * Act like null_hashget, but add passed null_node to hash if no existing
 * node found.
 */
static int
null_hashins(struct mount * mp, struct null_node * xp, struct vnode ** vpp)
{
	struct null_node_hashhead * hd;
	struct null_node * oxp;
	struct vnode * ovp;
	int error = 0;

	hd = NULL_NHASH(xp->null_lowervp);
	lck_mtx_lock(&null_hashmtx);
	LIST_FOREACH(oxp, hd, null_hash)
	{
		if (oxp->null_lowervp == xp->null_lowervp && vnode_mount(NULLTOV(oxp)) == mp) {
			/*
			 * See null_hashget for a description of this
			 * operation.
			 */
			ovp = NULLTOV(oxp);
			if (oxp->null_lowervid != vnode_vid(oxp->null_lowervp)) {
				/* The lower vnode has been recycled (its vid no longer matches), so
				 * the node we are trying to add is stale; return an error rather
				 * than adding it to the hash. */
				error = EIO;
				goto end;
			}
			/* if we found something in the hash map then grab an iocount */
			error = vnode_getwithvid(ovp, oxp->null_myvid);
			if (error == 0) {
				*vpp = ovp;
			}
			goto end;
		}
	}
	/* if it wasn't in the hash map then the vnode pointed to by xp already has an
	 * iocount, so don't bother */
	LIST_INSERT_HEAD(hd, xp, null_hash);
	xp->null_flags |= NULL_FLAG_HASHED;
end:
	lck_mtx_unlock(&null_hashmtx);
	return error;
}
Example #25
0
void
pfslowtimo(__unused void *arg)
{
	register struct domain *dp;
	register struct protosw *pr;

	lck_mtx_lock(domain_proto_mtx);
	for (dp = domains; dp; dp = dp->dom_next) 
		for (pr = dp->dom_protosw; pr; pr = pr->pr_next) {
			if (pr->pr_slowtimo)
				(*pr->pr_slowtimo)();
			if (do_reclaim && pr->pr_drain)
				(*pr->pr_drain)();
		}
	do_reclaim = 0;
	lck_mtx_unlock(domain_proto_mtx);
	timeout(pfslowtimo, NULL, hz/PR_SLOWHZ);
}
Example #26
0
uint32_t
kpc_get_thread_counting(void)
{
	uint32_t kpc_thread_classes_tmp;
	int kpc_threads_counting_tmp;

	/* Make sure we get a consistent snapshot of these values */
	lck_mtx_lock(&kpc_thread_lock);

	kpc_thread_classes_tmp = kpc_thread_classes;
	kpc_threads_counting_tmp = kpc_threads_counting;

	lck_mtx_unlock(&kpc_thread_lock);

	if( kpc_threads_counting_tmp )
		return kpc_thread_classes_tmp;
	else
		return 0;
}
Example #27
0
u_int32_t
scope6_addr2default(struct in6_addr *addr)
{
	u_int32_t id = 0;
	int index = in6_addrscope(addr);

	/*
	 * special case: The loopback address should be considered as
	 * link-local, but there's no ambiguity in the syntax.
	 */
	if (IN6_IS_ADDR_LOOPBACK(addr))
		return (0);

	lck_mtx_lock(&scope6_lock);
	id = sid_default.s6id_list[index];
	lck_mtx_unlock(&scope6_lock);

	return (id);
}
Example #28
0
static errno_t sf_attach(void** cookie, socket_t so)
{
    errno_t ret = 0;
    
    lck_mtx_lock(global_mutex);

    char name[PATH_MAX];
    
    proc_selfname(name, PATH_MAX);
    pp("proc_selfname: %s", name);
    if (strncmp(name, "nc", 3)) {
        pp("装载到进程: %s", name);
    } else {
        ret = ENOPOLICY;
    }
    
    lck_mtx_unlock(global_mutex);

    return ret;
}
Example #29
0
int
kpc_get_period(uint32_t classes, uint64_t *val)
{
	uint32_t count = 0;
	uint64_t pmc_mask = 0ULL;

	assert(val);

	lck_mtx_lock(&kpc_config_lock);

	if (classes & KPC_CLASS_FIXED_MASK) {
		/* convert reload values to periods */
		count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
		for (uint32_t i = 0; i < count; ++i)
			*val++ = kpc_fixed_max() - FIXED_RELOAD(i);
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

		/* convert reload values to periods */
		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i)
			if ((1ULL << i) & pmc_mask)
				*val++ = kpc_configurable_max() - CONFIGURABLE_RELOAD(i);
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

		/* convert reload values to periods */
		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i)
			if ((1ULL << i) & pmc_mask)
				*val++ = kpc_configurable_max() - CONFIGURABLE_RELOAD(i);
	}

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}
Example #30
0
/*
 * Detach bpf from an interface.  This involves detaching each descriptor
 * associated with the interface, and leaving bd_bif NULL.  Notify each
 * descriptor as it's detached so that any sleepers wake up and get
 * ENXIO.
 */
void
bpfdetach(struct ifnet *ifp)
{
	struct bpf_if	*bp, *bp_prev, *bp_next;
	struct bpf_if	*bp_free = NULL;
	struct bpf_d	*d;

	
	lck_mtx_lock(bpf_mlock);

	/* Locate BPF interface information */
	bp_prev = NULL;
	for (bp = bpf_iflist; bp != NULL; bp = bp_next) {
		bp_next = bp->bif_next;
		if (ifp != bp->bif_ifp) {
			bp_prev = bp;
			continue;
		}
		
		while ((d = bp->bif_dlist) != NULL) {
			bpf_detachd(d);
			bpf_wakeup(d);
		}
	
		if (bp_prev) {
			bp_prev->bif_next = bp->bif_next;
		} else {
			bpf_iflist = bp->bif_next;
		}
		
		bp->bif_next = bp_free;
		bp_free = bp;
		
		ifnet_release(ifp);
	}

	lck_mtx_unlock(bpf_mlock);

	/* free the detached interface structures now that the lock is dropped */
	while ((bp = bp_free) != NULL) {
		bp_free = bp->bif_next;
		FREE(bp, M_DEVBUF);
	}
}