/*
 * Return register `reg' to the free pool of register set `drp'.
 *
 * The register must be a valid index (reg 0 is reserved and never
 * handed out) and must currently be marked allocated in the bitmap;
 * freeing an unallocated register indicates a caller bug.
 */
void
dt_regset_free(dt_regset_t *drp, int reg)
{
	/* Reject the reserved register 0 and out-of-range indices. */
	assert(reg > 0 && reg < drp->dr_size);

	/* Catch double-frees: the bit must be set before we clear it. */
	assert(BT_TEST(drp->dr_bitmap, reg) != 0);

	BT_CLEAR(drp->dr_bitmap, reg);
}
/*
 * Execute pfnWorker on every CPU except the one the caller is running on.
 *
 * @param   pfnWorker   Worker function invoked once per remote CPU.
 * @param   pvUser1     First opaque user argument passed to the worker.
 * @param   pvUser2     Second opaque user argument passed to the worker.
 * @returns VINF_SUCCESS.
 */
RTDECL(int) RTMpOnOthers(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
    RTMPARGS Args;
    RT_ASSERT_INTS_ON();

    Args.pfnWorker = pfnWorker;
    Args.pvUser1   = pvUser1;
    Args.pvUser2   = pvUser2;
    Args.idCpu     = RTMpCpuId();
    Args.cHits     = 0;

    /* The caller is supposed to have disabled preemption, but take no chances. */
    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
    RTThreadPreemptDisable(&PreemptState);

    /*
     * Build a CPU set covering every CPU, then knock out our own.
     * Bug fix: the original wrote auCpus[0] on every loop iteration,
     * leaving words 1..IPRT_SOL_SET_WORDS-1 uninitialized so CPUs in
     * those words were never targeted by the cross call.
     */
    RTSOLCPUSET CpuSet;
    for (int i = 0; i < IPRT_SOL_SET_WORDS; i++)
        CpuSet.auCpus[i] = (ulong_t)-1L;
    BT_CLEAR(CpuSet.auCpus, RTMpCpuId());

    rtMpSolCrossCall(&CpuSet, rtMpSolOnOtherCpusWrapper, &Args);

    RTThreadPreemptRestore(&PreemptState);
    return VINF_SUCCESS;
}
/*
 * Clean up scheduler activations state associated with an exiting
 * (or execing) lwp.  t is always the current thread.
 *
 * Detaches the shared schedctl page slot from the thread, removes the
 * context-switch callbacks, and returns the slot to the per-process
 * free pool.  The page itself is NOT unmapped here (see below).
 */
void
schedctl_lwp_cleanup(kthread_t *t)
{
	sc_shared_t *ssp = t->t_schedctl;
	proc_t *p = ttoproc(t);
	sc_page_ctl_t *pagep;
	index_t index;

	/* p_lock must not be held: removectx/mutex_enter below may block. */
	ASSERT(MUTEX_NOT_HELD(&p->p_lock));

	/*
	 * Detach the shared slot from the thread under thread_lock so the
	 * clock-driven code cannot observe a half-cleared state.
	 */
	thread_lock(t);	/* protect against ts_tick and ts_update */
	t->t_schedctl = NULL;
	t->t_sc_uaddr = 0;
	thread_unlock(t);

	/*
	 * Remove the context op to avoid the final call to
	 * schedctl_save when switching away from this lwp.
	 */
	(void) removectx(t, ssp, schedctl_save, schedctl_restore, schedctl_fork, NULL, NULL, NULL);

	/*
	 * Do not unmap the shared page until the process exits.
	 * User-level library code relies on this for adaptive mutex locking.
	 *
	 * Instead, just mark this slot free so it can be handed to another
	 * lwp of the same process.  All slot bookkeeping is protected by
	 * the per-process p_sc_lock.
	 */
	mutex_enter(&p->p_sc_lock);
	ssp->sc_state = SC_FREE;
	/* Locate the control struct for the page containing this slot. */
	pagep = schedctl_page_lookup(ssp);
	/* Slot index = offset of ssp from the page base, in slot units. */
	index = (index_t)(ssp - pagep->spc_base);
	BT_CLEAR(pagep->spc_map, index);	/* mark slot unallocated */
	pagep->spc_space += sizeof (sc_shared_t);	/* return its space */
	mutex_exit(&p->p_sc_lock);
}
/*
 * Return the hardware watchpoint slot backing `wp' to the free pool.
 * The slot must currently be marked in use in kaif_waptmap; releasing
 * a slot twice is a caller bug.
 */
static void
kaif_wapt_release(kmdb_wapt_t *wp)
{
	int slot = KAIF_WPPRIV2ID(wp);

	ASSERT(BT_TEST(&kaif_waptmap, slot));
	BT_CLEAR(&kaif_waptmap, slot);
}
/*
 * Close entry point for the cpc pseudo-device.
 *
 * Releases the per-CPU reservation recorded at open time under the
 * writer lock; the last outstanding close also frees the CPU bitmap.
 * Always succeeds.
 */
/*ARGSUSED1*/
static int
kcpc_close(dev_t dev, int flags, int otyp, cred_t *cr)
{
	rw_enter(&kcpc_cpuctx_lock, RW_WRITER);

	/* Drop this minor's claim on its CPU. */
	BT_CLEAR(kcpc_cpumap, getminor(dev));

	/* Last close: tear down the bitmap entirely. */
	if (--kcpc_cpuctx == 0) {
		kmem_free(kcpc_cpumap, BT_SIZEOFMAP(max_cpuid + 1));
		kcpc_cpumap = NULL;
	}
	ASSERT(kcpc_cpuctx >= 0);

	rw_exit(&kcpc_cpuctx_lock);
	return (0);
}