static void
ucom_init(void *arg)
{
	DPRINTF("\n");
	ucom_unrhdr = new_unrhdr(0, UCOM_UNIT_MAX - 1, NULL);
	mtx_init(&ucom_mtx, "UCOM MTX", NULL, MTX_DEF);
}
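Every snippet in this section centers on the unr(9) unit-number allocator. For reference, here is a minimal sketch of the full allocator lifecycle; the example_* names and the [0, 99] range are invented for illustration, while new_unrhdr(), alloc_unr(), free_unr() and delete_unrhdr() are the actual kern/subr_unit.c entry points.

/*
 * Minimal unr(9) lifecycle sketch.  Names and range are illustrative,
 * not taken from any of the drivers in this section.
 */
#include <sys/param.h>
#include <sys/systm.h>

static struct unrhdr *example_unrhdr;

static void
example_init(void)
{
	/* Manage unit numbers in [0, 99]; a NULL mutex selects the
	 * allocator's built-in system-wide mutex. */
	example_unrhdr = new_unrhdr(0, 99, NULL);
}

static int
example_attach(void)
{
	int unit;

	/* alloc_unr() returns -1 when the range is exhausted. */
	unit = alloc_unr(example_unrhdr);
	if (unit == -1)
		return (ENOSPC);
	/* ... create the device instance for "unit" ... */
	return (0);
}

static void
example_detach(int unit)
{
	/* Return the unit for reuse. */
	free_unr(example_unrhdr, unit);
}

static void
example_fini(void)
{
	/* All units must have been freed before deleting the header. */
	delete_unrhdr(example_unrhdr);
}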
/*
 * Initialize fileno bitmap
 */
void
pfs_fileno_init(struct pfs_info *pi)
{

	mtx_init(&pi->pi_mutex, "pfs_fileno", NULL, MTX_DEF);
	pi->pi_unrhdr = new_unrhdr(3, INT_MAX / NO_PID, &pi->pi_mutex);
}
/*
 * Creates the cpuset for thread0.  We make two sets:
 *
 * 0 - The root set which should represent all valid processors in the
 *     system.  It is initially created with a mask of all processors
 *     because we don't know what processors are valid until cpuset_init()
 *     runs.  This set is immutable.
 * 1 - The default set which all processes are a member of until changed.
 *     This allows an administrator to move all threads off of given cpus to
 *     dedicate them to high priority tasks or save power etc.
 */
struct cpuset *
cpuset_thread0(void)
{
	struct cpuset *set;
	int error;

	cpuset_zone = uma_zcreate("cpuset", sizeof(struct cpuset), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
	mtx_init(&cpuset_lock, "cpuset", NULL, MTX_SPIN | MTX_RECURSE);

	/*
	 * Create the root system set for the whole machine.  Doesn't use
	 * cpuset_create() due to NULL parent.
	 */
	set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO);
	CPU_FILL(&set->cs_mask);
	LIST_INIT(&set->cs_children);
	LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
	set->cs_ref = 1;
	set->cs_flags = CPU_SET_ROOT;
	cpuset_zero = set;
	cpuset_root = &set->cs_mask;

	/*
	 * Now derive a default, modifiable set from that to give out.
	 */
	set = uma_zalloc(cpuset_zone, M_WAITOK);
	error = _cpuset_create(set, cpuset_zero, &cpuset_zero->cs_mask, 1);
	KASSERT(error == 0, ("Error creating default set: %d\n", error));

	/*
	 * Initialize the unit allocator.  0 and 1 are allocated above.
	 */
	cpuset_unr = new_unrhdr(2, INT_MAX, NULL);

	return (set);
}
static void
ucom_init(void *arg)
{
	DPRINTF("\n");
	kprintf("ucom init\n");
	ucom_unrhdr = new_unrhdr(0, UCOM_UNIT_MAX - 1, NULL);
	lockinit(&ucom_lock, "UCOM LOCK", 0, 0);
}
void
drm_gem_names_init(struct drm_gem_names *names)
{

	names->unr = new_unrhdr(1, INT_MAX, NULL); /* XXXKIB */
	names->names_hash = hashinit(1000 /* XXXKIB */, M_GEM_NAMES,
	    &names->hash_mask);
	mtx_init(&names->lock, "drmnames", NULL, MTX_DEF);
}
static void
soaio_init(void)
{

	soaio_lifetime = AIOD_LIFETIME_DEFAULT;
	STAILQ_INIT(&soaio_jobs);
	mtx_init(&soaio_jobs_lock, "soaio jobs", NULL, MTX_DEF);
	soaio_kproc_unr = new_unrhdr(1, INT_MAX, NULL);
	TASK_INIT(&soaio_kproc_task, 0, soaio_kproc_create, NULL);
	if (soaio_target_procs > 0)
		taskqueue_enqueue(taskqueue_thread, &soaio_kproc_task);
}
static int
uether_modevent(module_t mod, int type, void *data)
{

	switch (type) {
	case MOD_LOAD:
		ueunit = new_unrhdr(0, INT_MAX, NULL);
		break;
	case MOD_UNLOAD:
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (0);
}
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_gem_mm *mm;

	drm_gem_names_init(&dev->object_names);
	mm = malloc(sizeof(*mm), DRM_MEM_DRIVER, M_WAITOK);
	dev->mm_private = mm;
	if (drm_ht_create(&mm->offset_hash, 19) != 0) {
		free(mm, DRM_MEM_DRIVER);
		return (ENOMEM);
	}
	mm->idxunr = new_unrhdr(0, DRM_GEM_MAX_IDX, NULL);
	return (0);
}
static void
vpid_init(void)
{
	/*
	 * VPID 0 is required when the "enable VPID" execution control is
	 * disabled.
	 *
	 * VPIDs [1,VM_MAXCPU] are used as the "overflow namespace" when the
	 * unit number allocator does not have sufficient unique VPIDs to
	 * satisfy the allocation.
	 *
	 * The remaining VPIDs are managed by the unit number allocator.
	 */
	vpid_unr = new_unrhdr(VM_MAXCPU + 1, 0xffff, NULL);
}
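A sketch of how an allocator partitioned this way gets consumed: when alloc_unr() returns -1 because the [VM_MAXCPU + 1, 0xffff] range is exhausted, the caller falls back to the reserved per-vCPU overflow namespace. The vpid_alloc_sketch() shape below is illustrative and heavily simplified relative to what bhyve actually does.

/*
 * Illustrative sketch only: fall back to the overflow namespace
 * [1, VM_MAXCPU] when the unit allocator runs dry.  The real bhyve
 * allocation path also handles the "VPID disabled" case and
 * invalidates stale TLB entries for overflow VPIDs.
 */
static uint16_t
vpid_alloc_sketch(int vcpuid)
{
	int x;

	x = alloc_unr(vpid_unr);
	if (x == -1) {
		/* Deterministic overflow VPID, one per vCPU slot. */
		return (vcpuid + 1);
	}
	return (x);
}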
/*
 * Mount the filesystem
 */
static int
devfs_mount(struct mount *mp)
{
	int error;
	struct devfs_mount *fmp;
	struct vnode *rvp;

	if (devfs_unr == NULL)
		devfs_unr = new_unrhdr(0, INT_MAX, NULL);

	error = 0;

	if (mp->mnt_flag & (MNT_UPDATE | MNT_ROOTFS))
		return (EOPNOTSUPP);

	fmp = malloc(sizeof *fmp, M_DEVFS, M_WAITOK | M_ZERO);
	fmp->dm_idx = alloc_unr(devfs_unr);
	sx_init(&fmp->dm_lock, "devfsmount");
	fmp->dm_holdcnt = 1;

	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_kern_flag |= MNTK_MPSAFE | MNTK_LOOKUP_SHARED |
	    MNTK_EXTENDED_SHARED;
#ifdef MAC
	mp->mnt_flag |= MNT_MULTILABEL;
#endif
	MNT_IUNLOCK(mp);
	fmp->dm_mount = mp;
	mp->mnt_data = (void *) fmp;
	vfs_getnewfsid(mp);

	fmp->dm_rootdir = devfs_vmkdir(fmp, NULL, 0, NULL, DEVFS_ROOTINO);

	error = devfs_root(mp, LK_EXCLUSIVE, &rvp);
	if (error) {
		sx_destroy(&fmp->dm_lock);
		free_unr(devfs_unr, fmp->dm_idx);
		free(fmp, M_DEVFS);
		return (error);
	}
	VOP_UNLOCK(rvp, 0);
	vfs_mountedfrom(mp, "devfs");
	return (0);
}
static int
uhso_driver_loaded(struct module *mod, int what, void *arg)
{

	switch (what) {
	case MOD_LOAD:
		/* register our autoinstall handler */
		uhso_etag = EVENTHANDLER_REGISTER(usb_dev_configured,
		    uhso_test_autoinst, NULL, EVENTHANDLER_PRI_ANY);
		/* create our unit allocator for inet devs */
		uhso_ifnet_unit = new_unrhdr(0, INT_MAX, NULL);
		break;
	case MOD_UNLOAD:
		EVENTHANDLER_DEREGISTER(usb_dev_configured, uhso_etag);
		delete_unrhdr(uhso_ifnet_unit);
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (0);
}
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_gem_mm *mm;

	drm_gem_names_init(&dev->object_names);
	mm = malloc(sizeof(*mm), DRM_MEM_DRIVER, M_NOWAIT);
	if (!mm) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}
	dev->mm_private = mm;
	if (drm_ht_create(&mm->offset_hash, 19)) {
		free(mm, DRM_MEM_DRIVER);
		return -ENOMEM;
	}
	mm->idxunr = new_unrhdr(0, DRM_GEM_MAX_IDX, NULL);
	return 0;
}
static void
pts_init(void *unused)
{

	pts_pool = new_unrhdr(0, INT_MAX, NULL);
}
static uma_zone_t pipe_zone;
static struct unrhdr *pipeino_unr;
static dev_t pipedev_ino;

SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_ANY, pipeinit, NULL);

static void
pipeinit(void *dummy __unused)
{

	pipe_zone = uma_zcreate("pipe", sizeof(struct pipepair),
	    pipe_zone_ctor, NULL, pipe_zone_init, pipe_zone_fini,
	    UMA_ALIGN_PTR, 0);
	KASSERT(pipe_zone != NULL, ("pipe_zone not initialized"));
	pipeino_unr = new_unrhdr(1, INT32_MAX, NULL);
	KASSERT(pipeino_unr != NULL, ("pipe fake inodes not initialized"));
	pipedev_ino = devfs_alloc_cdp_inode();
	KASSERT(pipedev_ino > 0, ("pipe dev inode not initialized"));
}

static int
pipe_zone_ctor(void *mem, int size, void *arg, int flags)
{
	struct pipepair *pp;
	struct pipe *rpipe, *wpipe;

	KASSERT(size == sizeof(*pp), ("pipe_zone_ctor: wrong size"));

	pp = (struct pipepair *)mem;
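The pipeino_unr allocator created in pipeinit above hands out fake inode numbers for pipes. A hedged sketch of the consuming side, loosely modeled on the lazy allocation done in sys_pipe.c's fstat path; the function name and the pipe_ino field are simplified stand-ins, not the exact FreeBSD names.

/*
 * Illustrative sketch: allocate a fake inode number for a pipe the
 * first time one is actually needed (e.g. from fstat(2)).  Field and
 * function names are simplified, not the exact sys_pipe.c code.
 */
static ino_t
pipe_ino_sketch(struct pipe *pipe)
{
	int new_unr;

	if (pipe->pipe_ino == (ino_t)-1) {
		new_unr = alloc_unr(pipeino_unr);
		if (new_unr != -1)
			pipe->pipe_ino = new_unr;
		else
			pipe->pipe_ino = 0;	/* allocator exhausted */
	}
	return (pipe->pipe_ino);
}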
static void
dtrace_load(void *dummy)
{
	dtrace_provider_id_t id;

	/* Hook into the trap handler. */
	dtrace_trap_func = dtrace_trap;

	/* Hang our hook for thread switches. */
	dtrace_vtime_switch_func = dtrace_vtime_switch;

	/* Hang our hook for exceptions. */
	dtrace_invop_init();

	/*
	 * XXX This is a short term hack to avoid having to comment
	 * out lots and lots of lock/unlock calls.
	 */
	mutex_init(&mod_lock,"XXX mod_lock hack", MUTEX_DEFAULT, NULL);

	/*
	 * Initialise the mutexes without 'witness' because the dtrace
	 * code is mostly written to wait for memory. To have the
	 * witness code change a malloc() from M_WAITOK to M_NOWAIT
	 * because a lock is held would surely create a panic in a
	 * low memory situation. And that low memory situation might be
	 * the very problem we are trying to trace.
	 */
	mutex_init(&dtrace_lock,"dtrace probe state", MUTEX_DEFAULT, NULL);
	mutex_init(&dtrace_provider_lock,"dtrace provider state",
	    MUTEX_DEFAULT, NULL);
	mutex_init(&dtrace_meta_lock,"dtrace meta-provider state",
	    MUTEX_DEFAULT, NULL);
	mutex_init(&dtrace_errlock,"dtrace error lock", MUTEX_DEFAULT, NULL);

	mutex_enter(&dtrace_provider_lock);
	mutex_enter(&dtrace_lock);
	mutex_enter(&cpu_lock);

	ASSERT(MUTEX_HELD(&cpu_lock));

	dtrace_arena = new_unrhdr(1, INT_MAX, &dtrace_unr_mtx);

	dtrace_state_cache = kmem_cache_create("dtrace_state_cache",
	    sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN,
	    NULL, NULL, NULL, NULL, NULL, 0);

	ASSERT(MUTEX_HELD(&cpu_lock));

	dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod),
	    offsetof(dtrace_probe_t, dtpr_nextmod),
	    offsetof(dtrace_probe_t, dtpr_prevmod));

	dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func),
	    offsetof(dtrace_probe_t, dtpr_nextfunc),
	    offsetof(dtrace_probe_t, dtpr_prevfunc));

	dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name),
	    offsetof(dtrace_probe_t, dtpr_nextname),
	    offsetof(dtrace_probe_t, dtpr_prevname));

	if (dtrace_retain_max < 1) {
		cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; "
		    "setting to 1", dtrace_retain_max);
		dtrace_retain_max = 1;
	}

	/*
	 * Now discover our toxic ranges.
	 */
	dtrace_toxic_ranges(dtrace_toxrange_add);

	/*
	 * Before we register ourselves as a provider to our own framework,
	 * we would like to assert that dtrace_provider is NULL -- but that's
	 * not true if we were loaded as a dependency of a DTrace provider.
	 * Once we've registered, we can assert that dtrace_provider is our
	 * pseudo provider.
	 */
	(void) dtrace_register("dtrace", &dtrace_provider_attr,
	    DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id);

	ASSERT(dtrace_provider != NULL);
	ASSERT((dtrace_provider_id_t)dtrace_provider == id);

	dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t)
	    dtrace_provider, NULL, NULL, "BEGIN", 0, NULL);
	dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t)
	    dtrace_provider, NULL, NULL, "END", 0, NULL);
	dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t)
	    dtrace_provider, NULL, NULL, "ERROR", 1, NULL);

	mutex_exit(&cpu_lock);

	/*
	 * If DTrace helper tracing is enabled, we need to allocate the
	 * trace buffer and initialize the values.
	 */
	if (dtrace_helptrace_enabled) {
		ASSERT(dtrace_helptrace_buffer == NULL);
		dtrace_helptrace_buffer =
		    kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP);
		dtrace_helptrace_next = 0;
	}

	mutex_exit(&dtrace_lock);
	mutex_exit(&dtrace_provider_lock);

	mutex_enter(&cpu_lock);

	/* Setup the boot CPU */
	(void) dtrace_cpu_setup(CPU_CONFIG, 0);

	mutex_exit(&cpu_lock);

#if __FreeBSD_version < 800039
	/* Enable device cloning. */
	clone_setup(&dtrace_clones);

	/* Setup device cloning events. */
	eh_tag = EVENTHANDLER_REGISTER(dev_clone, dtrace_clone, 0, 1000);
#else
	dtrace_dev = make_dev(&dtrace_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600,
	    "dtrace/dtrace");
	helper_dev = make_dev(&helper_cdevsw, 0, UID_ROOT, GID_WHEEL, 0660,
	    "dtrace/helper");
#endif

	return;
}
static void
dtrace_load(void *dummy)
{
	dtrace_provider_id_t id;
#ifdef EARLY_AP_STARTUP
	int i;
#endif

#ifndef illumos
	/*
	 * DTrace uses negative logic for the destructive mode switch, so it
	 * is required to translate from the sysctl which uses positive logic.
	 */
	if (dtrace_allow_destructive)
		dtrace_destructive_disallow = 0;
	else
		dtrace_destructive_disallow = 1;
#endif

	/* Hook into the trap handler. */
	dtrace_trap_func = dtrace_trap;

	/* Hang our hook for thread switches. */
	dtrace_vtime_switch_func = dtrace_vtime_switch;

	/* Hang our hook for exceptions. */
	dtrace_invop_init();

	dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri, 0, 0, 0);

	dtrace_arena = new_unrhdr(1, INT_MAX, &dtrace_unr_mtx);

	/* Register callbacks for linker file load and unload events. */
	dtrace_kld_load_tag = EVENTHANDLER_REGISTER(kld_load,
	    dtrace_kld_load, NULL, EVENTHANDLER_PRI_ANY);
	dtrace_kld_unload_try_tag = EVENTHANDLER_REGISTER(kld_unload_try,
	    dtrace_kld_unload_try, NULL, EVENTHANDLER_PRI_ANY);

	/*
	 * Initialise the mutexes without 'witness' because the dtrace
	 * code is mostly written to wait for memory. To have the
	 * witness code change a malloc() from M_WAITOK to M_NOWAIT
	 * because a lock is held would surely create a panic in a
	 * low memory situation. And that low memory situation might be
	 * the very problem we are trying to trace.
	 */
	mutex_init(&dtrace_lock,"dtrace probe state", MUTEX_DEFAULT, NULL);
	mutex_init(&dtrace_provider_lock,"dtrace provider state",
	    MUTEX_DEFAULT, NULL);
	mutex_init(&dtrace_meta_lock,"dtrace meta-provider state",
	    MUTEX_DEFAULT, NULL);
#ifdef DEBUG
	mutex_init(&dtrace_errlock,"dtrace error lock", MUTEX_DEFAULT, NULL);
#endif

	mutex_enter(&cpu_lock);
	mutex_enter(&dtrace_provider_lock);
	mutex_enter(&dtrace_lock);

	dtrace_state_cache = kmem_cache_create("dtrace_state_cache",
	    sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN,
	    NULL, NULL, NULL, NULL, NULL, 0);

	ASSERT(MUTEX_HELD(&cpu_lock));

	dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod),
	    offsetof(dtrace_probe_t, dtpr_nextmod),
	    offsetof(dtrace_probe_t, dtpr_prevmod));

	dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func),
	    offsetof(dtrace_probe_t, dtpr_nextfunc),
	    offsetof(dtrace_probe_t, dtpr_prevfunc));

	dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name),
	    offsetof(dtrace_probe_t, dtpr_nextname),
	    offsetof(dtrace_probe_t, dtpr_prevname));

	if (dtrace_retain_max < 1) {
		cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; "
		    "setting to 1", dtrace_retain_max);
		dtrace_retain_max = 1;
	}

	/*
	 * Now discover our toxic ranges.
	 */
	dtrace_toxic_ranges(dtrace_toxrange_add);

	/*
	 * Before we register ourselves as a provider to our own framework,
	 * we would like to assert that dtrace_provider is NULL -- but that's
	 * not true if we were loaded as a dependency of a DTrace provider.
	 * Once we've registered, we can assert that dtrace_provider is our
	 * pseudo provider.
	 */
	(void) dtrace_register("dtrace", &dtrace_provider_attr,
	    DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id);

	ASSERT(dtrace_provider != NULL);
	ASSERT((dtrace_provider_id_t)dtrace_provider == id);

	dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t)
	    dtrace_provider, NULL, NULL, "BEGIN", 0, NULL);
	dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t)
	    dtrace_provider, NULL, NULL, "END", 0, NULL);
	dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t)
	    dtrace_provider, NULL, NULL, "ERROR", 1, NULL);

	mutex_exit(&dtrace_lock);
	mutex_exit(&dtrace_provider_lock);

#ifdef EARLY_AP_STARTUP
	CPU_FOREACH(i) {
		(void) dtrace_cpu_setup(CPU_CONFIG, i);
	}
#else
	/* Setup the boot CPU */
	(void) dtrace_cpu_setup(CPU_CONFIG, 0);
#endif

	mutex_exit(&cpu_lock);

	dtrace_dev = make_dev(&dtrace_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600,
	    "dtrace/dtrace");
	helper_dev = make_dev(&helper_cdevsw, 0, UID_ROOT, GID_WHEEL, 0660,
	    "dtrace/helper");
}
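On FreeBSD, the dtrace_arena created in dtrace_load stands in for the vmem arena that illumos uses as the source of probe IDs. A hedged sketch of the consuming side, reduced to just the ID handling; the wrapper function name is hypothetical, and the real dtrace_probe_create() also builds and hashes the probe itself.

/*
 * Sketch of how dtrace_arena is consumed: on FreeBSD, probe IDs come
 * from the unr(9) allocator rather than the illumos vmem arena.
 */
static dtrace_id_t
dtrace_probeid_alloc_sketch(void)
{
	dtrace_id_t id;

#ifdef illumos
	id = (dtrace_id_t)(uintptr_t)vmem_alloc(dtrace_arena, 1,
	    VM_BESTFIT | VM_SLEEP);
#else
	/* Unit numbers stand in for the vmem arena on FreeBSD. */
	id = alloc_unr(dtrace_arena);
#endif
	return (id);
}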