void
ufsdirhash_init(void)
{

	mutex_init(&ufsdirhash_lock, MUTEX_DEFAULT, IPL_NONE);
	ufsdirhashblk_cache = pool_cache_init(DH_NBLKOFF * sizeof(daddr_t), 0,
	    0, 0, "dirhashblk", NULL, IPL_NONE, NULL, NULL, NULL);
	ufsdirhash_cache = pool_cache_init(sizeof(struct dirhash), 0,
	    0, 0, "dirhash", NULL, IPL_NONE, NULL, NULL, NULL);
	TAILQ_INIT(&ufsdirhash_list);
	ufsdirhash_sysctl_init();
}
void
soinit2(void)
{

	socket_cache = pool_cache_init(sizeof(struct socket), 0, 0, 0,
	    "socket", NULL, IPL_SOFTNET, NULL, NULL, NULL);
}
/*
 * mutex_obj_init:
 *
 *	Initialize the mutex object store.
 */
void
mutex_obj_init(void)
{

	mutex_obj_cache = pool_cache_init(sizeof(struct kmutexobj),
	    coherency_unit, 0, 0, "mutex", NULL, IPL_NONE, mutex_obj_ctor,
	    NULL, NULL);
}
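/*
 * A note on the constructor argument above: pool_cache_init() ctor
 * callbacks take (arg, object, flags), run when a fresh object enters
 * the cache (not on every pool_cache_get()), and return 0 on success.
 * A minimal sketch of such a constructor follows; the mo_magic field
 * and MUTEX_OBJ_MAGIC value are illustrative assumptions, not taken
 * from the snippet above.
 */
static int
mutex_obj_ctor(void *arg, void *obj, int flags)
{
	struct kmutexobj *mo = obj;

	/* Tag the object so later consistency checks can verify it. */
	mo->mo_magic = MUTEX_OBJ_MAGIC;

	return 0;	/* nonzero would fail the allocation */
}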
void
radix_tree_init(void)
{

	radix_tree_node_cache = pool_cache_init(
	    sizeof(struct radix_tree_node), 0, 0, 0, "radix_tree_node",
	    NULL, IPL_NONE, radix_tree_node_ctor, NULL, NULL);
	KASSERT(radix_tree_node_cache != NULL);
}
void
soinit2(void)
{

	socket_cache = pool_cache_init(sizeof(struct socket),
	    SOCK_CACHE_SIZE, 0, 0, "socket", NULL, IPL_SOFTNET,
	    NULL, NULL, NULL);
	if (socket_cache == NULL)
		panic("soinit2: cannot initialize socket pool cache");
}
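/*
 * Usage sketch for caches like socket_cache above: objects come from
 * pool_cache_get() and go back via pool_cache_put().  The wrappers
 * socket_alloc()/socket_free() are hypothetical helpers, not part of
 * the code above.
 */
static struct socket *
socket_alloc(void)
{
	struct socket *so;

	/* PR_WAITOK: sleep until an object is available. */
	so = pool_cache_get(socket_cache, PR_WAITOK);
	memset(so, 0, sizeof(*so));
	return so;
}

static void
socket_free(struct socket *so)
{

	/* Return the object to the cache's per-CPU layer. */
	pool_cache_put(socket_cache, so);
}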
/*
 * Initialize the quota system.
 */
void
dqinit(void)
{

	mutex_init(&dqlock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&dqcv, "quota");
	dqhashtbl = hashinit(desiredvnodes, HASH_LIST, true, &dqhash);
	dquot_cache = pool_cache_init(sizeof(struct dquot), 0, 0, 0, "ufsdq",
	    NULL, IPL_NONE, NULL, NULL, NULL);
}
/*
 * `Attach' the random device.  We use the timing of this event as
 * another potential source of initial entropy.
 */
void
rndattach(int num)
{
	uint32_t c;

	/* Trap unwary players who don't call rnd_init() early. */
	KASSERT(rnd_ready);

	rnd_temp_buffer_cache = pool_cache_init(RND_TEMP_BUFFER_SIZE, 0, 0, 0,
	    "rndtemp", NULL, IPL_NONE, NULL, NULL, NULL);
	rnd_ctx_cache = pool_cache_init(sizeof(struct rnd_ctx), 0, 0, 0,
	    "rndctx", NULL, IPL_NONE, NULL, NULL, NULL);
	percpu_urandom_cprng = percpu_alloc(sizeof(struct cprng_strong *));

	/* Mix in another counter. */
	c = rndpseudo_counter();
	mutex_spin_enter(&rndpool_mtx);
	rndpool_add_data(&rnd_pool, &c, sizeof(c), 1);
	mutex_spin_exit(&rndpool_mtx);
}
static void
qc_init(vmem_t *vm, size_t qcache_max, int ipl)
{
	qcache_t *prevqc;
	struct pool_allocator *pa;
	int qcache_idx_max;
	int i;

	KASSERT((qcache_max & vm->vm_quantum_mask) == 0);
	if (qcache_max > (VMEM_QCACHE_IDX_MAX << vm->vm_quantum_shift)) {
		qcache_max = VMEM_QCACHE_IDX_MAX << vm->vm_quantum_shift;
	}
	vm->vm_qcache_max = qcache_max;
	pa = &vm->vm_qcache_allocator;
	memset(pa, 0, sizeof(*pa));
	pa->pa_alloc = qc_poolpage_alloc;
	pa->pa_free = qc_poolpage_free;
	pa->pa_pagesz = qc_poolpage_size(qcache_max);

	qcache_idx_max = qcache_max >> vm->vm_quantum_shift;
	prevqc = NULL;
	for (i = qcache_idx_max; i > 0; i--) {
		qcache_t *qc = &vm->vm_qcache_store[i - 1];
		size_t size = i << vm->vm_quantum_shift;

		qc->qc_vmem = vm;
		snprintf(qc->qc_name, sizeof(qc->qc_name), "%s-%zu",
		    vm->vm_name, size);
		qc->qc_cache = pool_cache_init(size,
		    ORDER2SIZE(vm->vm_quantum_shift), 0,
		    PR_NOALIGN | PR_NOTOUCH | PR_RECURSIVE /* XXX */,
		    qc->qc_name, pa, ipl, NULL, NULL, NULL);
		KASSERT(qc->qc_cache != NULL);	/* XXX */
		if (prevqc != NULL &&
		    qc->qc_cache->pc_pool.pr_itemsperpage ==
		    prevqc->qc_cache->pc_pool.pr_itemsperpage) {
			/*
			 * Same page geometry as the next size up:
			 * share that cache instead of keeping two.
			 */
			pool_cache_destroy(qc->qc_cache);
			vm->vm_qcache[i - 1] = prevqc;
			continue;
		}
		qc->qc_cache->pc_pool.pr_qcache = qc;
		vm->vm_qcache[i - 1] = qc;
		prevqc = qc;
	}
}
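/*
 * Lookup sketch for the allocation path, assuming the vm_qcache[]
 * array built above: a request size (already rounded up to the
 * quantum) indexes the array directly, so adjacent sizes that shared
 * one pool cache in qc_init() resolve to the same pool_cache_t.
 * qc_lookup() is a hypothetical helper, not part of the code above.
 */
static pool_cache_t
qc_lookup(vmem_t *vm, size_t size)
{

	KASSERT(size > 0 && size <= vm->vm_qcache_max);
	KASSERT((size & vm->vm_quantum_mask) == 0);
	return vm->vm_qcache[(size >> vm->vm_quantum_shift) - 1]->qc_cache;
}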
/*
 * Initialize UFS filesystems, done only once.
 */
void
ufs_init(void)
{
	if (ufs_initcount++ > 0)
		return;

	ufs_direct_cache = pool_cache_init(sizeof(struct direct), 0, 0, 0,
	    "ufsdir", NULL, IPL_NONE, NULL, NULL, NULL);

	ufs_ihashinit();
#ifdef QUOTA
	dqinit();
#endif
#ifdef UFS_DIRHASH
	ufsdirhash_init();
#endif
#ifdef UFS_EXTATTR
	ufs_extattr_init();
#endif
}
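/*
 * Matching teardown sketch, assuming the ufs_initcount counting
 * pattern above: only the last caller tears the cache down again.
 * The corresponding subsystem teardown calls are elided here.
 */
void
ufs_done(void)
{

	if (--ufs_initcount > 0)
		return;

	pool_cache_destroy(ufs_direct_cache);
}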
void
vfs_vnode_sysinit(void)
{
	int error;

	vnode_cache = pool_cache_init(sizeof(vnode_t), 0, 0, 0, "vnodepl",
	    NULL, IPL_NONE, NULL, NULL, NULL);
	KASSERT(vnode_cache != NULL);

	mutex_init(&vnode_free_list_lock, MUTEX_DEFAULT, IPL_NONE);
	TAILQ_INIT(&vnode_free_list);
	TAILQ_INIT(&vnode_hold_list);
	TAILQ_INIT(&vrele_list);

	mutex_init(&vrele_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&vdrain_cv, "vdrain");
	cv_init(&vrele_cv, "vrele");
	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, vdrain_thread,
	    NULL, NULL, "vdrain");
	KASSERT(error == 0);
	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, vrele_thread,
	    NULL, &vrele_lwp, "vrele");
	KASSERT(error == 0);
}
/*
 * Attach the CPU.
 * Discover interesting goop about the virtual address cache
 * (slightly funny place to do it, but this is where it is to be found).
 */
void
cpu_attach(device_t parent, device_t dev, void *aux)
{
	int node;
	long clk, sclk = 0;
	struct mainbus_attach_args *ma = aux;
	struct cpu_info *ci;
	const char *sep;
	register int i, l;
	int bigcache, cachesize;
	char buf[100];
	int totalsize = 0;
	int linesize, dcachesize, icachesize;

	/* tell them what we have */
	node = ma->ma_node;

	/*
	 * Allocate cpu_info structure if needed.
	 */
	ci = alloc_cpuinfo((u_int)node);

	/*
	 * Only do this on the boot cpu.  Other cpu's call
	 * cpu_reset_fpustate() from cpu_hatch() before they
	 * call into the idle loop.
	 * For other cpus, we need to call mi_cpu_attach()
	 * and complete setting up cpcb.
	 */
	if (ci->ci_flags & CPUF_PRIMARY) {
		fpstate_cache = pool_cache_init(sizeof(struct fpstate64),
		    SPARC64_BLOCK_SIZE, 0, 0, "fpstate", NULL, IPL_NONE,
		    NULL, NULL, NULL);
		cpu_reset_fpustate();
	}
#ifdef MULTIPROCESSOR
	else {
		mi_cpu_attach(ci);
		ci->ci_cpcb = lwp_getpcb(ci->ci_data.cpu_idlelwp);
	}
	for (i = 0; i < IPI_EVCNT_NUM; ++i)
		evcnt_attach_dynamic(&ci->ci_ipi_evcnt[i], EVCNT_TYPE_INTR,
		    NULL, device_xname(dev), ipi_evcnt_names[i]);
#endif
	evcnt_attach_dynamic(&ci->ci_tick_evcnt, EVCNT_TYPE_INTR, NULL,
	    device_xname(dev), "timer");
	mutex_init(&ci->ci_ctx_lock, MUTEX_SPIN, IPL_VM);

	clk = prom_getpropint(node, "clock-frequency", 0);
	if (clk == 0) {
		/*
		 * Try to find it in the OpenPROM root...
		 */
		clk = prom_getpropint(findroot(), "clock-frequency", 0);
	}
	if (clk) {
		/* Tell OS what frequency we run on */
		ci->ci_cpu_clockrate[0] = clk;
		ci->ci_cpu_clockrate[1] = clk / 1000000;
	}

	sclk = prom_getpropint(findroot(), "stick-frequency", 0);
	ci->ci_system_clockrate[0] = sclk;
	ci->ci_system_clockrate[1] = sclk / 1000000;

	snprintf(buf, sizeof buf, "%s @ %s MHz",
	    prom_getpropstring(node, "name"), clockfreq(clk));
	snprintf(cpu_model, sizeof cpu_model, "%s (%s)", machine_model, buf);

	aprint_normal(": %s, UPA id %d\n", buf, ci->ci_cpuid);
	aprint_naive("\n");

	if (ci->ci_system_clockrate[0] != 0) {
		aprint_normal_dev(dev, "system tick frequency %d MHz\n",
		    (int)ci->ci_system_clockrate[1]);
	}
	aprint_normal_dev(dev, "");

	bigcache = 0;

	icachesize = prom_getpropint(node, "icache-size", 0);
	if (icachesize > icache_size)
		icache_size = icachesize;
	linesize = l = prom_getpropint(node, "icache-line-size", 0);
	if (linesize > icache_line_size)
		icache_line_size = linesize;

	for (i = 0; (1 << i) < l && l; i++)
		/* void */;
	if ((1 << i) != l && l)
		panic("bad icache line size %d", l);
	totalsize = icachesize;
	if (totalsize == 0)
		totalsize = l *
		    prom_getpropint(node, "icache-nlines", 64) *
		    prom_getpropint(node, "icache-associativity", 1);

	cachesize = totalsize /
	    prom_getpropint(node, "icache-associativity", 1);
	bigcache = cachesize;

	sep = "";
	if (totalsize > 0) {
		aprint_normal("%s%ldK instruction (%ld b/l)", sep,
		    (long)totalsize/1024, (long)linesize);
		sep = ", ";
	}

	dcachesize = prom_getpropint(node, "dcache-size", 0);
	if (dcachesize > dcache_size)
		dcache_size = dcachesize;
	linesize = l = prom_getpropint(node, "dcache-line-size", 0);
	if (linesize > dcache_line_size)
		dcache_line_size = linesize;

	for (i = 0; (1 << i) < l && l; i++)
		/* void */;
	if ((1 << i) != l && l)
		panic("bad dcache line size %d", l);
	totalsize = dcachesize;
	if (totalsize == 0)
		totalsize = l *
		    prom_getpropint(node, "dcache-nlines", 128) *
		    prom_getpropint(node, "dcache-associativity", 1);

	cachesize = totalsize /
	    prom_getpropint(node, "dcache-associativity", 1);
	if (cachesize > bigcache)
		bigcache = cachesize;

	if (totalsize > 0) {
		aprint_normal("%s%ldK data (%ld b/l)", sep,
		    (long)totalsize/1024, (long)linesize);
		sep = ", ";
	}

	linesize = l = prom_getpropint(node, "ecache-line-size", 0);
	for (i = 0; (1 << i) < l && l; i++)
		/* void */;
	if ((1 << i) != l && l)
		panic("bad ecache line size %d", l);
	totalsize = prom_getpropint(node, "ecache-size", 0);
	if (totalsize == 0)
		totalsize = l *
		    prom_getpropint(node, "ecache-nlines", 32768) *
		    prom_getpropint(node, "ecache-associativity", 1);

	cachesize = totalsize /
	    prom_getpropint(node, "ecache-associativity", 1);
	if (cachesize > bigcache)
		bigcache = cachesize;

	if (totalsize > 0) {
		aprint_normal("%s%ldK external (%ld b/l)", sep,
		    (long)totalsize/1024, (long)linesize);
	}
	aprint_normal("\n");

	if (ecache_min_line_size == 0 ||
	    linesize < ecache_min_line_size)
		ecache_min_line_size = linesize;

	/*
	 * Now that we know the size of the largest cache on this CPU,
	 * re-color our pages.
	 */
	uvm_page_recolor(atop(bigcache)); /* XXX */
}
static void
ld_ataraid_attach(device_t parent, device_t self, void *aux)
{
	struct ld_ataraid_softc *sc = device_private(self);
	struct ld_softc *ld = &sc->sc_ld;
	struct ataraid_array_info *aai = aux;
	struct ataraid_disk_info *adi = NULL;
	const char *level;
	struct vnode *vp;
	char unklev[32];
	u_int i;

	ld->sc_dv = self;

	sc->sc_cbufpool = pool_cache_init(sizeof(struct cbuf), 0, 0, 0,
	    "ldcbuf", NULL, IPL_BIO, cbufpool_ctor, cbufpool_dtor, sc);
	sc->sc_sih_cookie = softint_establish(SOFTINT_BIO,
	    ld_ataraid_start_vstrategy, sc);

	sc->sc_aai = aai;	/* this data persists */

	ld->sc_maxxfer = MAXPHYS * aai->aai_width;	/* XXX */
	ld->sc_secperunit = aai->aai_capacity;
	ld->sc_secsize = 512;				/* XXX */
	ld->sc_maxqueuecnt = 128;			/* XXX */
	ld->sc_dump = ld_ataraid_dump;

	switch (aai->aai_level) {
	case AAI_L_SPAN:
		level = "SPAN";
		ld->sc_start = ld_ataraid_start_span;
		sc->sc_iodone = ld_ataraid_iodone_raid0;
		break;

	case AAI_L_RAID0:
		level = "RAID-0";
		ld->sc_start = ld_ataraid_start_raid0;
		sc->sc_iodone = ld_ataraid_iodone_raid0;
		break;

	case AAI_L_RAID1:
		level = "RAID-1";
		ld->sc_start = ld_ataraid_start_raid0;
		sc->sc_iodone = ld_ataraid_iodone_raid0;
		break;

	case AAI_L_RAID0 | AAI_L_RAID1:
		level = "RAID-10";
		ld->sc_start = ld_ataraid_start_raid0;
		sc->sc_iodone = ld_ataraid_iodone_raid0;
		break;

	default:
		snprintf(unklev, sizeof(unklev), "<unknown level 0x%x>",
		    aai->aai_level);
		level = unklev;
	}

	aprint_naive(": ATA %s array\n", level);
	aprint_normal(": %s ATA %s array\n",
	    ata_raid_type_name(aai->aai_type), level);

	if (ld->sc_start == NULL) {
		aprint_error_dev(ld->sc_dv, "unsupported array type\n");
		return;
	}

	/*
	 * We get a geometry from the device; use it.
	 */
	ld->sc_nheads = aai->aai_heads;
	ld->sc_nsectors = aai->aai_sectors;
	ld->sc_ncylinders = aai->aai_cylinders;

	/*
	 * Configure all the component disks.
	 */
	for (i = 0; i < aai->aai_ndisks; i++) {
		adi = &aai->aai_disks[i];
		vp = ata_raid_disk_vnode_find(adi);
		if (vp == NULL) {
			/*
			 * XXX This is bogus.  We should just mark the
			 * XXX component as FAILED, and write-back new
			 * XXX config blocks.
			 */
			break;
		}
		sc->sc_vnodes[i] = vp;
	}
	if (i == aai->aai_ndisks) {
		ld->sc_flags = LDF_ENABLED;
		goto finish;
	}

	for (i = 0; i < aai->aai_ndisks; i++) {
		vp = sc->sc_vnodes[i];
		sc->sc_vnodes[i] = NULL;
		if (vp != NULL)
			(void) vn_close(vp, FREAD|FWRITE, NOCRED);
	}

 finish:
#if NBIO > 0
	if (bio_register(self, ld_ataraid_bioctl) != 0)
		panic("%s: bioctl registration failed\n",
		    device_xname(ld->sc_dv));
#endif
	SIMPLEQ_INIT(&sc->sc_cbufq);
	ldattach(ld);
}