/*
 * Publish every Insomnia sysctl OID with the kernel sysctl tree.
 * The root node is registered first so that its children attach cleanly.
 */
void sysctl_register()
{
	struct sysctl_oid *const oids[] = {
		&sysctl__kern_insomnia,
		&sysctl__kern_insomnia_lidsleep,
		&sysctl__kern_insomnia_ac_state,
		&sysctl__kern_insomnia_battery_state,
		&sysctl__kern_insomnia_debug,
	};
	unsigned i;

	for (i = 0; i < sizeof(oids) / sizeof(oids[0]); i++)
		sysctl_register_oid(oids[i]);
}
/**
 * ntfs_debug_init - initialize debugging for ntfs
 *
 * Initialize the error buffer lock and if compiled with DEBUG, register our
 * sysctl.
 *
 * Note we cannot use ntfs_debug(), ntfs_warning(), and ntfs_error() before
 * this function has been called.
 */
void ntfs_debug_init(void)
{
	/* Spin mutex guarding the shared ntfs error-message buffer. */
	mtx_init(&ntfs_err_buf_lock, "ntfs err buf lock", NULL, MTX_SPIN);
#ifdef DEBUG
	/* Register our sysctl (vfs.generic.ntfs.*) in debug builds only. */
	sysctl_register_oid(&sysctl__vfs_generic_ntfs);
	sysctl_register_oid(&sysctl__vfs_generic_ntfs_debug_messages);
#endif
}
static void fuse_sysctl_macfuse_start(void) { int i; sysctl_register_oid(&sysctl__macfuse); for (i = 0; fuse_sysctl_list_macfuse[i]; i++) { sysctl_register_oid(fuse_sysctl_list_macfuse[i]); } }
/*
 * mac_count_init_bsd
 *
 * Register the security.mac.count and security.mac.retcontrol sysctls for
 * this MAC policy.  The generated header "count_reg.h" is included INSIDE
 * the function body on purpose — presumably it expands to one registration
 * statement per counted MAC entry point (TODO confirm against the
 * generator).  'conf' is accepted for the policy-init signature but is not
 * used here.
 */
static void mac_count_init_bsd(struct mac_policy_conf *conf)
{
	sysctl_register_oid(&sysctl__security_mac_count);
	sysctl_register_oid(&sysctl__security_mac_retcontrol);
	/* Generated per-entry-point registrations expand in-line here. */
#include "count_reg.h"
}
/*
 * Set up the auto-throttler: create a work loop and perf timer, enumerate
 * the machine's CPUs through IOKit, arm the timer and publish the
 * kern.cputhrottle_* sysctls.
 *
 * Returns true on success (or if already set up), false otherwise.
 * NOTE(review): workLoop/perfTimer created here are not released on the
 * later failure paths — confirm the destructor/teardown reclaims them.
 */
bool AutoThrottler::setup(OSObject* owner)
{
	if (setupDone)
		return true;

	workLoop = IOWorkLoop::workLoop();
	if (workLoop == 0)
		return false;

	perfTimer = IOTimerEventSource::timerEventSource(owner,
	    (IOTimerEventSource::Action) &perfTimerWrapper);
	if (perfTimer == 0)
		return false;

	/* from Superhai (modified by mercurysquad) */
	cpu_count = 0;
	OSDictionary* service;
	mach_timespec_t serviceTimeout = { 60, 0 }; // in seconds
	totalTimerEvents = 0;

	IOService* firstCPU = IOService::waitForService(
	    IOService::serviceMatching("IOCPU"), &serviceTimeout);
	if (!firstCPU) {
		warn("IOKit CPUs not found. Auto-throttle may not work.\n");
		return false;
	} else {
		// we got first cpu, so the others should also be available by now. get them
		service = IOService::serviceMatching("IOCPU");
	}

	OSIterator* iterator = IOService::getMatchingServices(service);
	if (!iterator) {
		warn("IOKit CPU iterator couldn't be created. Auto-throttle may not work.\n");
		return false;
	}

	IOCPU* cpu;
	while ((cpu = OSDynamicCast(IOCPU, iterator->getNextObject()))) {
		/*
		 * BUG FIX: the original stored into mach_cpu[cpu_count] BEFORE
		 * comparing against max_cpus (and compared with '>', post-
		 * increment), so machines with more CPUs than the array holds
		 * wrote past the end.  Check the bound before the store.
		 */
		if (cpu_count >= max_cpus)
			break;
		mach_cpu[cpu_count++] = cpu->getMachProcessor();
	}
	/* getMatchingServices returns a retained iterator; drop our reference
	 * (the original leaked it). */
	iterator->release();

	selfHost = host_priv_self();
	if (workLoop->addEventSource(perfTimer) != kIOReturnSuccess)
		return false;

	/* Start in the lowest-performance P-state. */
	currentPState = NumberOfPStates - 1;
	perfTimer->setTimeoutMS(throttleQuantum * (1 + currentPState));
	clock_get_uptime(&lastTime);

	if (!targetCPULoad)
		targetCPULoad = defaultTargetLoad; // % x10

	sysctl_register_oid(&sysctl__kern_cputhrottle_targetload);
	sysctl_register_oid(&sysctl__kern_cputhrottle_auto);
	setupDone = true;
	return true;
}
void fuse_sysctl_start(void) { int i; #if OSXFUSE_ENABLE_MACFUSE_MODE osxfuse_lock_group = lck_grp_alloc_init("osxfuse", NULL); osxfuse_sysctl_lock = lck_mtx_alloc_init(osxfuse_lock_group, NULL); #endif sysctl_register_oid(&sysctl__osxfuse); for (i = 0; fuse_sysctl_list[i]; i++) { sysctl_register_oid(fuse_sysctl_list[i]); } }
/*
 * spl_kstat_init
 *
 * Publish the root "kstat" sysctl node; individual kstats created later
 * attach their OIDs underneath it.
 */
void spl_kstat_init()
{
	/*
	 * Create the kstat root OID
	 */
	sysctl_register_oid(&sysctl__kstat);
}
/*
 * Register sysctl table.
 *
 * Walk the NULL-terminated 'table' of OID pointers, registering each one,
 * and return the table itself as the opaque header used for later
 * unregistration.  'arg' is unused.
 */
cfs_sysctl_table_header_t *
cfs_register_sysctl_table (cfs_sysctl_table_t *table, int arg)
{
	cfs_sysctl_table_t *entry = table;

	while (*entry != NULL)
		sysctl_register_oid(*entry++);
	return table;
}
/*
 * Driver start: build the P-state table, expose the throttling sysctls,
 * optionally throttle to the Info.plist default P-state, and optionally
 * enable the auto-throttler.  Returns false if the superclass start or
 * P-state table creation fails.
 */
bool com_reidburke_air_IntelEnhancedSpeedStep::start(IOService *provider)
{
	bool res = super::start(provider);
	if (!res)
		return false;
	dbg("Starting\n");

	/* Create PState tables */
	if (!createPStateTable(PStates, &NumberOfPStates))
		return false;

	/*
	 * Set the frequency/voltage lists for sysctl.  strncpy() does not
	 * NUL-terminate when the source fills the buffer, so terminate
	 * explicitly (assumes both destinations are 1024-byte buffers, as
	 * the bound implies — TODO confirm declarations).
	 */
	char* freqList = getFreqList();
	strncpy(frequencyList, freqList, 1024);
	frequencyList[1023] = '\0';
	delete[] freqList;

	char* voltageList = getVoltageList(true);
	strncpy(originalVoltages, voltageList, 1024);
	originalVoltages[1023] = '\0';
	delete[] voltageList;

	sysctl_register_oid(&sysctl__kern_cputhrottle_curfreq);
	sysctl_register_oid(&sysctl__kern_cputhrottle_curvolt);
	sysctl_register_oid(&sysctl__kern_cputhrottle_freqs);
	sysctl_register_oid(&sysctl__kern_cputhrottle_usage);
	sysctl_register_oid(&sysctl__kern_cputhrottle_avgfreq);
	sysctl_register_oid(&sysctl__kern_cputhrottle_factoryvolts);
	sysctl_register_oid(&sysctl__kern_cputhrottle_totalthrottles);
	sysctl_register_oid(&sysctl__kern_cputhrottle_ctl);

	/*
	 * If a default PState was specified in Info.plist, throttle to it.
	 * Guard with >= 0 (not just != -1) so any negative value is rejected;
	 * the original also issued the identical throttleAllCPUs() call twice
	 * in a row — once suffices.
	 */
	if (DefaultPState >= 0 && DefaultPState < NumberOfPStates) {
		dbg("Throttling to default PState %d as specified in Info.plist\n", DefaultPState);
		throttleAllCPUs(&PStates[DefaultPState]);
	}

	if ( autostart != 0 && autostart->unsigned8BitValue() == 1 ) {
		// Now turn on our auto-throttler
		if (Throttler->setup((OSObject*) Throttler) == false)
			warn("Auto-throttler could not be setup, start it manually later.\n");
		else
			Throttler->setEnabled(true);
	}
	return true;
}
/******************************************************************************
 * Generic MIB initialisation.
 *
 * This is a hack, and should be replaced with SYSINITs
 * at some point.
 */
void
sysctl_mib_init(void)
{
	/* Cache the CPU identification values backing the hw.* MIB. */
	cputype = cpu_type();
	cpusubtype = cpu_subtype();
	cputhreadtype = cpu_threadtype();
#if defined(__i386__) || defined (__x86_64__)
	/* k64Bit capability bit set => processor is 64-bit capable. */
	cpu64bit = (_get_cpu_capabilities() & k64Bit) == k64Bit;
#else
#error Unsupported arch
#endif

	/*
	 * Populate the optional portion of the hw.* MIB.
	 *
	 * XXX This could be broken out into parts of the code
	 * that actually directly relate to the functions in
	 * question.
	 */

	/* hw.cputhreadtype is only published when a thread type is reported. */
	if (cputhreadtype != CPU_THREADTYPE_NONE) {
		sysctl_register_oid(&sysctl__hw_cputhreadtype);
	}

#if defined (__i386__) || defined (__x86_64__)
	/* hw.cpufamily */
	cpufamily = cpuid_cpufamily();

	/* hw.cacheconfig — per-level values from ml_cpu_cache_sharing();
	 * presumably CPUs sharing each cache level. Terminated with 0. */
	cacheconfig[0] = ml_cpu_cache_sharing(0);
	cacheconfig[1] = ml_cpu_cache_sharing(1);
	cacheconfig[2] = ml_cpu_cache_sharing(2);
	cacheconfig[3] = ml_cpu_cache_sharing(3);
	cacheconfig[4] = 0;

	/* hw.cachesize — per-level sizes, 0-terminated likewise. */
	cachesize[0] = ml_cpu_cache_size(0);
	cachesize[1] = ml_cpu_cache_size(1);
	cachesize[2] = ml_cpu_cache_size(2);
	cachesize[3] = ml_cpu_cache_size(3);
	cachesize[4] = 0;

	/* hw.packages — level-0 sharing rounded to whole packages of
	 * thread_count logical CPUs. */
	packages = roundup(ml_cpu_cache_sharing(0), cpuid_info()->thread_count)
	    / cpuid_info()->thread_count;
#else
#error unknown architecture
#endif /* !__i386__ && !__x86_64 && !__arm__ */
}
/*
 * Kext entry point: publish the sample sysctl hierarchy — the standalone
 * kern.* variable, then the TREE node, its variable, the SUBTREE node and
 * all of its variables — and log that the kext loaded.
 */
kern_return_t OldSysctl_start (kmod_info_t * ki, void * d)
{
	struct sysctl_oid *const oids[] = {
		&sysctl__kern_VAR_KERN_A,
		&sysctl__TREE,
		&sysctl__TREE_VAR_TREE_B,
		&sysctl__TREE_SUBTREE,
		&sysctl__TREE_SUBTREE_VAR_SUBTREE_C,
		&sysctl__TREE_SUBTREE_VAR_SUBTREE_D,
		&sysctl__TREE_SUBTREE_VAR_SUBTREE_E,
		&sysctl__TREE_SUBTREE_VAR_SUBTREE_F,
		&sysctl__TREE_SUBTREE_VAR_SUBTREE_G,
	};
	unsigned n;

	/* Order matters: each parent node precedes its children. */
	for (n = 0; n < sizeof(oids) / sizeof(oids[0]); n++)
		sysctl_register_oid(oids[n]);

	uprintf("KEXT:%s has been loaded!\n", ki->name);
	printf("KEXT:%s has been loaded!\n", ki->name);
	return KERN_SUCCESS;
}
/* start function for Insomnia, fixed send_event to match other code */
bool Insomnia::start(IOService* provider)
{
	IOLog("Insomnia:start\n");
	/* Assume the lid starts out open. */
	lastLidState = true;

	if (!super::start(provider)) {
		IOLog("Insomnia::start: super::start failed\n");
		return false;
	}

	/* Publish the kern.lidsleep control. */
	sysctl_register_oid(&sysctl__kern_lidsleep);

	/* Disable clamshell (lid-close) sleep from the outset. */
	Insomnia::send_event(kIOPMDisableClamshell);// | kIOPMPreventSleep);
	return true;
}
/*
 * Initialise a kstat tree-node OID under 'parent', register it, and push
 * the node onto the global tree_nodes list.  The node keeps its own copy
 * of 'name'; the OID's name field points into that copy.
 */
static void init_oid_tree_node(struct sysctl_oid_list* parent, char *name,
    sysctl_tree_node_t* node)
{
	struct sysctl_oid *oid = &node->tn_oid;

	strlcpy(node->tn_kstat_name, name, KSTAT_STRLEN);

	/* Describe a writable CTLTYPE_NODE whose children live in
	 * node->tn_children. */
	oid->oid_parent = parent;
	oid->oid_link.sle_next = NULL;
	oid->oid_number = OID_AUTO;
	oid->oid_arg2 = 0;
	oid->oid_name = node->tn_kstat_name;
	oid->oid_descr = "";
	oid->oid_version = SYSCTL_OID_VERSION;
	oid->oid_refcnt = 0;
	oid->oid_handler = NULL;
	oid->oid_kind = CTLTYPE_NODE|CTLFLAG_RW|CTLFLAG_OID2;
	oid->oid_fmt = "N";
	oid->oid_arg1 = (void*)(&node->tn_children);

	sysctl_register_oid(oid);

	/* Prepend to the global list of tree nodes. */
	node->tn_next = tree_nodes;
	tree_nodes = node;
}
/* Register a new filesystem type in the global table */
static int
vfs_register(struct vfsconf *vfc)
{
	struct sysctl_oid *oidp;
	struct vfsops *vfsops;
	static int once;

	/* One-time global setup of the null-vattr template. */
	if (!once) {
		vattr_null(&va_null);
		once = 1;
	}

	/* Reject filesystems built against a different VFS ABI. */
	if (vfc->vfc_version != VFS_VERSION) {
		printf("ERROR: filesystem %s, unsupported ABI version %x\n",
		    vfc->vfc_name, vfc->vfc_version);
		return (EINVAL);
	}
	/* No duplicate names. */
	if (vfs_byname(vfc->vfc_name) != NULL)
		return EEXIST;

	/* Assign the next type number and link into the global list. */
	vfc->vfc_typenum = maxvfsconf++;
	TAILQ_INSERT_TAIL(&vfsconf, vfc, vfc_list);

	/*
	 * If this filesystem has a sysctl node under vfs
	 * (i.e. vfs.xxfs), then change the oid number of that node to
	 * match the filesystem's type number. This allows user code
	 * which uses the type number to read sysctl variables defined
	 * by the filesystem to continue working. Since the oids are
	 * in a sorted list, we need to make sure the order is
	 * preserved by re-registering the oid after modifying its
	 * number.
	 */
	sysctl_lock();
	SLIST_FOREACH(oidp, &sysctl__vfs_children, oid_link)
		if (strcmp(oidp->oid_name, vfc->vfc_name) == 0) {
			sysctl_unregister_oid(oidp);
			oidp->oid_number = vfc->vfc_typenum;
			sysctl_register_oid(oidp);
			break;
		}
	sysctl_unlock();

	/*
	 * Initialise unused ``struct vfsops'' fields, to use
	 * the vfs_std*() functions. Note, we need the mount
	 * and unmount operations, at the least. The check
	 * for vfsops available is just a debugging aid.
	 */
	KASSERT(vfc->vfc_vfsops != NULL,
	    ("Filesystem %s has no vfsops", vfc->vfc_name));
	/*
	 * Check the mount and unmount operations.
	 */
	vfsops = vfc->vfc_vfsops;
	KASSERT(vfsops->vfs_mount != NULL,
	    ("Filesystem %s has no mount op", vfc->vfc_name));
	KASSERT(vfsops->vfs_unmount != NULL,
	    ("Filesystem %s has no unmount op", vfc->vfc_name));

	/* Fill every remaining NULL slot with its vfs_std*() default. */
	if (vfsops->vfs_root == NULL)
		/* return file system's root vnode */
		vfsops->vfs_root = vfs_stdroot;
	if (vfsops->vfs_quotactl == NULL)
		/* quota control */
		vfsops->vfs_quotactl = vfs_stdquotactl;
	if (vfsops->vfs_statfs == NULL)
		/* return file system's status */
		vfsops->vfs_statfs = vfs_stdstatfs;
	if (vfsops->vfs_sync == NULL)
		/*
		 * flush unwritten data (nosync)
		 * file systems can use vfs_stdsync
		 * explicitly by setting it in the
		 * vfsop vector.
		 */
		vfsops->vfs_sync = vfs_stdnosync;
	if (vfsops->vfs_vget == NULL)
		/* convert an inode number to a vnode */
		vfsops->vfs_vget = vfs_stdvget;
	if (vfsops->vfs_fhtovp == NULL)
		/* turn an NFS file handle into a vnode */
		vfsops->vfs_fhtovp = vfs_stdfhtovp;
	if (vfsops->vfs_checkexp == NULL)
		/* check if file system is exported */
		vfsops->vfs_checkexp = vfs_stdcheckexp;
	if (vfsops->vfs_init == NULL)
		/* file system specific initialisation */
		vfsops->vfs_init = vfs_stdinit;
	if (vfsops->vfs_uninit == NULL)
		/* file system specific uninitialisation */
		vfsops->vfs_uninit = vfs_stduninit;
	if (vfsops->vfs_extattrctl == NULL)
		/* extended attribute control */
		vfsops->vfs_extattrctl = vfs_stdextattrctl;
	if (vfsops->vfs_sysctl == NULL)
		vfsops->vfs_sysctl = vfs_stdsysctl;

	/*
	 * Call init function for this VFS...
	 */
	(*(vfc->vfc_vfsops->vfs_init))(vfc);

	return 0;
}
/*
 * kstat_create
 *
 * Allocate and partially initialise a new kstat, and create the sysctl
 * node (under the kstat.* tree) that represents it.  The caller completes
 * initialisation (ks_lock, ks_update, ks_private, ks_snapshot, ks_data as
 * documented below) and then calls kstat_install() to publish the data.
 *
 * Returns the new kstat, or NULL if the header allocation failed.
 */
kstat_t *
kstat_create(char *ks_module, int ks_instance, char *ks_name, char *ks_class,
    uchar_t ks_type, ulong_t ks_ndata, uchar_t ks_flags)
{
	kstat_t *ksp = 0;
	ekstat_t *e = 0;
	size_t size = 0;

	/*
	 * Allocate memory for the new kstat header.
	 */
	size = sizeof (ekstat_t);
	e = (ekstat_t *)kalloc(size);
	if (e == NULL) {
		/*
		 * BUG FIX: the original called bzero(e, size) BEFORE this
		 * NULL check, dereferencing NULL on allocation failure and
		 * making the check dead code.  Check first, then zero.
		 */
		cmn_err(CE_NOTE, "kstat_create('%s', %d, '%s'): "
		    "insufficient kernel memory",
		    ks_module, ks_instance, ks_name);
		return (NULL);
	}
	bzero(e, size);
	e->e_size = size;
	cv_init(&e->e_cv, NULL, CV_DEFAULT, NULL);

	/*
	 * Initialize as many fields as we can. The caller may reset
	 * ks_lock, ks_update, ks_private, and ks_snapshot as necessary.
	 * Creators of virtual kstats may also reset ks_data. It is
	 * also up to the caller to initialize the kstat data section,
	 * if necessary. All initialization must be complete before
	 * calling kstat_install().
	 */
	ksp = &e->e_ks;

	ksp->ks_crtime = gethrtime();
	kstat_set_string(ksp->ks_module, ks_module);
	ksp->ks_instance = ks_instance;
	kstat_set_string(ksp->ks_name, ks_name);
	ksp->ks_type = ks_type;
	kstat_set_string(ksp->ks_class, ks_class);
	/* Marked INVALID until kstat_install() clears the flag. */
	ksp->ks_flags = ks_flags | KSTAT_FLAG_INVALID;
	ksp->ks_ndata = ks_ndata;
	ksp->ks_snaptime = ksp->ks_crtime;
	ksp->ks_lock = 0;

	/*
	 * Initialise the sysctl that represents this kstat: a CTLTYPE_NODE
	 * named after the kstat, parented under kstat.<module>.<class>.
	 */
	e->e_children.slh_first = 0;
	e->e_oid.oid_parent = get_kstat_parent(&sysctl__kstat_children,
	    ksp->ks_module, ksp->ks_class);
	e->e_oid.oid_link.sle_next = 0;
	e->e_oid.oid_number = OID_AUTO;
	e->e_oid.oid_arg2 = 0;
	e->e_oid.oid_name = ksp->ks_name;
	e->e_oid.oid_descr = "";
	e->e_oid.oid_version = SYSCTL_OID_VERSION;
	e->e_oid.oid_refcnt = 0;
	e->e_oid.oid_handler = 0;
	e->e_oid.oid_kind = CTLTYPE_NODE|CTLFLAG_RW|CTLFLAG_OID2;
	e->e_oid.oid_fmt = "N";
	e->e_oid.oid_arg1 = (void*)(&e->e_children);
	sysctl_register_oid(&e->e_oid);

	return (ksp);
}
/*
 * kstat_install
 *
 * Publish a kstat created by kstat_create(): for NAMED kstats, build one
 * sysctl leaf OID per data item under the kstat's node and register each,
 * then clear KSTAT_FLAG_INVALID so the kstat is considered live.
 * Writable kstats get CTLFLAG_RW leaves; everything else is read-only.
 */
void kstat_install(kstat_t *ksp)
{
	/* ekstat_t embeds the kstat_t as its first member, so this cast
	 * recovers the extended header allocated in kstat_create(). */
	ekstat_t *e = (ekstat_t*)ksp;
	kstat_named_t *named_base = 0;
	sysctl_leaf_t *vals_base = 0;
	sysctl_leaf_t *params = 0;
	int oid_permissions = CTLFLAG_RD;

	if (ksp->ks_type == KSTAT_TYPE_NAMED) {
		if (ksp->ks_flags & KSTAT_FLAG_WRITABLE) {
			oid_permissions |= CTLFLAG_RW;
		}

		// Create the leaf node OID objects
		// NOTE(review): kalloc() result is not checked here; a failed
		// allocation would fault in the bzero below.
		e->e_vals = (sysctl_leaf_t*)kalloc(ksp->ks_ndata * sizeof(sysctl_leaf_t));
		bzero(e->e_vals, ksp->ks_ndata * sizeof(sysctl_leaf_t));
		e->e_num_vals = ksp->ks_ndata;

		named_base = (kstat_named_t*)(ksp->ks_data);
		vals_base = e->e_vals;

		for (int i=0; i < ksp->ks_ndata; i++) {
			int oid_valid = 1;

			kstat_named_t *named = &named_base[i];
			sysctl_leaf_t *val = &vals_base[i];

			// Perform basic initialisation of the sysctl.
			//
			// The sysctl will be kstat.<module>.<class>.<name>.<data name>
			snprintf(val->l_name, KSTAT_STRLEN, "%s", named->name);

			val->l_oid.oid_parent = &e->e_children;
			val->l_oid.oid_link.sle_next = 0;
			val->l_oid.oid_number = OID_AUTO;
			val->l_oid.oid_arg2 = 0;
			val->l_oid.oid_name = val->l_name;
			val->l_oid.oid_descr = "";
			val->l_oid.oid_version = SYSCTL_OID_VERSION;
			val->l_oid.oid_refcnt = 0;

			// Based on the kstat type flags, provide location
			// of data item and associated type and handler
			// flags to the sysctl.
			switch (named->data_type) {
			case KSTAT_DATA_INT64:
				// 64-bit values go through a custom handler
				// that receives a heap-allocated params block
				// (owned by the OID via oid_arg1).
				params = (sysctl_leaf_t*)kalloc(sizeof(sysctl_leaf_t));
				params->l_named = named;
				params->l_ksp = ksp;
				val->l_oid.oid_handler = kstat_handle_i64;
				val->l_oid.oid_kind = CTLTYPE_QUAD|oid_permissions|CTLFLAG_OID2;
				val->l_oid.oid_fmt = "Q";
				val->l_oid.oid_arg1 = (void*)params;
				params = 0;
				break;
			case KSTAT_DATA_UINT64:
				params = (sysctl_leaf_t*)kalloc(sizeof(sysctl_leaf_t));
				params->l_named = named;
				params->l_ksp = ksp;
				val->l_oid.oid_handler = kstat_handle_ui64;
				val->l_oid.oid_kind = CTLTYPE_QUAD|oid_permissions|CTLFLAG_OID2;
				val->l_oid.oid_fmt = "Q";
				val->l_oid.oid_arg1 = (void*)params;
				// NOTE(review): unlike the INT64 case, params is
				// not reset to 0 here; harmless as written since
				// every later use reassigns it first.
				break;
			case KSTAT_DATA_INT32:
				// 32-bit and long values point the stock
				// handlers directly at the kstat data.
				val->l_oid.oid_handler = sysctl_handle_int;
				val->l_oid.oid_kind = CTLTYPE_INT|oid_permissions|CTLFLAG_OID2;
				val->l_oid.oid_fmt = "I";
				val->l_oid.oid_arg1 = &named->value.i32;
				break;
			case KSTAT_DATA_UINT32:
				val->l_oid.oid_handler = sysctl_handle_int;
				val->l_oid.oid_kind = CTLTYPE_INT|oid_permissions|CTLFLAG_OID2;
				val->l_oid.oid_fmt = "IU";
				val->l_oid.oid_arg1 = &named->value.ui32;
				break;
			case KSTAT_DATA_LONG:
				// NOTE(review): oid_kind uses CTLTYPE_INT with a
				// long handler and "L" format — confirm this is
				// intentional on this platform.
				val->l_oid.oid_handler = sysctl_handle_long;
				val->l_oid.oid_kind = CTLTYPE_INT|oid_permissions|CTLFLAG_OID2;
				val->l_oid.oid_fmt = "L";
				val->l_oid.oid_arg1 = &named->value.l;
				break;
			case KSTAT_DATA_ULONG:
				val->l_oid.oid_handler = sysctl_handle_long;
				val->l_oid.oid_kind = CTLTYPE_INT|oid_permissions|CTLFLAG_OID2;
				val->l_oid.oid_fmt = "L";
				val->l_oid.oid_arg1 = &named->value.ul;
				break;
			case KSTAT_DATA_STRING:
				params = (sysctl_leaf_t*)kalloc(sizeof(sysctl_leaf_t));
				params->l_named = named;
				params->l_ksp = ksp;
				val->l_oid.oid_handler = kstat_handle_string;
				val->l_oid.oid_kind = CTLTYPE_STRING|oid_permissions|CTLFLAG_OID2;
				val->l_oid.oid_fmt = "S";
				val->l_oid.oid_arg1 = (void*)params;
				break;
			case KSTAT_DATA_CHAR:
			default:
				// Unsupported data type: skip registration.
				oid_valid = 0;
				break;
			}

			// Finally publish the OID, provided that there were no issues initialising it.
			if (oid_valid) {
				sysctl_register_oid(&val->l_oid);
				val->l_oid_registered = 1;
			} else {
				val->l_oid_registered = 0;
			}
		}
	}

	/* The kstat is now live. */
	ksp->ks_flags &= ~KSTAT_FLAG_INVALID;
}
/*
 * ia64_mca_save_state
 *
 * Drain all pending SAL error records of the given type: fetch each record
 * into the shared info block, copy it into a freshly allocated buffer,
 * publish it as a hw.mca.<seqnr> sysctl, update the first/last/count
 * bookkeeping, and clear the record from SAL.  The while(1) loop exits
 * via the early return when SAL reports no further records
 * (sal_status < 0).
 */
void
ia64_mca_save_state(int type)
{
	struct ia64_sal_result result;
	struct mca_record_header *hdr;
	struct sysctl_oid *oidp;
	char *name, *state;
	uint64_t seqnr;
	size_t recsz, totsz;

	/*
	 * Don't try to get the state if we couldn't get the size of
	 * the state information previously.
	 */
	if (mca_info_size[type] == -1)
		return;

	while (1) {
		mtx_lock_spin(&mca_info_block_lock);
		result = ia64_sal_entry(SAL_GET_STATE_INFO, type, 0,
		    mca_info_block, 0, 0, 0, 0);
		if (result.sal_status < 0) {
			/* No (more) records: done. */
			mtx_unlock_spin(&mca_info_block_lock);
			return;
		}

		hdr = (struct mca_record_header *)mca_info_block;
		recsz = hdr->rh_length;
		seqnr = hdr->rh_seqnr;

		/* Drop the spin lock while we allocate (M_WAITOK may sleep). */
		mtx_unlock_spin(&mca_info_block_lock);

		/* One allocation holds the oid, the record copy, and the
		 * name string (32 bytes is ample for a decimal seqnr). */
		totsz = sizeof(struct sysctl_oid) + recsz + 32;
		oidp = malloc(totsz, M_MCA, M_WAITOK|M_ZERO);
		state = (char*)(oidp + 1);
		name = state + recsz;
		sprintf(name, "%lld", (long long)seqnr);

		mtx_lock_spin(&mca_info_block_lock);

		/*
		 * If the info block doesn't have our record anymore because
		 * we temporarily unlocked it, get it again from SAL. I assume
		 * that it's possible that we could get a different record.
		 * I expect this to happen in a SMP configuration where the
		 * record has been cleared by a different processor. So, if
		 * we get a different record we simply abort with this record
		 * and start over.
		 */
		if (seqnr != hdr->rh_seqnr) {
			result = ia64_sal_entry(SAL_GET_STATE_INFO, type, 0,
			    mca_info_block, 0, 0, 0, 0);
			if (seqnr != hdr->rh_seqnr) {
				/* Different record came back: discard our
				 * buffer and restart the loop. */
				mtx_unlock_spin(&mca_info_block_lock);
				free(oidp, M_MCA);
				continue;
			}
		}

		bcopy((char*)mca_info_block, state, recsz);

		/* Publish the record as an opaque, read-only, dynamic OID
		 * named after its sequence number. */
		oidp->oid_parent = &sysctl__hw_mca_children;
		oidp->oid_number = OID_AUTO;
		oidp->oid_kind = CTLTYPE_OPAQUE|CTLFLAG_RD|CTLFLAG_DYN;
		oidp->oid_arg1 = state;
		oidp->oid_arg2 = recsz;
		oidp->oid_name = name;
		oidp->oid_handler = mca_sysctl_handler;
		oidp->oid_fmt = "S,MCA";
		oidp->descr = "Error record";

		sysctl_register_oid(oidp);

		/* Track the lowest/highest sequence numbers seen. */
		if (mca_count > 0) {
			if (seqnr < mca_first)
				mca_first = seqnr;
			else if (seqnr > mca_last)
				mca_last = seqnr;
		} else
			mca_first = mca_last = seqnr;

		mca_count++;

		/*
		 * Clear the state so that we get any other records when
		 * they exist.
		 */
		result = ia64_sal_entry(SAL_CLEAR_STATE_INFO, type, 0, 0, 0,
		    0, 0, 0);
		mtx_unlock_spin(&mca_info_block_lock);
	}
}
/******************************************************************************
 * Generic MIB initialisation.
 *
 * This is a hack, and should be replaced with SYSINITs
 * at some point.
 */
void
sysctl_mib_init(void)
{
	/* Cache the CPU identification values backing the hw.* MIB. */
	cputype = cpu_type();
	cpusubtype = cpu_subtype();
	cputhreadtype = cpu_threadtype();
#if defined(__i386__) || defined (__x86_64__)
	/* k64Bit capability bit set => processor is 64-bit capable. */
	cpu64bit = (_get_cpu_capabilities() & k64Bit) == k64Bit;
#elif defined(__arm__)
	kprintf("sysctl_mib_init: NEED ARM DEFINES\n");
#else
#error Unsupported arch
#endif

	/*
	 * Populate the optional portion of the hw.* MIB.
	 *
	 * XXX This could be broken out into parts of the code
	 * that actually directly relate to the functions in
	 * question.
	 */

	/* hw.cputhreadtype is only published when a thread type is reported. */
	if (cputhreadtype != CPU_THREADTYPE_NONE) {
		sysctl_register_oid(&sysctl__hw_cputhreadtype);
	}

#if defined (__i386__) || defined (__x86_64__)
	/* Derive each hw.optional.* feature flag from the capability mask;
	 * a flag is set only when ALL bits of the capability are present. */
#define is_capability_set(k) (((_get_cpu_capabilities() & (k)) == (k)) ? 1 : 0)
	mmx_flag = is_capability_set(kHasMMX);
	sse_flag = is_capability_set(kHasSSE);
	sse2_flag = is_capability_set(kHasSSE2);
	sse3_flag = is_capability_set(kHasSSE3);
	supplementalsse3_flag = is_capability_set(kHasSupplementalSSE3);
	sse4_1_flag = is_capability_set(kHasSSE4_1);
	sse4_2_flag = is_capability_set(kHasSSE4_2);
	x86_64_flag = is_capability_set(k64Bit);
	aes_flag = is_capability_set(kHasAES);
	avx1_0_flag = is_capability_set(kHasAVX1_0);
	rdrand_flag = is_capability_set(kHasRDRAND);
	f16c_flag = is_capability_set(kHasF16C);
	enfstrg_flag = is_capability_set(kHasENFSTRG);

	/* hw.cpufamily */
	cpufamily = cpuid_cpufamily();

	/* hw.cacheconfig — per-level values from ml_cpu_cache_sharing();
	 * presumably CPUs sharing each cache level. Terminated with 0. */
	cacheconfig[0] = ml_cpu_cache_sharing(0);
	cacheconfig[1] = ml_cpu_cache_sharing(1);
	cacheconfig[2] = ml_cpu_cache_sharing(2);
	cacheconfig[3] = ml_cpu_cache_sharing(3);
	cacheconfig[4] = 0;

	/* hw.cachesize — per-level sizes, 0-terminated likewise. */
	cachesize[0] = ml_cpu_cache_size(0);
	cachesize[1] = ml_cpu_cache_size(1);
	cachesize[2] = ml_cpu_cache_size(2);
	cachesize[3] = ml_cpu_cache_size(3);
	cachesize[4] = 0;

	/* hw.packages — level-0 sharing rounded to whole packages of
	 * thread_count logical CPUs. */
	packages = roundup(ml_cpu_cache_sharing(0),
	    cpuid_info()->thread_count) / cpuid_info()->thread_count;
#elif defined(__arm__)
	kprintf("sysctl_mib_init: shortcircuiting to finish, reimplement\n");
#else
#error unknown architecture
#endif /* !__i386__ && !__x86_64 && !__arm__ */
}
/*
 * Kext start: bring up the whole ZFS stack — sysctls, /dev/zfs (which
 * cascades into spa/dmu/arc init), VFS ops, the system taskq — derive a
 * hostid from the platform UUID if none is set, and register for IOMedia
 * removal notifications.
 *
 * NOTE(review): 'res' (super::start result) is not checked before the
 * initialisation below runs; everything executes even if the superclass
 * failed, and 'res' is only propagated at the end. Confirm intentional.
 */
bool net_lundman_zfs_zvol::start (IOService *provider)
{
	bool res = super::start(provider);

	IOLog("ZFS: Loading module ... \n");

	/* Publish the zfs sysctl root and the kext version leaf. */
	sysctl_register_oid(&sysctl__zfs);
	sysctl_register_oid(&sysctl__zfs_kext_version);

	/*
	 * Initialize /dev/zfs, this calls spa_init->dmu_init->arc_init-> etc
	 */
	zfs_ioctl_osx_init();

	/* registerService() allows zconfigd to match against the service */
	this->registerService();

	///sysctl_register_oid(&sysctl__debug_maczfs);
	//sysctl_register_oid(&sysctl__debug_maczfs_stalk);

	zfs_vfsops_init();

	/*
	 * When is the best time to start the system_taskq? It is strictly
	 * speaking not used by SPL, but by ZFS. ZFS should really start it?
	 */
	system_taskq_init();

	/*
	 * hostid is left as 0 on OSX, and left to be set if developers wish to
	 * use it. If it is 0, we will hash the hardware.uuid into a 32 bit
	 * value and set the hostid.
	 */
	if (!zone_get_hostid(NULL)) {
		uint32_t myhostid = 0;
		IORegistryEntry *ioregroot = IORegistryEntry::getRegistryRoot();
		if(ioregroot) {
			//IOLog("ioregroot is '%s'\n", ioregroot->getName(gIOServicePlane));
			IORegistryEntry *macmodel =
			    ioregroot->getChildEntry(gIOServicePlane);
			if(macmodel) {
				//IOLog("macmodel is '%s'\n", macmodel->getName(gIOServicePlane));
				OSObject *ioplatformuuidobj;
				//ioplatformuuidobj = ioregroot->getProperty("IOPlatformUUID", gIOServicePlane, kIORegistryIterateRecursively);
				ioplatformuuidobj = macmodel->getProperty(kIOPlatformUUIDKey);
				if(ioplatformuuidobj) {
					OSString *ioplatformuuidstr =
					    OSDynamicCast(OSString, ioplatformuuidobj);
					//IOLog("IOPlatformUUID is '%s'\n", ioplatformuuidstr->getCStringNoCopy());

					/* FNV-1a hash of the UUID string gives a
					 * stable 32-bit hostid for this machine. */
					myhostid = fnv_32a_str(
					    ioplatformuuidstr->getCStringNoCopy(),
					    FNV1_32A_INIT);

					sysctlbyname("kern.hostid", NULL, NULL,
					    &myhostid, sizeof(myhostid));
					printf("ZFS: hostid set to %08x from UUID '%s'\n",
					    myhostid,
					    ioplatformuuidstr->getCStringNoCopy());
				}
			}
		}
	}

	/* Watch for disk removals so pools can react to vanished vdevs. */
	disk_remove_notifier = addMatchingNotification(gIOTerminatedNotification,
	    serviceMatching("IOMedia"), IOkit_disk_removed_callback, this, NULL, 0);

	return res;
}