void cs_init(void)
{
#if MACH_ASSERT && __x86_64__
    panic_on_cs_killed = 1;
#endif /* MACH_ASSERT && __x86_64__ */
    PE_parse_boot_argn("panic_on_cs_killed", &panic_on_cs_killed,
                       sizeof (panic_on_cs_killed));

#if !SECURE_KERNEL
    int disable_cs_enforcement = 0;
    PE_parse_boot_argn("cs_enforcement_disable", &disable_cs_enforcement,
                       sizeof (disable_cs_enforcement));
    if (disable_cs_enforcement) {
        cs_enforcement_enable = 0;
    } else {
        int panic = 0;
        PE_parse_boot_argn("cs_enforcement_panic", &panic, sizeof(panic));
        cs_enforcement_panic = (panic != 0);
    }

    PE_parse_boot_argn("cs_debug", &cs_debug, sizeof (cs_debug));

#if !CONFIG_ENFORCE_LIBRARY_VALIDATION
    PE_parse_boot_argn("cs_library_val_enable", &cs_library_val_enable,
                       sizeof (cs_library_val_enable));
#endif
#endif /* !SECURE_KERNEL */

    lck_grp_attr_t *attr = lck_grp_attr_alloc_init();
    cs_lockgrp = lck_grp_alloc_init("KERNCS", attr);
}
void S5L8900X_framebuffer_init(void)
{
    char tempbuf[16];

    /*
     * Technically, iBoot should initialize this; this part hasn't been
     * reversed properly yet. If you're using a 16-bit panel, pass the
     * 'rgb565' boot-argument if you care about a working framebuffer.
     */
    PE_state.video.v_depth = 4 * (8);       // 32bpp
    if (PE_parse_boot_argn("rgb565", tempbuf, sizeof(tempbuf))) {
        PE_state.video.v_depth = 2 * (8);   // 16bpp
    }

    kprintf(KPRINTF_PREFIX "framebuffer initialized\n");

    /*
     * Enable early framebuffer.
     */
    //if (PE_parse_boot_argn("-early-fb-debug", tempbuf, sizeof(tempbuf))) {
    initialize_screen((void *) &PE_state.video, kPEAcquireScreen);
    //}

    if (PE_parse_boot_argn("-graphics-mode", tempbuf, sizeof(tempbuf))) {
        initialize_screen((void *) &PE_state.video, kPEGraphicsMode);
    } else {
        initialize_screen((void *) &PE_state.video, kPETextMode);
    }

    return;
}
bool FakeSMC::start(IOService *provider)
{
    if (!super::start(provider))
        return false;

    OSString *vendor = OSDynamicCast(OSString, getProperty(kFakeSMCFirmwareVendor));
    int arg_value = 1;

    if (PE_parse_boot_argn("-fakesmc-force-start", &arg_value, sizeof(arg_value))) {
        HWSensorsInfoLog("firmware vendor check disabled");
    }
    else if (vendor && vendor->isEqualTo("Apple")) {
        HWSensorsFatalLog("forbidding start on Apple hardware");
        return false;
    }

    if (!smcDevice->initAndStart(provider, this)) {
        HWSensorsInfoLog("failed to initialize SMC device");
        return false;
    }

    registerService();

    // Load keys from NVRAM
    if (PE_parse_boot_argn("-fakesmc-use-nvram", &arg_value, sizeof(arg_value))) {
        if (UInt32 count = smcDevice->loadKeysFromNVRAM())
            HWSensorsInfoLog("%d key%s loaded from NVRAM", count, count == 1 ? "" : "s");
        else
            HWSensorsInfoLog("NVRAM will be used to store system written keys...");
    }

    return true;
}
void PE_init_SocSupport_S5L8900X(void)
{
    gPESocDispatch.uart_getc = S5L8900X_getc;
    gPESocDispatch.uart_putc = S5L8900X_putc;
    gPESocDispatch.uart_init = S5L8900X_uart_init;

    gPESocDispatch.interrupt_init = S5L8900X_interrupt_init;
    gPESocDispatch.timebase_init = S5L8900X_timebase_init;

    gPESocDispatch.get_timebase = S5L8900X_get_timebase;

    gPESocDispatch.handle_interrupt = S5L8900X_handle_interrupt;

    gPESocDispatch.timer_value = S5L8900X_timer_value;
    gPESocDispatch.timer_enabled = S5L8900X_timer_enabled;

    gPESocDispatch.framebuffer_init = S5L8900X_framebuffer_init;

    char tempbuf[16];

    if (PE_parse_boot_argn("-avoid-uarts", tempbuf, sizeof(tempbuf))) {
        avoid_uarts = 1;
    }

    if (PE_parse_boot_argn("-force-uarts", tempbuf, sizeof(tempbuf))) {
        avoid_uarts = 0;
    }

    S5L8900X_framebuffer_init();
    S5L8900X_uart_init();

    PE_halt_restart = S5L8900X_halt_restart;
}
void StartIOKit( void * p1, void * p2, void * p3, void * p4 )
{
    IOPlatformExpertDevice *    rootNub;
    int                         debugFlags;

    if( PE_parse_boot_argn( "io", &debugFlags, sizeof (debugFlags) ))
        gIOKitDebug = debugFlags;

    if( PE_parse_boot_argn( "iotrace", &debugFlags, sizeof (debugFlags) ))
        gIOKitTrace = debugFlags;

    // Compat for boot-args
    gIOKitTrace |= (gIOKitDebug & kIOTraceCompatBootArgs);

    // Check for the log synchronous bit set in io
    if (gIOKitDebug & kIOLogSynchronous)
        debug_mode = true;

    //
    // Have to start IOKit environment before we attempt to start
    // the C++ runtime environment. At some stage we have to clean up
    // the initialisation path so that OS C++ can initialise independently
    // of IOKit basic service initialisation, or better, have IOLib stuff
    // initialise as basic OS services.
    //
    IOLibInit();
    OSlibkernInit();

    gIOProgressBackbufferKey  = OSSymbol::withCStringNoCopy(kIOProgressBackbufferKey);
    gIORemoveOnReadProperties = OSSet::withObjects((const OSObject **) &gIOProgressBackbufferKey, 1);

    interruptAccountingInit();

    rootNub = new IOPlatformExpertDevice;

    if( rootNub && rootNub->initWithArgs( p1, p2, p3, p4)) {
        rootNub->attach( 0 );

        /* If the bootstrap segment set up a function to record startup
         * extensions, call it now.
         */
        if (record_startup_extensions_function) {
            record_startup_extensions_function();
        }

        rootNub->registerService();

#if !NO_KEXTD
        /* Add a busy count to keep the registry busy until kextd has
         * completely finished launching. This is decremented when kextd
         * messages the kernel after the in-kernel linker has been
         * removed and personalities have been sent.
         */
        IOService::getServiceRoot()->adjustBusy(1);
#endif
    }
}
void telemetry_init(void)
{
    kern_return_t ret;
    uint32_t telemetry_notification_leeway;

    lck_grp_init(&telemetry_lck_grp, "telemetry group", LCK_GRP_ATTR_NULL);
    lck_mtx_init(&telemetry_mtx, &telemetry_lck_grp, LCK_ATTR_NULL);

    if (!PE_parse_boot_argn("telemetry_buffer_size", &telemetry_buffer.size, sizeof(telemetry_buffer.size))) {
        telemetry_buffer.size = TELEMETRY_DEFAULT_BUFFER_SIZE;
    }

    if (telemetry_buffer.size > TELEMETRY_MAX_BUFFER_SIZE)
        telemetry_buffer.size = TELEMETRY_MAX_BUFFER_SIZE;

    ret = kmem_alloc(kernel_map, &telemetry_buffer.buffer, telemetry_buffer.size, VM_KERN_MEMORY_DIAG);
    if (ret != KERN_SUCCESS) {
        kprintf("Telemetry: Allocation failed: %d\n", ret);
        return;
    }
    bzero((void *) telemetry_buffer.buffer, telemetry_buffer.size);

    if (!PE_parse_boot_argn("telemetry_notification_leeway", &telemetry_notification_leeway, sizeof(telemetry_notification_leeway))) {
        /*
         * By default, notify the user to collect the buffer when there is this much space left in the buffer.
         */
        telemetry_notification_leeway = TELEMETRY_DEFAULT_NOTIFY_LEEWAY;
    }
    if (telemetry_notification_leeway >= telemetry_buffer.size) {
        printf("telemetry: nonsensical telemetry_notification_leeway boot-arg %d changed to %d\n",
               telemetry_notification_leeway, TELEMETRY_DEFAULT_NOTIFY_LEEWAY);
        telemetry_notification_leeway = TELEMETRY_DEFAULT_NOTIFY_LEEWAY;
    }
    telemetry_buffer_notify_at = telemetry_buffer.size - telemetry_notification_leeway;

    if (!PE_parse_boot_argn("telemetry_sample_rate", &telemetry_sample_rate, sizeof(telemetry_sample_rate))) {
        telemetry_sample_rate = TELEMETRY_DEFAULT_SAMPLE_RATE;
    }

    /*
     * To enable telemetry for all tasks, include "telemetry_sample_all_tasks=1" in boot-args.
     */
    if (!PE_parse_boot_argn("telemetry_sample_all_tasks", &telemetry_sample_all_tasks, sizeof(telemetry_sample_all_tasks))) {
#if CONFIG_EMBEDDED && !(DEVELOPMENT || DEBUG)
        telemetry_sample_all_tasks = FALSE;
#else
        telemetry_sample_all_tasks = TRUE;
#endif /* CONFIG_EMBEDDED && !(DEVELOPMENT || DEBUG) */
    }

    kprintf("Telemetry: Sampling %stasks once per %u second%s\n",
            (telemetry_sample_all_tasks) ? "all " : "",
            telemetry_sample_rate, telemetry_sample_rate == 1 ? "" : "s");
}
/*
 * This has been broken out into a separate routine that
 * can be called from the x86 early vm initialization to
 * determine how much low memory to reserve on systems with
 * DMA hardware that can't fully address all of the physical
 * memory that is present.
 */
unsigned int
bsd_mbuf_cluster_reserve(boolean_t *overridden)
{
    int mbuf_pool = 0;
    static boolean_t was_overridden = FALSE;

    /* If called more than once, return the previously calculated size */
    if (mbuf_poolsz != 0)
        goto done;

    /*
     * Some of these are parsed in parse_bsd_args(), but for x86 we get
     * here early from i386_vm_init() and so we parse them now, in order
     * to correctly compute the size of the low-memory VM pool. It is
     * redundant but rather harmless.
     */
    (void) PE_parse_boot_argn("ncl", &ncl, sizeof (ncl));
    (void) PE_parse_boot_argn("mbuf_pool", &mbuf_pool, sizeof (mbuf_pool));

    /*
     * Convert "mbuf_pool" from MB to # of 2KB clusters; it is
     * equivalent to "ncl", except that it uses a different unit.
     */
    if (mbuf_pool != 0)
        ncl = (mbuf_pool << MBSHIFT) >> MCLSHIFT;

    if (sane_size > (64 * 1024 * 1024) || ncl != 0) {

        if (ncl || serverperfmode)
            was_overridden = TRUE;

        if ((nmbclusters = ncl) == 0) {
            /* Auto-configure the mbuf pool size */
            nmbclusters = mbuf_default_ncl(serverperfmode, sane_size);
        } else {
            /* Make sure it's not odd in case ncl is manually set */
            if (nmbclusters & 0x1)
                --nmbclusters;

            /* And obey the upper limit */
            if (nmbclusters > MAX_NCL)
                nmbclusters = MAX_NCL;
        }

        /* Round it down to nearest multiple of 4KB clusters */
        nmbclusters = P2ROUNDDOWN(nmbclusters, NCLPBG);
    }
    mbuf_poolsz = nmbclusters << MCLSHIFT;

done:
    if (overridden)
        *overridden = was_overridden;

    return (mbuf_poolsz);
}
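/*
 * Illustrative sketch (not part of the kernel sources above): the "mbuf_pool"
 * boot-arg is given in megabytes and converted to 2 KB clusters by
 * (mbuf_pool << MBSHIFT) >> MCLSHIFT. Assuming xnu's usual MBSHIFT == 20 and
 * MCLSHIFT == 11 (MCLBYTES == 2048), mbuf_pool=64 has the same effect as
 * passing ncl=32768 directly. The example constants below are assumptions
 * named EXAMPLE_* to keep them distinct from the real kernel macros.
 */
#include <assert.h>

#define EXAMPLE_MBSHIFT   20    /* 1 MB == 1 << 20 bytes (assumed, as in xnu) */
#define EXAMPLE_MCLSHIFT  11    /* one cluster == 1 << 11 == 2048 bytes (assumed) */

static void example_mbuf_pool_to_clusters(void)
{
    int mbuf_pool = 64;     /* hypothetical "mbuf_pool=64" boot-arg, in MB */
    int ncl = (mbuf_pool << EXAMPLE_MBSHIFT) >> EXAMPLE_MCLSHIFT;

    assert(ncl == 32768);   /* 64 MB divided into 2 KB clusters */
}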
bool IOPlatformExpert::start( IOService * provider )
{
    IORangeAllocator *  physicalRanges;
    OSData *            busFrequency;
    uint32_t            debugFlags;

    if (!super::start(provider))
        return false;

    // Override the mapper present flag if requested by boot arguments.
    if (PE_parse_boot_argn("dart", &debugFlags, sizeof (debugFlags)) && (debugFlags == 0))
        removeProperty(kIOPlatformMapperPresentKey);
    if (PE_parse_boot_argn("-x", &debugFlags, sizeof (debugFlags)))
        removeProperty(kIOPlatformMapperPresentKey);

    // Register the presence or lack thereof of a system
    // PCI address mapper with the IOMapper class
    IOMapper::setMapperRequired(0 != getProperty(kIOPlatformMapperPresentKey));

    gIOInterruptControllers = OSDictionary::withCapacity(1);
    gIOInterruptControllersLock = IOLockAlloc();

    // Correct the bus frequency in the device tree.
    busFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.bus_clock_rate_hz, 4);
    provider->setProperty("clock-frequency", busFrequency);
    busFrequency->release();

    gPlatformInterruptControllerName = (OSSymbol *)OSSymbol::withCStringNoCopy("IOPlatformInterruptController");

    physicalRanges = IORangeAllocator::withRange(0xffffffff, 1, 16,
                                                 IORangeAllocator::kLocking);
    assert(physicalRanges);
    setProperty("Platform Memory Ranges", physicalRanges);

    setPlatform( this );
    gIOPlatform = this;

    PMInstantiatePowerDomains();

    // Parse the serial-number data and publish a user-readable string
    OSData* mydata = (OSData*) (provider->getProperty("serial-number"));
    if (mydata != NULL) {
        OSString *serNoString = createSystemSerialNumberString(mydata);
        if (serNoString != NULL) {
            provider->setProperty(kIOPlatformSerialNumberKey, serNoString);
            serNoString->release();
        }
    }

    return( configure(provider) );
}
/*
 * sysctl function
 */
int
ucode_interface(uint64_t addr)
{
    int error;
    char arg[16];

    if (PE_parse_boot_argn("-x", arg, sizeof (arg))) {
        printf("ucode: no updates in safe mode\n");
        return EPERM;
    }

#if !DEBUG
    /*
     * Userland may only call this once per boot. Anything else
     * would not make sense (all updates are cumulative), and also
     * leak memory, because we don't free previous updates.
     */
    if (global_update)
        return EPERM;
#endif

    /* Get the whole microcode */
    error = copyin_update(addr);

    if (error)
        return error;

    /* Farm out the updates */
    xcpu_update();

    return 0;
}
int
imageboot_setup()
{
    dev_t       dev;
    int         error = 0;
    char *      root_path = NULL;

    DBG_TRACE("%s: entry\n", __FUNCTION__);

    MALLOC_ZONE(root_path, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
    if (root_path == NULL)
        return (ENOMEM);

    if (PE_parse_boot_argn("rp", root_path, MAXPATHLEN) == FALSE) {
        error = ENOENT;
        goto done;
    }

    printf("%s: root image url is %s\n", __FUNCTION__, root_path);
    error = di_root_image(root_path, rootdevice, &dev);
    if (error) {
        printf("%s: di_root_image failed: %d\n", __FUNCTION__, error);
        goto done;
    }

    rootdev = dev;
    mountroot = NULL;
    printf("%s: root device 0x%x\n", __FUNCTION__, rootdev);
    error = vfs_mountroot();

    if (error == 0 && rootvnode != NULL) {
        struct vnode *tvp;
        struct vnode *newdp;

        /*
         * Get the vnode for '/'.
         * Set fdp->fd_fd.fd_cdir to reference it.
         */
        if (VFS_ROOT(TAILQ_LAST(&mountlist, mntlist), &newdp, vfs_context_kernel()))
            panic("%s: cannot find root vnode", __FUNCTION__);

        vnode_ref(newdp);
        vnode_put(newdp);
        tvp = rootvnode;
        vnode_rele(tvp);
        filedesc0.fd_cdir = newdp;
        rootvnode = newdp;
        mount_list_lock();
        TAILQ_REMOVE(&mountlist, TAILQ_FIRST(&mountlist), mnt_list);
        mount_list_unlock();
        mountlist.tqh_first->mnt_flag |= MNT_ROOTFS;
        DBG_TRACE("%s: root switched\n", __FUNCTION__);
    }
done:
    FREE_ZONE(root_path, MAXPATHLEN, M_NAMEI);

    DBG_TRACE("%s: exit\n", __FUNCTION__);

    return (error);
}
int
imageboot_needed(void)
{
    int result = 0;
    char *root_path = NULL;

    DBG_TRACE("%s: checking for presence of root path\n", __FUNCTION__);

    MALLOC_ZONE(root_path, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
    if (root_path == NULL)
        panic("%s: M_NAMEI zone exhausted", __FUNCTION__);

    if (PE_parse_boot_argn("rp", root_path, MAXPATHLEN) == TRUE) {
        /* Got it, now verify scheme */
        if (strncmp(root_path, kIBFilePrefix, strlen(kIBFilePrefix)) == 0) {
            DBG_TRACE("%s: Found %s\n", __FUNCTION__, root_path);
            result = 1;
        } else {
            DBG_TRACE("%s: Invalid URL scheme for %s\n", __FUNCTION__, root_path);
        }
    }
    FREE_ZONE(root_path, MAXPATHLEN, M_NAMEI);

    return (result);
}
int serial_init( void )
{
    unsigned new_uart_baud_rate = 0;

    if (PE_parse_boot_argn("serialbaud", &new_uart_baud_rate, sizeof (new_uart_baud_rate))) {
        /* Valid divisor? */
        if (!((LEGACY_UART_CLOCK / 16) % new_uart_baud_rate)) {
            uart_baud_rate = new_uart_baud_rate;
        }
    }

    if ( mmio_uart_probe() ) {
        gPESF = &mmio_uart_serial_functions;
        gPESF->uart_init();
        return 1;
    } else if ( legacy_uart_probe() ) {
        gPESF = &legacy_uart_serial_functions;
        gPESF->uart_init();
        return 1;
    } else {
        return 0;
    }
}
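/*
 * Illustrative sketch (assumptions noted inline): serial_init() above accepts
 * a "serialbaud" value only if it divides LEGACY_UART_CLOCK / 16 evenly, i.e.
 * the UART divisor latch would be exact. Assuming the conventional PC UART
 * clock of 1.8432 MHz, the reference rate is 115200, so rates such as 57600,
 * 19200 and 9600 pass the check while e.g. 100000 does not. The EXAMPLE_*
 * names below are stand-ins, not the kernel's actual symbols.
 */
#include <assert.h>

#define EXAMPLE_LEGACY_UART_CLOCK  1843200u   /* assumed conventional UART clock */

static int example_baud_is_valid(unsigned baud)
{
    /* mirrors the divisor check in serial_init(), with a zero guard added */
    return baud != 0 && ((EXAMPLE_LEGACY_UART_CLOCK / 16) % baud) == 0;
}

static void example_baud_check(void)
{
    assert(example_baud_is_valid(115200));
    assert(example_baud_is_valid(9600));
    assert(!example_baud_is_valid(100000));
}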
static int mmio_uart_probe( void )
{
    unsigned new_mmio_uart_base = 0;

    // if specified, mmio_uart overrides all probing
    if (PE_parse_boot_argn("mmio_uart", &new_mmio_uart_base, sizeof (new_mmio_uart_base))) {
        // mmio_uart=0 will disable mmio_uart support
        if (new_mmio_uart_base == 0) {
            return 0;
        }

        mmio_uart_base = new_mmio_uart_base;
        return 1;
    }

    // probe the two possible MMIO_UART2 addresses
    mmio_uart_base = MMIO_UART2_BASE;
    if (mmio_uart_present()) {
        return 1;
    }

    mmio_uart_base = MMIO_UART2_BASE_LEGACY;
    if (mmio_uart_present()) {
        return 1;
    }

    // no mmio uart found
    return 0;
}
/*
 * Prints the backtrace most suspected of being a leaker, if we panicked in the zone allocator.
 * top_ztrace and panic_include_ztrace come from osfmk/kern/zalloc.c
 */
__private_extern__ void
panic_display_ztrace(void)
{
    if (panic_include_ztrace == TRUE) {
        unsigned int i = 0;
        boolean_t keepsyms = FALSE;

        PE_parse_boot_argn("keepsyms", &keepsyms, sizeof (keepsyms));

        struct ztrace top_ztrace_copy;

        /* Make sure not to trip another panic if there's something wrong with memory */
        if (ml_nofault_copy((vm_offset_t)top_ztrace, (vm_offset_t)&top_ztrace_copy,
                            sizeof(struct ztrace)) == sizeof(struct ztrace)) {
            kdb_printf("\nBacktrace suspected of leaking: (outstanding bytes: %lu)\n",
                       (uintptr_t)top_ztrace_copy.zt_size);
            /* Print the backtrace addresses */
            for (i = 0; (i < top_ztrace_copy.zt_depth && i < MAX_ZTRACE_DEPTH); i++) {
                kdb_printf("%p ", top_ztrace_copy.zt_stack[i]);
                if (keepsyms) {
                    panic_print_symbol_name((vm_address_t)top_ztrace_copy.zt_stack[i]);
                }
                kdb_printf("\n");
            }
            /* Print any kexts in that backtrace, along with their link addresses so we can properly blame them */
            kmod_panic_dump((vm_offset_t *)&top_ztrace_copy.zt_stack[0], top_ztrace_copy.zt_depth);
        }
        else {
            kdb_printf("\nCan't access top_ztrace...\n");
        }
        kdb_printf("\n");
    }
}
void PE_init_kprintf(boolean_t vm_initialized)
{
    unsigned int boot_arg;

    if (PE_state.initialized == FALSE)
        panic("Platform Expert not initialized");

    if (!vm_initialized) {
        unsigned int new_disable_serial_output = TRUE;

        simple_lock_init(&kprintf_lock, 0);

        if (PE_parse_boot_argn("debug", &boot_arg, sizeof (boot_arg)))
            if (boot_arg & DB_KPRT)
                new_disable_serial_output = FALSE;

        /* If we are newly enabling serial, make sure we only
         * call pal_serial_init() if our previous state was
         * not enabled */
        if (!new_disable_serial_output && (!disable_serial_output || pal_serial_init()))
            PE_kputc = pal_serial_putc;
        else
            PE_kputc = cnputc;

        disable_serial_output = new_disable_serial_output;
    }
}
/*
 * rtc_timer_init() is called at startup on the boot processor only.
 */
void
rtc_timer_init(void)
{
    int TSC_deadline_timer = 0;

    /* See whether we can use the local apic in TSC-deadline mode */
    if ((cpuid_features() & CPUID_FEATURE_TSCTMR)) {
        TSC_deadline_timer = 1;
        PE_parse_boot_argn("TSC_deadline_timer", &TSC_deadline_timer,
                           sizeof(TSC_deadline_timer));
        printf("TSC Deadline Timer supported %s enabled\n",
               TSC_deadline_timer ? "and" : "but not");
    }

    if (TSC_deadline_timer) {
        rtc_timer = &rtc_timer_tsc_deadline;
        rtc_decrementer_max = UINT64_MAX;   /* effectively none */
        /*
         * The min could be as low as 1nsec,
         * but we're being conservative for now and making it the same
         * as for the local apic timer.
         */
        rtc_decrementer_min = 1 * NSEC_PER_USEC;    /* 1 usec */
    } else {
        /*
         * Compute the longest interval using LAPIC timer.
         */
        rtc_decrementer_max = tmrCvt(0x7fffffffULL, busFCvtt2n);
        kprintf("maxDec: %lld\n", rtc_decrementer_max);
        rtc_decrementer_min = 1 * NSEC_PER_USEC;    /* 1 usec */
    }

    /* Point LAPIC interrupts to hardclock() */
    lapic_set_timer_func((i386_intr_func_t) rtclock_intr);
}
void RealView_framebuffer_init(void)
{
    gRealviewPl111Base = ml_io_map(REALVIEW_PL111_BASE, PAGE_SIZE);

    /*
     * The hardware demands a framebuffer, and the address handed to the
     * controller has to be a physical (hardware) address.
     */
    void *framebuffer = pmap_steal_memory(1024 * 768 * 4);
    void *framebuffer_phys = pmap_extract(kernel_pmap, framebuffer);

    uint32_t depth = 2;
    uint32_t width = 1024;
    uint32_t height = 768;

    uint32_t pitch = (width * depth);
    uint32_t fb_length = (pitch * height);

    uint32_t timingRegister, controlRegister;

    /*
     * Set framebuffer address
     */
    HARDWARE_REGISTER(gRealviewPl111Base + PL111_UPPER_FB) = framebuffer_phys;
    HARDWARE_REGISTER(gRealviewPl111Base + PL111_LOWER_FB) = framebuffer_phys;

    /*
     * Initialize timings to 1024x768x16
     */
    HARDWARE_REGISTER(gRealviewPl111Base + PL111_TIMINGS_0) = LCDTIMING0_PPL(width);
    HARDWARE_REGISTER(gRealviewPl111Base + PL111_TIMINGS_1) = LCDTIMING1_LPP(height);

    /*
     * Enable the TFT/LCD Display
     */
    HARDWARE_REGISTER(gRealviewPl111Base + PL111_CONTROL) =
        LCDCONTROL_LCDEN | LCDCONTROL_LCDTFT | LCDCONTROL_LCDPWR | LCDCONTROL_LCDBPP(5);

    PE_state.video.v_baseAddr = (unsigned long) framebuffer_phys;
    PE_state.video.v_rowBytes = width * 4;
    PE_state.video.v_width = width;
    PE_state.video.v_height = height;
    PE_state.video.v_depth = 4 * (8);   // 32bpp

    kprintf(KPRINTF_PREFIX "framebuffer initialized\n");
    bzero(framebuffer, (pitch * height));

    char tempbuf[16];

    if (PE_parse_boot_argn("-graphics-mode", tempbuf, sizeof(tempbuf))) {
        /*
         * BootX-like framebuffer.
         */
        memset(framebuffer, 0xb9, PE_state.video.v_rowBytes * PE_state.video.v_height);
        initialize_screen((void *) &PE_state.video, kPEGraphicsMode);
    } else {
        initialize_screen((void *) &PE_state.video, kPETextMode);
    }
}
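/*
 * Illustrative sketch (plain arithmetic, not driver code): for the published
 * 1024x768 32 bpp configuration above, each row is width * 4 bytes and the
 * whole framebuffer is rowBytes * height bytes, which matches the
 * pmap_steal_memory(1024 * 768 * 4) allocation.
 */
#include <assert.h>

static void example_realview_fb_geometry(void)
{
    unsigned width = 1024, height = 768, bytes_per_pixel = 4;
    unsigned row_bytes = width * bytes_per_pixel;   /* 4096 bytes per scanline */
    unsigned fb_bytes  = row_bytes * height;        /* 3145728 bytes == 3 MB */

    assert(row_bytes == 4096);
    assert(fb_bytes == 1024 * 768 * 4);
}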
/*
 * This has been broken out into a separate routine that
 * can be called from the x86 early vm initialization to
 * determine how much low memory to reserve on systems with
 * DMA hardware that can't fully address all of the physical
 * memory that is present.
 */
unsigned int
bsd_mbuf_cluster_reserve(void)
{
    int mbuf_pool = 0;

    /* If called more than once, return the previously calculated size */
    if (mbuf_poolsz != 0)
        goto done;

    /*
     * Some of these are parsed in parse_bsd_args(), but for x86 we get
     * here early from i386_vm_init() and so we parse them now, in order
     * to correctly compute the size of the low-memory VM pool. It is
     * redundant but rather harmless.
     */
    //(void) PE_parse_boot_argn("srv", &srv, sizeof (srv));
    (void) PE_parse_boot_argn("ncl", &ncl, sizeof (ncl));
    (void) PE_parse_boot_argn("mbuf_pool", &mbuf_pool, sizeof (mbuf_pool));

    /*
     * Convert "mbuf_pool" from MB to # of 2KB clusters; it is
     * equivalent to "ncl", except that it uses a different unit.
     */
    if (mbuf_pool != 0)
        ncl = (mbuf_pool << MBSHIFT) >> MCLSHIFT;

    if (sane_size > (64 * 1024 * 1024) || ncl != 0) {
        if ((nmbclusters = ncl) == 0) {
            /* Auto-configure the mbuf pool size */
            nmbclusters = mbuf_default_ncl(srv, sane_size);
        } else {
            /* Make sure it's not odd in case ncl is manually set */
            if (nmbclusters & 0x1)
                --nmbclusters;

            /* And obey the upper limit */
            if (nmbclusters > MAX_NCL)
                nmbclusters = MAX_NCL;
        }
    }
    mbuf_poolsz = nmbclusters << MCLSHIFT;
done:
    return (mbuf_poolsz);
}
static uint32_t
compute_wait_hash_size(__unused unsigned cpu_count, __unused uint64_t memsize)
{
    uint32_t hsize = (uint32_t)round_page_64((thread_max / 11) * sizeof(struct wait_queue));
    uint32_t bhsize;

    if (PE_parse_boot_argn("wqsize", &bhsize, sizeof(bhsize)))
        hsize = bhsize;

    return hsize;
}
/*
 * Initialize global state needed for run-time
 * port debugging.
 */
void
ipc_port_debug_init(void)
{
    queue_init(&port_alloc_queue);
    lck_spin_init(&port_alloc_queue_lock, &ipc_lck_grp, &ipc_lck_attr);

    if (!PE_parse_boot_argn("ipc_portbt", &ipc_portbt, sizeof (ipc_portbt)))
        ipc_portbt = 0;
}
kern_return_t Startup(kmod_info_t* ki, void * d)
{
    uint32_t v, thisKernelVersion;

    thisKernelVersion = MakeKernelVersion(version_major, version_minor, version_revision);
    if (thisKernelVersion < MakeKernelVersion(11, 4, 2)) {
        IOLog("OS 10.7.5 or later required for GenericUSBXHCI\n");
        return KERN_FAILURE;
    }
#ifndef REHABMAN_UNIVERSAL_BUILD
#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 101000
    if (thisKernelVersion < MakeKernelVersion(14, 0, 0)) {
        IOLog("OS 10.10.0 or later required for this build of GenericUSBXHCI\n");
        return KERN_FAILURE;
    }
    if (thisKernelVersion >= MakeKernelVersion(15, 0, 0)) {
        IOLog("GenericUSBXHCI does not work on OS 10.11 or later\n");
        return KERN_FAILURE;
    }
#else
    if (thisKernelVersion >= MakeKernelVersion(14, 0, 0)) {
        IOLog("This build of GenericUSBXHCI is not compatible with OS 10.10\n");
        return KERN_FAILURE;
    }
#endif
#endif
#ifdef __LP64__
    if (thisKernelVersion >= MakeKernelVersion(12, 5, 0))
        gux_options |= GUX_OPTION_MAVERICKS;
    if (thisKernelVersion >= MakeKernelVersion(14, 0, 0))
        gux_options |= GUX_OPTION_YOSEMITE;
#endif
    if (PE_parse_boot_argn("-gux_nosleep", &v, sizeof v))
        gux_options |= GUX_OPTION_NO_SLEEP;
    if (PE_parse_boot_argn("-gux_defer_usb2", &v, sizeof v))
        gux_options |= GUX_OPTION_DEFER_INTEL_EHC_PORTS;
    if (PE_parse_boot_argn("-gux_nomsi", &v, sizeof v))
        gux_options |= GUX_OPTION_NO_MSI;
    if (PE_parse_boot_argn("gux_log", &v, sizeof v))
        gux_log_level = static_cast<int>(v);

    return KERN_SUCCESS;
}
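/*
 * Illustrative sketch (a hypothetical packing, not necessarily the project's
 * actual MakeKernelVersion): version gates like the ones above work by packing
 * the Darwin kernel's major/minor/revision into a single integer so releases
 * compare numerically. Darwin 11.x corresponds to OS X 10.7, Darwin 14.x to
 * 10.10 and Darwin 15.x to 10.11; example_make_kernel_version() below is an
 * assumed layout used only for illustration.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t example_make_kernel_version(uint32_t major, uint32_t minor, uint32_t rev)
{
    return (major << 16) | (minor << 8) | rev;   /* assumed bit layout */
}

static void example_version_gate(void)
{
    /* a Darwin 13.4.0 (OS X 10.9.x) kernel fails a ">= 14.0.0" (10.10) gate */
    assert(example_make_kernel_version(13, 4, 0) <  example_make_kernel_version(14, 0, 0));
    assert(example_make_kernel_version(14, 1, 0) >= example_make_kernel_version(14, 0, 0));
}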
void
lapic_configure(void)
{
    int value;

    if (lapic_error_time_threshold == 0 && cpu_number() == 0) {
        nanoseconds_to_absolutetime(NSEC_PER_SEC >> 2, &lapic_error_time_threshold);
        if (!PE_parse_boot_argn("lapic_dont_panic", &lapic_dont_panic, sizeof(lapic_dont_panic))) {
            lapic_dont_panic = FALSE;
        }
    }
    /* ... */
}
void
lck_mod_init(void)
{
    /*
     * Obtain "lcks" options: this currently controls lock statistics
     */
    if (!PE_parse_boot_argn("lcks", &LcksOpts, sizeof (LcksOpts)))
        LcksOpts = 0;

#if (DEVELOPMENT || DEBUG) && defined(__x86_64__)
    if (!PE_parse_boot_argn("-disable_mtx_chk", &LckDisablePreemptCheck, sizeof (LckDisablePreemptCheck)))
        LckDisablePreemptCheck = 0;
#endif /* (DEVELOPMENT || DEBUG) && defined(__x86_64__) */

    queue_init(&lck_grp_queue);

    /*
     * Need to bootstrap the LockCompatGroup instead of calling lck_grp_init() here. This avoids
     * grabbing the lck_grp_lock before it is initialized.
     */
    bzero(&LockCompatGroup, sizeof(lck_grp_t));
    (void) strncpy(LockCompatGroup.lck_grp_name, "Compatibility APIs", LCK_GRP_MAX_NAME);

    if (LcksOpts & enaLkStat)
        LockCompatGroup.lck_grp_attr = LCK_GRP_ATTR_STAT;
    else
        LockCompatGroup.lck_grp_attr = LCK_ATTR_NONE;

    LockCompatGroup.lck_grp_refcnt = 1;

    enqueue_tail(&lck_grp_queue, (queue_entry_t)&LockCompatGroup);
    lck_grp_cnt = 1;

    lck_grp_attr_setdefault(&LockDefaultGroupAttr);
    lck_attr_setdefault(&LockDefaultLckAttr);

    lck_mtx_init_ext(&lck_grp_lock, &lck_grp_lock_ext, &LockCompatGroup, &LockDefaultLckAttr);
}
void interruptAccountingInit(void)
{
    int bootArgValue = 0;

    if (PE_parse_boot_argn("interrupt_accounting", &bootArgValue, sizeof(bootArgValue)))
        gInterruptAccountingStatisticBitmask = bootArgValue;

    gInterruptAccountingDataListLock = IOLockAlloc();

    assert(gInterruptAccountingDataListLock);

    queue_init(&gInterruptAccountingDataList);
}
boolean_t
PE_parse_boot_arg(
    const char  *arg_string,
    void        *arg_ptr)
{
    int max_len = -1;

#if CONFIG_EMBEDDED
    /* Limit arg size to 4 bytes when no size is given */
    max_len = 4;
#endif

    return PE_parse_boot_argn(arg_string, arg_ptr, max_len);
}
static void
parse_bsd_args(void)
{
    char namep[16];
    int msgbuf;

    if (PE_parse_boot_argn("-s", namep, sizeof (namep)))
        boothowto |= RB_SINGLE;

    if (PE_parse_boot_argn("-b", namep, sizeof (namep)))
        boothowto |= RB_NOBOOTRC;

    if (PE_parse_boot_argn("-x", namep, sizeof (namep))) /* safe boot */
        boothowto |= RB_SAFEBOOT;

    if (PE_parse_boot_argn("-l", namep, sizeof (namep))) /* leaks logging */
        turn_on_log_leaks = 1;

    /* disable 64 bit grading */
    if (PE_parse_boot_argn("-no64exec", namep, sizeof (namep)))
        bootarg_no64exec = 1;

    PE_parse_boot_argn("ncl", &ncl, sizeof (ncl));
    if (PE_parse_boot_argn("nbuf", &max_nbuf_headers, sizeof (max_nbuf_headers))) {
        customnbuf = 1;
    }

#if !defined(SECURE_KERNEL)
    PE_parse_boot_argn("kmem", &setup_kmem, sizeof (setup_kmem));
#endif

    PE_parse_boot_argn("trace", &new_nkdbufs, sizeof (new_nkdbufs));

    if (PE_parse_boot_argn("msgbuf", &msgbuf, sizeof (msgbuf))) {
        log_setsize(msgbuf);
    }
}
bool FakeSMC::start(IOService *provider)
{
    if (!super::start(provider))
        return false;

    int arg_value = 1;

    // Check if we have SMC already
    if (OSDictionary *matching = serviceMatching("IOACPIPlatformDevice")) {
        if (OSIterator *iterator = getMatchingServices(matching)) {

            OSString *smcNameProperty = OSString::withCString("APP0001");

            while (IOService *service = (IOService*)iterator->getNextObject()) {

                OSObject *serviceNameProperty = service->getProperty("name");

                if (serviceNameProperty && serviceNameProperty->isEqualTo(smcNameProperty)) {
                    HWSensorsFatalLog("SMC device detected, will not create another one");
                    return false;
                }
            }

            OSSafeRelease(iterator);
        }

        OSSafeRelease(matching);
    }

    if (!smcDevice->initAndStart(provider, this)) {
        HWSensorsInfoLog("failed to initialize SMC device");
        return false;
    }

    registerService();

    // Load keys from NVRAM
    if (PE_parse_boot_argn("-fakesmc-use-nvram", &arg_value, sizeof(arg_value))) {
        if (UInt32 count = smcDevice->loadKeysFromNVRAM())
            HWSensorsInfoLog("%d key%s loaded from NVRAM", count, count == 1 ? "" : "s");
        else
            HWSensorsInfoLog("NVRAM will be used to store system written keys...");
    }

    return true;
}
void RealView_uart_init(void)
{
    char temp_buf[16];

    gRealviewUartBase = ml_io_map(REALVIEW_UART0_BASE, PAGE_SIZE);

    if (PE_parse_boot_argn("-use_realview_eb_pic", temp_buf, sizeof(temp_buf))) {
        gRealviewPicBase = ml_io_map(REALVIEW_EB_PIC0_BASE, PAGE_SIZE);
        gRealviewPicDistribBase = ml_io_map(REALVIEW_EB_PIC0_BASE + PAGE_SIZE, PAGE_SIZE);
    } else {
        gRealviewPicBase = ml_io_map(REALVIEW_PIC0_BASE, PAGE_SIZE);
        gRealviewPicDistribBase = ml_io_map(REALVIEW_PIC0_BASE + PAGE_SIZE, PAGE_SIZE);
    }

    gRealviewSysControllerBase = ml_io_map(REALVIEW_SYSCTL_BASE, PAGE_SIZE);
    gRealviewTimerBase = ml_io_map(REALVIEW_TIMER0_BASE, PAGE_SIZE);
}
bool GPUSensors::start(IOService *provider)
{
    HWSensorsDebugLog("Starting...");

    int arg_value = 1;

    if (PE_parse_boot_argn("-gpusensors-disable", &arg_value, sizeof(arg_value))) {
        return false;
    }

    if (!provider || !super::start(provider))
        return false;

    if (!(pciDevice = OSDynamicCast(IOPCIDevice, provider))) {
        HWSensorsFatalLog("no PCI device");
        return false;
    }

    if (!onStartUp(provider))
        return false;

    if (shouldWaitForAccelerator()) {
        if (!(workloop = getWorkLoop())) {
            HWSensorsFatalLog("failed to obtain workloop");
            return false;
        }

        if (!(timerEventSource = IOTimerEventSource::timerEventSource(this,
            OSMemberFunctionCast(IOTimerEventSource::Action, this, &GPUSensors::probeEvent)))) {
            HWSensorsFatalLog("failed to initialize startup check timer event source");
            return false;
        }

        if (kIOReturnSuccess != workloop->addEventSource(timerEventSource)) {
            HWSensorsFatalLog("failed to add startup check timer event source into workloop");
            timerEventSource->release();
            return false;
        }

        timerEventSource->setTimeoutMS(100);
    }
    else return managedStart(provider);

    return true;
}
void
csr_init(void)
{
    boot_args *args = (boot_args *)PE_state.bootArgs;
    if (args->flags & kBootArgsFlagCSRBoot) {
        /* special booter; allow everything */
        csr_allow_all = 1;
    }

    int rootless_boot_arg;
    if (PE_parse_boot_argn("rootless", &rootless_boot_arg, sizeof(rootless_boot_arg))) {
        /* XXX: set csr_allow_all to boot arg value for now
         * (to be removed by <rdar://problem/16239861>) */
        csr_allow_all = !rootless_boot_arg;
        /* if rootless=1, do not allow everything when CSR_ALLOW_APPLE_INTERNAL is set */
        csr_allow_internal &= !rootless_boot_arg;
    }
}