void
cpu_exit_wait(int cpu)
{
    cpu_data_t  *cdp = cpu_datap(cpu);
    boolean_t   intrs_enabled;
    uint64_t    tsc_timeout;

    /*
     * Wait until the CPU indicates that it has stopped.
     * Disable interrupts while the topo lock is held -- arguably
     * this should always be done, but in this instance it can lead to
     * a timeout if a long-running interrupt were to occur here.
     */
    intrs_enabled = ml_set_interrupts_enabled(FALSE);
    simple_lock(&x86_topo_lock);
    /* Set a generous timeout of several seconds (in TSC ticks) */
    tsc_timeout = rdtsc64() + (10ULL * 1000 * 1000 * 1000);
    while ((cdp->lcpu.state != LCPU_HALT) &&
           (cdp->lcpu.state != LCPU_OFF) &&
           !cdp->lcpu.stopped) {
        simple_unlock(&x86_topo_lock);
        ml_set_interrupts_enabled(intrs_enabled);
        cpu_pause();
        if (rdtsc64() > tsc_timeout)
            panic("cpu_exit_wait(%d) timeout", cpu);
        ml_set_interrupts_enabled(FALSE);
        simple_lock(&x86_topo_lock);
    }
    simple_unlock(&x86_topo_lock);
    ml_set_interrupts_enabled(intrs_enabled);
}
pcid_t
pmap_pcid_allocate_pcid(int ccpu)
{
    int         i;
    pcid_ref_t  cur_min = 0xFF;
    uint32_t    cur_min_index = ~1;
    pcid_ref_t  *cpu_pcid_refcounts = &cpu_datap(ccpu)->cpu_pcid_refcounts[0];
    pcid_ref_t  old_count;

    if ((i = cpu_datap(ccpu)->cpu_pcid_free_hint) != 0) {
        if (cpu_pcid_refcounts[i] == 0) {
            (void)__sync_fetch_and_add(&cpu_pcid_refcounts[i], 1);
            cpu_datap(ccpu)->cpu_pcid_free_hint = 0;
            return i;
        }
    }
    /* Linear scan to discover free slot, with hint. Room for optimization
     * but with intelligent prefetchers this should be adequately performant,
     * as it is invoked only on first dispatch of a new address space onto
     * a given processor. DRKTODO: use larger loads and zero byte discovery
     * -- any pattern != ~1 should signify a free slot.
     */
    for (i = PMAP_PCID_MIN_PCID; i < PMAP_PCID_MAX_PCID; i++) {
        pcid_ref_t cur_refcount = cpu_pcid_refcounts[i];

        pmap_assert(cur_refcount < PMAP_PCID_MAX_REFCOUNT);
        if (cur_refcount == 0) {
            (void)__sync_fetch_and_add(&cpu_pcid_refcounts[i], 1);
            return i;
        } else {
            if (cur_refcount < cur_min) {
                cur_min_index = i;
                cur_min = cur_refcount;
            }
        }
    }
    pmap_assert(cur_min_index > 0 && cur_min_index < PMAP_PCID_MAX_PCID);
    /* Consider "rebalancing" tags actively in highly oversubscribed cases,
     * perhaps selecting tags with lower activity.
     */
    old_count = __sync_fetch_and_add(&cpu_pcid_refcounts[cur_min_index], 1);
    pmap_assert(old_count < PMAP_PCID_MAX_REFCOUNT);
    return cur_min_index;
}
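/*
 * Illustration only: a minimal user-space sketch of the allocation policy
 * above (free-slot hint, then linear scan, then reuse of the least-shared
 * tag). All names and constants here are stand-ins, not the kernel's; the
 * kernel version additionally uses atomic increments because the refcounts
 * are shared with the deallocation path.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_MIN_PCID     1
#define SKETCH_MAX_PCID     12      /* stand-in for PMAP_PCID_MAX_PCID */

static uint8_t  sketch_refcounts[SKETCH_MAX_PCID];
static int      sketch_free_hint = 0;

static int
sketch_allocate_pcid(void)
{
    int     i, cur_min_index = -1;
    uint8_t cur_min = 0xFF;

    /* Fast path: a recently freed slot, remembered in the hint. */
    if ((i = sketch_free_hint) != 0 && sketch_refcounts[i] == 0) {
        sketch_refcounts[i]++;
        sketch_free_hint = 0;
        return i;
    }
    /* Slow path: scan for a free slot, tracking the least-shared one. */
    for (i = SKETCH_MIN_PCID; i < SKETCH_MAX_PCID; i++) {
        if (sketch_refcounts[i] == 0) {
            sketch_refcounts[i]++;
            return i;
        }
        if (sketch_refcounts[i] < cur_min) {
            cur_min = sketch_refcounts[i];
            cur_min_index = i;
        }
    }
    /* Oversubscribed: share the tag with the fewest current users. */
    sketch_refcounts[cur_min_index]++;
    return cur_min_index;
}

int
main(void)
{
    for (int n = 0; n < 15; n++)
        printf("allocation %2d -> pcid %d\n", n, sketch_allocate_pcid());
    return 0;
}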
void
pmap_pcid_activate(pmap_t tpmap, int ccpu)
{
    pcid_t      new_pcid = tpmap->pmap_pcid_cpus[ccpu];
    pmap_t      last_pmap;
    boolean_t   pcid_conflict = FALSE, pending_flush = FALSE;

    pmap_assert(cpu_datap(ccpu)->cpu_pmap_pcid_enabled);
    if (__improbable(new_pcid == PMAP_PCID_INVALID_PCID)) {
        new_pcid = tpmap->pmap_pcid_cpus[ccpu] = pmap_pcid_allocate_pcid(ccpu);
    }
    pmap_assert(new_pcid != PMAP_PCID_INVALID_PCID);
#ifdef PCID_ASSERT
    cpu_datap(ccpu)->cpu_last_pcid = cpu_datap(ccpu)->cpu_active_pcid;
#endif
    cpu_datap(ccpu)->cpu_active_pcid = new_pcid;

    pending_flush = (tpmap->pmap_pcid_coherency_vector[ccpu] != 0);
    if (__probable(pending_flush == FALSE)) {
        last_pmap = cpu_datap(ccpu)->cpu_pcid_last_pmap_dispatched[new_pcid];
        pcid_conflict = ((last_pmap != NULL) && (tpmap != last_pmap));
    }
    if (__improbable(pending_flush || pcid_conflict)) {
        pmap_pcid_validate_cpu(tpmap, ccpu);
    }
    /* Consider making this a unique id */
    cpu_datap(ccpu)->cpu_pcid_last_pmap_dispatched[new_pcid] = tpmap;

    pmap_assert(new_pcid < PMAP_PCID_MAX_PCID);
    pmap_assert(((tpmap == kernel_pmap) && new_pcid == 0) ||
        ((new_pcid != PMAP_PCID_INVALID_PCID) && (new_pcid != 0)));
#if PMAP_ASSERT
    pcid_record_array[ccpu % PCID_RECORD_SIZE] = tpmap->pm_cr3 | new_pcid |
        (((uint64_t)(!(pending_flush || pcid_conflict))) << 63);
    pml4_entry_t *pml4 = pmap64_pml4(tpmap, 0ULL);
    /* Diagnostic to detect pagetable anchor corruption */
    if (pml4[KERNEL_PML4_INDEX] != kernel_pmap->pm_pml4[KERNEL_PML4_INDEX])
        __asm__ volatile("int3");
#endif /* PMAP_ASSERT */

    set_cr3_composed(tpmap->pm_cr3, new_pcid,
        !(pending_flush || pcid_conflict));

    if (!pending_flush) {
        /* We did not previously observe a pending invalidation for this
         * ASID. However, the load from the coherency vector
         * could've been reordered ahead of the store to the
         * active_cr3 field (in the context switch path, our
         * caller). Re-consult the pending invalidation vector
         * after the CR3 write. We rely on MOV CR3's documented
         * serializing property to avoid insertion of an expensive
         * barrier. (DRK)
         */
        pending_flush = (tpmap->pmap_pcid_coherency_vector[ccpu] != 0);
        if (__improbable(pending_flush != 0)) {
            pmap_pcid_validate_cpu(tpmap, ccpu);
            set_cr3_composed(tpmap->pm_cr3, new_pcid, FALSE);
        }
    }
    cpu_datap(ccpu)->cpu_pmap_pcid_coherentp =
        &(tpmap->pmap_pcid_coherency_vector[ccpu]);
#if DEBUG
    KERNEL_DEBUG_CONSTANT(0x9c1d0000, tpmap, new_pcid,
        pending_flush, pcid_conflict, 0);
#endif
}
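/*
 * For reference: set_cr3_composed(), as used above, writes a single
 * composed value to %cr3 -- the page-table base, the PCID in the low
 * 12 bits, and the "preserve" (no-flush) flag in bit 63. A sketch of
 * the composition follows; the kernel's actual macro may differ in
 * detail.
 */
#include <stdint.h>

static inline uint64_t
sketch_compose_cr3(uint64_t base, uint16_t pcid, int preserve)
{
    /* Bit 63 set => the processor preserves cached translations for
     * this PCID instead of flushing them on the CR3 write. */
    return base | (uint64_t)(pcid & 0xFFF) |
        ((uint64_t)(preserve != 0) << 63);
}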
/* -----------------------------------------------------------------------------
   vmx_free_vmxon_regions()
        Free VMXON regions for all CPUs.
   -------------------------------------------------------------------------- */
static void
vmx_free_vmxon_regions(void)
{
    unsigned int i;

    for (i = 0; i < real_ncpus; i++) {
        vmx_cpu_t *cpu = &cpu_datap(i)->cpu_vmx;

        vmx_pfree(cpu->vmxon_region);
        cpu->vmxon_region = NULL;
    }
}
void
pmap_pcid_deallocate_pcid(int ccpu, pmap_t tpmap)
{
    pcid_t      pcid;
    pmap_t      lp;
    pcid_ref_t  prior_count;

    pcid = tpmap->pmap_pcid_cpus[ccpu];
    pmap_assert(pcid != PMAP_PCID_INVALID_PCID);
    if (pcid == PMAP_PCID_INVALID_PCID)
        return;

    lp = cpu_datap(ccpu)->cpu_pcid_last_pmap_dispatched[pcid];
    pmap_assert(pcid > 0 && pcid < PMAP_PCID_MAX_PCID);
    pmap_assert(cpu_datap(ccpu)->cpu_pcid_refcounts[pcid] >= 1);

    if (lp == tpmap)
        (void)__sync_bool_compare_and_swap(
            &cpu_datap(ccpu)->cpu_pcid_last_pmap_dispatched[pcid],
            tpmap, PMAP_INVALID);

    if ((prior_count = __sync_fetch_and_sub(
            &cpu_datap(ccpu)->cpu_pcid_refcounts[pcid], 1)) == 1) {
        cpu_datap(ccpu)->cpu_pcid_free_hint = pcid;
    }
    pmap_assert(prior_count <= PMAP_PCID_MAX_REFCOUNT);
}
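/*
 * Illustration only, continuing the user-space sketch that follows
 * pmap_pcid_allocate_pcid() above: deallocation drops a reference and,
 * on the last release, records the slot in the free hint so the next
 * allocation can take the fast path.
 */
static void
sketch_deallocate_pcid(int pcid)
{
    if (--sketch_refcounts[pcid] == 0)
        sketch_free_hint = pcid;
}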
/**
 * current_cpu_datap
 *
 * Return the current processor's per-cpu data.
 */
cpu_data_t *
current_cpu_datap(void)
{
    int smp_number = get_cpu_number();
    cpu_data_t *current_cpu_data;

    if (smp_number == 0)
        return &cpu_data_master;

    current_cpu_data = cpu_datap(smp_number);
    if (!current_cpu_data) {
        panic("cpu_data for slot %d is not available yet\n", smp_number);
    }

    return current_cpu_data;
}
void
mca_cpu_alloc(cpu_data_t *cdp)
{
    vm_size_t mca_state_size;

    /*
     * Allocate space for an array of error banks.
     */
    mca_state_size = sizeof(mca_state_t) +
        sizeof(mca_mci_bank_t) * mca_error_bank_count;
    cdp->cpu_mca_state = kalloc(mca_state_size);
    if (cdp->cpu_mca_state == NULL) {
        printf("mca_cpu_alloc() failed for cpu %d\n", cdp->cpu_number);
        return;
    }
    bzero((void *) cdp->cpu_mca_state, mca_state_size);

    /*
     * If the boot processor is yet to have its allocation made,
     * do this now.
     */
    if (cpu_datap(master_cpu)->cpu_mca_state == NULL)
        mca_cpu_alloc(cpu_datap(master_cpu));
}
/* -----------------------------------------------------------------------------
   vmx_allocate_vmxon_regions()
        Allocate, clear and init VMXON regions for all CPUs.
   -------------------------------------------------------------------------- */
static void
vmx_allocate_vmxon_regions(void)
{
    unsigned int i;

    for (i = 0; i < real_ncpus; i++) {
        vmx_cpu_t *cpu = &cpu_datap(i)->cpu_vmx;

        /* The size is defined to be always <= 4K, so we just allocate a page */
        cpu->vmxon_region = vmx_pcalloc();
        if (NULL == cpu->vmxon_region)
            panic("vmx_allocate_vmxon_regions: unable to allocate VMXON region");
        *(uint32_t *)(cpu->vmxon_region) = cpu->specs.vmcs_id;
    }
}
/**
 * timer_queue_assign
 *
 * Assign a deadline and return the current processor's timer queue.
 */
mpqueue_head_t *
timer_queue_assign(uint64_t deadline)
{
    cpu_data_t      *cdp = current_cpu_datap();
    mpqueue_head_t  *queue;

    if (cdp->cpu_running) {
        queue = &cdp->rt_timer.queue;

        if (deadline < cdp->rt_timer.deadline) {
            etimer_set_deadline(deadline);
        }
    } else {
        queue = &cpu_datap(master_cpu)->rt_timer.queue;
    }

    return (queue);
}
/* -----------------------------------------------------------------------------
   vmx_globally_available()
        Checks whether VT can be turned on for all CPUs.
   -------------------------------------------------------------------------- */
static boolean_t
vmx_globally_available(void)
{
    unsigned int i;
    boolean_t available = TRUE;

    for (i = 0; i < real_ncpus; i++) {
        vmx_cpu_t *cpu = &cpu_datap(i)->cpu_vmx;

        if (!cpu->specs.vmx_present)
            available = FALSE;
    }
    VMX_KPRINTF("VMX available: %d\n", available);
    return available;
}
void
cpu_exit_wait(int cpu)
{
    cpu_data_t *cdp = cpu_datap(cpu);

    /*
     * Wait until the CPU indicates that it has stopped.
     */
    simple_lock(&x86_topo_lock);
    while ((cdp->lcpu.state != LCPU_HALT) &&
           (cdp->lcpu.state != LCPU_OFF) &&
           !cdp->lcpu.stopped) {
        simple_unlock(&x86_topo_lock);
        cpu_pause();
        simple_lock(&x86_topo_lock);
    }
    simple_unlock(&x86_topo_lock);
}
/*
 * timer_queue_migrate_cpu() is called from the Power-Management kext
 * when a logical processor goes idle (in a deep C-state) with a distant
 * deadline so that its timer queue can be moved to another processor.
 * This target processor should be the least idle (most busy) --
 * currently this is the primary processor for the calling thread's package.
 * Locking restrictions demand that the target cpu must be the boot cpu.
 */
uint32_t
timer_queue_migrate_cpu(int target_cpu)
{
    cpu_data_t  *target_cdp = cpu_datap(target_cpu);
    cpu_data_t  *cdp = current_cpu_datap();
    int         ntimers_moved;

    assert(!ml_get_interrupts_enabled());
    assert(target_cpu != cdp->cpu_number);
    assert(target_cpu == master_cpu);

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        DECR_TIMER_MIGRATE | DBG_FUNC_START,
        target_cpu,
        cdp->rtclock_timer.deadline, (cdp->rtclock_timer.deadline >> 32),
        0, 0);

    /*
     * Move timer requests from the local queue to the target processor's.
     * The return value is the number of requests moved. If this is 0,
     * it indicates that the first (i.e. earliest) timer is earlier than
     * the earliest for the target processor. Since this would force a
     * resync, the move of this and all later requests is aborted.
     */
    ntimers_moved = timer_queue_migrate(&cdp->rtclock_timer.queue,
        &target_cdp->rtclock_timer.queue);

    /*
     * Assuming we moved stuff, clear local deadline.
     */
    if (ntimers_moved > 0) {
        cdp->rtclock_timer.deadline = EndOfAllTime;
        setPop(EndOfAllTime);
    }

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        DECR_TIMER_MIGRATE | DBG_FUNC_END,
        target_cpu, ntimers_moved, 0, 0, 0);

    return ntimers_moved;
}
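/*
 * Illustration only: the abort rule documented above, sketched over plain
 * sorted arrays rather than the kernel's mpqueue structures. All names
 * here are stand-ins. Migration moves every entry unless the source's
 * earliest deadline precedes the target's, in which case nothing moves
 * (returning 0), since moving it would force the target to resync its
 * next timer pop.
 */
#include <stdint.h>
#include <stdio.h>

static void
sketch_insert_sorted(uint64_t *q, int *n, uint64_t deadline)
{
    int i = *n;

    while (i > 0 && q[i - 1] > deadline) {
        q[i] = q[i - 1];
        i--;
    }
    q[i] = deadline;
    (*n)++;
}

static int
sketch_timer_queue_migrate(uint64_t *src, int *nsrc,
    uint64_t *dst, int *ndst)
{
    int moved = *nsrc;

    if (*nsrc == 0)
        return 0;
    /* The source's earliest deadline precedes the target's: abort. */
    if (*ndst > 0 && src[0] < dst[0])
        return 0;
    while (*nsrc > 0)
        sketch_insert_sorted(dst, ndst, src[--(*nsrc)]);
    return moved;
}

int
main(void)
{
    uint64_t src[8] = { 500, 900 }, dst[8] = { 300, 700 };
    int nsrc = 2, ndst = 2;

    printf("moved %d timers\n",
        sketch_timer_queue_migrate(src, &nsrc, dst, &ndst));
    printf("target head deadline: %llu\n", (unsigned long long) dst[0]);
    return 0;
}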
processor_t
machine_choose_processor(processor_set_t pset, processor_t preferred)
{
    int startCPU;
    int endCPU;
    int preferredCPU;
    int chosenCPU;

    if (!pmInitDone)
        return (preferred);

    if (pset == NULL) {
        startCPU = -1;
        endCPU = -1;
    } else {
        startCPU = pset->cpu_set_low;
        endCPU = pset->cpu_set_hi;
    }

    if (preferred == NULL)
        preferredCPU = -1;
    else
        preferredCPU = preferred->cpu_id;

    if (pmDispatch != NULL && pmDispatch->pmChooseCPU != NULL) {
        chosenCPU = pmDispatch->pmChooseCPU(startCPU, endCPU, preferredCPU);

        if (chosenCPU == -1)
            return (NULL);
        return (cpu_datap(chosenCPU)->cpu_processor);
    }

    return (preferred);
}
/**
 * cpu_to_processor
 *
 * Return the processor_t for a specified processor. Please don't
 * do bad things.
 *
 * This function needs validation to ensure that the cpu data accessed
 * isn't null/exceeding buffer boundaries.
 */
processor_t
cpu_to_processor(int cpu)
{
    assert(cpu_datap(cpu) != NULL);
    return cpu_datap(cpu)->cpu_processor;
}
void
mca_dump(void)
{
    mca_state_t     *mca_state = current_cpu_datap()->cpu_mca_state;
    uint64_t        deadline;
    unsigned int    i = 0;

    /*
     * Capture local MCA registers to per-cpu data.
     */
    mca_save_state(mca_state);

    /*
     * Serialize: the first caller controls dumping MCA registers,
     * other threads spin meantime.
     */
    simple_lock(&mca_lock);
    if (mca_dump_state > CLEAR) {
        simple_unlock(&mca_lock);
        while (mca_dump_state == DUMPING)
            cpu_pause();
        return;
    }
    mca_dump_state = DUMPING;
    simple_unlock(&mca_lock);

    /*
     * Wait for all other hardware threads to save their state.
     * Or timeout.
     */
    deadline = mach_absolute_time() + LockTimeOut;
    while (mach_absolute_time() < deadline && i < real_ncpus) {
        if (!cpu_datap(i)->cpu_mca_state->mca_is_saved) {
            cpu_pause();
            continue;
        }
        i += 1;
    }

    /*
     * Report machine-check capabilities:
     */
    kdb_printf("Machine-check capabilities: 0x%016qx\n", ia32_mcg_cap.u64);

    mca_report_cpu_info();

    kdb_printf(" %d error-reporting banks\n", mca_error_bank_count);

    /*
     * Dump all processor state:
     */
    for (i = 0; i < real_ncpus; i++) {
        mca_state_t         *mcsp = cpu_datap(i)->cpu_mca_state;
        ia32_mcg_status_t   status;

        if (mcsp == NULL ||
            mcsp->mca_is_saved == FALSE ||
            mcsp->mca_mcg_status.u64 == 0 ||
            !mcsp->mca_is_valid) {
            continue;
        }
        status = mcsp->mca_mcg_status;
        kdb_printf("Processor %d: IA32_MCG_STATUS: 0x%016qx\n",
            i, status.u64);
        mca_cpu_dump_error_banks(mcsp);
    }

    /* Update state to release any other threads. */
    mca_dump_state = DUMPED;
}
void
mca_dump(void)
{
    mca_state_t     *mca_state = current_cpu_datap()->cpu_mca_state;
    uint64_t        deadline;
    unsigned int    i = 0;

    /*
     * Capture local MCA registers to per-cpu data.
     */
    mca_save_state(mca_state);

    /*
     * Serialize: the first caller controls dumping MCA registers,
     * other threads spin meantime.
     */
    simple_lock(&mca_lock);
    if (mca_dump_state > CLEAR) {
        simple_unlock(&mca_lock);
        while (mca_dump_state == DUMPING)
            cpu_pause();
        return;
    }
    mca_dump_state = DUMPING;
    simple_unlock(&mca_lock);

    /*
     * Wait for all other hardware threads to save their state.
     * Or timeout.
     */
    deadline = mach_absolute_time() + LockTimeOut;
    while (mach_absolute_time() < deadline && i < real_ncpus) {
        if (!cpu_datap(i)->cpu_mca_state->mca_is_saved) {
            cpu_pause();
            continue;
        }
        i += 1;
    }

    /*
     * Report machine-check capabilities:
     */
    kdb_printf("Machine-check capabilities 0x%016qx:\n", ia32_mcg_cap.u64);

    mca_report_cpu_info();

    kdb_printf(" %d error-reporting banks\n%s%s%s", mca_error_bank_count,
        IF(mca_control_MSR_present,
            " control MSR present\n"),
        IF(mca_threshold_status_present,
            " threshold-based error status present\n"),
        IF(mca_cmci_present,
            " extended corrected memory error handling present\n"));
    if (mca_extended_MSRs_present)
        kdb_printf(" %d extended MSRs present\n", mca_extended_MSRs_count);

    /*
     * Dump all processor state:
     */
    for (i = 0; i < real_ncpus; i++) {
        mca_state_t         *mcsp = cpu_datap(i)->cpu_mca_state;
        ia32_mcg_status_t   status;

        kdb_printf("Processor %d: ", i);
        if (mcsp == NULL ||
            mcsp->mca_is_saved == FALSE ||
            mcsp->mca_mcg_status.u64 == 0) {
            kdb_printf("no machine-check status reported\n");
            continue;
        }
        if (!mcsp->mca_is_valid) {
            kdb_printf("no valid machine-check state\n");
            continue;
        }
        status = mcsp->mca_mcg_status;
        kdb_printf("machine-check status 0x%016qx:\n%s%s%s", status.u64,
            IF(status.bits.ripv, " restart IP valid\n"),
            IF(status.bits.eipv, " error IP valid\n"),
            IF(status.bits.mcip, " machine-check in progress\n"));
        mca_cpu_dump_error_banks(mcsp);
    }

    /*
     * Dump any extended machine state:
     */
    if (mca_extended_MSRs_present) {
        if (cpu_mode_is64bit())
            mca_dump_64bit_state();
        else
            mca_dump_32bit_state();
    }

    /* Update state to release any other threads. */
    mca_dump_state = DUMPED;
}
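/*
 * Note: the IF() helper used in the kdb_printf() calls above is not shown
 * in this excerpt; a plausible definition, matching its use as a
 * conditional string selector for variadic printing, would be:
 */
#define IF(bool, str)   ((bool) ? (str) : "")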
processor_t
cpu_to_processor(int cpu)
{
    return cpu_datap(cpu)->cpu_processor;
}
mpqueue_head_t *
timer_queue_cpu(int cpu)
{
    return &cpu_datap(cpu)->rtclock_timer.queue;
}
void
pmap_pcid_configure(void)
{
    int         ccpu = cpu_number();
    uintptr_t   cr4 = get_cr4();
    boolean_t   pcid_present = FALSE;

    pmap_pcid_log("PCID configure invoked on CPU %d\n", ccpu);
    pmap_assert(ml_get_interrupts_enabled() == FALSE ||
        get_preemption_level() != 0);
    pmap_assert(cpu_mode_is64bit());

    if (PE_parse_boot_argn("-pmap_pcid_disable", &pmap_pcid_disabled,
        sizeof(pmap_pcid_disabled))) {
        pmap_pcid_log("PMAP: PCID feature disabled\n");
        printf("PMAP: PCID feature disabled, %u\n", pmap_pcid_disabled);
        kprintf("PMAP: PCID feature disabled %u\n", pmap_pcid_disabled);
    }
    /* no_shared_cr3+PCID is currently unsupported */
#if DEBUG
    if (pmap_pcid_disabled == FALSE)
        no_shared_cr3 = FALSE;
    else
        no_shared_cr3 = TRUE;
#else
    if (no_shared_cr3)
        pmap_pcid_disabled = TRUE;
#endif
    if (pmap_pcid_disabled || no_shared_cr3) {
        unsigned i;
        /* Reset PCID status, as we may have picked up
         * strays if discovered prior to platform
         * expert initialization.
         */
        for (i = 0; i < real_ncpus; i++) {
            if (cpu_datap(i)) {
                cpu_datap(i)->cpu_pmap_pcid_enabled = FALSE;
            }
        }
        pmap_pcid_ncpus = 0;
        cpu_datap(ccpu)->cpu_pmap_pcid_enabled = FALSE;
        return;
    }
    /* DRKTODO: assert if features haven't been discovered yet. Redundant
     * invocation of cpu_mode_init and descendants masks this for now.
     */
    if ((cpuid_features() & CPUID_FEATURE_PCID))
        pcid_present = TRUE;
    else {
        cpu_datap(ccpu)->cpu_pmap_pcid_enabled = FALSE;
        pmap_pcid_log("PMAP: PCID not detected CPU %d\n", ccpu);
        return;
    }
    if ((cr4 & (CR4_PCIDE | CR4_PGE)) == (CR4_PCIDE | CR4_PGE)) {
        cpu_datap(ccpu)->cpu_pmap_pcid_enabled = TRUE;
        pmap_pcid_log("PMAP: PCID already enabled %d\n", ccpu);
        return;
    }
    if (pcid_present == TRUE) {
        pmap_pcid_log("Pre-PCID: CR0: 0x%lx, CR3: 0x%lx, CR4(CPU %d): 0x%lx\n",
            get_cr0(), get_cr3_raw(), ccpu, cr4);

        if (cpu_number() >= PMAP_PCID_MAX_CPUS) {
            panic("PMAP_PCID_MAX_CPUS %d\n", cpu_number());
        }
        if ((get_cr4() & CR4_PGE) == 0) {
            set_cr4(get_cr4() | CR4_PGE);
            pmap_pcid_log("Toggled PGE ON (CPU %d)\n", ccpu);
        }
        set_cr4(get_cr4() | CR4_PCIDE);
        pmap_pcid_log("Post PCID: CR0: 0x%lx, CR3: 0x%lx, CR4(CPU %d): 0x%lx\n",
            get_cr0(), get_cr3_raw(), ccpu, get_cr4());
        tlb_flush_global();
        cpu_datap(ccpu)->cpu_pmap_pcid_enabled = TRUE;

        if (OSIncrementAtomic(&pmap_pcid_ncpus) == machine_info.max_cpus) {
            pmap_pcid_log("All PCIDs enabled: real_ncpus: %d, pmap_pcid_ncpus: %d\n",
                real_ncpus, pmap_pcid_ncpus);
        }
        cpu_datap(ccpu)->cpu_pmap_pcid_coherentp =
            cpu_datap(ccpu)->cpu_pmap_pcid_coherentp_kernel =
            &(kernel_pmap->pmap_pcid_coherency_vector[ccpu]);
        cpu_datap(ccpu)->cpu_pcid_refcounts[0] = 1;
    }
}
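/*
 * For reference, the CR4 feature bits tested above are architectural
 * (Intel SDM, Vol. 3); the kernel's own CR4_* definitions live elsewhere,
 * but the bit positions are fixed by the ISA:
 */
#define SKETCH_CR4_PGE      (1ULL << 7)     /* Page Global Enable */
#define SKETCH_CR4_PCIDE    (1ULL << 17)    /* PCID Enable; may only be
                                             * set in 64-bit (long) mode */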
cpu_subtype_t
slot_subtype(int slot_num)
{
    return (cpu_datap(slot_num)->cpu_subtype);
}
cpu_threadtype_t
slot_threadtype(int slot_num)
{
    return (cpu_datap(slot_num)->cpu_threadtype);
}
/**
 * arm_init
 *
 * Initialize the core ARM subsystems; this routine is called from the
 * boot loader. A basic identity mapping is created in __start; however,
 * arm_vm_init will create new mappings.
 */
void
arm_init(boot_args *args)
{
    cpu_data_t  *bootProcessorData;
    processor_t bootProcessor;
    uint32_t    baMaxMem;
    uint64_t    maxMem;
    thread_t    thread;

    /*
     * We are in.
     */
    PE_early_puts("arm_init: starting up\n");

    /*
     * arm_init is only called on processor #0; the others will enter
     * using arm_slave_init.
     */
    bootProcessor = cpu_processor_alloc(TRUE);
    if (!bootProcessor) {
        panic("cpu_processor_alloc failed\n");
    }

    /*
     * Pin the processor information to CPU #0.
     */
    PE_early_puts("arm_init: calling cpu_bootstrap\n");
    cpu_bootstrap();

    /*
     * Initialize core processor data.
     */
    bootProcessorData = current_cpu_datap();
    bootProcessorData->cpu_number = 0;
    bootProcessorData->cpu_active_stack = (vm_offset_t)&irqstack;
    bootProcessorData->cpu_phys_number = 0;
    bootProcessorData->cpu_preemption_level = 1;
    bootProcessorData->cpu_interrupt_level = 0;
    bootProcessorData->cpu_running = 1;
    bootProcessorData->cpu_pending_ast = AST_NONE;

    /*
     * Initialize the core thread subsystem (this sets up a template
     * which will then be used to initialize the rest of the thread
     * system later).
     *
     * Additionally, this also sets the current kernel thread register
     * to our bootstrap thread.
     */
    PE_early_puts("arm_init: calling thread_bootstrap\n");
    thread_bootstrap();

    /*
     * CPU initialization.
     */
    PE_early_puts("arm_init: calling cpu_init\n");
    cpu_init();

    /*
     * Mach processor bootstrap.
     */
    PE_early_puts("arm_init: calling processor_bootstrap\n");
    processor_bootstrap();

    /*
     * Initialize the ARM platform expert.
     */
    PE_early_puts("arm_init: calling PE_init_platform\n");
    PE_init_platform(FALSE, (void *) args);

    /*
     * Initialize kprintf, but no VM is running yet.
     */
    PE_init_kprintf(FALSE);

    /*
     * Set maximum memory size based on boot-args.
     */
    if (!PE_parse_boot_argn("maxmem", &baMaxMem, sizeof(baMaxMem)))
        maxMem = 0;
    else
        maxMem = (uint64_t) baMaxMem * (1024 * 1024);

    /*
     * After this, we'll no longer be using physical mappings created
     * by the bootloader.
     */
    arm_vm_init(maxMem, args);

    /*
     * Kernel early bootstrap.
     */
    kernel_early_bootstrap();

    /*
     * PE platform init.
     */
    PE_init_platform(TRUE, (void *) args);

    /*
     * Enable I+D cache.
     */
    char tempbuf[16];

    if (PE_parse_boot_argn("-no-cache", tempbuf, sizeof(tempbuf))) {
        kprintf("cache: No caching enabled (I+D).\n");
    } else {
        kprintf("cache: initializing i+dcache ... ");
        cache_initialize();
        kprintf("done\n");
    }

    /*
     * Specify serial mode.
     */
    serialmode = 0;
    if (PE_parse_boot_argn("serial", &serialmode, sizeof(serialmode))) {
        /* We want a serial keyboard and/or console. */
        kprintf("Serial mode specified: %08X\n", serialmode);
    }
    if (serialmode & 1) {
        (void) switch_to_serial_console();
        disableConsoleOutput = FALSE;   /* Allow printfs to happen */
    }

    /*
     * Start system timers.
     */
    thread = current_thread();
    thread->machine.preempt_count = 1;
    thread->machine.cpu_data = cpu_datap(cpu_number());
    thread->kernel_stack = irqstack;
    timer_start(&thread->system_timer, mach_absolute_time());

    /*
     * Processor identification.
     */
    arm_processor_identify();

    /*
     * VFP/float initialization.
     */
    init_vfp();

    /*
     * Machine startup.
     */
    machine_startup();

    /*
     * If we return, something very bad is happening.
     */
    panic("20:02:14 <DHowett> wwwwwwwat is HAAAAAAAPPENING\n");

    /*
     * Last chance.
     */
    while (1)
        ;
}
static boolean_t
pmCPUGetHibernate(int cpu)
{
    return (cpu_datap(cpu)->cpu_hibernate);
}
static processor_t
pmLCPUtoProcessor(int lcpu)
{
    return (cpu_datap(lcpu)->cpu_processor);
}
/**
 * arm_init
 *
 * Initialize the core ARM subsystems; this routine is called from the
 * boot loader. A basic identity mapping is created in __start; however,
 * arm_vm_init will create new mappings.
 */
void
arm_init(boot_args *args)
{
    cpu_data_t  *bootProcessorData;
    processor_t bootProcessor;
    uint32_t    baMaxMem;
    uint64_t    maxMem;
    thread_t    thread;

    /*
     * Welcome to arm_init, may I take your order?
     */
    PE_early_puts("arm_init: starting up\n");

    /*
     * arm_init is only called on processor #0; the others will enter
     * using arm_slave_init.
     */
    bootProcessor = cpu_processor_alloc(TRUE);
    if (!bootProcessor) {
        panic("Something really wacky happened here with cpu_processor_alloc\n");
    }

    /*
     * Pin the processor information to CPU #0.
     */
    PE_early_puts("arm_init: calling cpu_bootstrap\n");
    cpu_bootstrap();

    /*
     * Initialize core processor data.
     */
    bootProcessorData = current_cpu_datap();
    bootProcessorData->cpu_number = 0;
    bootProcessorData->cpu_active_stack = (vm_offset_t)&irqstack;
    bootProcessorData->cpu_phys_number = 0;
    bootProcessorData->cpu_preemption_level = 1;
    bootProcessorData->cpu_interrupt_level = 0;
    bootProcessorData->cpu_running = 1;

    /*
     * Initialize the core thread subsystem (this sets up a template
     * which will then be used to initialize the rest of the thread
     * system later).
     *
     * Additionally, this also sets the current kernel thread register
     * to our bootstrap thread.
     */
    PE_early_puts("arm_init: calling thread_bootstrap\n");
    thread_bootstrap();

    /*
     * CPU initialization.
     */
    PE_early_puts("arm_init: calling cpu_init\n");
    cpu_init();

    /*
     * Mach processor bootstrap.
     */
    PE_early_puts("arm_init: calling processor_bootstrap\n");
    processor_bootstrap();

    /*
     * Initialize the ARM platform expert.
     */
    PE_early_puts("arm_init: calling PE_init_platform\n");
    PE_init_platform(FALSE, (void *) args);

    /*
     * Initialize kprintf, but no VM is running yet.
     */
    PE_init_kprintf(FALSE);

    /*
     * Set maximum memory size based on boot-args.
     */
    if (!PE_parse_boot_argn("maxmem", &baMaxMem, sizeof(baMaxMem)))
        maxMem = 0;
    else
        maxMem = (uint64_t) baMaxMem * (1024 * 1024);

    /*
     * After this, we'll no longer be using physical mappings created
     * by the bootloader.
     */
    arm_vm_init(maxMem, args);

    /*
     * Kernel early bootstrap.
     */
    kernel_early_bootstrap();

    /*
     * PE platform init.
     */
    PE_init_platform(TRUE, (void *) args);

    /*
     * Enable I+D cache.
     */
    char tempbuf[16];

    if (PE_parse_boot_argn("-no-cache", tempbuf, sizeof(tempbuf))) {
        kprintf("cache: No caching enabled (I+D).\n");
    } else {
        kprintf("cache: initializing i+dcache\n");
        cache_initialize();
        kprintf("cache: done\n");
    }

    /*
     * Start system timers.
     */
    thread = current_thread();
    thread->machine.preempt_count = 1;
    thread->machine.cpu_data = cpu_datap(cpu_number());
    thread->kernel_stack = irqstack;
    timer_start(&thread->system_timer, mach_absolute_time());

    /*
     * VFP/float initialization.
     */
    init_vfp();

    /*
     * Machine startup.
     */
    machine_startup();

    /*
     * If anything returns, bad things(tm) have happened.
     */
    PE_early_puts("arm_init: Still alive\n");
    panic("why are we still here, NOO");

    while (1)
        ;
}