/*!	Flushes the TLB entries for all pages queued in fInvalidPages.

	If more than PAGE_INVALIDATE_CACHE_SIZE pages are pending, the whole TLB
	is invalidated instead of invalidating individual entries. Kernel maps
	broadcast the invalidation to all CPUs; user maps only notify the CPUs
	this address space is currently active on. Resets fInvalidPagesCount to 0.
*/
void M68KVMTranslationMap::Flush() {
	if (fInvalidPagesCount <= 0)
		return;

	// Pin to the current CPU so the local TLB we invalidate below is the one
	// this thread keeps running on until the remote CPUs have been notified.
	Thread* thread = thread_get_current_thread();
	thread_pin_to_current_cpu(thread);

	if (fInvalidPagesCount > PAGE_INVALIDATE_CACHE_SIZE) {
		// invalidate all pages -- cheaper than a long list of single entries
		TRACE("flush_tmap: %d pages to invalidate, invalidate all\n",
			fInvalidPagesCount);

		if (fIsKernelMap) {
			// kernel pages are mapped on every CPU: invalidate locally, then
			// synchronously tell all other CPUs to do the same
			arch_cpu_global_TLB_invalidate();
			smp_send_broadcast_ici(SMP_MSG_GLOBAL_INVALIDATE_PAGES, 0, 0, 0,
				NULL, SMP_MSG_FLAG_SYNC);
		} else {
			// user map: drop only user-space TLB entries locally; interrupts
			// are disabled around the invalidate (presumably so an interrupt
			// cannot repopulate/observe a half-flushed user TLB -- NOTE(review):
			// confirm against the arch_cpu_user_TLB_invalidate() contract)
			cpu_status state = disable_interrupts();
			arch_cpu_user_TLB_invalidate();
			restore_interrupts(state);

			// notify only the other CPUs this address space is active on
			int cpu = smp_get_current_cpu();
			CPUSet cpuMask = PagingStructures()->active_on_cpus;
			cpuMask.ClearBit(cpu);
				// we already handled the local CPU above

			if (!cpuMask.IsEmpty()) {
				smp_send_multicast_ici(cpuMask, SMP_MSG_USER_INVALIDATE_PAGES,
					0, 0, 0, NULL, SMP_MSG_FLAG_SYNC);
			}
		}
	} else {
		// few enough pages: invalidate the queued entries one by one
		TRACE("flush_tmap: %d pages to invalidate, invalidate list\n",
			fInvalidPagesCount);

		arch_cpu_invalidate_TLB_list(fInvalidPages, fInvalidPagesCount);

		if (fIsKernelMap) {
			// pass the page list to all other CPUs; SMP_MSG_FLAG_SYNC makes
			// the sender wait, so fInvalidPages stays valid while they read it
			smp_send_broadcast_ici(SMP_MSG_INVALIDATE_PAGE_LIST,
				(addr_t)fInvalidPages, fInvalidPagesCount, 0, NULL,
				SMP_MSG_FLAG_SYNC);
		} else {
			// user map: again only the CPUs this space is active on
			int cpu = smp_get_current_cpu();
			CPUSet cpuMask = PagingStructures()->active_on_cpus;
			cpuMask.ClearBit(cpu);

			if (!cpuMask.IsEmpty()) {
				smp_send_multicast_ici(cpuMask, SMP_MSG_INVALIDATE_PAGE_LIST,
					(addr_t)fInvalidPages, fInvalidPagesCount, 0, NULL,
					SMP_MSG_FLAG_SYNC);
			}
		}
	}

	// queue has been flushed on all relevant CPUs
	fInvalidPagesCount = 0;

	thread_unpin_from_current_cpu(thread);
}
/*!	Kernel entry point, called by the boot loader on every CPU.

	CPU 0 (the boot CPU) runs the full kernel initialization sequence and
	then releases the application processors (APs); the APs wait in
	smp_trap_non_boot_cpus() until set loose, do their per-CPU setup, and
	rendezvous with the boot CPU before entering the scheduler. All CPUs
	end up in the idle loop at the bottom and never return.

	\param bootKernelArgs Arguments handed over by the boot loader; copied
		into sKernelArgs because the original lives in non-allocated memory.
	\param currentCPU Index of the CPU executing this call (0 = boot CPU).
	\return -1 on a boot-loader/kernel version mismatch; otherwise does not
		return (idle loop).
*/
extern "C" int _start(kernel_args *bootKernelArgs, int currentCPU) {
	if (bootKernelArgs->kernel_args_size != sizeof(kernel_args)
		|| bootKernelArgs->version != CURRENT_KERNEL_ARGS_VERSION) {
		// This is something we cannot handle right now - release kernels
		// should always be able to handle the kernel_args of earlier
		// released kernels.
		debug_early_boot_message("Version mismatch between boot loader and "
			"kernel!\n");
		return -1;
	}

	smp_set_num_cpus(bootKernelArgs->num_cpus);

	// wait for all the cpus to get here
	smp_cpu_rendezvous(&sCpuRendezvous, currentCPU);

	// the passed in kernel args are in a non-allocated range of memory
	// -- only the boot CPU copies them; the rendezvous below makes sure no
	// CPU reads sKernelArgs before the copy is complete
	if (currentCPU == 0)
		memcpy(&sKernelArgs, bootKernelArgs, sizeof(kernel_args));

	smp_cpu_rendezvous(&sCpuRendezvous2, currentCPU);

	// do any pre-booting cpu config
	cpu_preboot_init_percpu(&sKernelArgs, currentCPU);
	thread_preboot_init_percpu(&sKernelArgs, currentCPU);

	// if we're not a boot cpu, spin here until someone wakes us up
	if (smp_trap_non_boot_cpus(currentCPU, &sCpuRendezvous3)) {
		// boot-CPU path: bring up every kernel subsystem, strictly in
		// dependency order (do not reorder these calls)

		// init platform
		arch_platform_init(&sKernelArgs);

		// setup debug output
		debug_init(&sKernelArgs);
		set_dprintf_enabled(true);
		dprintf("Welcome to kernel debugger output!\n");
		dprintf("Haiku revision: %lu\n", get_haiku_revision());

		// init modules
		TRACE("init CPU\n");
		cpu_init(&sKernelArgs);
		cpu_init_percpu(&sKernelArgs, currentCPU);
		TRACE("init interrupts\n");
		int_init(&sKernelArgs);

		TRACE("init VM\n");
		vm_init(&sKernelArgs);
			// Before vm_init_post_sem() is called, we have to make sure that
			// the boot loader allocated region is not used anymore
		boot_item_init();
		debug_init_post_vm(&sKernelArgs);
		low_resource_manager_init();

		// now we can use the heap and create areas
		arch_platform_init_post_vm(&sKernelArgs);
		lock_debug_init();
		TRACE("init driver_settings\n");
		driver_settings_init(&sKernelArgs);
		debug_init_post_settings(&sKernelArgs);
		TRACE("init notification services\n");
		notifications_init();
		TRACE("init teams\n");
		team_init(&sKernelArgs);

		TRACE("init ELF loader\n");
		elf_init(&sKernelArgs);
		TRACE("init modules\n");
		module_init(&sKernelArgs);
		TRACE("init semaphores\n");
		haiku_sem_init(&sKernelArgs);
		TRACE("init interrupts post vm\n");
		int_init_post_vm(&sKernelArgs);
		cpu_init_post_vm(&sKernelArgs);
		commpage_init();
		TRACE("init system info\n");
		system_info_init(&sKernelArgs);

		TRACE("init SMP\n");
		smp_init(&sKernelArgs);
		TRACE("init timer\n");
		timer_init(&sKernelArgs);
		TRACE("init real time clock\n");
		rtc_init(&sKernelArgs);

		TRACE("init condition variables\n");
		condition_variable_init();

		// now we can create and use semaphores
		TRACE("init VM semaphores\n");
		vm_init_post_sem(&sKernelArgs);
		TRACE("init generic syscall\n");
		generic_syscall_init();
		smp_init_post_generic_syscalls();
		TRACE("init scheduler\n");
		scheduler_init();
		TRACE("init threads\n");
		thread_init(&sKernelArgs);
		TRACE("init kernel daemons\n");
		kernel_daemon_init();
		arch_platform_init_post_thread(&sKernelArgs);
		TRACE("init I/O interrupts\n");
		int_init_io(&sKernelArgs);
		TRACE("init VM threads\n");
		vm_init_post_thread(&sKernelArgs);
		low_resource_manager_init_post_thread();
		TRACE("init VFS\n");
		vfs_init(&sKernelArgs);
#if ENABLE_SWAP_SUPPORT
		TRACE("init swap support\n");
		swap_init();
#endif
		TRACE("init POSIX semaphores\n");
		realtime_sem_init();
		xsi_sem_init();
		xsi_msg_init();

		// Start a thread to finish initializing the rest of the system. Note,
		// it won't be scheduled before calling scheduler_start() (on any CPU).
		TRACE("spawning main2 thread\n");
		thread_id thread = spawn_kernel_thread(&main2, "main2",
			B_NORMAL_PRIORITY, NULL);
		resume_thread(thread);

		// We're ready to start the scheduler and enable interrupts on all CPUs.
		scheduler_enable_scheduling();

		// bring up the AP cpus in a lock step fashion
		TRACE("waking up AP cpus\n");
		sCpuRendezvous = sCpuRendezvous2 = 0;
			// reuse the rendezvous counters for the AP boot handshake
		smp_wake_up_non_boot_cpus();
		smp_cpu_rendezvous(&sCpuRendezvous, 0); // wait until they're booted

		// exit the kernel startup phase (mutexes, etc work from now on out)
		TRACE("exiting kernel startup\n");
		gKernelStartup = false;

		smp_cpu_rendezvous(&sCpuRendezvous2, 0);
			// release the AP cpus to go enter the scheduler

		TRACE("starting scheduler on cpu 0 and enabling interrupts\n");
		scheduler_start();
		enable_interrupts();
	} else {
		// AP path, run after the boot CPU has set us loose

		// let's make sure we're in sync with the main cpu
		// the boot processor has probably been sending us
		// tlb sync messages all along the way, but we've
		// been ignoring them
		arch_cpu_global_TLB_invalidate();

		// this is run for each non boot processor after they've been set loose
		cpu_init_percpu(&sKernelArgs, currentCPU);
		smp_per_cpu_init(&sKernelArgs, currentCPU);

		// wait for all other AP cpus to get to this point
		smp_cpu_rendezvous(&sCpuRendezvous, currentCPU);
		smp_cpu_rendezvous(&sCpuRendezvous2, currentCPU);

		// welcome to the machine
		scheduler_start();
		enable_interrupts();
	}

#ifdef TRACE_BOOT
	// We disable interrupts for this dprintf(), since otherwise dprintf()
	// would acquire a mutex, which is something we must not do in an idle
	// thread, or otherwise the scheduler would be seriously unhappy.
	disable_interrupts();
	TRACE("main: done... begin idle loop on cpu %d\n", currentCPU);
	enable_interrupts();
#endif

	// every CPU becomes an idle thread from here on
	for (;;)
		arch_cpu_idle();

	return 0;
}
/*!	Handles one pending inter-CPU interrupt message for \a currentCPU.

	Fetches the next message from the CPU's mailboxes, dispatches on its
	type (TLB invalidation, halt, function call, reschedule requests), and
	then releases the message.

	\param currentCPU The index of the CPU processing the message.
	\return \c B_ENTRY_NOT_FOUND if no message was pending, \c B_OK after a
		message has been processed.
*/
static status_t
process_pending_ici(int32 currentCPU)
{
	mailbox_source mailbox;
	struct smp_msg* message = check_for_message(currentCPU, mailbox);
	if (message == NULL)
		return B_ENTRY_NOT_FOUND;

	TRACE((" cpu %ld message = %ld\n", currentCPU, message->message));

	// set if the message asks this CPU to drop into the kernel debugger
	bool shouldHalt = false;

	const int32 what = message->message;
	if (what == SMP_MSG_INVALIDATE_PAGE_RANGE) {
		arch_cpu_invalidate_TLB_range(message->data, message->data2);
	} else if (what == SMP_MSG_INVALIDATE_PAGE_LIST) {
		arch_cpu_invalidate_TLB_list((addr_t*)message->data,
			(int)message->data2);
	} else if (what == SMP_MSG_USER_INVALIDATE_PAGES) {
		arch_cpu_user_TLB_invalidate();
	} else if (what == SMP_MSG_GLOBAL_INVALIDATE_PAGES) {
		arch_cpu_global_TLB_invalidate();
	} else if (what == SMP_MSG_CPU_HALT) {
		// defer the actual trap until the message has been released below
		shouldHalt = true;
	} else if (what == SMP_MSG_CALL_FUNCTION) {
		// invoke the function the sender packed into the message
		smp_call_func func = (smp_call_func)message->data_ptr;
		func(message->data, currentCPU, message->data2, message->data3);
	} else if (what == SMP_MSG_RESCHEDULE) {
		cpu_ent* cpu = thread_get_current_thread()->cpu;
		cpu->invoke_scheduler = true;
		cpu->invoke_scheduler_if_idle = false;
	} else if (what == SMP_MSG_RESCHEDULE_IF_IDLE) {
		cpu_ent* cpu = thread_get_current_thread()->cpu;
		// don't weaken an already pending unconditional reschedule
		if (!cpu->invoke_scheduler) {
			cpu->invoke_scheduler = true;
			cpu->invoke_scheduler_if_idle = true;
		}
	} else {
		dprintf("smp_intercpu_int_handler: got unknown message %" B_PRId32
			"\n", what);
	}

	// finish dealing with this message, possibly removing it from the list
	finish_message_processing(currentCPU, message, mailbox);

	// special case for the halt message
	if (shouldHalt)
		debug_trap_cpu_in_kdl(currentCPU, false);

	return B_OK;
}