void
M68KVMTranslationMap::Flush()
{
	// Flushes the TLB entries for all pages queued in fInvalidPages, both on
	// the current CPU and — via inter-CPU interrupts (ICIs) — on every other
	// CPU that may still hold stale entries for this map.
	if (fInvalidPagesCount <= 0)
		return;

	// Pin to the current CPU so we cannot migrate between the local
	// invalidation and the ICIs sent to the remaining CPUs.
	Thread* thread = thread_get_current_thread();
	thread_pin_to_current_cpu(thread);

	if (fInvalidPagesCount > PAGE_INVALIDATE_CACHE_SIZE) {
		// invalidate all pages
		// Too many queued pages for per-page invalidation to pay off:
		// drop the whole TLB instead.
		TRACE("flush_tmap: %d pages to invalidate, invalidate all\n",
			fInvalidPagesCount);

		if (fIsKernelMap) {
			// Kernel map: flush the entire TLB locally, then have every
			// other CPU do the same, waiting for completion (SYNC flag).
			arch_cpu_global_TLB_invalidate();
			smp_send_broadcast_ici(SMP_MSG_GLOBAL_INVALIDATE_PAGES, 0, 0, 0,
				NULL, SMP_MSG_FLAG_SYNC);
		} else {
			// User map: flush the user-space TLB entries locally with
			// interrupts disabled ...
			cpu_status state = disable_interrupts();
			arch_cpu_user_TLB_invalidate();
			restore_interrupts(state);

			// ... then notify only those CPUs this address space is
			// currently active on, excluding the current CPU, which was
			// handled above.
			int cpu = smp_get_current_cpu();
			CPUSet cpuMask = PagingStructures()->active_on_cpus;
			cpuMask.ClearBit(cpu);

			if (!cpuMask.IsEmpty()) {
				smp_send_multicast_ici(cpuMask, SMP_MSG_USER_INVALIDATE_PAGES,
					0, 0, 0, NULL, SMP_MSG_FLAG_SYNC);
			}
		}
	} else {
		// Few enough pages: invalidate exactly the queued addresses.
		TRACE("flush_tmap: %d pages to invalidate, invalidate list\n",
			fInvalidPagesCount);

		arch_cpu_invalidate_TLB_list(fInvalidPages, fInvalidPagesCount);

		if (fIsKernelMap) {
			// Kernel map: every CPU must invalidate the listed pages. The
			// other CPUs receive fInvalidPages by address; SMP_MSG_FLAG_SYNC
			// presumably keeps the buffer valid until they have read it —
			// verify against the smp_send_*_ici semantics.
			smp_send_broadcast_ici(SMP_MSG_INVALIDATE_PAGE_LIST,
				(addr_t)fInvalidPages, fInvalidPagesCount, 0, NULL,
				SMP_MSG_FLAG_SYNC);
		} else {
			// User map: again restrict the ICI to the CPUs the address space
			// is active on, minus the current (already flushed) one.
			int cpu = smp_get_current_cpu();
			CPUSet cpuMask = PagingStructures()->active_on_cpus;
			cpuMask.ClearBit(cpu);

			if (!cpuMask.IsEmpty()) {
				smp_send_multicast_ici(cpuMask, SMP_MSG_INVALIDATE_PAGE_LIST,
					(addr_t)fInvalidPages, fInvalidPagesCount, 0, NULL,
					SMP_MSG_FLAG_SYNC);
			}
		}
	}

	// Queue drained; it is safe to migrate again.
	fInvalidPagesCount = 0;

	thread_unpin_from_current_cpu(thread);
}
void call_all_cpus_sync(void (*func)(void*, int), void* cookie) { cpu_status state = disable_interrupts(); // if inter-CPU communication is not yet enabled, use the early mechanism if (!sICIEnabled) { call_all_cpus_early(func, cookie); restore_interrupts(state); return; } if (smp_get_num_cpus() > 1) { smp_send_broadcast_ici(SMP_MSG_CALL_FUNCTION, (addr_t)cookie, 0, 0, (void*)func, SMP_MSG_FLAG_SYNC); } // we need to call this function ourselves as well func(cookie, smp_get_current_cpu()); restore_interrupts(state); }