/*
 * Source CPU calls into this - it waits for the freshly booted
 * target CPU to arrive and then starts the measurement:
 */
void __cpuinit check_tsc_sync_source(int cpu)
{
        unsigned long flags;
        int cpus = 2;

        /*
         * No need to check if we already know that the TSC is not
         * synchronized:
         */
        if (unsynchronized_tsc())
                return;

        printk(KERN_INFO "checking TSC synchronization [CPU#%d -> CPU#%d]:",
                          smp_processor_id(), cpu);

        /*
         * Reset it - in case this is a second bootup:
         */
        atomic_set(&stop_count, 0);

        /*
         * Wait for the target to arrive:
         */
        local_save_flags(flags);
        local_irq_enable();
        while (atomic_read(&start_count) != cpus-1)
                cpu_relax();
        local_irq_restore(flags);

        /*
         * Trigger the target to continue into the measurement too:
         */
        atomic_inc(&start_count);

        check_tsc_warp();

        while (atomic_read(&stop_count) != cpus-1)
                cpu_relax();

        /*
         * Reset it - just in case we boot another CPU later:
         */
        atomic_set(&start_count, 0);

        if (nr_warps) {
                printk("\n");
                printk(KERN_WARNING "Measured %Ld cycles TSC warp between CPUs,"
                                    " turning off TSC clock.\n", max_warp);
                mark_tsc_unstable();
                nr_warps = 0;
                max_warp = 0;
                last_tsc = 0;
        } else {
                printk(" passed.\n");
        }

        /*
         * Let the target continue with the bootup:
         */
        atomic_inc(&stop_count);
}
/*
 * Freshly booted CPUs call into this:
 */
void __cpuinit check_tsc_sync_target(void)
{
        int cpus = 2;

        if (unsynchronized_tsc() || boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
                return;

        /*
         * Register this CPU's participation and wait for the
         * source CPU to start the measurement:
         */
        atomic_inc(&start_count);
        while (atomic_read(&start_count) != cpus)
                cpu_relax();

        check_tsc_warp();

        /*
         * Ok, we are done:
         */
        atomic_inc(&stop_count);

        /*
         * Wait for the source CPU to print stuff:
         */
        while (atomic_read(&stop_count) != cpus)
                cpu_relax();
}
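/*
 * check_tsc_warp() itself is defined elsewhere in tsc_sync.c and is not
 * shown here. As an illustration only, its core idea can be sketched as
 * a user-space analogue with pthreads: every participant writes rdtsc
 * values through a shared, locked last_tsc, and a warp is any read that
 * comes out lower than the previously stored value. This is a
 * hypothetical, simplified sketch (no rdtsc_ordered() serialization, no
 * timeout handling, no CPU pinning), not the kernel implementation.
 * Build with: cc -O2 warp.c -lpthread (file name is illustrative).
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <x86intrin.h>

static pthread_mutex_t sync_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t last_tsc;
static uint64_t max_warp;
static int nr_warps;

static void *warp_check(void *arg)
{
        (void)arg;
        for (int i = 0; i < 10000000; i++) {
                pthread_mutex_lock(&sync_lock);
                uint64_t prev = last_tsc;       /* other thread's last stamp */
                uint64_t now = __rdtsc();
                last_tsc = now;
                pthread_mutex_unlock(&sync_lock);

                if (prev > now) {               /* time went backwards: warp */
                        pthread_mutex_lock(&sync_lock);
                        if (prev - now > max_warp)
                                max_warp = prev - now;
                        nr_warps++;
                        pthread_mutex_unlock(&sync_lock);
                }
        }
        return NULL;
}

int main(void)
{
        pthread_t t1, t2;

        /* Pinning the two threads to different CPUs (sched_setaffinity)
         * is omitted for brevity, but is required for a meaningful test. */
        pthread_create(&t1, NULL, warp_check, NULL);
        pthread_create(&t2, NULL, warp_check, NULL);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);

        if (nr_warps)
                printf("Measured %llu cycles TSC warp (%d warps)\n",
                       (unsigned long long)max_warp, nr_warps);
        else
                printf("passed\n");
        return 0;
}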
/*
 * Freshly booted CPUs call into this:
 */
void __cpuinit check_tsc_sync_target(void)
{
        int cpus = 2;

        if (unsynchronized_tsc() || tsc_clocksource_reliable)
                return;

        /*
         * Register this CPU's participation and wait for the
         * source CPU to start the measurement:
         */
        atomic_inc(&start_count);
        while (atomic_read(&start_count) != cpus)
                cpu_relax();

        check_tsc_warp(loop_timeout(smp_processor_id()));

        /*
         * Ok, we are done:
         */
        atomic_inc(&stop_count);

        /*
         * Wait for the source CPU to print stuff:
         */
        while (atomic_read(&stop_count) != cpus)
                cpu_relax();
}
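/*
 * loop_timeout() above picks the duration (in msec) of the warp
 * measurement. A plausible definition, sketched here for reference in
 * the spirit of mainline tsc_sync.c: measure for only 2 msec when a
 * core-sibling of the incoming CPU is already online (that socket has
 * been exercised before), and for a full 20 msec otherwise.
 */
static inline unsigned int loop_timeout(int cpu)
{
        return (cpumask_weight(topology_core_cpumask(cpu)) > 1) ? 2 : 20;
}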
/*
 * Freshly booted CPUs call into this:
 */
void check_tsc_sync_target(void)
{
        struct tsc_adjust *cur = this_cpu_ptr(&tsc_adjust);
        unsigned int cpu = smp_processor_id();
        cycles_t cur_max_warp, gbl_max_warp;
        int cpus = 2;

        /* Also aborts if there is no TSC. */
        if (unsynchronized_tsc())
                return;

        /*
         * Store, verify and sanitize the TSC adjust register. If
         * successful, skip the test.
         *
         * The test is also skipped when the TSC is marked reliable. This
         * is true for SoCs which have no fallback clocksource. On these
         * SoCs the TSC is frequency synchronized, but still the TSC ADJUST
         * register might have been wrecked by the BIOS.
         */
        if (tsc_store_and_check_tsc_adjust(false) || tsc_clocksource_reliable) {
                atomic_inc(&skip_test);
                return;
        }

retry:
        /*
         * Register this CPU's participation and wait for the
         * source CPU to start the measurement:
         */
        atomic_inc(&start_count);
        while (atomic_read(&start_count) != cpus)
                cpu_relax();

        cur_max_warp = check_tsc_warp(loop_timeout(cpu));

        /*
         * Store the maximum observed warp value for a potential retry:
         */
        gbl_max_warp = max_warp;

        /*
         * Ok, we are done:
         */
        atomic_inc(&stop_count);

        /*
         * Wait for the source CPU to print stuff:
         */
        while (atomic_read(&stop_count) != cpus)
                cpu_relax();

        /*
         * Reset it for the next sync test:
         */
        atomic_set(&stop_count, 0);

        /*
         * Check the number of remaining test runs. If not zero, the test
         * failed and a retry with adjusted TSC is possible. If zero the
         * test was either successful or failed terminally.
         */
        if (!atomic_read(&test_runs))
                return;

        /*
         * If the warp value of this CPU is 0, then the other CPU
         * observed time going backwards so this TSC was ahead and
         * needs to move backwards.
         */
        if (!cur_max_warp)
                cur_max_warp = -gbl_max_warp;

        /*
         * Add the result to the previous adjustment value.
         *
         * The adjustment value is slightly off by the overhead of the
         * sync mechanism (observed values are ~200 TSC cycles), but this
         * really depends on CPU, node distance and frequency. So
         * compensating for this is hard to get right. Experiments show
         * that the warp is no longer detectable when the observed warp
         * value is used. In the worst case the adjustment needs to go
         * through a 3rd run for fine tuning.
         */
        cur->adjusted += cur_max_warp;

        pr_warn("TSC ADJUST compensate: CPU%u observed %lld warp. Adjust: %lld\n",
                cpu, cur_max_warp, cur->adjusted);

        wrmsrl(MSR_IA32_TSC_ADJUST, cur->adjusted);
        goto retry;
}
/*
 * Source CPU calls into this - it waits for the freshly booted
 * target CPU to arrive and then starts the measurement:
 */
void check_tsc_sync_source(int cpu)
{
        int cpus = 2;

        /*
         * No need to check if we already know that the TSC is not
         * synchronized or if we have no TSC.
         */
        if (unsynchronized_tsc())
                return;

        /*
         * Set the maximum number of test runs to
         *  1 if the CPU does not provide the TSC_ADJUST MSR
         *  3 if the MSR is available, so the target can try to adjust
         */
        if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
                atomic_set(&test_runs, 1);
        else
                atomic_set(&test_runs, 3);
retry:
        /*
         * Wait for the target to start or to skip the test:
         */
        while (atomic_read(&start_count) != cpus - 1) {
                if (atomic_read(&skip_test) > 0) {
                        atomic_set(&skip_test, 0);
                        return;
                }
                cpu_relax();
        }

        /*
         * Trigger the target to continue into the measurement too:
         */
        atomic_inc(&start_count);

        check_tsc_warp(loop_timeout(cpu));

        while (atomic_read(&stop_count) != cpus-1)
                cpu_relax();

        /*
         * If the test was successful set the number of runs to zero and
         * stop. If not, decrement the number of runs and check if we can
         * retry. In case of random warps no retry is attempted.
         */
        if (!nr_warps) {
                atomic_set(&test_runs, 0);

                pr_debug("TSC synchronization [CPU#%d -> CPU#%d]: passed\n",
                        smp_processor_id(), cpu);

        } else if (atomic_dec_and_test(&test_runs) || random_warps) {
                /* Force it to 0 if random warps brought us here */
                atomic_set(&test_runs, 0);

                pr_warning("TSC synchronization [CPU#%d -> CPU#%d]:\n",
                        smp_processor_id(), cpu);
                pr_warning("Measured %Ld cycles TSC warp between CPUs, "
                           "turning off TSC clock.\n", max_warp);
                if (random_warps)
                        pr_warning("TSC warped randomly between CPUs\n");
                mark_tsc_unstable("check_tsc_sync_source failed");
        }

        /*
         * Reset it - just in case we boot another CPU later:
         */
        atomic_set(&start_count, 0);
        random_warps = 0;
        nr_warps = 0;
        max_warp = 0;
        last_tsc = 0;

        /*
         * Let the target continue with the bootup:
         */
        atomic_inc(&stop_count);

        /*
         * Retry, if there is a chance to do so.
         */
        if (atomic_read(&test_runs) > 0)
                goto retry;
}
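/*
 * The source and target functions above communicate through file-scope
 * state in tsc_sync.c. A sketch of the declarations they rely on (names
 * and types as used above; alignment annotations and comments
 * abbreviated, so treat this as a reference sketch rather than verbatim
 * source):
 */
static atomic_t start_count;    /* participants that reached the rendezvous */
static atomic_t stop_count;     /* participants that finished measuring */
static atomic_t skip_test;      /* target asked the source to skip the test */
static atomic_t test_runs;      /* remaining measurement attempts */

/* Raw spinlock: the fastest, inlined, non-debug critical section,
 * so the lock overhead does not mask genuine TSC time-warps. */
static arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED;

static cycles_t last_tsc;       /* last TSC value stored by either CPU */
static cycles_t max_warp;       /* largest backwards step observed */
static int nr_warps;            /* number of warps observed */
static int random_warps;        /* warps in both directions: unfixable */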
/*
 * Source CPU calls into this - it waits for the freshly booted
 * target CPU to arrive and then starts the measurement:
 */
void __cpuinit check_tsc_sync_source(int cpu)
{
        int cpus = 2;

        /*
         * No need to check if we already know that the TSC is not
         * synchronized:
         */
        if (unsynchronized_tsc())
                return;

        if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) {
                printk(KERN_INFO
                       "Skipping synchronization checks as TSC is reliable.\n");
                return;
        }

        printk(KERN_INFO "checking TSC synchronization [CPU#%d -> CPU#%d]:",
                          smp_processor_id(), cpu);

        /*
         * Reset it - in case this is a second bootup:
         */
        atomic_set(&stop_count, 0);

        /*
         * Wait for the target to arrive:
         */
        while (atomic_read(&start_count) != cpus-1)
                cpu_relax();
        /*
         * Trigger the target to continue into the measurement too:
         */
        atomic_inc(&start_count);

        check_tsc_warp();

        while (atomic_read(&stop_count) != cpus-1)
                cpu_relax();

        if (nr_warps) {
                printk("\n");
                printk(KERN_WARNING "Measured %Ld cycles TSC warp between CPUs,"
                                    " turning off TSC clock.\n", max_warp);
                mark_tsc_unstable("check_tsc_sync_source failed");
        } else {
                printk(" passed.\n");
        }

        /*
         * Reset it - just in case we boot another CPU later:
         */
        atomic_set(&start_count, 0);
        nr_warps = 0;
        max_warp = 0;
        last_tsc = 0;

        /*
         * Let the target continue with the bootup:
         */
        atomic_inc(&stop_count);
}
/*
 * Source CPU calls into this - it waits for the freshly booted
 * target CPU to arrive and then starts the measurement:
 */
void __cpuinit check_tsc_sync_source(int cpu)
{
        int cpus = 2;

        /*
         * No need to check if we already know that the TSC is not
         * synchronized:
         */
        if (unsynchronized_tsc())
                return;

        if (tsc_clocksource_reliable) {
                if (cpu == (nr_cpu_ids-1) || system_state != SYSTEM_BOOTING)
                        pr_info("Skipped synchronization checks as TSC is reliable.\n");
                return;
        }

        /*
         * Reset it - in case this is a second bootup:
         */
        atomic_set(&stop_count, 0);

        /*
         * Wait for the target to arrive:
         */
        while (atomic_read(&start_count) != cpus-1)
                cpu_relax();
        /*
         * Trigger the target to continue into the measurement too:
         */
        atomic_inc(&start_count);

        check_tsc_warp(loop_timeout(cpu));

        while (atomic_read(&stop_count) != cpus-1)
                cpu_relax();

        if (nr_warps) {
                pr_warning("TSC synchronization [CPU#%d -> CPU#%d]:\n",
                           smp_processor_id(), cpu);
                pr_warning("Measured %Ld cycles TSC warp between CPUs, "
                           "turning off TSC clock.\n", max_warp);
                mark_tsc_unstable("check_tsc_sync_source failed");
        } else {
                pr_debug("TSC synchronization [CPU#%d -> CPU#%d]: passed\n",
                         smp_processor_id(), cpu);
        }

        /*
         * Reset it - just in case we boot another CPU later:
         */
        atomic_set(&start_count, 0);
        nr_warps = 0;
        max_warp = 0;
        last_tsc = 0;

        /*
         * Let the target continue with the bootup:
         */
        atomic_inc(&stop_count);
}