static void
stop_other_cpus(void)
{
        ulong_t s = clear_int_flag(); /* fast way to keep CPU from changing */
        cpuset_t xcset;

        CPUSET_ALL_BUT(xcset, CPU->cpu_id);
        xc_priority(0, 0, 0, CPUSET2BV(xcset), (xc_func_t)mach_cpu_halt);
        restore_int_flag(s);
}
int
sysp_ischar()
{
        int i;
        ulong_t s;

        if (cons_polledio == NULL ||
            cons_polledio->cons_polledio_ischar == NULL)
                return (0);

        s = clear_int_flag();
        i = cons_polledio->cons_polledio_ischar(
            cons_polledio->cons_polledio_argument);
        restore_int_flag(s);
        return (i);
}
void
sysp_putchar(int c)
{
        ulong_t s;

        /*
         * We have no alternative but to drop the output on the floor.
         */
        if (cons_polledio == NULL ||
            cons_polledio->cons_polledio_putchar == NULL)
                return;

        s = clear_int_flag();
        cons_polledio->cons_polledio_putchar(
            cons_polledio->cons_polledio_argument, c);
        restore_int_flag(s);
}
int
sysp_getchar()
{
        int i;
        ulong_t s;

        if (cons_polledio == NULL) {
                /* Uh oh */
                prom_printf("getchar called with no console\n");
                for (;;)        /* LOOP FOREVER */;
        }

        s = clear_int_flag();
        i = cons_polledio->cons_polledio_getchar(
            cons_polledio->cons_polledio_argument);
        restore_int_flag(s);
        return (i);
}
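/*
 * Editorial sketch: sysp_echo_pending() below is a hypothetical helper, not
 * part of the original source.  It only illustrates how the polled-console
 * primitives above compose: sysp_ischar() polls for pending input without
 * blocking, sysp_getchar() fetches a character (spinning forever if no
 * console is registered), and sysp_putchar() echoes it back, silently
 * dropping output when no polled putchar routine exists.
 */
static void
sysp_echo_pending(void)
{
        /* Drain and echo any input currently pending on the polled console. */
        while (sysp_ischar()) {
                int c = sysp_getchar();

                sysp_putchar(c);
        }
}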
/*
 * Called by a CPU which has just been onlined.  It is expected that the CPU
 * performing the online operation will call tsc_sync_master().
 *
 * TSC sync is disabled in the context of virtualization.  See comments
 * above tsc_sync_master().
 */
void
tsc_sync_slave(void)
{
        ulong_t flags;
        hrtime_t s1;
        tsc_sync_t *tsc = tscp;
        int cnt;
        int hwtype;

        hwtype = get_hwenv();
        if (!tsc_master_slave_sync_needed || hwtype == HW_XEN_HVM ||
            hwtype == HW_VMWARE)
                return;

        flags = clear_int_flag();

        for (cnt = 0; cnt < SYNC_ITERATIONS; cnt++) {
                /* Re-fill the cache line */
                s1 = tsc->master_tsc;
                membar_enter();
                tsc_sync_go = TSC_SYNC_GO;
                do {
                        /*
                         * Do not put an SMT_PAUSE here.  If the master and
                         * slave are hyperthread siblings on the same physical
                         * CPU, we want the master to yield to the slave as
                         * quickly as possible here, but not the other way.
                         */
                        s1 = tsc_read();
                } while (tsc->master_tsc == 0);
                tsc->slave_tsc = s1;
                membar_enter();
                tsc_sync_go = TSC_SYNC_DONE;

                while (tsc_sync_go != TSC_SYNC_STOP)
                        SMT_PAUSE();
        }

        restore_int_flag(flags);
}
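/*
 * Editorial note on the handshake above (derived from tsc_sync_slave() and
 * tsc_sync_master() in this file): each of the SYNC_ITERATIONS rounds
 * proceeds as
 *
 *      slave:  sets tsc_sync_go = TSC_SYNC_GO, then spins reading its TSC
 *              until the master's write to tsc->master_tsc becomes visible
 *      master: on seeing TSC_SYNC_GO, writes tsc->master_tsc, issues
 *              membar_enter(), and records mtsc_after = tsc_read()
 *      slave:  stores the TSC value it read when the line changed into
 *              tsc->slave_tsc and sets tsc_sync_go = TSC_SYNC_DONE
 *      master: computes the write time and candidate skew, clears the
 *              shared fields, and sets tsc_sync_go = TSC_SYNC_STOP, which
 *              releases the slave into its next iteration.
 */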
hrtime_t
tsc_gethrtimeunscaled_delta(void)
{
        hrtime_t hrt;
        ulong_t flags;

        /*
         * Just as in tsc_gethrtime_delta(), we need to prevent migration
         * between the call to tsc_gethrtimeunscaled() and the addition of
         * the CPU's hrtime delta.  Disabling and reenabling preemption is
         * not an option here because we may be in the middle of a fast
         * trap; in the amd64 kernel we cannot tolerate preemption during a
         * fast trap (see _update_sregs()), so we disable interrupts instead.
         */
        flags = clear_int_flag();
        hrt = tsc_gethrtimeunscaled() + tsc_sync_tick_delta[CPU->cpu_id];
        restore_int_flag(flags);

        return (hrt);
}
hrtime_t
tsc_gethrtime_delta(void)
{
        uint32_t old_hres_lock;
        hrtime_t tsc, hrt;
        ulong_t flags;

        do {
                old_hres_lock = hres_lock;

                /*
                 * We need to disable interrupts here to assure that we
                 * don't migrate between the call to tsc_read() and
                 * adding the CPU's TSC tick delta.  Note that disabling
                 * and reenabling preemption is forbidden here because
                 * we may be in the middle of a fast trap.  In the amd64
                 * kernel we cannot tolerate preemption during a fast
                 * trap.  See _update_sregs().
                 */
                flags = clear_int_flag();
                tsc = tsc_read() + tsc_sync_tick_delta[CPU->cpu_id];
                restore_int_flag(flags);

                /* See comments in tsc_gethrtime() above */
                if (tsc >= tsc_last) {
                        tsc -= tsc_last;
                } else if (tsc >= tsc_last - 2 * tsc_max_delta) {
                        tsc = 0;
                }

                hrt = tsc_hrtime_base;

                TSC_CONVERT_AND_ADD(tsc, hrt, nsec_scale);
        } while ((old_hres_lock & ~1) != hres_lock);

        return (hrt);
}
void
tsc_hrtimeinit(uint64_t cpu_freq_hz)
{
        extern int gethrtime_hires;
        longlong_t tsc;
        ulong_t flags;

        /*
         * cpu_freq_hz is the measured cpu frequency in hertz.
         */

        /*
         * We can't accommodate CPUs slower than 31.25 MHz.
         */
        ASSERT(cpu_freq_hz > NANOSEC / (1 << NSEC_SHIFT));
        nsec_scale =
            (uint_t)(((uint64_t)NANOSEC << (32 - NSEC_SHIFT)) / cpu_freq_hz);
        nsec_unscale =
            (uint_t)(((uint64_t)cpu_freq_hz << (32 - NSEC_SHIFT)) / NANOSEC);

        flags = clear_int_flag();
        tsc = tsc_read();
        (void) tsc_gethrtime();
        tsc_max_delta = tsc_read() - tsc;
        restore_int_flag(flags);
        gethrtimef = tsc_gethrtime;
        gethrtimeunscaledf = tsc_gethrtimeunscaled;
        scalehrtimef = tsc_scalehrtime;
        unscalehrtimef = tsc_unscalehrtime;
        hrtime_tick = tsc_tick;
        gethrtime_hires = 1;

        /*
         * Allocate memory for the structure used in the tsc sync logic.
         * This structure should be aligned on a multiple of cache line size.
         */
        tscp = kmem_zalloc(PAGESIZE, KM_SLEEP);
}
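/*
 * Editorial sketch: example_tsc_to_nsec() below is hypothetical and not part
 * of the original source.  It spells out the fixed-point scaling that
 * nsec_scale encodes: nsec_scale = NANOSEC * 2^(32 - NSEC_SHIFT) / cpu_freq_hz,
 * so a TSC delta converts to nanoseconds as
 * (delta * nsec_scale) >> (32 - NSEC_SHIFT).  For a 1 GHz TSC, nsec_scale is
 * 2^27 and one tick maps to exactly one nanosecond.  TSC_CONVERT_AND_ADD()
 * applies this same scaling when it folds a delta into an hrtime base; the
 * naive multiply below is only valid while the product fits in 64 bits.
 */
static hrtime_t
example_tsc_to_nsec(uint64_t tsc_delta)
{
        return ((hrtime_t)((tsc_delta * nsec_scale) >> (32 - NSEC_SHIFT)));
}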
/*
 * Called by the master in the TSC sync operation (usually the boot CPU).
 * If the slave is discovered to have a skew, gethrtimef will be changed to
 * point to tsc_gethrtime_delta().  Calculating skews is precise only when
 * the master and slave TSCs are read simultaneously; however, there is no
 * algorithm that can read both CPUs in perfect simultaneity.  The proposed
 * algorithm is an approximate method based on the behaviour of cache
 * management.  The slave CPU continuously reads its TSC and then reads a
 * global variable which the master CPU updates.  The moment the master's
 * update becomes visible to the slave (forced by an mfence operation), the
 * TSC reading just taken on the slave is used.  A corresponding TSC read is
 * taken on the master as soon as possible after the mfence completes.  But
 * the delay between the slave noticing the invalidated cache line and the
 * completion of the mfence is not repeatable.  This error is heuristically
 * assumed to be 1/4th of the total write time, as measured by the two TSC
 * reads on the master that sandwich the mfence.  Furthermore, due to bus
 * arbitration, contention on the memory bus, etc., the time taken for the
 * write to become globally visible can vary considerably.  So instead of
 * taking a single reading, a set of readings is taken and the one with the
 * smallest write time is chosen to calculate the final skew.
 *
 * TSC sync is disabled in the context of virtualization because the CPUs
 * assigned to the guest are virtual CPUs, which means the physical CPUs on
 * which the guest runs can change over the lifetime of the guest OS.  We
 * would end up calculating TSC skews for one set of CPUs at boot, while the
 * guest might later migrate to a different set of physical CPUs.
 */
void
tsc_sync_master(processorid_t slave)
{
        ulong_t flags, source, min_write_time = ~0UL;
        hrtime_t write_time, x, mtsc_after, tdelta;
        tsc_sync_t *tsc = tscp;
        int cnt;
        int hwtype;

        hwtype = get_hwenv();
        if (!tsc_master_slave_sync_needed || hwtype == HW_XEN_HVM ||
            hwtype == HW_VMWARE)
                return;

        flags = clear_int_flag();
        source = CPU->cpu_id;

        for (cnt = 0; cnt < SYNC_ITERATIONS; cnt++) {
                while (tsc_sync_go != TSC_SYNC_GO)
                        SMT_PAUSE();

                tsc->master_tsc = tsc_read();
                membar_enter();
                mtsc_after = tsc_read();
                while (tsc_sync_go != TSC_SYNC_DONE)
                        SMT_PAUSE();
                write_time = mtsc_after - tsc->master_tsc;
                if (write_time <= min_write_time) {
                        min_write_time = write_time;
                        /*
                         * Apply heuristic adjustment only if the calculated
                         * delta is > 1/4th of the write time.
                         */
                        x = tsc->slave_tsc - mtsc_after;
                        if (x < 0)
                                x = -x;
                        if (x > (min_write_time/4))
                                /*
                                 * Subtract 1/4th of the measured write time
                                 * from the master's TSC value, as an estimate
                                 * of how late the mfence completion came
                                 * after the slave noticed the cache line
                                 * change.
                                 */
                                tdelta = tsc->slave_tsc -
                                    (mtsc_after - (min_write_time/4));
                        else
                                tdelta = tsc->slave_tsc - mtsc_after;
                        tsc_sync_tick_delta[slave] =
                            tsc_sync_tick_delta[source] - tdelta;
                }

                tsc->master_tsc = tsc->slave_tsc = write_time = 0;
                membar_enter();
                tsc_sync_go = TSC_SYNC_STOP;
        }
        if (tdelta < 0)
                tdelta = -tdelta;
        if (tdelta > largest_tsc_delta)
                largest_tsc_delta = tdelta;
        if (min_write_time < shortest_write_time)
                shortest_write_time = min_write_time;

        /*
         * Enable delta variants of the tsc functions if the largest of all
         * chosen deltas is > the smallest of the write times.
         */
        if (largest_tsc_delta > shortest_write_time) {
                gethrtimef = tsc_gethrtime_delta;
                gethrtimeunscaledf = tsc_gethrtimeunscaled_delta;
        }
        restore_int_flag(flags);
}
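/*
 * Editorial worked example (illustrative numbers only, not taken from the
 * original source) of the skew heuristic above.  Suppose one iteration of
 * tsc_sync_master() observes
 *
 *      tsc->master_tsc = 1000, mtsc_after = 1040, tsc->slave_tsc = 1100,
 *
 * so write_time = 40.  The raw difference x = 1100 - 1040 = 60 exceeds
 * write_time/4 = 10, so the master's second read is assumed to have landed
 * 10 ticks after the slave actually saw the update:
 *
 *      tdelta = 1100 - (1040 - 10) = 70
 *
 * If the master's own tsc_sync_tick_delta[] entry is 0, the slave's entry
 * becomes 0 - 70 = -70, and tsc_gethrtime_delta() will pull the slave's TSC
 * readings back by 70 ticks to line them up with the master's.
 */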
/*
 * This is similar to the above, but it cannot actually spin on hres_lock.
 * As a result, it caches all of the variables it needs; if the variables
 * don't change, it's done.
 */
hrtime_t
dtrace_gethrtime(void)
{
        uint32_t old_hres_lock;
        hrtime_t tsc, hrt;
        ulong_t flags;

        do {
                old_hres_lock = hres_lock;

                /*
                 * Interrupts are disabled to ensure that the thread isn't
                 * migrated between the tsc_read() and adding the CPU's
                 * TSC tick delta.
                 */
                flags = clear_int_flag();
                tsc = tsc_read();
                if (gethrtimef == tsc_gethrtime_delta)
                        tsc += tsc_sync_tick_delta[CPU->cpu_id];
                restore_int_flag(flags);

                /*
                 * See the comments in tsc_gethrtime(), above.
                 */
                if (tsc >= tsc_last)
                        tsc -= tsc_last;
                else if (tsc >= tsc_last - 2*tsc_max_delta)
                        tsc = 0;

                hrt = tsc_hrtime_base;

                TSC_CONVERT_AND_ADD(tsc, hrt, nsec_scale);

                if ((old_hres_lock & ~1) == hres_lock)
                        break;

                /*
                 * If we're here, the clock lock is locked -- or it has been
                 * unlocked and locked since we looked.  This may be due to
                 * tsc_tick() running on another CPU -- or it may be because
                 * some code path has ended up in dtrace_probe() with
                 * CLOCK_LOCK held.  We'll try to determine that we're in
                 * the former case by taking another lap if the lock has
                 * changed since when we first looked at it.
                 */
                if (old_hres_lock != hres_lock)
                        continue;

                /*
                 * So the lock was and is locked.  We'll use the old data
                 * instead.
                 */
                old_hres_lock = shadow_hres_lock;

                /*
                 * Again, disable interrupts to ensure that the thread
                 * isn't migrated between the tsc_read() and adding
                 * the CPU's TSC tick delta.
                 */
                flags = clear_int_flag();
                tsc = tsc_read();
                if (gethrtimef == tsc_gethrtime_delta)
                        tsc += tsc_sync_tick_delta[CPU->cpu_id];
                restore_int_flag(flags);

                /*
                 * See the comments in tsc_gethrtime(), above.
                 */
                if (tsc >= shadow_tsc_last)
                        tsc -= shadow_tsc_last;
                else if (tsc >= shadow_tsc_last - 2 * tsc_max_delta)
                        tsc = 0;

                hrt = shadow_tsc_hrtime_base;

                TSC_CONVERT_AND_ADD(tsc, hrt, shadow_nsec_scale);
        } while ((old_hres_lock & ~1) != shadow_hres_lock);

        return (hrt);
}
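/*
 * Editorial sketch of the retry pattern used in tsc_gethrtime_delta() and
 * dtrace_gethrtime() above.  This is a generic, hypothetical illustration,
 * not the kernel's actual hres_lock implementation, and it omits the memory
 * barriers a real implementation needs.  It assumes a writer that makes a
 * sequence word odd while it updates the shared snapshot and even again when
 * finished: a reader's snapshot is consistent only if the sequence word,
 * with its low bit masked off, still matches the current value afterwards.
 */
struct seq_snapshot {
        volatile uint32_t seq;          /* odd while the writer is active */
        volatile hrtime_t base;         /* example payload */
};

static hrtime_t
seq_read_base(struct seq_snapshot *s)
{
        uint32_t old;
        hrtime_t v;

        do {
                old = s->seq;           /* remember the sequence word */
                v = s->base;            /* read the payload */
        } while ((old & ~1) != s->seq); /* retry if a writer was, or ran, in between */

        return (v);
}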