/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*
* Copyright 2012 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2014, 2016 by Delphix. All rights reserved.
*/
#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/clock.h>
#include <sys/cyclic.h>
#include <sys/cpuvar.h>
#include <sys/kmem.h>
#include <sys/atomic.h>
#include <sys/debug.h>
#include <sys/dtrace.h>
#include <sys/x86_archext.h>
#include <sys/archsystm.h>
#include <sys/psm_defs.h>
#include <sys/lockstat.h>
#include <sys/smp_impldefs.h>
/*
* Using the Pentium's TSC register for gethrtime()
* ------------------------------------------------
*
* The Pentium family, like many chip architectures, has a high-resolution
* timestamp counter ("TSC") which increments once per CPU cycle. The contents
* of the timestamp counter are read with the RDTSC instruction.
*
* As with its UltraSPARC equivalent (the %tick register), TSC's cycle count
* must be translated into nanoseconds in order to implement gethrtime().
* We avoid inducing floating point operations in this conversion by
* implementing the same nsec_scale algorithm as that found in the sun4u
* platform code. The sun4u NATIVE_TIME_TO_NSEC_SCALE block comment contains
* a detailed description of the algorithm; the comment is not reproduced
* here. This implementation differs only in its value for NSEC_SHIFT:
* we implement an NSEC_SHIFT of 5 (instead of sun4u's 4) to allow for
* 60 MHz Pentiums.
*
* While TSC and %tick are both cycle counting registers, TSC's functionality
* falls short in several critical ways:
*
* (a) TSCs on different CPUs are not guaranteed to be in sync. While in
* practice they often _are_ in sync, this isn't guaranteed by the
* architecture.
*
* (b) The TSC cannot be reliably set to an arbitrary value. The architecture
* only supports writing the low 32-bits of TSC, making it impractical
* to rewrite.
*
* (c) The architecture doesn't have the capacity to interrupt based on
* arbitrary values of TSC; there is no TICK_CMPR equivalent.
*
* Together, (a) and (b) imply that software must track the skew between
* TSCs and account for it (it is assumed that while there may exist skew,
* there does not exist drift). To determine the skew between CPUs, we
* have newly onlined CPUs call tsc_sync_slave(), while the CPU performing
* the online operation calls tsc_sync_master().
*
* In the absence of time-of-day clock adjustments, gethrtime() must stay in
* sync with gettimeofday(). This is problematic; given (c), the software
* cannot drive its time-of-day source from TSC, and yet they must somehow be
* kept in sync. We implement this by having a routine, tsc_tick(), which
* is called once per second from the interrupt which drives time-of-day.
*
* Note that the hrtime base for gethrtime, tsc_hrtime_base, is modified
* atomically with nsec_scale under CLOCK_LOCK. This assures that time
* monotonically increases.
*/
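/*
 * Editorial sketch (not part of the original source): with NSEC_SHIFT == 5,
 * nsec_scale is chosen as
 *
 *	nsec_scale = (NANOSEC << (32 - NSEC_SHIFT)) / cpu_freq_hz
 *
 * so that nanoseconds can be recovered from a tick count using only
 * 32x32->64-bit multiplies and shifts, with no division on the hot path.
 * The hypothetical helper below is a userland-style rendering of the
 * TSC_CONVERT_AND_ADD() macro used by the real code:
 *
 *	static uint64_t
 *	example_tsc_to_ns(uint64_t tsc, uint_t scale)
 *	{
 *		uint32_t lo = (uint32_t)tsc;
 *		uint32_t hi = (uint32_t)(tsc >> 32);
 *
 *		return ((((uint64_t)hi * scale) << NSEC_SHIFT) +
 *		    (((uint64_t)lo * scale) >> (32 - NSEC_SHIFT)));
 *	}
 */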
/*
* These two variables used to be grouped together inside of a structure that
* lived on a single cache line. A regression (bug ID 4623398) caused the
* compiler to emit code that "optimized" away the while-loops below. The
* result was that no synchronization between the onlining and onlined CPUs
* took place.
*/
static volatile int tsc_ready;
static volatile int tsc_sync_go;
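
/*
 * Editorial note: the volatile qualifier forces each pass of a rendezvous
 * loop such as
 *
 *	while (tsc_sync_go != TSC_SYNC_GO)
 *		SMT_PAUSE();
 *
 * to reload tsc_sync_go from memory; without it the compiler was free to
 * hoist the load and collapse the loop, which is exactly the failure mode
 * of 4623398.
 */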
/*
 * Used as indices into the tsc_sync_snaps[] array.
 */
#define	TSC_MASTER		0
#define	TSC_SLAVE		1

/*
 * Used in the tsc_sync_master()/tsc_sync_slave() rendezvous.
 */
#define	TSC_SYNC_STOP		1
#define	TSC_SYNC_GO		2
#define	TSC_SYNC_DONE		3
#define	SYNC_ITERATIONS		10

#define	TSC_CONVERT_AND_ADD(tsc, hrt, scale) {			\
	unsigned int *_l = (unsigned int *)&(tsc);		\
	(hrt) += mul32(_l[1], scale) << NSEC_SHIFT;		\
	(hrt) += mul32(_l[0], scale) >> (32 - NSEC_SHIFT);	\
}

#define	TSC_CONVERT(tsc, hrt, scale) {				\
	unsigned int *_l = (unsigned int *)&(tsc);		\
	(hrt) = mul32(_l[1], scale) << NSEC_SHIFT;		\
	(hrt) += mul32(_l[0], scale) >> (32 - NSEC_SHIFT);	\
}

int tsc_master_slave_sync_needed = 1;

typedef struct tsc_sync {
	volatile hrtime_t master_tsc, slave_tsc;
} tsc_sync_t;
static tsc_sync_t *tscp;

static hrtime_t	largest_tsc_delta = 0;
static ulong_t	shortest_write_time = ~0UL;

static hrtime_t	tsc_last = 0;
static hrtime_t	tsc_last_jumped = 0;
static hrtime_t	tsc_hrtime_base = 0;
static int	tsc_max_delta;
static int	tsc_jumped = 0;
static hrtime_t	tsc_sync_tick_delta[NCPU];
static hrtime_t	shadow_tsc_hrtime_base;
static hrtime_t	shadow_tsc_last;
static uint_t	shadow_nsec_scale;
static uint32_t	shadow_hres_lock;

extern uint_t	nsec_scale;
static uint_t	nsec_unscale;
int		tsc_gethrtime_enable = 1;

/*
 * The cap of 1 second was chosen since it is the frequency at which the
 * tsc_tick() function runs, which means that when gethrtime() is called
 * it should never be more than 1 second since tsc_last was updated.
 */
static hrtime_t	tsc_resume_cap;
static hrtime_t	tsc_resume_cap_ns = NANOSEC;	/* 1s */

int get_tsc_ready();

static inline
hrtime_t tsc_protect(hrtime_t a) {
	if (a > tsc_resume_cap) {
		return (tsc_resume_cap);
	}
	return (a);
}
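
/*
 * Editorial example with hypothetical numbers: on a 3 GHz CPU,
 * tsc_resume_cap works out to roughly 3 billion ticks, i.e. about one
 * second's worth. A suspend/resume (or wildly non-monotonic) delta of,
 * say, 10^12 ticks would be clamped to that cap by tsc_protect(),
 * bounding how far a single gethrtime() call can leap forward.
 */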
hrtime_t
tsc_gethrtime(void)
{
	uint32_t old_hres_lock;
	hrtime_t tsc, hrt;

	do {
		old_hres_lock = hres_lock;

		if ((tsc = tsc_read()) >= tsc_last) {
			/*
			 * It would seem to be obvious that this is true
			 * (that is, the past is less than the present),
			 * but it isn't true in the presence of suspend/resume
			 * cycles. If we manage to call gethrtime()
			 * after a resume, but before the first call to
			 * tsc_tick(), we will see the jump. In this case,
			 * we will simply use the value in TSC as the delta.
			 */
			tsc -= tsc_last;
		} else if (tsc >= tsc_last - 2 * tsc_max_delta) {
			/*
			 * There is a chance that tsc_tick() has just run on
			 * another CPU, and we have drifted just enough so that
			 * we appear behind tsc_last. In this case, force the
			 * delta to be zero.
			 */
			tsc = 0;
		} else {
			/*
			 * If we reach this else clause we assume that we have
			 * gone through a suspend/resume cycle. Use the
			 * current tsc value as the delta.
			 *
			 * In rare cases we can reach this else clause due to
			 * a lack of monotonicity in the TSC value. In such
			 * cases using the current TSC value as the delta would
			 * cause us to return a value ~2x of what it should
			 * be. To protect against these cases we cap the
			 * suspend/resume delta at tsc_resume_cap.
			 */
			tsc = tsc_protect(tsc);
		}

		hrt = tsc_hrtime_base;

		TSC_CONVERT_AND_ADD(tsc, hrt, nsec_scale);
	} while ((old_hres_lock & ~1) != hres_lock);

	return (hrt);
}
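
/*
 * Editorial note on the retry test above (not from the original source):
 * the low bit of hres_lock is the lock bit proper, so
 * (old_hres_lock & ~1) equals hres_lock only if the snapshot was taken
 * with the lock clear and no writer has since bumped the lock. Any
 * intervening tsc_tick() forces another lap, giving a seqlock-style
 * lock-free read.
 */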
hrtime_t
tsc_gethrtime_delta(void)
{
	uint32_t old_hres_lock;
	hrtime_t tsc, hrt;
	ulong_t flags;

	do {
		old_hres_lock = hres_lock;

		/*
		 * We need to disable interrupts here to assure that we
		 * don't migrate between the call to tsc_read() and
		 * adding the CPU's TSC tick delta. Note that disabling
		 * and reenabling preemption is forbidden here because
		 * we may be in the middle of a fast trap. In the amd64
		 * kernel we cannot tolerate preemption during a fast
		 * trap. See _update_sregs().
		 */
		flags = clear_int_flag();
		tsc = tsc_read() + tsc_sync_tick_delta[CPU->cpu_id];
		restore_int_flag(flags);

		/* See comments in tsc_gethrtime() above */
		if (tsc >= tsc_last) {
			tsc -= tsc_last;
		} else if (tsc >= tsc_last - 2 * tsc_max_delta) {
			tsc = 0;
		} else {
			tsc = tsc_protect(tsc);
		}

		hrt = tsc_hrtime_base;

		TSC_CONVERT_AND_ADD(tsc, hrt, nsec_scale);
	} while ((old_hres_lock & ~1) != hres_lock);

	return (hrt);
}
hrtime_t
tsc_gethrtime_tick_delta(void)
{
	hrtime_t hrt;
	ulong_t flags;

	flags = clear_int_flag();
	hrt = tsc_sync_tick_delta[CPU->cpu_id];
	restore_int_flag(flags);

	return (hrt);
}
/*
* This is similar to the above, but it cannot actually spin on hres_lock.
* As a result, it caches all of the variables it needs; if the variables
* don't change, it's done.
*/
hrtime_t
dtrace_gethrtime(void)
{
	uint32_t old_hres_lock;
	hrtime_t tsc, hrt;
	ulong_t flags;

	do {
		old_hres_lock = hres_lock;

		/*
		 * Interrupts are disabled to ensure that the thread isn't
		 * migrated between the tsc_read() and adding the CPU's
		 * TSC tick delta.
		 */
		flags = clear_int_flag();

		tsc = tsc_read();
		if (gethrtimef == tsc_gethrtime_delta)
			tsc += tsc_sync_tick_delta[CPU->cpu_id];

		restore_int_flag(flags);

		/*
		 * See the comments in tsc_gethrtime(), above.
		 */
		if (tsc >= tsc_last)
			tsc -= tsc_last;
		else if (tsc >= tsc_last - 2 * tsc_max_delta)
			tsc = 0;
		else
			tsc = tsc_protect(tsc);

		hrt = tsc_hrtime_base;
		TSC_CONVERT_AND_ADD(tsc, hrt, nsec_scale);

		if ((old_hres_lock & ~1) == hres_lock)
			break;

		/*
		 * If we're here, the clock lock is locked -- or it has been
		 * unlocked and locked since we looked. This may be due to
		 * tsc_tick() running on another CPU -- or it may be because
		 * some code path has ended up in dtrace_probe() with
		 * CLOCK_LOCK held. We'll try to determine that we're in
		 * the former case by taking another lap if the lock has
		 * changed since when we first looked at it.
		 */
		if (old_hres_lock != hres_lock)
			continue;

		/*
		 * So the lock was and is locked. We'll use the old data
		 * instead.
		 */
		old_hres_lock = shadow_hres_lock;

		/*
		 * Again, disable interrupts to ensure that the thread
		 * isn't migrated between the tsc_read() and adding
		 * the CPU's TSC tick delta.
		 */
		flags = clear_int_flag();

		tsc = tsc_read();
		if (gethrtimef == tsc_gethrtime_delta)
			tsc += tsc_sync_tick_delta[CPU->cpu_id];

		restore_int_flag(flags);

		/*
		 * See the comments in tsc_gethrtime(), above.
		 */
		if (tsc >= shadow_tsc_last)
			tsc -= shadow_tsc_last;
		else if (tsc >= shadow_tsc_last - 2 * tsc_max_delta)
			tsc = 0;
		else
			tsc = tsc_protect(tsc);

		hrt = shadow_tsc_hrtime_base;
		TSC_CONVERT_AND_ADD(tsc, hrt, shadow_nsec_scale);
	} while ((old_hres_lock & ~1) != shadow_hres_lock);

	return (hrt);
}
hrtime_t
tsc_gethrtimeunscaled(void)
{
	uint32_t old_hres_lock;
	hrtime_t tsc;

	do {
		old_hres_lock = hres_lock;
		/* See tsc_tick(). */
		tsc = tsc_read() + tsc_last_jumped;
	} while ((old_hres_lock & ~1) != hres_lock);

	return (tsc);
}
/*
 * Convert a nanosecond based timestamp to tsc
 */
uint64_t
tsc_unscalehrtime(hrtime_t nsec)
{
	hrtime_t tsc;

	if (tsc_gethrtime_enable) {
		TSC_CONVERT(nsec, tsc, nsec_unscale);
		return (tsc);
	}
	return ((uint64_t)nsec);
}
/* Convert a tsc timestamp to nanoseconds */
void
tsc_scalehrtime(hrtime_t *tsc)
{
	hrtime_t hrt;
	hrtime_t mtsc;

	if (tsc == NULL)
		return;
	mtsc = *tsc;
	TSC_CONVERT(mtsc, hrt, nsec_scale);
	*tsc = hrt;
}
hrtime_t
tsc_gethrtimeunscaled_delta(void)
{
	hrtime_t hrt;
	ulong_t flags;

	/*
	 * Similarly to tsc_gethrtime_delta, we need to disable preemption
	 * to prevent migration between the call to tsc_gethrtimeunscaled
	 * and adding the CPU's hrtime delta. Note that disabling and
	 * reenabling preemption is forbidden here because we may be in the
	 * middle of a fast trap. In the amd64 kernel we cannot tolerate
	 * preemption during a fast trap. See _update_sregs().
	 */
	flags = clear_int_flag();
	hrt = tsc_gethrtimeunscaled() + tsc_sync_tick_delta[CPU->cpu_id];
	restore_int_flag(flags);

	return (hrt);
}
/*
 * Called by the master in the TSC sync operation (usually the boot CPU).
 * If the slave is discovered to have a skew, gethrtimef will be changed to
 * point to tsc_gethrtime_delta(). Calculating skews is precise only when
 * the master and slave TSCs are read simultaneously; however, there is no
 * algorithm that can read both CPUs in perfect simultaneity. The proposed
 * algorithm is an approximate method based on the behaviour of cache
 * management. The slave CPU continuously reads TSC and then reads a global
 * variable which the master CPU updates. The moment the master's update
 * becomes visible to the slave (its propagation forced by an mfence
 * operation), we use the TSC reading taken on the slave. A corresponding
 * TSC read is taken on the master as soon as possible after the mfence
 * completes. But the delay between causing the slave to notice the invalid
 * cache line and the completion of the mfence is not repeatable. This error
 * is heuristically assumed to be 1/4th of the total write time, as measured
 * by the two TSC reads on the master sandwiching the mfence. Furthermore,
 * due to the nature of bus arbitration, contention on the memory bus, etc.,
 * the time taken for the write to become globally visible can vary a lot.
 * So instead of taking a single reading, a set of readings is taken and the
 * one with the least write time is chosen to calculate the final skew.
 *
 * TSC sync is disabled in the context of virtualization because the CPUs
 * assigned to the guest are virtual CPUs, which means the real CPUs on
 * which the guest runs keep changing during the lifetime of the guest OS.
 * So we would end up calculating TSC skews for a set of CPUs during boot
 * whereas the guest might migrate to a different set of physical CPUs at a
 * later point in time.
 */
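/*
 * Editorial worked example with made-up numbers: suppose the master reads
 * master_tsc = 1000 before the mfence and mtsc_after = 1080 after it
 * (write_time = 80), and the slave's saved reading is slave_tsc = 1100.
 * Then x = 100 exceeds write_time/4 = 20, so the heuristic applies:
 * tdelta = 1100 - (1000 - 20) = 120, and the slave's entry in
 * tsc_sync_tick_delta[] ends up 120 ticks below the master's.
 */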
void
tsc_sync_master(processorid_t slave)
{
	ulong_t flags, source, min_write_time = ~0UL;
	hrtime_t write_time, x, mtsc_after, tdelta;
	tsc_sync_t *tsc = tscp;
	int cnt;
	int hwtype;

	hwtype = get_hwenv();
	if (!tsc_master_slave_sync_needed || (hwtype & HW_VIRTUAL) != 0)
		return;

	flags = clear_int_flag();
	source = CPU->cpu_id;

	for (cnt = 0; cnt < SYNC_ITERATIONS; cnt++) {
		while (tsc_sync_go != TSC_SYNC_GO)
			SMT_PAUSE();

		tsc->master_tsc = tsc_read();
		membar_enter();
		mtsc_after = tsc_read();
		while (tsc_sync_go != TSC_SYNC_DONE)
			SMT_PAUSE();
		write_time = mtsc_after - tsc->master_tsc;
		if (write_time <= min_write_time) {
			min_write_time = write_time;
			/*
			 * Apply heuristic adjustment only if the calculated
			 * delta is > 1/4th of the write time.
			 */
			x = tsc->slave_tsc - tsc->master_tsc;
			if (x < 0)
				x = -x;
			if (x > (min_write_time/4))
				/*
				 * Subtract 1/4th of the measured write time
				 * from the master's TSC value, as an estimate
				 * of how late the mfence completion came
				 * after the slave noticed the cache line
				 * change.
				 */
				tdelta = tsc->slave_tsc -
				    (tsc->master_tsc - (min_write_time/4));
			else
				tdelta = tsc->slave_tsc - tsc->master_tsc;
			tsc_sync_tick_delta[slave] =
			    tsc_sync_tick_delta[source] - tdelta;
		}

		tsc->master_tsc = tsc->slave_tsc = write_time = 0;
		membar_enter();
		tsc_sync_go = TSC_SYNC_STOP;
	}
	if (tdelta < 0)
		tdelta = -tdelta;
	if (tdelta > largest_tsc_delta)
		largest_tsc_delta = tdelta;
	if (min_write_time < shortest_write_time)
		shortest_write_time = min_write_time;

	/*
	 * Enable delta variants of tsc functions if the largest of all chosen
	 * deltas is > smallest of the write time.
	 */
	if (largest_tsc_delta > shortest_write_time) {
		gethrtimef = tsc_gethrtime_delta;
		gethrtimeunscaledf = tsc_gethrtimeunscaled_delta;
	}
	restore_int_flag(flags);
}
/*
* Called by a CPU which has just been onlined. It is expected that the CPU
* performing the online operation will call tsc_sync_master().
*
 * TSC sync is disabled in the context of virtualization. See the comment
 * above tsc_sync_master().
*/
void
tsc_sync_slave(void)
{
	ulong_t flags;
	hrtime_t s1;
	tsc_sync_t *tsc = tscp;
	int cnt;
	int hwtype;

	hwtype = get_hwenv();
	if (!tsc_master_slave_sync_needed || (hwtype & HW_VIRTUAL) != 0)
		return;

	flags = clear_int_flag();

	for (cnt = 0; cnt < SYNC_ITERATIONS; cnt++) {
		/* Re-fill the cache line */
		s1 = tsc->master_tsc;
		membar_enter();
		tsc_sync_go = TSC_SYNC_GO;
		do {
			/*
			 * Do not put an SMT_PAUSE here. For instance,
			 * if the master and slave are really the same
			 * hyper-threaded CPU, then you want the master
			 * to yield to the slave as quickly as possible here,
			 * but not the other way.
			 */
			s1 = tsc_read();
		} while (tsc->master_tsc == 0);
		tsc->slave_tsc = s1;
		membar_enter();
		tsc_sync_go = TSC_SYNC_DONE;

		while (tsc_sync_go != TSC_SYNC_STOP)
			SMT_PAUSE();
	}

	restore_int_flag(flags);
}
/*
* Called once per second on a CPU from the cyclic subsystem's
* CY_HIGH_LEVEL interrupt. (No longer just cpu0-only)
*/
void
tsc_tick(void)
{
	hrtime_t now, delta;
	ushort_t spl;

	/*
	 * Before we set the new variables, we set the shadow values. This
	 * allows for lock free operation in dtrace_gethrtime().
	 */
	lock_set_spl((lock_t *)&shadow_hres_lock + HRES_LOCK_OFFSET,
	    ipltospl(CBE_HIGH_PIL), &spl);
	shadow_tsc_hrtime_base = tsc_hrtime_base;
	shadow_tsc_last = tsc_last;
	shadow_nsec_scale = nsec_scale;
	shadow_hres_lock++;
	splx(spl);

	CLOCK_LOCK(&spl);
	now = tsc_read();
	if (gethrtimef == tsc_gethrtime_delta)
		now += tsc_sync_tick_delta[CPU->cpu_id];

	if (now < tsc_last) {
		/*
		 * The TSC has just jumped into the past. We assume that
		 * this is due to a suspend/resume cycle, and we're going
		 * to use the _current_ value of TSC as the delta. This
		 * will keep tsc_hrtime_base correct. We're also going to
		 * assume that the rate of tsc does not change after a
		 * suspend/resume (i.e., nsec_scale remains the same).
		 */
		delta = now;
		delta = tsc_protect(delta);
		tsc_last_jumped += tsc_last;
		tsc_jumped = 1;
	} else {
		/*
		 * Determine the number of TSC ticks since the last clock
		 * tick, and add that to the hrtime base.
		 */
		delta = now - tsc_last;
	}

	TSC_CONVERT_AND_ADD(delta, tsc_hrtime_base, nsec_scale);
	tsc_last = now;
	CLOCK_UNLOCK(spl);
}
void
tsc_hrtimeinit(uint64_t cpu_freq_hz)
{
	extern int gethrtime_hires;
	longlong_t tsc;
	ulong_t flags;

	/*
	 * cpu_freq_hz is the measured cpu frequency in hertz
	 */

	/*
	 * We can't accommodate CPUs slower than 31.25 MHz.
	 */
	ASSERT(cpu_freq_hz > NANOSEC / (1 << NSEC_SHIFT));
	nsec_scale =
	    (uint_t)(((uint64_t)NANOSEC << (32 - NSEC_SHIFT)) / cpu_freq_hz);
	nsec_unscale =
	    (uint_t)(((uint64_t)cpu_freq_hz << (32 - NSEC_SHIFT)) / NANOSEC);

	flags = clear_int_flag();
	tsc = tsc_read();
	(void) tsc_gethrtime();
	tsc_max_delta = tsc_read() - tsc;
	restore_int_flag(flags);
	gethrtimef = tsc_gethrtime;
	gethrtimeunscaledf = tsc_gethrtimeunscaled;
	scalehrtimef = tsc_scalehrtime;
	unscalehrtimef = tsc_unscalehrtime;
	hrtime_tick = tsc_tick;
	gethrtime_hires = 1;

	/*
	 * Allocate memory for the structure used in the tsc sync logic.
	 * This structure should be aligned on a multiple of cache line size.
	 */
	tscp = kmem_zalloc(PAGESIZE, KM_SLEEP);

	/*
	 * Convert the TSC resume cap ns value into its unscaled TSC value.
	 * See tsc_gethrtime().
	 */
	if (tsc_resume_cap == 0)
		TSC_CONVERT(tsc_resume_cap_ns, tsc_resume_cap, nsec_unscale);
}
int
get_tsc_ready()
{
return (tsc_ready);
}
/*
 * Adjust all the deltas by adding the passed value to the array.
 * Then use the "delta" versions of the gethrtime functions.
 * Note that 'tdelta' _could_ be a negative number, which should
 * reduce the values in the array (used, for example, if the Solaris
 * instance was moved by a virtual machine manager to a machine with a
 * higher value of tsc).
 */
void
tsc_adjust_delta(hrtime_t tdelta)
{
	int i;

	for (i = 0; i < NCPU; i++) {
		tsc_sync_tick_delta[i] += tdelta;
	}

	gethrtimef = tsc_gethrtime_delta;
	gethrtimeunscaledf = tsc_gethrtimeunscaled_delta;
}
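
/*
 * Editorial usage example (hypothetical numbers): if a Solaris guest is
 * migrated to a host whose TSC reads 5,000,000 ticks lower, calling
 * tsc_adjust_delta(5000000) raises every per-CPU delta so that
 * gethrtime() remains monotonic across the move; migration to a host
 * with a higher TSC value calls for a negative tdelta instead.
 */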
/*
* Functions to manage TSC and high-res time on suspend and resume.
*/
/*
* declarations needed for time adjustment
*/
extern void	rtcsync(void);
extern tod_ops_t *tod_ops;
/* There must be a better way than exposing nsec_scale! */
extern uint_t	nsec_scale;
static uint64_t tsc_saved_tsc = 0; /* 1 in 2^64 chance this'll screw up! */
static timestruc_t tsc_saved_ts;
static int	tsc_needs_resume = 0;	/* We only want to do this once. */
int		tsc_delta_onsuspend = 0;
int		tsc_adjust_seconds = 1;
int		tsc_suspend_count = 0;
int		tsc_resume_in_cyclic = 0;
/*
* Let timestamp.c know that we are suspending. It needs to take
* snapshots of the current time, and do any pre-suspend work.
*/
void
tsc_suspend(void)
{
	/*
	 * What we need to do here is to get the time we suspended, so that
	 * we know how much we should add on resume.
	 * This routine is called by each CPU, so we need to handle reentry.
	 */
	if (tsc_gethrtime_enable) {
		/*
		 * We put the tsc_read() inside the lock as it
		 * has no locking constraints, and it puts the
		 * acquired value closer to the time stamp (in
		 * case we delay getting the lock).
		 */
		mutex_enter(&tod_lock);
		tsc_saved_tsc = tsc_read();
		tsc_saved_ts = TODOP_GET(tod_ops);
		mutex_exit(&tod_lock);
		/* We only want to do this once. */
		if (tsc_needs_resume == 0) {
			if (tsc_delta_onsuspend) {
				tsc_adjust_delta(tsc_saved_tsc);
			} else {
				tsc_adjust_delta(nsec_scale);
			}
			tsc_suspend_count++;
		}
	}

	invalidate_cache();
	tsc_needs_resume = 1;
}
/*
* Restore all timestamp state based on the snapshots taken at
* suspend time.
*/
void
tsc_resume(void)
{
	/*
	 * We only need to (and want to) do this once. So let the first
	 * caller handle this (we are locked by the cpu lock), as it
	 * is preferential that we get the earliest sync.
	 */
	if (tsc_needs_resume) {
		/*
		 * If using the TSC, adjust the delta based on how long
		 * we were sleeping (or away). We also adjust for
		 * migration and a grown TSC.
		 */
		if (tsc_saved_tsc != 0) {
			timestruc_t	ts;
			hrtime_t	now, sleep_tsc = 0;
			int		sleep_sec;
			extern void	tsc_tick(void);
			extern uint64_t cpu_freq_hz;

			/* tsc_read() MUST be before TODOP_GET() */
			mutex_enter(&tod_lock);
			now = tsc_read();
			ts = TODOP_GET(tod_ops);
			mutex_exit(&tod_lock);

			/* Compute seconds of sleep time */
			sleep_sec = ts.tv_sec - tsc_saved_ts.tv_sec;

			/*
			 * If the saved sec is greater than or equal to
			 * the current ts, then there is likely a
			 * problem with the clock. Assume at least
			 * one second has passed, so that time goes forward.
			 */
			if (sleep_sec <= 0) {
				sleep_sec = 1;
			}

			/* How many TSC ticks should have occurred while sleeping */
			if (tsc_adjust_seconds)
				sleep_tsc = sleep_sec * cpu_freq_hz;

			/*
			 * We also want to subtract from the "sleep_tsc"
			 * the current value of tsc_read(), so that our
			 * adjustment accounts for the amount of time we
			 * have been resumed _or_ an adjustment based on
			 * the fact that we didn't actually power off the
			 * CPU (migration is another issue, but _should_
			 * also comply with this calculation). If the CPU
			 * never powered off, then:
			 *	'now == sleep_tsc + saved_tsc'
			 * and the delta will effectively be "0".
			 */
			sleep_tsc -= now;
			if (tsc_delta_onsuspend) {
				tsc_adjust_delta(sleep_tsc);
			} else {
				tsc_adjust_delta(tsc_saved_tsc + sleep_tsc);
			}
			tsc_saved_tsc = 0;

			tsc_tick();
		}
		tsc_needs_resume = 0;
	}
}