@@ -10,19 +10,55 @@
* Ingo Molnar <mingo@redhat.com>
* Guillaume Chazarain <guichaz@gmail.com>
*
- * Create a semi stable clock from a mixture of other events, including:
- * - gtod
+ *
+ * What:
+ *
+ * cpu_clock(i) provides a fast (execution time) high resolution
+ * clock with bounded drift between CPUs. The value of cpu_clock(i)
+ * is monotonic for constant i. The timestamp returned is in nanoseconds.
+ *
+ * ######################### BIG FAT WARNING ##########################
+ * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
+ * # go backwards !! #
+ * ####################################################################
+ *
+ * There is no strict promise about the base, although it tends to start
+ * at 0 on boot (but people really shouldn't rely on that).
+ *
+ * cpu_clock(i) -- can be used from any context, including NMI.
+ * sched_clock_cpu(i) -- must be used with local IRQs disabled (implied by NMI)
+ * local_clock() -- is cpu_clock() on the current cpu.
+ *
+ * How:
+ *
+ * When !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK, the implementation simply uses
+ * sched_clock() itself, which is then assumed to provide these properties
+ * (mostly this means the architecture provides a globally synchronized
+ * highres time source).
+ *
+ * Otherwise it tries to create a semi stable clock from a mixture of other
+ * clocks, including:
+ *
+ * - GTOD (clock monotonic)
* - sched_clock()
* - explicit idle events
*
- * We use gtod as base and the unstable clock deltas. The deltas are filtered,
- * making it monotonic and keeping it within an expected window.
+ * We use GTOD as base and use sched_clock() deltas to improve resolution. The
+ * deltas are filtered to provide monotonicity and to keep it within an
+ * expected window.
*
* Furthermore, explicit sleep and wakeup hooks allow us to account for time
* that is otherwise invisible (TSC gets stopped).
*
- * The clock: sched_clock_cpu() is monotonic per cpu, and should be somewhat
- * consistent between cpus (never more than 2 jiffies difference).
+ *
+ * Notes:
+ *
+ * The !IRQ-safety of sched_clock() and sched_clock_cpu() comes from things
+ * like cpufreq interrupts that can change the base clock (TSC) multiplier
+ * and cause funny jumps in time -- although the filtering provided by
+ * sched_clock_cpu() should mitigate serious artifacts, we cannot rely on it
+ * in general since for !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK we fully rely on
+ * sched_clock().
*/
#include <linux/spinlock.h>
#include <linux/hardirq.h>
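
To make the BIG FAT WARNING above concrete, here is a minimal sketch of safe
and unsafe comparisons. It is illustrative only, not part of this patch, and
the helper name demo_cpu_clock() is made up:

	#include <linux/kernel.h>
	#include <linux/sched.h>	/* cpu_clock() */

	static void demo_cpu_clock(int i, int j)
	{
		u64 t0, t1;

		t0 = cpu_clock(i);
		/* ... some work ... */
		t1 = cpu_clock(i);

		/* Safe: constant i, cpu_clock(i) is monotonic, so t1 >= t0. */
		pr_info("delta: %llu ns\n", (unsigned long long)(t1 - t0));

		/*
		 * NOT safe: cpu_clock(j) - t0 for j != i may be negative;
		 * only the drift between CPUs is bounded, not the ordering.
		 */
	}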
@@ -170,6 +206,11 @@ again:
return val;
}

+/*
+ * Similar to cpu_clock(), but requires local IRQs to be disabled.
+ *
+ * See cpu_clock().
+ */
u64 sched_clock_cpu(int cpu)
{
struct sched_clock_data *scd;
@@ -237,9 +278,19 @@ void sched_clock_idle_wakeup_event(u64 delta_ns)
}
EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);

-unsigned long long cpu_clock(int cpu)
+/*
+ * As outlined at the top, provides a fast, high resolution, nanosecond
+ * time source that is monotonic per cpu argument and has bounded drift
+ * between cpus.
+ *
+ * ######################### BIG FAT WARNING ##########################
+ * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
+ * # go backwards !! #
+ * ####################################################################
+ */
+u64 cpu_clock(int cpu)
{
- unsigned long long clock;
+ u64 clock;
unsigned long flags;

local_irq_save(flags);
@@ -249,6 +300,25 @@ unsigned long long cpu_clock(int cpu)
return clock;
}

+/*
+ * Similar to cpu_clock() for the current cpu. Time will only be observed
+ * to be monotonic if care is taken to only compare timestamps taken on the
+ * same CPU.
+ *
+ * See cpu_clock().
+ */
+u64 local_clock(void)
+{
+ u64 clock;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ clock = sched_clock_cpu(smp_processor_id());
+ local_irq_restore(flags);
+
+ return clock;
+}
+
#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

void sched_clock_init(void)
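
A hedged usage sketch for the new local_clock(): since the returned time is
only monotonic when compared on the same CPU, a caller that wants a duration
should stay on one CPU, e.g. by disabling preemption with get_cpu()/put_cpu().
The measure_ns() helper below is hypothetical, not part of the patch:

	#include <linux/sched.h>	/* local_clock() */
	#include <linux/smp.h>		/* get_cpu()/put_cpu() */

	static u64 measure_ns(void (*fn)(void))
	{
		u64 t0, t1;

		get_cpu();		/* disable preemption: stay on one CPU */
		t0 = local_clock();
		fn();
		t1 = local_clock();
		put_cpu();

		return t1 - t0;		/* same CPU, so non-negative */
	}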
@@ -264,12 +334,17 @@ u64 sched_clock_cpu(int cpu)
return sched_clock();
}

-
-unsigned long long cpu_clock(int cpu)
+u64 cpu_clock(int cpu)
{
return sched_clock_cpu(cpu);
}

+u64 local_clock(void)
+{
+ return sched_clock_cpu(0);
+}
+
#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

EXPORT_SYMBOL_GPL(cpu_clock);
+EXPORT_SYMBOL_GPL(local_clock);
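
With both symbols exported GPL, modules can use the interface as well. A
minimal, purely illustrative module (the clockdemo name is made up and not
part of this patch):

	#include <linux/module.h>
	#include <linux/sched.h>	/* local_clock() */

	static int __init clockdemo_init(void)
	{
		/* Base is unspecified, though it tends to be 0 at boot. */
		pr_info("local_clock() = %llu ns\n",
			(unsigned long long)local_clock());
		return 0;
	}

	static void __exit clockdemo_exit(void)
	{
	}

	module_init(clockdemo_init);
	module_exit(clockdemo_exit);
	MODULE_LICENSE("GPL");	/* required: the export is GPL-only */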