trace_clock.c

/*
 * tracing clocks
 *
 *  Copyright (C) 2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Implements 3 trace clock variants, with differing scalability/precision
 * tradeoffs:
 *
 *  - local: CPU-local trace clock
 *  - medium: scalable global clock with some jitter
 *  - global: globally monotonic, serialized clock
 *
 * Tracer plugins will choose a default from these clocks.
 */
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/ktime.h>
#include <linux/trace_clock.h>

/*
 * trace_clock_local(): the simplest and least coherent tracing clock.
 *
 * Useful for tracing that does not cross to other CPUs nor
 * does it go through idle events.
 */
u64 notrace trace_clock_local(void)
{
	unsigned long flags;
	u64 clock;

	/*
	 * sched_clock() is an architecture implemented, fast, scalable,
	 * lockless clock. It is not guaranteed to be coherent across
	 * CPUs, nor across CPU idle events.
	 */
	raw_local_irq_save(flags);
	clock = sched_clock();
	raw_local_irq_restore(flags);

	return clock;
}
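
/*
 * Illustrative sketch, not part of the original file: one hypothetical way
 * a caller could time a CPU-local code section with trace_clock_local().
 * The delta is only meaningful if both reads happen on the same CPU with
 * no idle event in between, per the comment above. The function name is
 * an invention for illustration.
 */
static u64 __maybe_unused example_local_delta(void (*fn)(void))
{
	u64 t0, t1;

	t0 = trace_clock_local();
	fn();
	t1 = trace_clock_local();

	/* sched_clock() units; nanoseconds on most architectures. */
	return t1 - t0;
}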

/*
 * trace_clock(): 'in-between' trace clock. Not completely serialized,
 * but not completely incorrect when crossing CPUs either.
 *
 * This is based on cpu_clock(), which will allow at most ~1 jiffy of
 * jitter between CPUs. So it's a pretty scalable clock, but there
 * can be offsets in the trace data.
 */
u64 notrace trace_clock(void)
{
	return cpu_clock(raw_smp_processor_id());
}
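
/*
 * Illustrative sketch, not part of the original file: comparing trace_clock()
 * stamps taken on different CPUs. Since cpu_clock() allows roughly a jiffy
 * of inter-CPU jitter, a small negative delta is expected skew, not proof of
 * reordering. Using TICK_NSEC (from <linux/jiffies.h>) as the slack bound is
 * an assumption, and the helper name is an invention for illustration.
 */
static int __maybe_unused example_stamps_plausibly_ordered(u64 first, u64 second)
{
	/* Tolerate up to ~1 jiffy of cross-CPU skew, in nanoseconds. */
	return (s64)(second - first) >= -(s64)TICK_NSEC;
}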

/*
 * trace_clock_global(): special globally coherent trace clock
 *
 * It has higher overhead than the other trace clocks but is still
 * an order of magnitude faster than GTOD derived hardware clocks.
 *
 * Used by plugins that need globally coherent timestamps.
 */
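
/*
 * Shared serialization state for trace_clock_global(): the last timestamp
 * handed out, plus the spinlock that orders all updates to it. The lock is
 * aligned to its own cache line to avoid false sharing on this SMP-global
 * hot path.
 */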
static u64 prev_trace_clock_time;

static raw_spinlock_t trace_clock_lock ____cacheline_aligned_in_smp =
	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

u64 notrace trace_clock_global(void)
{
	unsigned long flags;
	int this_cpu;
	u64 now;

	raw_local_irq_save(flags);

	this_cpu = raw_smp_processor_id();
	now = cpu_clock(this_cpu);
	/*
	 * If in an NMI context then don't risk lockups and return the
	 * cpu_clock() time:
	 */
	if (unlikely(in_nmi()))
		goto out;

	__raw_spin_lock(&trace_clock_lock);

	/*
	 * TODO: if this happens often then maybe we should reset
	 * my_scd->clock to prev_trace_clock_time+1, to make sure
	 * we start ticking with the local clock from now on?
	 */
	if ((s64)(now - prev_trace_clock_time) < 0)
		now = prev_trace_clock_time + 1;

	prev_trace_clock_time = now;

	__raw_spin_unlock(&trace_clock_lock);

 out:
	raw_local_irq_restore(flags);

	return now;
}
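
/*
 * Illustrative sketch, not part of the original file: the signed-subtraction
 * idiom used in trace_clock_global() above. Casting the unsigned difference
 * to s64 reports "now is behind prev" correctly even across counter
 * wraparound, provided the two values are within 2^63 of each other. The
 * helper name is an invention for illustration.
 */
static int __maybe_unused example_clock_went_backwards(u64 prev, u64 now)
{
	return (s64)(now - prev) < 0;
}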