trace_clock.c

/*
 * tracing clocks
 *
 * Copyright (C) 2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Implements 3 trace clock variants, with differing scalability/precision
 * tradeoffs:
 *
 *  - local:  CPU-local trace clock
 *  - medium: scalable global clock with some jitter
 *  - global: globally monotonic, serialized clock
 *
 * Tracer plugins will choose a default from these clocks (an illustrative
 * selection sketch follows the includes below).
 */
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/ktime.h>
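
/*
 * Illustration (hypothetical, example only): one way a tracer plugin
 * could "choose a default from these clocks" is via a function pointer.
 * The struct and names below are made up; only the trace_clock_*()
 * signatures come from this file. Guarded out so the file still builds.
 */
#if 0	/* example only */
u64 notrace trace_clock_local(void);	/* forward declarations */
u64 notrace trace_clock(void);
u64 notrace trace_clock_global(void);

struct example_tracer_clock {
	const char	*name;
	u64		(*read)(void);
};

static struct example_tracer_clock example_default = {
	.name	= "local",
	.read	= trace_clock_local,	/* fastest, least coherent variant */
};
#endif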

/*
 * trace_clock_local(): the simplest and least coherent tracing clock.
 *
 * Useful for tracing that does not cross to other CPUs nor
 * does it go through idle events.
 */
u64 notrace trace_clock_local(void)
{
	/*
	 * sched_clock() is an architecture implemented, fast, scalable,
	 * lockless clock. It is not guaranteed to be coherent across
	 * CPUs, nor across CPU idle events.
	 */
	return sched_clock();
}
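
/*
 * Illustration (hypothetical, example only): a minimal sketch of how a
 * CPU-local clock is meant to be used -- timing an interval on a single
 * CPU, with preemption disabled so both reads come from the same CPU's
 * sched_clock(). The helper name is made up.
 */
#if 0	/* example only */
static u64 example_local_delta_ns(void (*fn)(void))
{
	u64 t0, t1;

	preempt_disable();	/* stay on one CPU: the clock is CPU-local */
	t0 = trace_clock_local();
	fn();
	t1 = trace_clock_local();
	preempt_enable();

	return t1 - t0;		/* only meaningful on the same CPU */
}
#endif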

/*
 * trace_clock(): 'in-between' trace clock. Not completely serialized,
 * but not completely incorrect when crossing CPUs either.
 *
 * This is based on cpu_clock(), which will allow at most ~1 jiffy of
 * jitter between CPUs. So it's a pretty scalable clock, but there
 * can be offsets in the trace data.
 */
u64 notrace trace_clock(void)
{
	return cpu_clock(raw_smp_processor_id());
}
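
/*
 * Illustration (hypothetical, example only): because trace_clock() may
 * differ by up to roughly one jiffy between CPUs, two events traced on
 * different CPUs can only be ordered reliably when their timestamps are
 * further apart than that jitter bound. TICK_NSEC (from
 * <linux/jiffies.h>) is the jiffy length in nanoseconds.
 */
#if 0	/* example only */
static bool example_ordered_across_cpus(u64 ts_a, u64 ts_b)
{
	/* true only if 'a' preceded 'b' by more than the jitter bound */
	return (s64)(ts_b - ts_a) > (s64)TICK_NSEC;
}
#endif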

/*
 * trace_clock_global(): special globally coherent trace clock
 *
 * It has higher overhead than the other trace clocks but is still
 * an order of magnitude faster than GTOD derived hardware clocks.
 *
 * Used by plugins that need globally coherent timestamps.
 */

static u64 prev_trace_clock_time;

static raw_spinlock_t trace_clock_lock ____cacheline_aligned_in_smp =
	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

u64 notrace trace_clock_global(void)
{
	unsigned long flags;
	int this_cpu;
	u64 now;

	raw_local_irq_save(flags);

	this_cpu = raw_smp_processor_id();
	now = cpu_clock(this_cpu);
	/*
	 * If in an NMI context then don't risk lockups and return the
	 * cpu_clock() time:
	 */
	if (unlikely(in_nmi()))
		goto out;

	__raw_spin_lock(&trace_clock_lock);

	/*
	 * TODO: if this happens often then maybe we should reset
	 * my_scd->clock to prev_trace_clock_time+1, to make sure
	 * we start ticking with the local clock from now on?
	 */
	if ((s64)(now - prev_trace_clock_time) < 0)
		now = prev_trace_clock_time + 1;

	prev_trace_clock_time = now;

	__raw_spin_unlock(&trace_clock_lock);

 out:
	raw_local_irq_restore(flags);

	return now;
}
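
/*
 * Illustration (hypothetical, example only): the core idiom above --
 * comparing clock values via a signed difference -- stays correct even
 * if the u64 counter wraps, as long as the two values are less than
 * 2^63 ns apart. A stand-alone sketch of the same monotonic-filter
 * step; the caller must serialize access to *prev (lock held, irqs off),
 * just as trace_clock_global() does.
 */
#if 0	/* example only */
static u64 example_monotonic_filter(u64 now, u64 *prev)
{
	/* signed difference: negative means 'now' is behind '*prev' */
	if ((s64)(now - *prev) < 0)
		now = *prev + 1;	/* never step backwards */
	*prev = now;
	return now;
}
#endif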